/linux-4.1.27/arch/blackfin/mm/
D | sram-alloc.c | 78 per_cpu(free_l1_ssram_head, cpu).next = in l1sram_init() 80 if (!per_cpu(free_l1_ssram_head, cpu).next) { in l1sram_init() 85 per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve; in l1sram_init() 86 per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve; in l1sram_init() 87 per_cpu(free_l1_ssram_head, cpu).next->pid = 0; in l1sram_init() 88 per_cpu(free_l1_ssram_head, cpu).next->next = NULL; in l1sram_init() 90 per_cpu(used_l1_ssram_head, cpu).next = NULL; in l1sram_init() 93 spin_lock_init(&per_cpu(l1sram_lock, cpu)); in l1sram_init() 106 per_cpu(free_l1_data_A_sram_head, cpu).next = in l1_data_sram_init() 108 if (!per_cpu(free_l1_data_A_sram_head, cpu).next) { in l1_data_sram_init() [all …]
|
/linux-4.1.27/arch/x86/kernel/apic/ |
D | x2apic_cluster.c | 23 return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16; in x2apic_cluster() 54 cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu); in __x2apic_send_IPI_mask() 60 dest |= per_cpu(x86_cpu_to_logical_apicid, i); in __x2apic_send_IPI_mask() 110 dest = per_cpu(x86_cpu_to_logical_apicid, i); in x2apic_cpu_mask_to_apicid_and() 123 dest |= per_cpu(x86_cpu_to_logical_apicid, i); in x2apic_cpu_mask_to_apicid_and() 136 per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR); in init_x2apic_ldr() 138 cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu)); in init_x2apic_ldr() 142 cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); in init_x2apic_ldr() 143 cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); in init_x2apic_ldr() 159 if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu), in update_clusterinfo() [all …]
|
D | ipi.c | 33 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, in default_send_IPI_mask_sequence_phys() 52 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, in default_send_IPI_mask_allbutself_phys() 145 if (per_cpu(x86_cpu_to_apicid, i) == apic_id) in convert_apicid_to_cpu()
|
D | vector.c | 170 if (per_cpu(vector_irq, new_cpu)[vector] > in __assign_irq_vector() 183 per_cpu(vector_irq, new_cpu)[vector] = irq; in __assign_irq_vector() 215 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; in clear_irq_vector() 228 if (per_cpu(vector_irq, cpu)[vector] != irq) in clear_irq_vector() 230 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; in clear_irq_vector() 287 per_cpu(vector_irq, cpu)[vector] = irq; in __setup_vector_irq() 291 irq = per_cpu(vector_irq, cpu)[vector]; in __setup_vector_irq() 297 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED; in __setup_vector_irq() 317 per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq; in setup_vector_irq()
|
D | hw_nmi.c | 82 s = &per_cpu(nmi_print_seq, cpu); in arch_trigger_all_cpu_backtrace() 107 s = &per_cpu(nmi_print_seq, cpu); in arch_trigger_all_cpu_backtrace()
|
D | bigsmp_32.c | 45 id = per_cpu(x86_bios_cpu_apicid, cpu); in calculate_ldr() 78 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); in bigsmp_cpu_present_to_apicid()
|
D | x2apic_phys.c | 52 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), in __x2apic_send_IPI_mask()
|
/linux-4.1.27/arch/x86/xen/ |
D | smp.c | 117 if (per_cpu(xen_resched_irq, cpu).irq >= 0) { in xen_smp_intr_free() 118 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); in xen_smp_intr_free() 119 per_cpu(xen_resched_irq, cpu).irq = -1; in xen_smp_intr_free() 120 kfree(per_cpu(xen_resched_irq, cpu).name); in xen_smp_intr_free() 121 per_cpu(xen_resched_irq, cpu).name = NULL; in xen_smp_intr_free() 123 if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { in xen_smp_intr_free() 124 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); in xen_smp_intr_free() 125 per_cpu(xen_callfunc_irq, cpu).irq = -1; in xen_smp_intr_free() 126 kfree(per_cpu(xen_callfunc_irq, cpu).name); in xen_smp_intr_free() 127 per_cpu(xen_callfunc_irq, cpu).name = NULL; in xen_smp_intr_free() [all …]
|
D | spinlock.c | 209 const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu); in xen_unlock_kick() 235 WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", in xen_init_lock_cpu() 236 cpu, per_cpu(lock_kicker_irq, cpu)); in xen_init_lock_cpu() 248 per_cpu(lock_kicker_irq, cpu) = irq; in xen_init_lock_cpu() 249 per_cpu(irq_name, cpu) = name; in xen_init_lock_cpu() 260 unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); in xen_uninit_lock_cpu() 261 per_cpu(lock_kicker_irq, cpu) = -1; in xen_uninit_lock_cpu() 262 kfree(per_cpu(irq_name, cpu)); in xen_uninit_lock_cpu() 263 per_cpu(irq_name, cpu) = NULL; in xen_uninit_lock_cpu()
|
D | time.c | 101 return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable; in xen_vcpu_stolen() 108 area.addr.v = &per_cpu(xen_runstate, cpu); in xen_setup_runstate_info() 418 evt = &per_cpu(xen_clock_events, cpu).evt; in xen_teardown_timer() 428 struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu); in xen_setup_timer()
|
D | enlighten.c | 200 if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu)) in xen_vcpu_setup() 204 per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; in xen_vcpu_setup() 212 vcpup = &per_cpu(xen_vcpu_info, cpu); in xen_vcpu_setup() 233 per_cpu(xen_vcpu, cpu) = vcpup; in xen_vcpu_setup() 680 struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i]; in load_TLS_descriptor() 1595 xen_initial_gdt = &per_cpu(gdt_page, 0); in xen_start_kernel() 1609 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; in xen_start_kernel() 1753 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; in xen_hvm_init_shared_info()
|
/linux-4.1.27/kernel/ |
D | profile.c | 240 per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu); in __profile_flip_buffers() 248 j = per_cpu(cpu_profile_flip, get_cpu()); in profile_flip_buffers() 252 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j]; in profile_flip_buffers() 271 i = per_cpu(cpu_profile_flip, get_cpu()); in profile_discard_flip_buffers() 275 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; in profile_discard_flip_buffers() 291 hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)]; in do_profile_hits() 340 per_cpu(cpu_profile_flip, cpu) = 0; in profile_cpu_callback() 341 if (!per_cpu(cpu_profile_hits, cpu)[1]) { in profile_cpu_callback() 347 per_cpu(cpu_profile_hits, cpu)[1] = page_address(page); in profile_cpu_callback() 349 if (!per_cpu(cpu_profile_hits, cpu)[0]) { in profile_cpu_callback() [all …]
|
D | smpboot.c | 30 struct task_struct *tsk = per_cpu(idle_threads, cpu); in idle_thread_get() 40 per_cpu(idle_threads, smp_processor_id()) = current; in idle_thread_set_boot_cpu() 51 struct task_struct *tsk = per_cpu(idle_threads, cpu); in idle_init() 58 per_cpu(idle_threads, cpu) = tsk; in idle_init() 327 return atomic_read(&per_cpu(cpu_hotplug_state, cpu)); in cpu_report_state() 345 atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE); in cpu_check_up_prepare() 349 switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) { in cpu_check_up_prepare() 354 atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE); in cpu_check_up_prepare() 400 (void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE); in cpu_set_state_online() 418 if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD) in cpu_wait_death() [all …]
|
D | softirq.c | 639 per_cpu(tasklet_vec, cpu).tail = in softirq_init() 640 &per_cpu(tasklet_vec, cpu).head; in softirq_init() 641 per_cpu(tasklet_hi_vec, cpu).tail = in softirq_init() 642 &per_cpu(tasklet_hi_vec, cpu).head; in softirq_init() 691 for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) { in tasklet_kill_immediate() 696 per_cpu(tasklet_vec, cpu).tail = i; in tasklet_kill_immediate() 709 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { in takeover_tasklets() 710 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; in takeover_tasklets() 711 this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail); in takeover_tasklets() 712 per_cpu(tasklet_vec, cpu).head = NULL; in takeover_tasklets() [all …]
|
D | stop_machine.c | 76 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in cpu_stop_queue_work() 77 struct task_struct *p = per_cpu(cpu_stopper_task, cpu); in cpu_stop_queue_work() 339 work = &per_cpu(stop_cpus_work, cpu); in queue_stop_cpus_work() 352 cpu_stop_queue_work(cpu, &per_cpu(stop_cpus_work, cpu)); in queue_stop_cpus_work() 438 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in cpu_stop_should_run() 450 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in cpu_stopper_thread() 493 sched_set_stop_task(cpu, per_cpu(cpu_stopper_task, cpu)); in cpu_stop_create() 498 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in cpu_stop_park() 512 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in cpu_stop_unpark() 536 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); in cpu_stop_init()
|
D | watchdog.c | 211 per_cpu(watchdog_touch_ts, cpu) = 0; in touch_all_softlockup_watchdogs() 522 struct perf_event *event = per_cpu(watchdog_ev, cpu); in watchdog_nmi_enable() 585 per_cpu(watchdog_ev, cpu) = event; in watchdog_nmi_enable() 587 perf_event_enable(per_cpu(watchdog_ev, cpu)); in watchdog_nmi_enable() 594 struct perf_event *event = per_cpu(watchdog_ev, cpu); in watchdog_nmi_disable() 598 per_cpu(watchdog_ev, cpu) = NULL; in watchdog_nmi_disable()
|
D | context_tracking.c | 35 if (!per_cpu(context_tracking.active, cpu)) { in context_tracking_cpu_set() 36 per_cpu(context_tracking.active, cpu) = true; in context_tracking_cpu_set()
|
D | smp.c | 40 struct call_function_data *cfd = &per_cpu(cfd_data, cpu); in hotplug_cfd() 95 init_llist_head(&per_cpu(call_single_queue, i)); in call_function_init() 181 if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu))) in generic_exec_single() 456 llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)); in smp_call_function_many()
|
D | taskstats.c | 306 listeners = &per_cpu(listener_array, cpu); in add_del_listener() 324 listeners = &per_cpu(listener_array, cpu); in add_del_listener() 688 INIT_LIST_HEAD(&(per_cpu(listener_array, i).list)); in taskstats_init_early() 689 init_rwsem(&(per_cpu(listener_array, i).sem)); in taskstats_init_early()
|
/linux-4.1.27/arch/x86/oprofile/ |
D | nmi_int.c | 156 kfree(per_cpu(cpu_msrs, i).multiplex); in nmi_shutdown_mux() 157 per_cpu(cpu_msrs, i).multiplex = NULL; in nmi_shutdown_mux() 158 per_cpu(switch_index, i) = 0; in nmi_shutdown_mux() 172 per_cpu(cpu_msrs, i).multiplex = in nmi_setup_mux() 174 if (!per_cpu(cpu_msrs, i).multiplex) in nmi_setup_mux() 197 per_cpu(switch_index, cpu) = 0; in nmi_cpu_setup_mux() 229 int si = per_cpu(switch_index, cpu); in nmi_cpu_switch() 230 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); in nmi_cpu_switch() 238 per_cpu(switch_index, cpu) = 0; in nmi_cpu_switch() 240 per_cpu(switch_index, cpu) = si; in nmi_cpu_switch() [all …]
|
/linux-4.1.27/arch/s390/include/asm/ |
D | topology.h | 23 #define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id) 24 #define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id) 25 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask) 26 #define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id) 27 #define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask) 28 #define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id) 29 #define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
|
/linux-4.1.27/drivers/cpufreq/ |
D | speedstep-centrino.c | 259 per_cpu(centrino_model, policy->cpu) = model; in centrino_cpu_init_table() 294 if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) || in extract_clock() 295 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) || in extract_clock() 296 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) { in extract_clock() 301 if ((!per_cpu(centrino_model, cpu)) || in extract_clock() 302 (!per_cpu(centrino_model, cpu)->op_points)) in extract_clock() 307 per_cpu(centrino_model, cpu)->op_points[i].frequency in extract_clock() 310 if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data) in extract_clock() 311 return per_cpu(centrino_model, cpu)-> in extract_clock() 315 return per_cpu(centrino_model, cpu)->op_points[i-1].frequency; in extract_clock() [all …]
|
D | arm_big_little.c | 85 cpu_freq = per_cpu(cpu_last_req_freq, j); in find_cluster_maxfreq() 87 if ((cluster == per_cpu(physical_cluster, j)) && in find_cluster_maxfreq() 100 u32 cur_cluster = per_cpu(physical_cluster, cpu); in clk_get_cpu_rate() 116 pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq, in bL_cpufreq_get_rate() 119 return per_cpu(cpu_last_req_freq, cpu); in bL_cpufreq_get_rate() 135 prev_rate = per_cpu(cpu_last_req_freq, cpu); in bL_cpufreq_set_rate() 136 per_cpu(cpu_last_req_freq, cpu) = rate; in bL_cpufreq_set_rate() 137 per_cpu(physical_cluster, cpu) = new_cluster; in bL_cpufreq_set_rate() 153 per_cpu(cpu_last_req_freq, cpu) = prev_rate; in bL_cpufreq_set_rate() 154 per_cpu(physical_cluster, cpu) = old_cluster; in bL_cpufreq_set_rate() [all …]
|
D | sh-cpufreq.c | 35 return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000; in sh_cpufreq_get() 46 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); in sh_cpufreq_target() 83 struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); in sh_cpufreq_verify() 102 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); in sh_cpufreq_cpu_init() 144 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); in sh_cpufreq_cpu_exit()
|
D | cpufreq_userspace.c | 38 if (!per_cpu(cpu_is_managed, policy->cpu)) in cpufreq_set() 64 per_cpu(cpu_is_managed, cpu) = 1; in cpufreq_governor_userspace() 71 per_cpu(cpu_is_managed, cpu) = 0; in cpufreq_governor_userspace()
|
D | cpufreq_ondemand.c | 42 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); in ondemand_powersave_bias_init_cpu() 83 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, in generic_powersave_bias_target() 157 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); in od_check_cpu() 199 struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info, in od_dbs_timer() 273 dbs_info = &per_cpu(od_cpu_dbs_info, cpu); in update_sampling_rate() 328 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, in store_io_is_busy() 367 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, in store_sampling_down_factor() 398 dbs_info = &per_cpu(od_cpu_dbs_info, j); in store_ignore_nice_load() 562 policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy; in od_set_powersave_bias()
|
D | acpi-cpufreq.c | 147 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); in show_freqdomain_cpus() 338 switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { in get_cur_val() 349 perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; in get_cur_val() 367 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); in get_cur_freq_on_cpu() 411 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); in acpi_cpufreq_target() 676 per_cpu(acfreq_data, cpu) = data; in acpi_cpufreq_cpu_init() 845 per_cpu(acfreq_data, cpu) = NULL; in acpi_cpufreq_cpu_init() 852 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); in acpi_cpufreq_cpu_exit() 857 per_cpu(acfreq_data, policy->cpu) = NULL; in acpi_cpufreq_cpu_exit() 870 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); in acpi_cpufreq_resume()
|
D | cpufreq_conservative.c | 49 struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); in cs_check_cpu() 110 struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info, in cs_dbs_timer() 132 &per_cpu(cs_cpu_dbs_info, freq->cpu); in dbs_cpufreq_notifier() 238 dbs_info = &per_cpu(cs_cpu_dbs_info, j); in store_ignore_nice_load()
|
D | cpufreq.c | 192 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); in cpufreq_generic_get() 207 return per_cpu(cpufreq_cpu_data, cpu); in cpufreq_cpu_get_raw() 226 policy = per_cpu(cpufreq_cpu_data, cpu); in cpufreq_cpu_get() 940 gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu)); in cpufreq_init_policy() 981 per_cpu(cpufreq_cpu_data, cpu) = policy; in cpufreq_add_policy_cpu() 1007 policy = per_cpu(cpufreq_cpu_data_fallback, cpu); in cpufreq_policy_restore() 1196 per_cpu(cpufreq_cpu_data, j) = policy; in __cpufreq_add_dev() 1286 per_cpu(cpufreq_cpu_data, j) = NULL; in __cpufreq_add_dev() 1301 per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL; in __cpufreq_add_dev() 1338 policy = per_cpu(cpufreq_cpu_data, cpu); in __cpufreq_remove_dev_prepare() [all …]
|
D | cpufreq_governor.h | 114 return &per_cpu(_dbs_info, cpu).cdbs; \ 119 return &per_cpu(_dbs_info, cpu); \
|
/linux-4.1.27/arch/mips/kernel/ |
D | mips-cpc.c | 51 spin_lock_init(&per_cpu(cpc_core_lock, cpu)); in mips_cpc_probe() 69 spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core), in mips_cpc_lock_other() 70 per_cpu(cpc_core_lock_flags, curr_core)); in mips_cpc_lock_other() 77 spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core), in mips_cpc_unlock_other() 78 per_cpu(cpc_core_lock_flags, curr_core)); in mips_cpc_unlock_other()
|
D | cevt-sb1250.c | 114 struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu); in sb1250_clockevent_init() 115 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); in sb1250_clockevent_init() 116 unsigned char *name = per_cpu(sibyte_hpt_name, cpu); in sb1250_clockevent_init()
|
D | cevt-bcm1480.c | 116 struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu); in sb1480_clockevent_init() 117 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); in sb1480_clockevent_init() 118 unsigned char *name = per_cpu(sibyte_hpt_name, cpu); in sb1480_clockevent_init()
|
D | pm-cps.c | 132 entry = per_cpu(nc_asm_enter, core)[state]; in cps_pm_enter_state() 168 core_ready_count = per_cpu(ready_count, core); in cps_pm_enter_state() 176 coupled_barrier(&per_cpu(pm_barrier, core), online); in cps_pm_enter_state() 631 if (per_cpu(nc_asm_enter, core)[state]) in cps_gen_core_entries() 643 per_cpu(nc_asm_enter, core)[state] = entry_fn; in cps_gen_core_entries() 646 if (!per_cpu(ready_count, core)) { in cps_gen_core_entries() 652 per_cpu(ready_count_alloc, core) = core_rc; in cps_gen_core_entries() 657 per_cpu(ready_count, core) = core_rc; in cps_gen_core_entries()
|
D | smp.c | 477 count = &per_cpu(tick_broadcast_count, cpu); in tick_broadcast() 478 csd = &per_cpu(tick_broadcast_csd, cpu); in tick_broadcast() 489 atomic_set(&per_cpu(tick_broadcast_count, cpu), 0); in tick_broadcast_callee() 498 csd = &per_cpu(tick_broadcast_csd, cpu); in tick_broadcast_init()
|
D | cevt-r4k.c | 81 cd = &per_cpu(mips_clockevent_device, cpu); in c0_compare_interrupt() 198 cd = &per_cpu(mips_clockevent_device, cpu); in r4k_clockevent_init()
|
D | topology.c | 20 struct cpu *c = &per_cpu(cpu_devices, i); in topology_init()
|
D | smp-bmips.c | 320 per_cpu(ipi_action_mask, cpu) |= action; in bmips43xx_send_ipi_single() 332 per_cpu(ipi_action_mask, cpu) = 0; in bmips43xx_ipi_interrupt()
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | irq.c | 361 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event); in arch_show_interrupts() 366 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others); in arch_show_interrupts() 371 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); in arch_show_interrupts() 376 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); in arch_show_interrupts() 381 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); in arch_show_interrupts() 388 per_cpu(irq_stat, j).hmi_exceptions); in arch_show_interrupts() 396 seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs); in arch_show_interrupts() 409 u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event; in arch_irq_stat_cpu() 411 sum += per_cpu(irq_stat, cpu).pmu_irqs; in arch_irq_stat_cpu() 412 sum += per_cpu(irq_stat, cpu).mce_exceptions; in arch_irq_stat_cpu() [all …]
|
D | smp.c | 216 struct cpu_messages *info = &per_cpu(ipi_message, cpu); in smp_muxed_ipi_set_data() 223 struct cpu_messages *info = &per_cpu(ipi_message, cpu); in smp_muxed_ipi_message_pass() 352 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); in smp_store_cpu_info() 354 per_cpu(next_tlbcam_idx, id) in smp_store_cpu_info() 376 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), in smp_prepare_cpus() 378 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), in smp_prepare_cpus() 430 if (per_cpu(cpu_state, cpu) == CPU_DEAD) in generic_cpu_die() 439 per_cpu(cpu_state, cpu) = CPU_DEAD; in generic_set_cpu_dead() 449 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; in generic_set_cpu_up() 454 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; in generic_check_cpu_restart()
|
D | sysfs.c | 53 per_cpu(smt_snooze_delay, cpu->dev.id) = snooze; in store_smt_snooze_delay() 63 return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id)); in show_smt_snooze_delay() 79 per_cpu(smt_snooze_delay, cpu) = snooze; in setup_smt_snooze_delay() 670 struct cpu *c = &per_cpu(cpu_devices, cpu); in register_cpu_online() 752 struct cpu *c = &per_cpu(cpu_devices, cpu); in unregister_cpu_online() 995 struct cpu *c = &per_cpu(cpu_devices, cpu); in topology_init()
|
D | cacheinfo.c | 499 WARN_ON_ONCE(per_cpu(cache_dir_pcpu, cpu_id) != NULL); in cacheinfo_create_cache_dir() 501 per_cpu(cache_dir_pcpu, cpu_id) = cache_dir; in cacheinfo_create_cache_dir() 856 cache_dir = per_cpu(cache_dir_pcpu, cpu_id); in cacheinfo_cpu_offline() 862 per_cpu(cache_dir_pcpu, cpu_id) = NULL; in cacheinfo_cpu_offline()
|
/linux-4.1.27/arch/ia64/mm/ |
D | tlb.c | 93 per_cpu(ia64_need_tlb_flush, i) = 1; in wrap_mmu_context() 368 per_cpu(ia64_tr_num, cpu) = 8; in ia64_tlb_init() 371 per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1; in ia64_tlb_init() 372 if (per_cpu(ia64_tr_num, cpu) > in ia64_tlb_init() 374 per_cpu(ia64_tr_num, cpu) = in ia64_tlb_init() 376 if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) { in ia64_tlb_init() 378 per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX; in ia64_tlb_init() 442 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); in ia64_itr_entry() 454 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); in ia64_itr_entry() 465 for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) { in ia64_itr_entry() [all …]
|
/linux-4.1.27/arch/x86/kernel/ |
D | setup_percpu.c | 223 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); in setup_per_cpu_areas() 224 per_cpu(cpu_number, cpu) = cpu; in setup_per_cpu_areas() 235 per_cpu(x86_cpu_to_apicid, cpu) = in setup_per_cpu_areas() 237 per_cpu(x86_bios_cpu_apicid, cpu) = in setup_per_cpu_areas() 241 per_cpu(x86_cpu_to_logical_apicid, cpu) = in setup_per_cpu_areas() 245 per_cpu(irq_stack_ptr, cpu) = in setup_per_cpu_areas() 246 per_cpu(irq_stack_union.irq_stack, cpu) + in setup_per_cpu_areas() 250 per_cpu(x86_cpu_to_node_map, cpu) = in setup_per_cpu_areas()
|
D | irq_32.c | 121 if (per_cpu(hardirq_stack, cpu)) in irq_ctx_init() 127 per_cpu(hardirq_stack, cpu) = irqstk; in irq_ctx_init() 132 per_cpu(softirq_stack, cpu) = irqstk; in irq_ctx_init() 135 cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); in irq_ctx_init()
|
D | topology.c | 140 per_cpu(cpu_devices, num).cpu.hotpluggable = 1; in arch_register_cpu() 142 return register_cpu(&per_cpu(cpu_devices, num).cpu, num); in arch_register_cpu() 148 unregister_cpu(&per_cpu(cpu_devices, num).cpu); in arch_unregister_cpu() 155 return register_cpu(&per_cpu(cpu_devices, num).cpu, num); in arch_register_cpu()
|
D | irq.c | 51 #define irq_stats(x) (&per_cpu(irq_stat, x)) 122 seq_printf(p, "%10u ", per_cpu(mce_exception_count, j)); in arch_show_interrupts() 126 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); in arch_show_interrupts() 169 sum += per_cpu(mce_exception_count, cpu); in arch_irq_stat_cpu() 170 sum += per_cpu(mce_poll_count, cpu); in arch_irq_stat_cpu() 350 per_cpu(vector_irq, cpu)[vector] < 0) in check_irq_vectors_for_cpu_disable()
|
D | smpboot.c | 329 per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) && in match_smt() 345 if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID && in match_llc() 346 per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) in match_llc() 784 per_cpu(current_task, cpu) = idle; in common_cpu_up() 789 per_cpu(cpu_current_top_of_stack, cpu) = in common_cpu_up() 795 per_cpu(kernel_stack, cpu) = in common_cpu_up() 1139 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); in native_smp_prepare_cpus() 1140 zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); in native_smp_prepare_cpus() 1141 zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); in native_smp_prepare_cpus()
|
D | dumpstack_64.c | 44 unsigned long end = per_cpu(orig_ist, cpu).ist[k]; in in_exception_stack() 157 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu); in dump_trace() 261 irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu)); in show_stack_log_lvl() 262 irq_stack = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE); in show_stack_log_lvl()
|
D | irqinit.c | 63 if (per_cpu(vector_irq, cpu)[vector] > VECTOR_UNDEFINED) in vector_used_by_percpu_irq() 97 per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i; in init_IRQ()
|
D | dumpstack_32.c | 29 void *irq = per_cpu(hardirq_stack, cpu); in is_hardirq_stack() 36 void *irq = per_cpu(softirq_stack, cpu); in is_softirq_stack()
|
D | tsc.c | 125 struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); in cyc2ns_write_begin() 146 struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); in cyc2ns_write_end() 192 struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu); in cyc2ns_init() 872 per_cpu(cyc2ns.data[0].cyc2ns_offset, cpu) = offset; in tsc_restore_sched_clock_state() 873 per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset; in tsc_restore_sched_clock_state()
|
D | kvm.c | 306 struct kvm_steal_time *st = &per_cpu(steal_time, cpu); in kvm_register_steal_time() 408 src = &per_cpu(steal_time, cpu); in kvm_steal_clock() 583 apicid = per_cpu(x86_cpu_to_apicid, cpu); in kvm_kick_cpu() 810 const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu); in kvm_unlock_kick()
|
D | apb_timer.c | 226 struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu); in apbt_cpuhp_notify() 346 adev = &per_cpu(cpu_apbt_dev, i); in apbt_time_init()
|
D | ioport.c | 57 tss = &per_cpu(cpu_tss, get_cpu()); in sys_ioperm()
|
/linux-4.1.27/arch/arm/mm/ |
D | context.c | 70 asid = per_cpu(active_asids, cpu).counter; in a15_erratum_get_cpumask() 72 asid = per_cpu(reserved_asids, cpu); in a15_erratum_get_cpumask() 147 asid = atomic64_xchg(&per_cpu(active_asids, i), 0); in flush_context() 156 asid = per_cpu(reserved_asids, i); in flush_context() 158 per_cpu(reserved_asids, i) = asid; in flush_context() 172 if (per_cpu(reserved_asids, cpu) == asid) in is_reserved_asid() 244 && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) in check_and_switch_context() 260 atomic64_set(&per_cpu(active_asids, cpu), asid); in check_and_switch_context()
|
/linux-4.1.27/arch/parisc/kernel/ |
D | irq.c | 89 per_cpu(local_ack_eiem, cpu) &= ~mask; in cpu_ack_irq() 92 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)); in cpu_ack_irq() 104 per_cpu(local_ack_eiem, cpu) |= mask; in cpu_eoi_irq() 107 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)); in cpu_eoi_irq() 156 #define irq_stats(x) (&per_cpu(irq_stat, x)) 345 return per_cpu(cpu_data, cpu).txn_addr; in txn_affinity_addr() 357 (!per_cpu(cpu_data, next_cpu).txn_addr || in txn_alloc_addr() 423 stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack; in stack_overflow_check() 426 last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu); in stack_overflow_check() 442 last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu); in stack_overflow_check() [all …]
|
D | smp.c | 124 struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu); in ipi_interrupt() 129 spinlock_t *lock = &per_cpu(ipi_lock, this_cpu); in ipi_interrupt() 191 struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu); in ipi_send() 192 spinlock_t *lock = &per_cpu(ipi_lock, cpu); in ipi_send() 319 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); in smp_boot_one_cpu() 377 int bootstrap_processor = per_cpu(cpu_data, 0).cpuid; in smp_prepare_boot_cpu() 397 spin_lock_init(&per_cpu(ipi_lock, cpu)); in smp_prepare_cpus()
|
D | processor.c | 81 p = &per_cpu(cpu_data, cpunum); in init_percpu_prof() 172 p = &per_cpu(cpu_data, cpuid); in processor_probe() 316 per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; in init_per_cpu() 317 per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; in init_per_cpu() 356 const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); in show_cpuinfo()
|
D | time.c | 65 struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); in timer_interrupt() 231 per_cpu(cpu_data, cpu).it_value = next_tick; in start_cpu_itimer()
|
D | topology.c | 32 register_cpu(&per_cpu(cpu_devices, num), num); in topology_init()
|
D | setup.c | 392 per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; in start_parisc() 393 per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; in start_parisc()
|
/linux-4.1.27/arch/s390/kernel/ |
D | topology.c | 93 per_cpu(cpu_topology, lcpu + i).book_id = book->id; in add_cpus_to_mask() 94 per_cpu(cpu_topology, lcpu + i).core_id = rcore; in add_cpus_to_mask() 95 per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i; in add_cpus_to_mask() 99 per_cpu(cpu_topology, lcpu + i).socket_id = rcore; in add_cpus_to_mask() 101 per_cpu(cpu_topology, lcpu + i).socket_id = socket->id; in add_cpus_to_mask() 252 per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu); in update_cpu_masks() 253 per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu); in update_cpu_masks() 254 per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu); in update_cpu_masks() 256 per_cpu(cpu_topology, cpu).thread_id = cpu; in update_cpu_masks() 257 per_cpu(cpu_topology, cpu).core_id = cpu; in update_cpu_masks() [all …]
|
D | idle.c | 54 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); in show_idle_count() 71 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); in show_idle_time() 89 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); in arch_cpu_idle_time()
|
D | processor.c | 71 struct cpuid *id = &per_cpu(cpu_id, n); in show_cpuinfo()
|
/linux-4.1.27/drivers/oprofile/ |
D | oprofile_perf.c | 42 if (per_cpu(perf_events, cpu)[id] == event) in op_overflow_handler() 78 if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) in op_create_counter() 95 per_cpu(perf_events, cpu)[event] = pevent; in op_create_counter() 102 struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; in op_destroy_counter() 106 per_cpu(perf_events, cpu)[event] = NULL; in op_destroy_counter() 261 event = per_cpu(perf_events, cpu)[id]; in oprofile_perf_exit() 266 kfree(per_cpu(perf_events, cpu)); in oprofile_perf_exit() 300 per_cpu(perf_events, cpu) = kcalloc(num_counters, in oprofile_perf_init() 302 if (!per_cpu(perf_events, cpu)) { in oprofile_perf_init()
|
D | nmi_timer_int.c | 38 struct perf_event *event = per_cpu(nmi_timer_events, cpu); in nmi_timer_start_cpu() 45 per_cpu(nmi_timer_events, cpu) = event; in nmi_timer_start_cpu() 56 struct perf_event *event = per_cpu(nmi_timer_events, cpu); in nmi_timer_stop_cpu() 114 event = per_cpu(nmi_timer_events, cpu); in nmi_timer_shutdown() 118 per_cpu(nmi_timer_events, cpu) = NULL; in nmi_timer_shutdown()
|
D | oprofile_stats.c | 26 cpu_buf = &per_cpu(op_cpu_buffer, i); in oprofile_reset_stats() 54 cpu_buf = &per_cpu(op_cpu_buffer, i); in oprofile_create_stats_files()
|
D | cpu_buffer.c | 75 struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); in alloc_cpu_buffers() 102 struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); in start_cpu_work() 122 struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); in flush_cpu_work()
|
D | cpu_buffer.h | 64 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu); in op_cpu_buffer_reset()
|
D | timer_int.c | 58 struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu); in __oprofile_hrtimer_stop()
|
/linux-4.1.27/arch/x86/kernel/cpu/mcheck/ |
D | mce_amd.c | 241 per_cpu(bank_map, cpu) |= (1 << bank); in mce_amd_feature_init() 285 if (!(per_cpu(bank_map, cpu) & (1 << bank))) in amd_threshold_interrupt() 512 if (per_cpu(threshold_banks, cpu)[bank]->blocks) { in allocate_threshold_blocks() 514 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj); in allocate_threshold_blocks() 516 per_cpu(threshold_banks, cpu)[bank]->blocks = b; in allocate_threshold_blocks() 520 per_cpu(threshold_banks, cpu)[bank]->kobj, in allocate_threshold_blocks() 578 struct device *dev = per_cpu(mce_device, cpu); in threshold_create_bank() 595 per_cpu(threshold_banks, cpu)[bank] = b; in threshold_create_bank() 616 per_cpu(threshold_banks, cpu)[bank] = b; in threshold_create_bank() 651 per_cpu(threshold_banks, cpu) = bp; in threshold_create_device() [all …]
|
D | mce-inject.c | 34 struct mce *i = &per_cpu(injectm, m->extcpu); in inject_mce() 164 struct mce *mcpu = &per_cpu(injectm, cpu); in raise_mce()
|
D | therm_throt.c | 101 per_cpu(thermal_state, cpu).event.name); \ 157 struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu); in therm_throt_process() 214 struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu); in thresh_event_valid()
|
D | mce_intel.c | 113 if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE) in mce_intel_hcpu_update() 116 per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE; in mce_intel_hcpu_update()
|
D | mce.c | 806 int severity = mce_severity(&per_cpu(mces_seen, cpu), in mce_reign() 812 m = &per_cpu(mces_seen, cpu); in mce_reign() 842 memset(&per_cpu(mces_seen, cpu), 0, sizeof(struct mce)); in mce_reign() 1350 del_timer_sync(&per_cpu(mce_timer, cpu)); in mce_timer_delete_all() 1659 per_cpu(mce_next_interval, cpu) = iv; in mce_start_timer() 2305 per_cpu(mce_device, cpu) = dev; in mce_device_create() 2322 struct device *dev = per_cpu(mce_device, cpu); in mce_device_remove() 2336 per_cpu(mce_device, cpu) = NULL; in mce_device_remove() 2381 struct timer_list *t = &per_cpu(mce_timer, cpu); in mce_cpu_callback()
|
/linux-4.1.27/arch/s390/oprofile/ |
D | hwsampler.c | 86 cb = &per_cpu(sampler_cpu_buffer, cpu); in smp_ctl_ssctl_stop() 116 cb = &per_cpu(sampler_cpu_buffer, cpu); in smp_ctl_ssctl_deactivate() 142 cb = &per_cpu(sampler_cpu_buffer, cpu); in smp_ctl_ssctl_enable_activate() 170 cb = &per_cpu(sampler_cpu_buffer, cpu); in smp_ctl_qsi() 207 cb = &per_cpu(sampler_cpu_buffer, cpu); in init_all_cpu_buffers() 218 cb = &per_cpu(sampler_cpu_buffer, cpu); in prepare_cpu_buffers() 251 cb = &per_cpu(sampler_cpu_buffer, cpu); in allocate_sdbt() 338 cb = &per_cpu(sampler_cpu_buffer, cpu); in deallocate_sdbt() 382 cb = &per_cpu(sampler_cpu_buffer, cpu); in start_sampling() 419 cb = &per_cpu(sampler_cpu_buffer, cpu); in stop_sampling() [all …]
|
/linux-4.1.27/arch/arm/kernel/ |
D | smp.c | 322 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); in smp_store_cpu_info() 399 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; in smp_cpus_done() 556 per_cpu(cpu_completion, cpu) = completion; in register_ipi_completion() 562 complete(per_cpu(cpu_completion, cpu)); in ipi_complete() 690 if (!per_cpu(l_p_j_ref, cpu)) { in cpufreq_callback() 691 per_cpu(l_p_j_ref, cpu) = in cpufreq_callback() 692 per_cpu(cpu_data, cpu).loops_per_jiffy; in cpufreq_callback() 693 per_cpu(l_p_j_ref_freq, cpu) = freq->old; in cpufreq_callback() 705 per_cpu(cpu_data, cpu).loops_per_jiffy = in cpufreq_callback() 706 cpufreq_scale(per_cpu(l_p_j_ref, cpu), in cpufreq_callback() [all …]
|
D | topology.c | 47 return per_cpu(cpu_scale, cpu); in arch_scale_cpu_capacity() 52 per_cpu(cpu_scale, cpu) = capacity; in set_capacity_scale()
|
D | smp_twd.c | 279 if (per_cpu(percpu_setup_called, cpu)) { in twd_timer_setup() 285 per_cpu(percpu_setup_called, cpu) = true; in twd_timer_setup()
|
D | setup.c | 994 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu); in topology_init() 1063 cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id(); in c_show() 1069 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ), in c_show() 1070 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100); in c_show()
|
/linux-4.1.27/arch/tile/kernel/ |
D | smpboot.c | 97 per_cpu(boot_sp, cpu) = 0; in smp_prepare_cpus() 98 per_cpu(boot_pc, cpu) = (unsigned long) smp_nap; in smp_prepare_cpus() 109 per_cpu(boot_sp, cpu) = task_ksp0(idle); in smp_prepare_cpus() 110 per_cpu(boot_pc, cpu) = idle->thread.pc; in smp_prepare_cpus() 228 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; in __cpu_up()
|
D | tlb.c | 42 asid->asid = per_cpu(current_asid, cpu); in flush_tlb_mm()
|
/linux-4.1.27/arch/arm/mach-omap2/ |
D | omap-mpuss-lowpower.c | 121 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); in set_cpu_wakeup_addr() 132 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); in scu_pwrst_prepare() 186 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); in l2x0_pwrst_prepare() 229 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); in omap4_enter_lowpower() 312 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); in omap4_hotplug_cpu() 373 pm_info = &per_cpu(omap4_pm_info, 0x0); in omap4_mpuss_init() 393 pm_info = &per_cpu(omap4_pm_info, 0x1); in omap4_mpuss_init()
|
/linux-4.1.27/arch/powerpc/platforms/powernv/ |
D | subcore.c | 156 while(per_cpu(split_state, i).step < step) in wait_for_sync_step() 187 per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT; in unsplit_core() 221 split_core_secondary_loop(&per_cpu(split_state, cpu).step); in split_core() 253 per_cpu(split_state, smp_processor_id()).step = SYNC_STEP_FINISHED; in cpu_do_split() 311 while(per_cpu(split_state, cpu).step < SYNC_STEP_FINISHED) in cpu_update_split_mode() 346 state = &per_cpu(split_state, cpu); in set_subcores_per_core()
|
D | rng.c | 94 if (per_cpu(powernv_rng, cpu) == NULL || in rng_init_per_cpu() 96 per_cpu(powernv_rng, cpu) = rng; in rng_init_per_cpu()
|
/linux-4.1.27/include/linux/ |
D | topology.h | 84 return per_cpu(numa_node, cpu); in cpu_to_node() 98 per_cpu(numa_node, cpu) = node; in set_cpu_numa_node() 150 return per_cpu(_numa_mem_, cpu); in cpu_to_mem() 157 per_cpu(_numa_mem_, cpu) = node; in set_cpu_numa_mem()
|
D | kernel_stat.h | 49 #define kstat_cpu(cpu) per_cpu(kstat, cpu) 50 #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
|
/linux-4.1.27/arch/metag/kernel/ |
D | smp.c | 328 struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid); in smp_store_cpu_info() 363 per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT); in secondary_start_kernel() 365 if (!per_cpu(pTBI, cpu)) in secondary_start_kernel() 406 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; in smp_cpus_done() 429 per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT); in smp_prepare_boot_cpu() 431 if (!per_cpu(pTBI, cpu)) in smp_prepare_boot_cpu() 447 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); in send_ipi_message() 503 seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count); in show_ipi_list() 522 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); in do_IPI()
|
D | setup.c | 290 per_cpu(pTBI, cpu) = _pTBI; in setup_arch() 292 if (!per_cpu(pTBI, cpu)) in setup_arch() 481 lpj = per_cpu(cpu_data, i).loops_per_jiffy; in show_cpuinfo() 578 return per_cpu(pTBI, cpu); in pTBI_get()
|
D | traps.c | 754 PTBI _pTBI = per_cpu(pTBI, cpu); in traps_save_context() 770 PTBI _pTBI = per_cpu(pTBI, cpu); in traps_restore_context() 786 return per_cpu(trigger_mask, cpu); in _get_trigger_mask() 798 per_cpu(trigger_mask, cpu) = mask; in set_trigger_mask() 837 PTBI _pTBI = per_cpu(pTBI, cpu); in trap_init() 857 PTBI _pTBI = per_cpu(pTBI, cpu); in tbi_startup_interrupt() 873 PTBI _pTBI = per_cpu(pTBI, cpu); in tbi_shutdown_interrupt()
|
D | topology.c | 54 struct cpuinfo_metag *cpuinfo = &per_cpu(cpu_data, i); in topology_init()
|
/linux-4.1.27/arch/m32r/kernel/ |
D | smp.c | 669 if (--per_cpu(prof_counter, cpu_id) <= 0) { in smp_local_timer_interrupt() 678 per_cpu(prof_counter, cpu_id) in smp_local_timer_interrupt() 679 = per_cpu(prof_multiplier, cpu_id); in smp_local_timer_interrupt() 680 if (per_cpu(prof_counter, cpu_id) in smp_local_timer_interrupt() 681 != per_cpu(prof_old_multiplier, cpu_id)) in smp_local_timer_interrupt() 683 per_cpu(prof_old_multiplier, cpu_id) in smp_local_timer_interrupt() 684 = per_cpu(prof_counter, cpu_id); in smp_local_timer_interrupt()
|
/linux-4.1.27/arch/ia64/kernel/ |
D | smpboot.c | 389 per_cpu(cpu_state, cpuid) = CPU_ONLINE; in smp_callin() 571 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; in smp_prepare_boot_cpu() 581 for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) in clear_cpu_sibling_map() 582 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); in clear_cpu_sibling_map() 586 per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE; in clear_cpu_sibling_map() 597 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu)); in remove_siblinginfo() 687 if (per_cpu(cpu_state, cpu) == CPU_DEAD) in __cpu_die() 726 &per_cpu(cpu_sibling_map, cpu)); in set_cpu_sibling_map() 728 &per_cpu(cpu_sibling_map, i)); in set_cpu_sibling_map() 751 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; in __cpu_up() [all …]
|
D | irq_ia64.c | 142 per_cpu(vector_irq, cpu)[vector] = irq; in __bind_irq_vector() 172 per_cpu(vector_irq, cpu)[vector] = -1; in __clear_irq_vector() 242 per_cpu(vector_irq, cpu)[vector] = -1; in __setup_vector_irq() 248 per_cpu(vector_irq, cpu)[vector] = irq; in __setup_vector_irq()
|
/linux-4.1.27/drivers/acpi/ |
D | acpi_processor.c | 386 if (per_cpu(processor_device_array, pr->id) != NULL && in acpi_processor_add() 387 per_cpu(processor_device_array, pr->id) != device) { in acpi_processor_add() 398 per_cpu(processor_device_array, pr->id) = device; in acpi_processor_add() 399 per_cpu(processors, pr->id) = pr; in acpi_processor_add() 423 per_cpu(processors, pr->id) = NULL; in acpi_processor_add() 457 per_cpu(processor_device_array, pr->id) = NULL; in acpi_processor_remove() 458 per_cpu(processors, pr->id) = NULL; in acpi_processor_remove()
|
D | processor_idle.c | 711 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); in acpi_idle_play_dead() 793 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); in acpi_idle_enter() 803 cx = per_cpu(acpi_cstate[index], dev->cpu); in acpi_idle_enter() 810 cx = per_cpu(acpi_cstate[index], dev->cpu); in acpi_idle_enter() 833 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); in acpi_idle_enter_freeze() 890 per_cpu(acpi_cstate[count], dev->cpu) = cx; in acpi_processor_setup_cpuidle_cx() 986 dev = per_cpu(acpi_cpuidle_device, pr->id); in acpi_processor_hotplug() 1028 _pr = per_cpu(processors, cpu); in acpi_processor_cst_has_changed() 1031 dev = per_cpu(acpi_cpuidle_device, cpu); in acpi_processor_cst_has_changed() 1041 _pr = per_cpu(processors, cpu); in acpi_processor_cst_has_changed() [all …]
|
D | processor_perflib.c | 95 pr = per_cpu(processors, policy->cpu); in acpi_processor_ppc_notifier() 199 pr = per_cpu(processors, cpu); in acpi_processor_get_bios_limit() 620 pr = per_cpu(processors, i); in acpi_processor_preregister_performance() 639 pr = per_cpu(processors, i); in acpi_processor_preregister_performance() 658 pr = per_cpu(processors, i); in acpi_processor_preregister_performance() 684 match_pr = per_cpu(processors, j); in acpi_processor_preregister_performance() 712 match_pr = per_cpu(processors, j); in acpi_processor_preregister_performance() 729 pr = per_cpu(processors, i); in acpi_processor_preregister_performance() 760 pr = per_cpu(processors, cpu); in acpi_processor_register_performance() 795 pr = per_cpu(processors, cpu); in acpi_processor_unregister_performance()
|
D | processor_throttling.c | 90 pr = per_cpu(processors, i); in acpi_processor_update_tsd_coord() 111 pr = per_cpu(processors, i); in acpi_processor_update_tsd_coord() 137 match_pr = per_cpu(processors, j); in acpi_processor_update_tsd_coord() 170 match_pr = per_cpu(processors, j); in acpi_processor_update_tsd_coord() 192 pr = per_cpu(processors, i); in acpi_processor_update_tsd_coord() 235 pr = per_cpu(processors, cpu); in acpi_processor_throttling_notifier() 1137 match_pr = per_cpu(processors, i); in acpi_processor_set_throttling()
|
/linux-4.1.27/drivers/xen/events/ |
D | events_fifo.c | 105 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); in init_control_block() 286 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); in consume_one_event() 328 control_block = per_cpu(cpu_control_block, cpu); in evtchn_fifo_handle_events() 344 void *control_block = per_cpu(cpu_control_block, cpu); in evtchn_fifo_resume() 357 per_cpu(cpu_control_block, cpu) = NULL; in evtchn_fifo_resume() 402 per_cpu(cpu_control_block, cpu) = control_block; in evtchn_fifo_alloc_control_block() 420 if (!per_cpu(cpu_control_block, cpu)) in evtchn_fifo_cpu_notification()
|
D | events_2l.c | 51 clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu))); in evtchn_2l_bind_to_cpu() 52 set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); in evtchn_2l_bind_to_cpu() 149 per_cpu(cpu_evtchn_mask, cpu)[idx] & in active_evtchns() 268 xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu); in xen_debug_interrupt() 280 v = per_cpu(xen_vcpu, i); in xen_debug_interrupt() 289 v = per_cpu(xen_vcpu, cpu); in xen_debug_interrupt() 353 memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) * in evtchn_2l_resume()
|
D | events_base.c | 202 per_cpu(ipi_to_irq, cpu)[ipi] = irq; in xen_irq_info_ipi_setup() 216 per_cpu(virq_to_irq, cpu)[virq] = irq; in xen_irq_info_virq_setup() 263 return per_cpu(virq_to_irq, cpu)[virq]; in irq_from_virq() 627 per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1; in __unbind_from_irq() 630 per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1; in __unbind_from_irq() 887 irq = per_cpu(ipi_to_irq, cpu)[ipi]; in bind_ipi_to_irq() 978 irq = per_cpu(virq_to_irq, cpu)[virq]; in bind_virq_to_irq() 1219 irq = per_cpu(ipi_to_irq, cpu)[vector]; in xen_send_IPI_one() 1457 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) in restore_cpu_virqs() 1482 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) in restore_cpu_ipis()
|
/linux-4.1.27/fs/xfs/ |
D | xfs_stats.c | 28 val += *(((__u32 *)&per_cpu(xfsstats, cpu) + idx)); in counter_val() 77 xs_xstrat_bytes += per_cpu(xfsstats, i).xs_xstrat_bytes; in xfs_stat_proc_show() 78 xs_write_bytes += per_cpu(xfsstats, i).xs_write_bytes; in xfs_stat_proc_show() 79 xs_read_bytes += per_cpu(xfsstats, i).xs_read_bytes; in xfs_stat_proc_show()
|
D | xfs_sysctl.c | 44 vn_active = per_cpu(xfsstats, c).vn_active; in xfs_stats_clear_proc_handler() 45 memset(&per_cpu(xfsstats, c), 0, in xfs_stats_clear_proc_handler() 47 per_cpu(xfsstats, c).vn_active = vn_active; in xfs_stats_clear_proc_handler()
|
D | xfs_stats.h | 224 #define XFS_STATS_INC(v) (per_cpu(xfsstats, current_cpu()).v++) 225 #define XFS_STATS_DEC(v) (per_cpu(xfsstats, current_cpu()).v--) 226 #define XFS_STATS_ADD(v, inc) (per_cpu(xfsstats, current_cpu()).v += (inc))
|
/linux-4.1.27/arch/x86/include/asm/ |
D | smp.h | 42 return per_cpu(cpu_sibling_map, cpu); in cpu_sibling_mask() 47 return per_cpu(cpu_core_map, cpu); in cpu_core_mask() 52 return per_cpu(cpu_llc_shared_map, cpu); in cpu_llc_shared_mask() 172 #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
|
D | topology.h | 126 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) 127 #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
|
D | stackprotector.h | 87 unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu); in setup_stack_canary_segment()
|
D | preempt.h | 39 per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
|
/linux-4.1.27/arch/powerpc/platforms/ps3/ |
D | smp.c | 52 virq = per_cpu(ps3_ipi_virqs, cpu)[msg]; in ps3_smp_message_pass() 66 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu); in ps3_smp_probe() 107 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu); in ps3_smp_cleanup_cpu()
|
D | interrupt.c | 191 pd = &per_cpu(ps3_private, cpu); in ps3_virq_setup() 694 struct ps3_private *pd = &per_cpu(ps3_private, cpu); in ps3_register_ipi_debug_brk() 704 struct ps3_private *pd = &per_cpu(ps3_private, cpu); in ps3_register_ipi_irq() 729 dump_bmp(&per_cpu(ps3_private, 0)); in ps3_get_irq() 730 dump_bmp(&per_cpu(ps3_private, 1)); in ps3_get_irq() 736 dump_bmp(&per_cpu(ps3_private, 0)); in ps3_get_irq() 737 dump_bmp(&per_cpu(ps3_private, 1)); in ps3_get_irq() 760 struct ps3_private *pd = &per_cpu(ps3_private, cpu); in ps3_init_IRQ()
|
/linux-4.1.27/arch/sparc/kernel/ |
D | nmi.c | 60 if (per_cpu(nmi_touch, cpu) != 1) in touch_nmi_watchdog() 61 per_cpu(nmi_touch, cpu) = 1; in touch_nmi_watchdog() 150 per_cpu(wd_enabled, cpu) = 0; in report_broken_nmi() 185 if (!per_cpu(wd_enabled, cpu)) in check_nmi_watchdog()
|
D | sun4d_smp.c | 200 work = &per_cpu(sun4d_ipi_work, cpu); in smp4d_ipi_init() 238 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); in sun4d_ipi_single() 249 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); in sun4d_ipi_mask_one() 260 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); in sun4d_ipi_resched() 383 ce = &per_cpu(sparc32_clockevent, cpu); in smp4d_percpu_timer_interrupt()
|
D | sysfs.c | 22 struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \ 135 ra = __pa(&per_cpu(mmu_stats, smp_processor_id())); in write_mmustat_enable() 226 struct cpu *c = &per_cpu(cpu_devices, cpu); in register_cpu_online() 239 struct cpu *c = &per_cpu(cpu_devices, cpu); in unregister_cpu_online() 306 struct cpu *c = &per_cpu(cpu_devices, cpu); in topology_init()
|
D | leon_smp.c | 299 work = &per_cpu(leon_ipi_work, cpu); in leon_ipi_init() 313 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); in leon_ipi_single() 324 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); in leon_ipi_mask_one() 335 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); in leon_ipi_resched()
|
D | smp_64.c | 1262 cpumask_clear(&per_cpu(cpu_sibling_map, i)); in smp_fill_in_sib_core_maps() 1264 cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i)); in smp_fill_in_sib_core_maps() 1271 cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i)); in smp_fill_in_sib_core_maps() 1343 for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) in __cpu_disable() 1344 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); in __cpu_disable() 1345 cpumask_clear(&per_cpu(cpu_sibling_map, cpu)); in __cpu_disable()
|
D | time_64.c | 637 struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu); in sparc64_get_clock_tick() 652 struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu); in sparc64_cpufreq_notifier() 725 struct clock_event_device *evt = &per_cpu(sparc64_events, cpu); in timer_interrupt()
|
/linux-4.1.27/arch/powerpc/platforms/pseries/ |
D | hotplug-cpu.c | 66 return per_cpu(current_state, cpu); in get_cpu_current_state() 71 per_cpu(current_state, cpu) = state; in set_cpu_current_state() 76 return per_cpu(preferred_offline_state, cpu); in get_preferred_offline_state() 81 per_cpu(preferred_offline_state, cpu) = state; in set_preferred_offline_state() 86 per_cpu(preferred_offline_state, cpu) = default_offline_state; in set_default_offline_state()
|
D | dtl.c | 104 struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu); in dtl_start() 125 struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu); in dtl_stop() 141 return per_cpu(dtl_rings, dtl->cpu).write_index; in dtl_current_index() 379 struct dtl *dtl = &per_cpu(cpu_dtl, i); in dtl_init()
|
/linux-4.1.27/arch/mn10300/kernel/ |
D | cevt-mn10300.c | 63 cd = &per_cpu(mn10300_clockevent_device, cpu); in timer_interrupt() 89 cd = &per_cpu(mn10300_clockevent_device, cpu); in init_clockevents() 115 iact = &per_cpu(timer_irq, cpu); in init_clockevents()
|
/linux-4.1.27/kernel/trace/ |
D | trace_stack.c | 192 if (per_cpu(trace_active, cpu)++ != 0) in stack_trace_call() 217 per_cpu(trace_active, cpu)--; in stack_trace_call() 263 per_cpu(trace_active, cpu)++; in stack_max_size_write() 269 per_cpu(trace_active, cpu)--; in stack_max_size_write() 308 per_cpu(trace_active, cpu)++; in t_start() 325 per_cpu(trace_active, cpu)--; in t_stop()
|
D | trace_irqsoff.c | 115 if (likely(!per_cpu(tracing_cpu, cpu))) in func_prolog_dec() 169 per_cpu(tracing_cpu, cpu) = 0; in irqsoff_set_flag() 375 if (per_cpu(tracing_cpu, cpu)) in start_critical_timing() 393 per_cpu(tracing_cpu, cpu) = 1; in start_critical_timing() 408 if (unlikely(per_cpu(tracing_cpu, cpu))) in stop_critical_timing() 409 per_cpu(tracing_cpu, cpu) = 0; in stop_critical_timing()
|
/linux-4.1.27/Documentation/locking/ |
D | lglock.txt | 14 distributed over all CPUs as per_cpu elements. 17 as per_cpu elements but can be mostly handled by CPU local actions 24 - very fast access to the local per_cpu data 25 - reasonably fast access to specific per_cpu data on a different 34 Basically it is an array of per_cpu spinlocks with the 100 locality aware spinlock. lg_local_* behaves like a per_cpu 105 access to protected per_cpu object on this CPU 109 access to protected per_cpu object on other CPU cpu 113 access all protected per_cpu objects on all CPUs
|
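The lglock.txt matches above describe an lglock as an array of per_cpu spinlocks: lg_local_* takes only the calling CPU's lock for fast access to its own protected object, while lg_global_* takes every CPU's lock before other CPUs' objects are touched. A minimal sketch of that pattern follows. It assumes the 4.1-era <linux/lglock.h> interface (DEFINE_STATIC_LGLOCK, lg_local_lock/unlock, lg_global_lock/unlock); the names demo_count, demo_bump_local and demo_sum_all are hypothetical and not taken from the tree.

#include <linux/lglock.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU counter guarded by an lglock (illustration only). */
static DEFINE_PER_CPU(unsigned long, demo_count);
DEFINE_STATIC_LGLOCK(demo_lock);

static void demo_bump_local(void)
{
	/* Fast path: take only this CPU's entry in the lock array. */
	lg_local_lock(&demo_lock);
	this_cpu_inc(demo_count);
	lg_local_unlock(&demo_lock);
}

static unsigned long demo_sum_all(void)
{
	unsigned long sum = 0;
	int cpu;

	/* Slow path: take every CPU's entry, after which per_cpu() may
	 * safely read each CPU's protected instance. */
	lg_global_lock(&demo_lock);
	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_count, cpu);
	lg_global_unlock(&demo_lock);
	return sum;
}

As in the per_cpu(var, cpu) form indexed throughout this listing, the second argument selects which CPU's instance is addressed; this_cpu_inc() is the shorthand for the calling CPU's own instance.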
/linux-4.1.27/arch/x86/kernel/cpu/ |
D | perf_event_intel_rapl.c | 526 struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); in rapl_cpu_exit() 584 struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); in rapl_cpu_prepare() 620 per_cpu(rapl_pmu, cpu) = pmu; in rapl_cpu_prepare() 621 per_cpu(rapl_pmu_to_free, cpu) = NULL; in rapl_cpu_prepare() 628 struct rapl_pmu *pmu = per_cpu(rapl_pmu_to_free, cpu); in rapl_cpu_kfree() 632 per_cpu(rapl_pmu_to_free, cpu) = NULL; in rapl_cpu_kfree() 637 struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); in rapl_cpu_dying() 642 per_cpu(rapl_pmu, cpu) = NULL; in rapl_cpu_dying() 644 per_cpu(rapl_pmu_to_free, cpu) = pmu; in rapl_cpu_dying()
|
D | perf_event_intel_ds.c | 229 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; in init_debug_store_on_cpu() 241 if (!per_cpu(cpu_hw_events, cpu).ds) in fini_debug_store_on_cpu() 251 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; in alloc_pebs_buffer() 273 per_cpu(insn_buffer, cpu) = ibuffer; in alloc_pebs_buffer() 291 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; in release_pebs_buffer() 296 kfree(per_cpu(insn_buffer, cpu)); in release_pebs_buffer() 297 per_cpu(insn_buffer, cpu) = NULL; in release_pebs_buffer() 305 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; in alloc_bts_buffer() 334 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; in release_bts_buffer() 352 per_cpu(cpu_hw_events, cpu).ds = ds; in alloc_ds_buffer() [all …]
|
D | perf_event_amd.c | 368 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in amd_pmu_cpu_prepare() 384 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); in amd_pmu_cpu_starting() 398 nb = per_cpu(cpu_hw_events, i).amd_nb; in amd_pmu_cpu_starting() 420 cpuhw = &per_cpu(cpu_hw_events, cpu); in amd_pmu_cpu_dead()
|
D | common.c | 395 wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); in load_percpu_segment() 1011 tss = &per_cpu(cpu_tss, cpu); in enable_sep_cpu() 1369 t = &per_cpu(cpu_tss, cpu); in cpu_init() 1370 oist = &per_cpu(orig_ist, cpu); in cpu_init() 1408 char *estacks = per_cpu(exception_stacks, cpu); in cpu_init() 1415 per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks; in cpu_init() 1453 struct tss_struct *t = &per_cpu(cpu_tss, cpu); in cpu_init()
|
D | amd.c | 329 per_cpu(cpu_llc_id, cpu) = node_id; in amd_get_topology() 354 per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; in amd_detect_cmp() 363 id = per_cpu(cpu_llc_id, cpu); in amd_get_nb_id() 378 node = per_cpu(cpu_llc_id, cpu); in srat_detect_node()
|
/linux-4.1.27/scripts/gdb/linux/ |
D | cpus.py | 36 def per_cpu(var_ptr, cpu): function 115 return per_cpu(var_ptr, cpu) 132 return per_cpu(var_ptr, cpu).dereference()
|
/linux-4.1.27/arch/powerpc/platforms/cell/ |
D | cpufreq_spudemand.c | 95 info = &per_cpu(spu_gov_info, cpu); in spu_gov_govern() 113 affected_info = &per_cpu(spu_gov_info, i); in spu_gov_govern() 130 info = &per_cpu(spu_gov_info, i); in spu_gov_govern()
|
D | interrupt.c | 171 return per_cpu(cpu_iic, cpu).target_id; in iic_get_target_id() 186 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); in iic_message_pass() 300 struct iic *iic = &per_cpu(cpu_iic, hw_cpu); in init_one_iic()
|
/linux-4.1.27/net/rds/ |
D | page.c | 119 rem = &per_cpu(rds_page_remainders, get_cpu()); in rds_page_remainder_alloc() 153 rem = &per_cpu(rds_page_remainders, get_cpu()); in rds_page_remainder_alloc() 188 rem = &per_cpu(rds_page_remainders, cpu); in rds_page_remainder_cpu_notify()
|
D | tcp_stats.c | 64 src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu)); in rds_tcp_stats_info_copy()
|
D | iw_stats.c | 85 src = (uint64_t *)&(per_cpu(rds_iw_stats, cpu)); in rds_iw_stats_info_copy()
|
D | ib_stats.c | 87 src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu)); in rds_ib_stats_info_copy()
|
D | stats.c | 127 src = (uint64_t *)&(per_cpu(rds_stats, cpu)); in rds_stats_info()
|
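The four RDS stats files above all use the same idiom: treat each CPU's stats structure as an array of u64 counters and fold every online CPU's copy into one snapshot. A minimal sketch under hypothetical names (struct my_stats, my_stats):

/* Sketch: fold per-CPU u64 counters into a single snapshot.
 * struct my_stats / my_stats are hypothetical names.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>
#include <linux/string.h>

struct my_stats {
        uint64_t        rx_packets;
        uint64_t        tx_packets;
        uint64_t        errors;
};

static DEFINE_PER_CPU(struct my_stats, my_stats);

#define MY_STATS_NR     (sizeof(struct my_stats) / sizeof(uint64_t))

static void my_stats_snapshot(struct my_stats *sum)
{
        uint64_t *dst = (uint64_t *)sum;
        unsigned int cpu, i;

        memset(sum, 0, sizeof(*sum));

        for_each_online_cpu(cpu) {
                uint64_t *src = (uint64_t *)&per_cpu(my_stats, cpu);

                for (i = 0; i < MY_STATS_NR; i++)
                        dst[i] += src[i];
        }
}

The cast-to-array trick only works when the stats struct contains nothing but same-width counters, which is why those structs are kept flat.
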
/linux-4.1.27/mm/ |
D | swap.c | 525 struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu); in activate_page_drain() 533 return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0; in need_activate_page_drain() 807 struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu); in lru_add_drain_cpu() 812 pvec = &per_cpu(lru_rotate_pvecs, cpu); in lru_add_drain_cpu() 822 pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); in lru_add_drain_cpu() 879 struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); in lru_add_drain_all() 881 if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) || in lru_add_drain_all() 882 pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) || in lru_add_drain_all() 883 pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) || in lru_add_drain_all() 892 flush_work(&per_cpu(lru_add_drain_work, cpu)); in lru_add_drain_all()
|
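lru_add_drain_all() above shows the standard "drain every CPU" shape: queue work only on CPUs whose per-CPU caches are non-empty, then flush each queued work item. A hedged sketch with hypothetical names (my_drain_work, my_cache_count, my_drain_local); the mutex and get_online_cpus() protection used by the real code are omitted for brevity.

/* Sketch: drain per-CPU caches by scheduling work on each CPU that
 * actually has something cached, then waiting for all of it.
 * Hotplug locking (get_online_cpus()) and caller serialization omitted.
 */
#include <linux/percpu.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(struct work_struct, my_drain_work);
static DEFINE_PER_CPU(unsigned int, my_cache_count);

static void my_drain_local(struct work_struct *dummy)
{
        /* runs on the target CPU, so this_cpu ops are safe here */
        __this_cpu_write(my_cache_count, 0);
}

static void my_drain_all(void)
{
        static struct cpumask has_work;
        unsigned int cpu;

        cpumask_clear(&has_work);

        for_each_online_cpu(cpu) {
                struct work_struct *work = &per_cpu(my_drain_work, cpu);

                if (!per_cpu(my_cache_count, cpu))
                        continue;       /* nothing cached on this CPU */

                INIT_WORK(work, my_drain_local);
                schedule_work_on(cpu, work);
                cpumask_set_cpu(cpu, &has_work);
        }

        for_each_cpu(cpu, &has_work)
                flush_work(&per_cpu(my_drain_work, cpu));
}
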
D | kmemleak-test.c | 89 per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL); in kmemleak_test_init() 91 per_cpu(kmemleak_test_pointer, i)); in kmemleak_test_init()
|
D | quicklist.c | 96 ql = per_cpu(quicklist, cpu); in quicklist_total_size()
|
/linux-4.1.27/arch/x86/platform/uv/ |
D | tlb_uv.c | 136 bcp = &per_cpu(bau_control, cpu); in set_bau_on() 151 bcp = &per_cpu(bau_control, cpu); in set_bau_off() 183 return per_cpu(x86_cpu_to_apicid, cpu); in uvhub_to_first_apicid() 361 struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id()); in do_reset() 767 tbcp = &per_cpu(bau_control, tcpu); in disable_for_period() 975 tbcp = &per_cpu(bau_control, tcpu); in check_enable() 1094 bcp = &per_cpu(bau_control, cpu); in uv_flush_tlb_others() 1125 flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu); in uv_flush_tlb_others() 1254 bcp = &per_cpu(bau_control, smp_processor_id()); in uv_bau_message_interrupt() 1384 bcp = &per_cpu(bau_control, cpu); in ptc_seq_show() [all …]
|
/linux-4.1.27/arch/powerpc/include/asm/ |
D | topology.h | 90 #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) 91 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
|
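The powerpc topology header above is an example of the convention of defining a per-CPU cpumask once and exposing it through a thin macro or inline helper. A hedged sketch (my_sibling_map is a hypothetical variable; with CONFIG_CPUMASK_OFFSTACK=y each entry must still be allocated with zalloc_cpumask_var() at boot):

/* Sketch: per-CPU cpumask exposed through a macro and an inline helper,
 * mirroring the topology_*_cpumask() convention.  Hypothetical names.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>

DEFINE_PER_CPU(cpumask_var_t, my_sibling_map);

#define my_thread_cpumask(cpu)  (per_cpu(my_sibling_map, cpu))

static inline struct cpumask *my_sibling_mask(int cpu)
{
        return per_cpu(my_sibling_map, cpu);
}
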
D | smp.h | 98 return per_cpu(cpu_sibling_map, cpu); in cpu_sibling_mask() 103 return per_cpu(cpu_core_map, cpu); in cpu_core_mask()
|
/linux-4.1.27/arch/mips/cavium-octeon/ |
D | octeon-irq.c | 261 raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); in octeon_irq_ciu_enable() 267 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); in octeon_irq_ciu_enable() 276 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); in octeon_irq_ciu_enable() 364 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); in octeon_irq_ciu_disable_all() 366 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); in octeon_irq_ciu_disable_all() 368 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); in octeon_irq_ciu_disable_all() 397 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); in octeon_irq_ciu_enable_all() 399 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); in octeon_irq_ciu_enable_all() 401 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); in octeon_irq_ciu_enable_all() 437 set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); in octeon_irq_ciu_enable_v2() [all …]
|
D | smp.c | 257 while (per_cpu(cpu_state, cpu) != CPU_DEAD) in octeon_cpu_die() 295 per_cpu(cpu_state, cpu) = CPU_DEAD; in play_dead()
|
/linux-4.1.27/arch/alpha/kernel/ |
D | time.c | 93 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); in rtc_timer_interrupt() 125 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); in init_rtc_clockevent() 183 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); in qemu_timer_interrupt() 193 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); in init_qemu_clockevent()
|
D | irq.c | 80 seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); in arch_show_interrupts()
|
/linux-4.1.27/arch/ia64/sn/kernel/sn2/ |
D | sn2_smp.c | 494 stat = &per_cpu(ptcstats, cpu); in sn2_ptc_seq_show() 498 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, in sn2_ptc_seq_show() 499 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, in sn2_ptc_seq_show() 500 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, in sn2_ptc_seq_show() 504 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec); in sn2_ptc_seq_show() 522 memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats)); in sn2_ptc_proc_write()
|
/linux-4.1.27/drivers/cpuidle/ |
D | driver.c | 35 return per_cpu(cpuidle_drivers, cpu); in __cpuidle_get_cpu_driver() 55 per_cpu(cpuidle_drivers, cpu) = NULL; in __cpuidle_unset_driver() 79 per_cpu(cpuidle_drivers, cpu) = drv; in __cpuidle_set_driver()
|
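drivers/cpuidle/driver.c above keeps the registered driver in a per-CPU pointer so each CPU can, in principle, have its own driver; lookup, set and unset are each a one-line per_cpu() access. A hedged sketch with hypothetical names (struct my_driver, my_drivers):

/* Sketch: per-CPU "current driver" pointer with get/set/unset helpers.
 * struct my_driver and my_drivers are hypothetical names.
 */
#include <linux/percpu.h>
#include <linux/errno.h>

struct my_driver {
        const char      *name;
};

static DEFINE_PER_CPU(struct my_driver *, my_drivers);

static struct my_driver *my_get_cpu_driver(int cpu)
{
        return per_cpu(my_drivers, cpu);
}

static int my_set_driver(struct my_driver *drv, int cpu)
{
        if (per_cpu(my_drivers, cpu))
                return -EBUSY;  /* another driver already owns this CPU */

        per_cpu(my_drivers, cpu) = drv;
        return 0;
}

static void my_unset_driver(struct my_driver *drv, int cpu)
{
        if (per_cpu(my_drivers, cpu) == drv)
                per_cpu(my_drivers, cpu) = NULL;
}
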
D | cpuidle-cps.c | 112 device = &per_cpu(cpuidle_dev, cpu); in cps_cpuidle_unregister() 164 device = &per_cpu(cpuidle_dev, cpu); in cps_cpuidle_init()
|
D | coupled.c | 321 struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu); in cpuidle_coupled_poke() 640 other_dev = per_cpu(cpuidle_devices, cpu); in cpuidle_coupled_register_device() 663 csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu); in cpuidle_coupled_register_device() 760 dev = per_cpu(cpuidle_devices, cpu); in cpuidle_coupled_cpu_notify()
|
D | cpuidle.c | 405 per_cpu(cpuidle_devices, dev->cpu) = NULL; in __cpuidle_unregister_device() 432 per_cpu(cpuidle_devices, dev->cpu) = dev; in __cpuidle_register_device() 527 device = &per_cpu(cpuidle_dev, cpu); in cpuidle_unregister() 559 device = &per_cpu(cpuidle_dev, cpu); in cpuidle_register()
|
D | cpuidle-arm.c | 154 dev = per_cpu(cpuidle_devices, cpu); in arm_idle_init()
|
/linux-4.1.27/arch/mips/sgi-ip27/ |
D | ip27-timer.c | 81 struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu); in hub_rt_counter_handler() 113 struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu); in hub_rt_clock_event_init() 114 unsigned char *name = per_cpu(hub_rt_name, cpu); in hub_rt_clock_event_init()
|
/linux-4.1.27/drivers/leds/trigger/ |
D | ledtrig-cpu.c | 129 struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); in ledtrig_cpu_init() 152 struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); in ledtrig_cpu_exit()
|
/linux-4.1.27/init/ |
D | calibrate.c | 280 if (per_cpu(cpu_loops_per_jiffy, this_cpu)) { in calibrate_delay() 281 lpj = per_cpu(cpu_loops_per_jiffy, this_cpu); in calibrate_delay() 305 per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj; in calibrate_delay()
|
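calibrate_delay() above caches the measured value in per_cpu(cpu_loops_per_jiffy, cpu) so a CPU that is unplugged and later brought back skips recalibration. A hedged sketch with hypothetical names (my_lpj, my_measure_lpj):

/* Sketch: cache an expensive per-CPU calibration result so a CPU that
 * is hot-unplugged and re-plugged does not repeat the work.
 * my_lpj and my_measure_lpj() are hypothetical; the caller runs on the
 * CPU being brought up, with preemption disabled.
 */
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, my_lpj);

static unsigned long my_measure_lpj(void)
{
        /* stand-in for the real timed calibration loop */
        return 4096UL * 1000UL;
}

static unsigned long my_calibrate_delay(void)
{
        int this_cpu = smp_processor_id();
        unsigned long lpj;

        lpj = per_cpu(my_lpj, this_cpu);
        if (lpj)
                return lpj;     /* already calibrated on an earlier online */

        lpj = my_measure_lpj();
        per_cpu(my_lpj, this_cpu) = lpj;
        return lpj;
}
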
/linux-4.1.27/kernel/locking/ |
D | percpu-rwsem.c | 109 sum += per_cpu(*brw->fast_read_ctr, cpu); in clear_fast_ctr() 110 per_cpu(*brw->fast_read_ctr, cpu) = 0; in clear_fast_ctr()
|
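percpu-rwsem's clear_fast_ctr() above shows the other spelling of the macro: when the per-CPU data was created dynamically with alloc_percpu(), the percpu pointer is dereferenced inside the macro, per_cpu(*ptr, cpu). A hedged sketch (my_ctr is a hypothetical name):

/* Sketch: sum and clear a dynamically allocated per-CPU counter using
 * the per_cpu(*ptr, cpu) form.  my_ctr is hypothetical.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

static int __percpu *my_ctr;

static int my_ctr_init(void)
{
        my_ctr = alloc_percpu(int);
        return my_ctr ? 0 : -ENOMEM;
}

static int my_ctr_clear(void)
{
        unsigned int cpu;
        int sum = 0;

        for_each_possible_cpu(cpu) {
                sum += per_cpu(*my_ctr, cpu);
                per_cpu(*my_ctr, cpu) = 0;
        }

        return sum;
}

static void my_ctr_exit(void)
{
        free_percpu(my_ctr);
}
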
/linux-4.1.27/arch/sh/kernel/ |
D | smp.c | 80 per_cpu(cpu_state, cpu) = CPU_ONLINE; in smp_prepare_boot_cpu() 90 if (per_cpu(cpu_state, cpu) == CPU_DEAD) { in native_cpu_die() 204 per_cpu(cpu_state, cpu) = CPU_ONLINE; in start_secondary() 222 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; in __cpu_up()
|
D | localtimer.c | 49 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); in local_timer_setup()
|
D | topology.c | 58 struct cpu *c = &per_cpu(cpu_devices, i); in topology_init()
|
/linux-4.1.27/arch/microblaze/kernel/ |
D | setup.c | 189 per_cpu(KM, 0) = 0x1; /* We start in kernel mode */ in machine_early_init() 190 per_cpu(CURRENT_SAVE, 0) = (unsigned long)current; in machine_early_init()
|
/linux-4.1.27/block/ |
D | blk-softirq.c | 92 list_splice_init(&per_cpu(blk_cpu_done, cpu), in blk_cpu_notify() 180 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); in blk_softirq_init()
|
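blk-softirq above initialises one list head per possible CPU at boot and, when a CPU dies, splices that CPU's pending list onto the local one. A hedged sketch with a hypothetical name (my_done_list); the real code performs the splice from a hotplug notifier.

/* Sketch: per-CPU list heads initialised for every possible CPU, with a
 * dead CPU's pending entries spliced onto the local CPU's list.
 * my_done_list is a hypothetical name.
 */
#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/irqflags.h>
#include <linux/init.h>

static DEFINE_PER_CPU(struct list_head, my_done_list);

static int __init my_softirq_init(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                INIT_LIST_HEAD(&per_cpu(my_done_list, i));

        return 0;
}

/* Called on a surviving CPU after @cpu has gone offline. */
static void my_take_over_dead_cpu(unsigned int cpu)
{
        unsigned long flags;

        local_irq_save(flags);
        list_splice_init(&per_cpu(my_done_list, cpu),
                         this_cpu_ptr(&my_done_list));
        local_irq_restore(flags);
}
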
D | blk-iopoll.c | 200 list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), in blk_iopoll_cpu_notify() 218 INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i)); in blk_iopoll_setup()
|
/linux-4.1.27/drivers/clocksource/ |
D | metag_generic.c | 115 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); in arch_timer_setup() 116 char *name = per_cpu(local_clockevent_name, cpu); in arch_timer_setup()
|
/linux-4.1.27/arch/mips/loongson/loongson-3/ |
D | smp.c | 276 per_cpu(core0_c0count, i) = c0count; in loongson3_ipi_interrupt() 298 per_cpu(cpu_state, cpu) = CPU_ONLINE; in loongson3_init_secondary() 369 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; in loongson3_prepare_cpus() 424 while (per_cpu(cpu_state, cpu) != CPU_DEAD) in loongson3_cpu_die() 578 state_addr = &per_cpu(cpu_state, cpu); in play_dead()
|
D | hpet.c | 156 cd = &per_cpu(hpet_clockevent_device, cpu); in hpet_irq_handler() 205 cd = &per_cpu(hpet_clockevent_device, cpu); in setup_hpet_timer()
|
/linux-4.1.27/arch/hexagon/kernel/ |
D | smp.c | 97 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); in handle_ipi() 114 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); in send_ipi()
|
D | time.c | 142 &per_cpu(clock_events, cpu); in setup_percpu_clockdev() 158 struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu); in ipi_timer()
|
/linux-4.1.27/drivers/base/ |
D | cpu.c | 78 per_cpu(cpu_sys_devices, logical_cpu) = NULL; in unregister_cpu() 350 per_cpu(cpu_sys_devices, num) = &cpu->dev; in register_cpu() 360 return per_cpu(cpu_sys_devices, cpu); in get_cpu_device() 466 if (register_cpu(&per_cpu(cpu_devices, i), i)) in cpu_dev_register_generic()
|
D | cacheinfo.c | 33 #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu)) 219 #define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu)) 225 #define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
|
/linux-4.1.27/lib/ |
D | random32.c | 185 struct rnd_state *state = &per_cpu(net_rand_state, i); in prandom_seed() 204 struct rnd_state *state = &per_cpu(net_rand_state,i); in prandom_init() 271 struct rnd_state *state = &per_cpu(net_rand_state,i); in __prandom_reseed()
|
/linux-4.1.27/kernel/time/ |
D | tick-common.c | 58 return &per_cpu(tick_cpu_device, cpu); in tick_get_device() 299 td = &per_cpu(tick_cpu_device, cpu); in tick_check_new_device() 361 struct tick_device *td = &per_cpu(tick_cpu_device, cpu); in tick_shutdown()
|
D | timer_stats.c | 248 lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id()); in timer_stats_update_stats() 351 raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); in sync_access() 413 raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); in init_timer_stats()
|
D | tick-sched.c | 46 return &per_cpu(tick_cpu_sched, cpu); in tick_get_tick_sched() 250 irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu); in tick_nohz_full_kick_cpu() 503 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in get_cpu_idle_time_us() 544 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in get_cpu_iowait_time_us() 1192 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); in tick_cancel_sched_timer() 1211 set_bit(0, &per_cpu(tick_cpu_sched, cpu).check_clocks); in tick_clock_notify()
|
D | tick-broadcast.c | 268 td = &per_cpu(tick_cpu_device, cpu); in tick_do_broadcast() 279 td = &per_cpu(tick_cpu_device, cpumask_first(mask)); in tick_do_broadcast() 592 td = &per_cpu(tick_cpu_device, cpu); in tick_handle_oneshot_broadcast() 830 td = &per_cpu(tick_cpu_device, cpu); in tick_broadcast_init_next_event()
|
D | clockevents.c | 396 return ced == per_cpu(tick_cpu_device, cpu).evtdev ? -EAGAIN : -EBUSY; in __clockevents_try_unbind() 743 &per_cpu(tick_cpu_device, dev->id); in tick_get_tick_dev() 757 return &per_cpu(tick_cpu_device, dev->id); in tick_get_tick_dev() 767 struct device *dev = &per_cpu(tick_percpu_dev, cpu); in tick_init_sysfs()
|
/linux-4.1.27/arch/blackfin/mach-common/ |
D | smp.c | 135 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); in ipi_timer() 183 bfin_ipi_data = &per_cpu(bfin_ipi, cpu); in bfin_ipi_init() 197 bfin_ipi_data = &per_cpu(bfin_ipi, cpu); in send_ipi()
|
/linux-4.1.27/arch/arm/mach-alpine/ |
D | alpine_cpu_resume.h | 31 struct al_cpu_resume_regs_per_cpu per_cpu[]; member
|
D | alpine_cpu_pm.c | 45 &al_cpu_resume_regs->per_cpu[phys_cpu].resume_addr); in alpine_cpu_wakeup()
|
/linux-4.1.27/arch/sparc/include/asm/ |
D | cpudata_32.h | 28 #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
|
D | cpudata_64.h | 33 #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
|
D | topology_64.h | 44 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
|
/linux-4.1.27/arch/tile/include/asm/ |
D | hardirq.h | 41 #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
|
/linux-4.1.27/drivers/crypto/ |
D | padlock-aes.c | 157 if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) || in aes_set_key() 158 &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu)) in aes_set_key() 159 per_cpu(paes_last_cword, cpu) = NULL; in aes_set_key() 171 if (cword != per_cpu(paes_last_cword, cpu)) in padlock_reset_key() 181 per_cpu(paes_last_cword, raw_smp_processor_id()) = cword; in padlock_store_cword()
|
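padlock-aes above remembers, per CPU, which control word was last programmed into the hardware so it can skip redundant reloads, and clears any cached pointer that refers to a key being changed. A hedged sketch with hypothetical names (struct my_key, my_last_key, my_hw_load_key):

/* Sketch: per-CPU "last value loaded into the hardware" cache with
 * invalidation on rekey.  struct my_key / my_last_key are hypothetical,
 * and my_hw_load_key() stands in for the real hardware programming.
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/types.h>

struct my_key {
        u32     material[8];
};

static DEFINE_PER_CPU(const struct my_key *, my_last_key);

static void my_hw_load_key(const struct my_key *key)
{
        /* stand-in for writing the key to the coprocessor */
}

/* Fast path: only reprogram the hardware if this CPU last saw a
 * different key.  Caller runs with preemption disabled. */
static void my_use_key(const struct my_key *key)
{
        int cpu = raw_smp_processor_id();

        if (per_cpu(my_last_key, cpu) != key) {
                my_hw_load_key(key);
                per_cpu(my_last_key, cpu) = key;
        }
}

/* Rekey: any CPU still caching the old pointer must forget it. */
static void my_set_key(struct my_key *key)
{
        int cpu;

        for_each_online_cpu(cpu)
                if (per_cpu(my_last_key, cpu) == key)
                        per_cpu(my_last_key, cpu) = NULL;

        /* ... fill in key->material ... */
}
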
/linux-4.1.27/arch/ia64/include/asm/sn/ |
D | pda.h | 66 #define pdacpu(cpu) (&per_cpu(pda_percpu, cpu))
|
/linux-4.1.27/kernel/rcu/ |
D | tree_trace.c | 125 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu), in print_one_rcu_data() 147 per_cpu(rcu_cpu_has_work, rdp->cpu), in print_one_rcu_data() 148 convert_kthread_status(per_cpu(rcu_cpu_kthread_status, in print_one_rcu_data() 150 per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff); in print_one_rcu_data()
|
/linux-4.1.27/drivers/thermal/ |
D | x86_pkg_temp_thermal.c | 379 &per_cpu(pkg_temp_thermal_threshold_work, cpu), in pkg_temp_thermal_platform_thermal_notify() 536 INIT_DELAYED_WORK(&per_cpu(pkg_temp_thermal_threshold_work, cpu), in get_core_online() 548 &per_cpu(pkg_temp_thermal_threshold_work, cpu)); in put_core_offline() 638 &per_cpu(pkg_temp_thermal_threshold_work, i)); in pkg_temp_thermal_exit()
|
/linux-4.1.27/arch/ia64/include/asm/ |
D | topology.h | 56 #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
|
/linux-4.1.27/arch/arm/include/asm/ |
D | smp_plat.h | 37 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu); in smp_cpuid_part()
|
/linux-4.1.27/arch/mips/math-emu/ |
D | me-debugfs.c | 21 ps = &per_cpu(fpuemustats, cpu); in fpuemu_stat_get()
|
/linux-4.1.27/drivers/scsi/bnx2i/ |
D | bnx2i_init.c | 425 p = &per_cpu(bnx2i_percpu, cpu); in bnx2i_percpu_thread_create() 446 p = &per_cpu(bnx2i_percpu, cpu); in bnx2i_percpu_thread_destroy() 536 p = &per_cpu(bnx2i_percpu, cpu); in bnx2i_mod_init()
|
/linux-4.1.27/kernel/sched/ |
D | sched.h | 702 #define cpu_rq(cpu) (&per_cpu(runqueues, (cpu))) 1716 seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); in irq_time_read() 1717 irq_time = per_cpu(cpu_softirq_time, cpu) + in irq_time_read() 1718 per_cpu(cpu_hardirq_time, cpu); in irq_time_read() 1719 } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); in irq_time_read() 1734 return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); in irq_time_read()
|
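sched.h's irq_time_read() above pairs per-CPU time accumulators with a per-CPU seqcount so a reader on another CPU sees a consistent 64-bit sum even on 32-bit builds where u64 loads are not atomic. A hedged sketch with hypothetical names (my_time_seq, my_soft_time, my_hard_time); each per-CPU seqcount would still need seqcount_init() at boot.

/* Sketch: consistent cross-CPU read of two per-CPU u64 accumulators
 * guarded by a per-CPU seqcount.  All my_* names are hypothetical.
 */
#include <linux/percpu.h>
#include <linux/seqlock.h>
#include <linux/types.h>

static DEFINE_PER_CPU(seqcount_t, my_time_seq);
static DEFINE_PER_CPU(u64, my_soft_time);
static DEFINE_PER_CPU(u64, my_hard_time);

/* Writer side: runs on the local CPU with preemption disabled. */
static void my_account_time(u64 soft_delta, u64 hard_delta)
{
        write_seqcount_begin(this_cpu_ptr(&my_time_seq));
        __this_cpu_add(my_soft_time, soft_delta);
        __this_cpu_add(my_hard_time, hard_delta);
        write_seqcount_end(this_cpu_ptr(&my_time_seq));
}

/* Reader side: may run on any CPU; retry if the writer raced with us. */
static u64 my_time_read(int cpu)
{
        unsigned int seq;
        u64 total;

        do {
                seq = read_seqcount_begin(&per_cpu(my_time_seq, cpu));
                total = per_cpu(my_soft_time, cpu) +
                        per_cpu(my_hard_time, cpu);
        } while (read_seqcount_retry(&per_cpu(my_time_seq, cpu), seq));

        return total;
}
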
/linux-4.1.27/arch/blackfin/kernel/ |
D | time-ts.c | 306 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); in bfin_coretmr_interrupt() 326 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); in bfin_coretmr_clockevent_init()
|
/linux-4.1.27/arch/arm/mach-qcom/ |
D | platsmp.c | 277 if (!per_cpu(cold_boot_done, cpu)) { in qcom_boot_secondary() 280 per_cpu(cold_boot_done, cpu) = true; in qcom_boot_secondary()
|
/linux-4.1.27/arch/arm/mach-bcm/ |
D | platsmp-brcmstb.c | 70 return per_cpu(per_cpu_sw_state, cpu); in per_cpu_sw_state_rd() 76 per_cpu(per_cpu_sw_state, cpu) = val; in per_cpu_sw_state_wr()
|
/linux-4.1.27/arch/arc/include/asm/ |
D | mmu_context.h | 54 #define asid_cpu(cpu) per_cpu(asid_cache, cpu)
|
/linux-4.1.27/arch/cris/arch-v10/mm/ |
D | fault.c | 48 pgd_t* pgd = (pgd_t*)per_cpu(current_pgd, smp_processor_id()); in handle_mmu_bus_fault()
|
/linux-4.1.27/arch/xtensa/include/asm/ |
D | mmu_context.h | 34 #define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
|
/linux-4.1.27/arch/mn10300/include/asm/ |
D | mmu_context.h | 150 per_cpu(cpu_tlbstate, cpu).active_mm = next; in switch_mm()
|
/linux-4.1.27/arch/c6x/kernel/ |
D | setup.c | 101 p = &per_cpu(cpu_data, smp_processor_id()); in get_cpuinfo() 447 struct cpuinfo_c6x *p = &per_cpu(cpu_data, n); in show_cpuinfo()
|
/linux-4.1.27/arch/powerpc/oprofile/ |
D | op_model_cell.c | 503 per_cpu(pmc_values, cpu + prev_hdw_thread)[i] in cell_virtual_cntr() 506 if (per_cpu(pmc_values, cpu + next_hdw_thread)[i] in cell_virtual_cntr() 521 per_cpu(pmc_values, in cell_virtual_cntr() 785 per_cpu(pmc_values, j)[i] = 0; in cell_reg_setup_ppu() 834 per_cpu(pmc_values, cpu)[i] = reset_value[i]; in cell_reg_setup_ppu()
|