/linux-4.4.14/arch/x86/kernel/apic/ |
H A D | x2apic_cluster.c | 23 return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16; x2apic_cluster() 54 cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu); for_each_cpu() 60 dest |= per_cpu(x86_cpu_to_logical_apicid, i); for_each_cpu_and() 110 dest = per_cpu(x86_cpu_to_logical_apicid, i); for_each_cpu_and() 123 dest |= per_cpu(x86_cpu_to_logical_apicid, i); for_each_cpu_and() 136 per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR); init_x2apic_ldr() 138 cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu)); for_each_online_cpu() 142 cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); for_each_online_cpu() 143 cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); for_each_online_cpu() 159 if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu), update_clusterinfo() 162 } else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu), update_clusterinfo() 164 free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu)); update_clusterinfo() 174 cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); for_each_online_cpu() 175 cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); for_each_online_cpu() 177 free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu)); 178 free_cpumask_var(per_cpu(ipi_mask, this_cpu)); 193 zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL); x2apic_init_cpu_notifier() 194 zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL); x2apic_init_cpu_notifier() 196 BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu)); x2apic_init_cpu_notifier() 198 cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu)); x2apic_init_cpu_notifier() 234 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu)); cluster_vector_allocation_domain()
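
The cluster driver above shows the canonical hotplug pattern for per-CPU cpumasks: allocate both masks when a CPU comes up, roll back the first allocation if the second fails, and mark the CPU as its own cluster sibling. A minimal sketch of that idiom (the my_* names are illustrative, not the kernel's):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(cpumask_var_t, my_cluster_mask);
    static DEFINE_PER_CPU(cpumask_var_t, my_ipi_mask);

    static int my_cpu_prepare(unsigned int cpu)
    {
            /* zalloc_cpumask_var() only truly allocates with CONFIG_CPUMASK_OFFSTACK */
            if (!zalloc_cpumask_var(&per_cpu(my_cluster_mask, cpu), GFP_KERNEL))
                    return -ENOMEM;
            if (!zalloc_cpumask_var(&per_cpu(my_ipi_mask, cpu), GFP_KERNEL)) {
                    /* roll back the first allocation on partial failure */
                    free_cpumask_var(per_cpu(my_cluster_mask, cpu));
                    return -ENOMEM;
            }
            /* every CPU is a member of its own cluster */
            cpumask_set_cpu(cpu, per_cpu(my_cluster_mask, cpu));
            return 0;
    }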
|
H A D | ipi.c | 33 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, for_each_cpu() 52 __default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, for_each_cpu() 145 if (per_cpu(x86_cpu_to_apicid, i) == apic_id) for_each_possible_cpu()
|
H A D | bigsmp_32.c | 45 id = per_cpu(x86_bios_cpu_apicid, cpu); calculate_ldr() 78 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); bigsmp_cpu_present_to_apicid()
|
H A D | x2apic_phys.c | 54 __x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu), for_each_cpu()
|
H A D | vector.c | 179 if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector])) for_each_cpu() 189 per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq); 262 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; clear_irq_vector() 279 if (per_cpu(vector_irq, cpu)[vector] != desc) clear_irq_vector() 281 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; clear_irq_vector() 462 per_cpu(vector_irq, cpu)[vector] = desc; for_each_irq_desc() 466 desc = per_cpu(vector_irq, cpu)[vector]; 472 per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; 492 per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq_to_desc(irq); setup_vector_irq() 744 per_cpu(vector_irq, cpu)[cfg->old_vector] = VECTOR_UNUSED; irq_force_complete_move()
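
vector.c treats vector_irq as a per-CPU array indexed by hardware vector, with a sentinel marking free slots. Roughly the same shape, sketched with hypothetical names (the kernel uses a typedef'd array for the same reason: DEFINE_PER_CPU wants a single type token):

    #include <linux/percpu.h>

    struct irq_desc;                                /* opaque here */
    #define MY_NR_VECTORS    256
    #define MY_VECTOR_UNUSED ((struct irq_desc *)NULL)

    typedef struct irq_desc *my_vector_table[MY_NR_VECTORS];
    static DEFINE_PER_CPU(my_vector_table, my_vector_irq);

    static void my_clear_vector(unsigned int cpu, unsigned int vector)
    {
            /* per_cpu() names CPU cpu's whole table; index it like any array */
            per_cpu(my_vector_irq, cpu)[vector] = MY_VECTOR_UNUSED;
    }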
|
H A D | x2apic_uv_x.c | 259 apicid = per_cpu(x86_cpu_to_apicid, cpu); uv_send_IPI_one() 330 *apicid = per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits; 857 * Called on each cpu to initialize the per_cpu UV data area. 963 int apicid = per_cpu(x86_cpu_to_apicid, cpu); for_each_present_cpu()
|
/linux-4.4.14/arch/s390/include/asm/ |
H A D | topology.h | 25 #define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id) 26 #define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id) 28 (&per_cpu(cpu_topology, cpu).thread_mask) 29 #define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id) 30 #define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask) 31 #define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id) 32 #define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask) 64 return per_cpu(cpu_topology, cpu).node_id; cpu_to_node()
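
All of these s390 macros follow one shape: per_cpu(cpu_topology, cpu) evaluates to the per-CPU struct itself, so a plain field access yields a value and an address-of yields a pointer suitable for the cpumask APIs. A hedged sketch of the same accessor style:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    struct my_topology {
            int socket_id;
            cpumask_t core_mask;
    };
    static DEFINE_PER_CPU(struct my_topology, my_topo);

    /* value accessor: reads a field out of CPU cpu's copy */
    #define my_socket_id(cpu)     (per_cpu(my_topo, cpu).socket_id)
    /* pointer accessor: callers can hand this straight to cpumask_*() */
    #define my_core_cpumask(cpu)  (&per_cpu(my_topo, cpu).core_mask)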
|
/linux-4.4.14/arch/blackfin/mm/ |
H A D | sram-alloc.c | 78 per_cpu(free_l1_ssram_head, cpu).next = l1sram_init() 80 if (!per_cpu(free_l1_ssram_head, cpu).next) { l1sram_init() 85 per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve; l1sram_init() 86 per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve; l1sram_init() 87 per_cpu(free_l1_ssram_head, cpu).next->pid = 0; l1sram_init() 88 per_cpu(free_l1_ssram_head, cpu).next->next = NULL; l1sram_init() 90 per_cpu(used_l1_ssram_head, cpu).next = NULL; l1sram_init() 93 spin_lock_init(&per_cpu(l1sram_lock, cpu)); l1sram_init() 106 per_cpu(free_l1_data_A_sram_head, cpu).next = l1_data_sram_init() 108 if (!per_cpu(free_l1_data_A_sram_head, cpu).next) { l1_data_sram_init() 113 per_cpu(free_l1_data_A_sram_head, cpu).next->paddr = l1_data_sram_init() 115 per_cpu(free_l1_data_A_sram_head, cpu).next->size = l1_data_sram_init() 117 per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0; l1_data_sram_init() 118 per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL; l1_data_sram_init() 120 per_cpu(used_l1_data_A_sram_head, cpu).next = NULL; l1_data_sram_init() 124 per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10); l1_data_sram_init() 129 per_cpu(free_l1_data_B_sram_head, cpu).next = l1_data_sram_init() 131 if (!per_cpu(free_l1_data_B_sram_head, cpu).next) { l1_data_sram_init() 136 per_cpu(free_l1_data_B_sram_head, cpu).next->paddr = l1_data_sram_init() 138 per_cpu(free_l1_data_B_sram_head, cpu).next->size = l1_data_sram_init() 140 per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0; l1_data_sram_init() 141 per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL; l1_data_sram_init() 143 per_cpu(used_l1_data_B_sram_head, cpu).next = NULL; l1_data_sram_init() 147 per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10); l1_data_sram_init() 154 spin_lock_init(&per_cpu(l1_data_sram_lock, cpu)); l1_data_sram_init() 163 per_cpu(free_l1_inst_sram_head, cpu).next = l1_inst_sram_init() 165 if (!per_cpu(free_l1_inst_sram_head, cpu).next) { l1_inst_sram_init() 170 per_cpu(free_l1_inst_sram_head, cpu).next->paddr = l1_inst_sram_init() 172 per_cpu(free_l1_inst_sram_head, cpu).next->size = l1_inst_sram_init() 174 per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0; l1_inst_sram_init() 175 per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL; l1_inst_sram_init() 177 per_cpu(used_l1_inst_sram_head, cpu).next = NULL; l1_inst_sram_init() 181 per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10); l1_inst_sram_init() 184 spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu)); l1_inst_sram_init() 446 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_A_sram_alloc() 448 addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu), l1_data_A_sram_alloc() 449 &per_cpu(used_l1_data_A_sram_head, cpu)); l1_data_A_sram_alloc() 452 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_A_sram_alloc() 473 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_A_sram_free() 475 ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu), l1_data_A_sram_free() 476 &per_cpu(used_l1_data_A_sram_head, cpu)); l1_data_A_sram_free() 479 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_A_sram_free() 497 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_B_sram_alloc() 499 addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu), l1_data_B_sram_alloc() 500 &per_cpu(used_l1_data_B_sram_head, cpu)); l1_data_B_sram_alloc() 503 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_B_sram_alloc() 524 spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_B_sram_free() 526 ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu), l1_data_B_sram_free() 527 &per_cpu(used_l1_data_B_sram_head, cpu)); l1_data_B_sram_free() 530 spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags); l1_data_B_sram_free() 580 spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags); l1_inst_sram_alloc() 582 addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu), l1_inst_sram_alloc() 583 &per_cpu(used_l1_inst_sram_head, cpu)); l1_inst_sram_alloc() 586 spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags); l1_inst_sram_alloc() 607 spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags); l1_inst_sram_free() 609 ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu), l1_inst_sram_free() 610 &per_cpu(used_l1_inst_sram_head, cpu)); l1_inst_sram_free() 613 spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags); l1_inst_sram_free() 631 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); l1sram_alloc() 633 addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu), l1sram_alloc() 634 &per_cpu(used_l1_ssram_head, cpu)); l1sram_alloc() 637 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); l1sram_alloc() 651 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); l1sram_alloc_max() 653 addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu), l1sram_alloc_max() 654 &per_cpu(used_l1_ssram_head, cpu), psize); l1sram_alloc_max() 657 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); l1sram_alloc_max() 671 spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags); l1sram_free() 673 ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu), l1sram_free() 674 &per_cpu(used_l1_ssram_head, cpu)); l1sram_free() 677 spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags); l1sram_free() 844 &per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu))) sram_proc_show() 848 &per_cpu(free_l1_data_A_sram_head, cpu), sram_proc_show() 849 &per_cpu(used_l1_data_A_sram_head, cpu))) sram_proc_show() 854 &per_cpu(free_l1_data_B_sram_head, cpu), sram_proc_show() 855 &per_cpu(used_l1_data_B_sram_head, cpu))) sram_proc_show() 860 &per_cpu(free_l1_inst_sram_head, cpu), sram_proc_show() 861 &per_cpu(used_l1_inst_sram_head, cpu))) sram_proc_show()
|
/linux-4.4.14/arch/mips/kernel/ |
H A D | mips-cpc.c | 58 spin_lock_init(&per_cpu(cpc_core_lock, cpu)); mips_cpc_probe() 76 spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core), mips_cpc_lock_other() 77 per_cpu(cpc_core_lock_flags, curr_core)); mips_cpc_lock_other() 90 spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core), mips_cpc_unlock_other() 91 per_cpu(cpc_core_lock_flags, curr_core)); mips_cpc_unlock_other()
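
mips_cpc_lock_other() stores the irqsave flags in a second per-CPU variable keyed by the same core, so the matching unlock, possibly in a different function, can restore them. A minimal sketch of the pair (my_* names are hypothetical; each lock still needs a one-time spin_lock_init(), as the probe code above does):

    #include <linux/percpu.h>
    #include <linux/spinlock.h>

    static DEFINE_PER_CPU(spinlock_t, my_core_lock);
    static DEFINE_PER_CPU(unsigned long, my_core_lock_flags);

    static void my_lock_core(unsigned int core)
    {
            /* the per-CPU flags slot is an lvalue, so irqsave can write into it */
            spin_lock_irqsave(&per_cpu(my_core_lock, core),
                              per_cpu(my_core_lock_flags, core));
    }

    static void my_unlock_core(unsigned int core)
    {
            spin_unlock_irqrestore(&per_cpu(my_core_lock, core),
                                   per_cpu(my_core_lock_flags, core));
    }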
|
H A D | topology.c | 20 struct cpu *c = &per_cpu(cpu_devices, i); topology_init()
|
H A D | cevt-bcm1480.c | 114 struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu); sb1480_clockevent_init() 115 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); sb1480_clockevent_init() 116 unsigned char *name = per_cpu(sibyte_hpt_name, cpu); sb1480_clockevent_init()
|
H A D | cevt-sb1250.c | 113 struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu); sb1250_clockevent_init() 114 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); sb1250_clockevent_init() 115 unsigned char *name = per_cpu(sibyte_hpt_name, cpu); sb1250_clockevent_init()
|
H A D | mips-cm.c | 257 spin_lock_init(&per_cpu(cm_core_lock, cpu)); mips_cm_probe() 269 spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core), mips_cm_lock_other() 270 per_cpu(cm_core_lock_flags, curr_core)); mips_cm_lock_other() 293 spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core), mips_cm_unlock_other() 294 per_cpu(cm_core_lock_flags, curr_core)); mips_cm_unlock_other()
|
H A D | cevt-r4k.c | 75 cd = &per_cpu(mips_clockevent_device, cpu); c0_compare_interrupt() 195 cd = &per_cpu(mips_clockevent_device, cpu); r4k_clockevent_init()
|
H A D | pm-cps.c | 132 entry = per_cpu(nc_asm_enter, core)[state]; cps_pm_enter_state() 168 core_ready_count = per_cpu(ready_count, core); cps_pm_enter_state() 176 coupled_barrier(&per_cpu(pm_barrier, core), online); cps_pm_enter_state() 632 if (per_cpu(nc_asm_enter, core)[state]) cps_gen_core_entries() 644 per_cpu(nc_asm_enter, core)[state] = entry_fn; cps_gen_core_entries() 647 if (!per_cpu(ready_count, core)) { cps_gen_core_entries() 653 per_cpu(ready_count_alloc, core) = core_rc; cps_gen_core_entries() 658 per_cpu(ready_count, core) = core_rc; cps_gen_core_entries()
|
H A D | smp.c | 469 count = &per_cpu(tick_broadcast_count, cpu); for_each_cpu() 470 csd = &per_cpu(tick_broadcast_csd, cpu); for_each_cpu() 481 atomic_set(&per_cpu(tick_broadcast_count, cpu), 0); tick_broadcast_callee() 490 csd = &per_cpu(tick_broadcast_csd, cpu); tick_broadcast_init()
|
/linux-4.4.14/arch/x86/xen/ |
H A D | smp.c | 120 if (per_cpu(xen_resched_irq, cpu).irq >= 0) { xen_smp_intr_free() 121 unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); xen_smp_intr_free() 122 per_cpu(xen_resched_irq, cpu).irq = -1; xen_smp_intr_free() 123 kfree(per_cpu(xen_resched_irq, cpu).name); xen_smp_intr_free() 124 per_cpu(xen_resched_irq, cpu).name = NULL; xen_smp_intr_free() 126 if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { xen_smp_intr_free() 127 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); xen_smp_intr_free() 128 per_cpu(xen_callfunc_irq, cpu).irq = -1; xen_smp_intr_free() 129 kfree(per_cpu(xen_callfunc_irq, cpu).name); xen_smp_intr_free() 130 per_cpu(xen_callfunc_irq, cpu).name = NULL; xen_smp_intr_free() 132 if (per_cpu(xen_debug_irq, cpu).irq >= 0) { xen_smp_intr_free() 133 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL); xen_smp_intr_free() 134 per_cpu(xen_debug_irq, cpu).irq = -1; xen_smp_intr_free() 135 kfree(per_cpu(xen_debug_irq, cpu).name); xen_smp_intr_free() 136 per_cpu(xen_debug_irq, cpu).name = NULL; xen_smp_intr_free() 138 if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) { xen_smp_intr_free() 139 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq, xen_smp_intr_free() 141 per_cpu(xen_callfuncsingle_irq, cpu).irq = -1; xen_smp_intr_free() 142 kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); xen_smp_intr_free() 143 per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; xen_smp_intr_free() 148 if (per_cpu(xen_irq_work, cpu).irq >= 0) { xen_smp_intr_free() 149 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); xen_smp_intr_free() 150 per_cpu(xen_irq_work, cpu).irq = -1; xen_smp_intr_free() 151 kfree(per_cpu(xen_irq_work, cpu).name); xen_smp_intr_free() 152 per_cpu(xen_irq_work, cpu).name = NULL; xen_smp_intr_free() 155 if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { xen_smp_intr_free() 156 unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); xen_smp_intr_free() 157 per_cpu(xen_pmu_irq, cpu).irq = -1; xen_smp_intr_free() 158 kfree(per_cpu(xen_pmu_irq, cpu).name); xen_smp_intr_free() 159 per_cpu(xen_pmu_irq, cpu).name = NULL; xen_smp_intr_free() 176 per_cpu(xen_resched_irq, cpu).irq = rc; xen_smp_intr_init() 177 per_cpu(xen_resched_irq, cpu).name = resched_name; xen_smp_intr_init() 188 per_cpu(xen_callfunc_irq, cpu).irq = rc; xen_smp_intr_init() 189 per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; xen_smp_intr_init() 197 per_cpu(xen_debug_irq, cpu).irq = rc; xen_smp_intr_init() 198 per_cpu(xen_debug_irq, cpu).name = debug_name; xen_smp_intr_init() 209 per_cpu(xen_callfuncsingle_irq, cpu).irq = rc; xen_smp_intr_init() 210 per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; xen_smp_intr_init() 228 per_cpu(xen_irq_work, cpu).irq = rc; xen_smp_intr_init() 229 per_cpu(xen_irq_work, cpu).name = callfunc_name; xen_smp_intr_init() 239 per_cpu(xen_pmu_irq, cpu).irq = rc; xen_smp_intr_init() 240 per_cpu(xen_pmu_irq, cpu).name = pmu_name; xen_smp_intr_init() 354 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); for_each_possible_cpu() 355 zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); for_each_possible_cpu() 356 zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); for_each_possible_cpu() 441 per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir); cpu_initialize_context() 483 per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1; xen_cpu_up()
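
Each Xen IPI above keeps a small per-CPU record of its bound irq plus the kmalloc'd name string, with irq = -1 as the "not bound" sentinel so teardown is safe to call repeatedly. A sketch of one such slot (names are illustrative; the unbind call is left as a comment because its wiring is Xen-specific):

    #include <linux/percpu.h>
    #include <linux/slab.h>

    struct my_irq_info {
            int   irq;      /* -1 while unbound */
            char *name;     /* kmalloc'd, owned by this slot */
    };
    static DEFINE_PER_CPU(struct my_irq_info, my_resched_irq) = { .irq = -1 };

    static void my_intr_free(unsigned int cpu)
    {
            if (per_cpu(my_resched_irq, cpu).irq >= 0) {
                    /* unbind_from_irqhandler(per_cpu(my_resched_irq, cpu).irq, NULL); */
                    per_cpu(my_resched_irq, cpu).irq = -1;
                    kfree(per_cpu(my_resched_irq, cpu).name);
                    per_cpu(my_resched_irq, cpu).name = NULL;
            }
    }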
|
H A D | spinlock.c | 256 const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu); xen_unlock_kick() 283 WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", xen_init_lock_cpu() 284 cpu, per_cpu(lock_kicker_irq, cpu)); xen_init_lock_cpu() 296 per_cpu(lock_kicker_irq, cpu) = irq; xen_init_lock_cpu() 297 per_cpu(irq_name, cpu) = name; xen_init_lock_cpu() 308 unbind_from_irqhandler(per_cpu(lock_kicker_irq, cpu), NULL); xen_uninit_lock_cpu() 309 per_cpu(lock_kicker_irq, cpu) = -1; xen_uninit_lock_cpu() 310 kfree(per_cpu(irq_name, cpu)); xen_uninit_lock_cpu() 311 per_cpu(irq_name, cpu) = NULL; xen_uninit_lock_cpu()
|
H A D | pmu.c | 539 per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data; xen_pmu_init() 540 per_cpu(xenpmu_shared, cpu).flags = 0; xen_pmu_init() 568 free_pages((unsigned long)per_cpu(xenpmu_shared, cpu).xenpmu_data, 0); xen_pmu_finish() 569 per_cpu(xenpmu_shared, cpu).xenpmu_data = NULL; xen_pmu_finish()
|
H A D | time.c | 101 return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable; xen_vcpu_stolen() 108 area.addr.v = &per_cpu(xen_runstate, cpu); xen_setup_runstate_info() 400 evt = &per_cpu(xen_clock_events, cpu).evt; xen_teardown_timer() 410 struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu); xen_setup_timer()
|
H A D | enlighten.c | 117 * overrides the default per_cpu(xen_vcpu, cpu) value. 202 if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu)) xen_vcpu_setup() 206 per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; xen_vcpu_setup() 214 vcpup = &per_cpu(xen_vcpu_info, cpu); xen_vcpu_setup() 235 per_cpu(xen_vcpu, cpu) = vcpup; xen_vcpu_setup() 682 struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i]; load_TLS_descriptor() 1606 xen_initial_gdt = &per_cpu(gdt_page, 0); xen_start_kernel() 1620 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; xen_start_kernel() 1766 per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; for_each_online_cpu()
|
/linux-4.4.14/arch/x86/oprofile/ |
H A D | nmi_int.c | 156 kfree(per_cpu(cpu_msrs, i).multiplex); for_each_possible_cpu() 157 per_cpu(cpu_msrs, i).multiplex = NULL; for_each_possible_cpu() 158 per_cpu(switch_index, i) = 0; for_each_possible_cpu() 172 per_cpu(cpu_msrs, i).multiplex = for_each_possible_cpu() 174 if (!per_cpu(cpu_msrs, i).multiplex) for_each_possible_cpu() 197 per_cpu(switch_index, cpu) = 0; nmi_cpu_setup_mux() 229 int si = per_cpu(switch_index, cpu); nmi_cpu_switch() 230 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); nmi_cpu_switch() 238 per_cpu(switch_index, cpu) = 0; nmi_cpu_switch() 240 per_cpu(switch_index, cpu) = si; nmi_cpu_switch() 285 memcpy(per_cpu(cpu_msrs, cpu).multiplex, mux_clone() 286 per_cpu(cpu_msrs, 0).multiplex, mux_clone() 307 kfree(per_cpu(cpu_msrs, i).counters); for_each_possible_cpu() 308 per_cpu(cpu_msrs, i).counters = NULL; for_each_possible_cpu() 309 kfree(per_cpu(cpu_msrs, i).controls); for_each_possible_cpu() 310 per_cpu(cpu_msrs, i).controls = NULL; for_each_possible_cpu() 322 per_cpu(cpu_msrs, i).counters = kzalloc(counters_size, for_each_possible_cpu() 324 if (!per_cpu(cpu_msrs, i).counters) for_each_possible_cpu() 326 per_cpu(cpu_msrs, i).controls = kzalloc(controls_size, for_each_possible_cpu() 328 if (!per_cpu(cpu_msrs, i).controls) for_each_possible_cpu() 345 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); nmi_cpu_setup() 351 per_cpu(saved_lvtpc, cpu) = apic_read(APIC_LVTPC); nmi_cpu_setup() 376 struct op_msrs *msrs = &per_cpu(cpu_msrs, cpu); nmi_cpu_shutdown() 385 apic_write(APIC_LVTPC, per_cpu(saved_lvtpc, cpu)); nmi_cpu_shutdown() 469 err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0)); nmi_setup() 477 memcpy(per_cpu(cpu_msrs, cpu).counters, for_each_possible_cpu() 478 per_cpu(cpu_msrs, 0).counters, for_each_possible_cpu() 481 memcpy(per_cpu(cpu_msrs, cpu).controls, for_each_possible_cpu() 482 per_cpu(cpu_msrs, 0).controls, for_each_possible_cpu()
|
/linux-4.4.14/kernel/ |
H A D | profile.c | 240 per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu); __profile_flip_buffers() 248 j = per_cpu(cpu_profile_flip, get_cpu()); profile_flip_buffers() 252 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j]; for_each_online_cpu() 271 i = per_cpu(cpu_profile_flip, get_cpu()); profile_discard_flip_buffers() 275 struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i]; for_each_online_cpu() 291 hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)]; do_profile_hits() 340 per_cpu(cpu_profile_flip, cpu) = 0; profile_cpu_callback() 341 if (!per_cpu(cpu_profile_hits, cpu)[1]) { profile_cpu_callback() 347 per_cpu(cpu_profile_hits, cpu)[1] = page_address(page); profile_cpu_callback() 349 if (!per_cpu(cpu_profile_hits, cpu)[0]) { profile_cpu_callback() 355 per_cpu(cpu_profile_hits, cpu)[0] = page_address(page); profile_cpu_callback() 359 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]); profile_cpu_callback() 360 per_cpu(cpu_profile_hits, cpu)[1] = NULL; profile_cpu_callback() 374 if (per_cpu(cpu_profile_hits, cpu)[0]) { profile_cpu_callback() 375 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); profile_cpu_callback() 376 per_cpu(cpu_profile_hits, cpu)[0] = NULL; profile_cpu_callback() 379 if (per_cpu(cpu_profile_hits, cpu)[1]) { profile_cpu_callback() 380 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]); profile_cpu_callback() 381 per_cpu(cpu_profile_hits, cpu)[1] = NULL; profile_cpu_callback() 551 per_cpu(cpu_profile_hits, cpu)[1] for_each_online_cpu() 558 per_cpu(cpu_profile_hits, cpu)[0] for_each_online_cpu() 569 if (per_cpu(cpu_profile_hits, cpu)[0]) { for_each_online_cpu() 570 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); for_each_online_cpu() 571 per_cpu(cpu_profile_hits, cpu)[0] = NULL; for_each_online_cpu() 574 if (per_cpu(cpu_profile_hits, cpu)[1]) { for_each_online_cpu() 575 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]); for_each_online_cpu() 576 per_cpu(cpu_profile_hits, cpu)[1] = NULL; for_each_online_cpu()
|
H A D | smpboot.c | 30 struct task_struct *tsk = per_cpu(idle_threads, cpu); idle_thread_get() 40 per_cpu(idle_threads, smp_processor_id()) = current; idle_thread_set_boot_cpu() 51 struct task_struct *tsk = per_cpu(idle_threads, cpu); idle_init() 58 per_cpu(idle_threads, cpu) = tsk; idle_init() 275 * smpboot_register_percpu_thread_cpumask - Register a per_cpu thread related 313 * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug 331 * smpboot_update_cpumask_percpu_thread - Adjust which per_cpu hotplug threads stay parked 381 return atomic_read(&per_cpu(cpu_hotplug_state, cpu)); cpu_report_state() 399 atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE); cpu_check_up_prepare() 403 switch (atomic_read(&per_cpu(cpu_hotplug_state, cpu))) { cpu_check_up_prepare() 408 atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_UP_PREPARE); cpu_check_up_prepare() 454 (void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE); cpu_set_state_online() 472 if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD) cpu_wait_death() 477 while (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) != CPU_DEAD) { cpu_wait_death() 485 oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu)); cpu_wait_death() 489 atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD); cpu_wait_death() 492 if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu), cpu_wait_death() 516 oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu)); cpu_report_death() 521 } while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu), cpu_report_death()
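
smpboot.c caches one idle task per CPU: the boot CPU registers current as its own idle thread, and secondaries get theirs forked once and reused across hotplug. The lookup side is just a per-CPU pointer read; a sketch under those assumptions:

    #include <linux/err.h>
    #include <linux/percpu.h>
    #include <linux/sched.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(struct task_struct *, my_idle_threads);

    static void my_idle_set_boot_cpu(void)
    {
            per_cpu(my_idle_threads, smp_processor_id()) = current;
    }

    static struct task_struct *my_idle_thread_get(unsigned int cpu)
    {
            struct task_struct *tsk = per_cpu(my_idle_threads, cpu);

            return tsk ? tsk : ERR_PTR(-ENOMEM);  /* never forked for this CPU */
    }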
|
H A D | softirq.c | 639 per_cpu(tasklet_vec, cpu).tail = for_each_possible_cpu() 640 &per_cpu(tasklet_vec, cpu).head; for_each_possible_cpu() 641 per_cpu(tasklet_hi_vec, cpu).tail = for_each_possible_cpu() 642 &per_cpu(tasklet_hi_vec, cpu).head; for_each_possible_cpu() 691 for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) { tasklet_kill_immediate() 696 per_cpu(tasklet_vec, cpu).tail = i; tasklet_kill_immediate() 709 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { takeover_tasklets() 710 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; takeover_tasklets() 711 this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail); takeover_tasklets() 712 per_cpu(tasklet_vec, cpu).head = NULL; takeover_tasklets() 713 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head; takeover_tasklets() 717 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) { takeover_tasklets() 718 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head; takeover_tasklets() 719 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail); takeover_tasklets() 720 per_cpu(tasklet_hi_vec, cpu).head = NULL; takeover_tasklets() 721 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head; takeover_tasklets()
|
H A D | stop_machine.c | 86 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); cpu_stop_queue_work() 330 work = &per_cpu(cpu_stopper.stop_work, cpu); for_each_cpu() 421 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); cpu_stop_should_run() 433 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); cpu_stopper_thread() 474 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); stop_machine_park() 488 sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu)); cpu_stop_create() 493 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); cpu_stop_park() 500 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); stop_machine_unpark() 521 struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); for_each_possible_cpu()
|
H A D | taskstats.c | 306 listeners = &per_cpu(listener_array, cpu); for_each_cpu() 324 listeners = &per_cpu(listener_array, cpu); for_each_cpu() 688 INIT_LIST_HEAD(&(per_cpu(listener_array, i).list)); for_each_possible_cpu() 689 init_rwsem(&(per_cpu(listener_array, i).sem)); for_each_possible_cpu()
|
H A D | context_tracking.c | 192 if (!per_cpu(context_tracking.active, cpu)) { context_tracking_cpu_set() 193 per_cpu(context_tracking.active, cpu) = true; context_tracking_cpu_set()
|
H A D | watchdog.c | 248 per_cpu(watchdog_touch_ts, cpu) = 0; touch_all_softlockup_watchdogs() 575 struct perf_event *event = per_cpu(watchdog_ev, cpu); watchdog_nmi_enable() 638 per_cpu(watchdog_ev, cpu) = event; watchdog_nmi_enable() 640 perf_event_enable(per_cpu(watchdog_ev, cpu)); watchdog_nmi_enable() 647 struct perf_event *event = per_cpu(watchdog_ev, cpu); watchdog_nmi_disable() 651 per_cpu(watchdog_ev, cpu) = NULL; watchdog_nmi_disable() 695 ret = kthread_park(per_cpu(softlockup_watchdog, cpu)); for_each_watchdog_cpu() 714 kthread_unpark(per_cpu(softlockup_watchdog, cpu)); watchdog_unpark_threads()
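
The NMI watchdog parks its perf event in per_cpu(watchdog_ev, cpu) so enable and disable can be driven for any CPU from the hotplug and sysctl paths. The disable half, sketched with hypothetical names:

    #include <linux/percpu.h>
    #include <linux/perf_event.h>

    static DEFINE_PER_CPU(struct perf_event *, my_watchdog_ev);

    static void my_nmi_disable(unsigned int cpu)
    {
            struct perf_event *event = per_cpu(my_watchdog_ev, cpu);

            if (event) {
                    perf_event_disable(event);            /* stop counting first */
                    per_cpu(my_watchdog_ev, cpu) = NULL;  /* then forget the slot */
            }
    }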
|
/linux-4.4.14/drivers/oprofile/ |
H A D | nmi_timer_int.c | 38 struct perf_event *event = per_cpu(nmi_timer_events, cpu); nmi_timer_start_cpu() 45 per_cpu(nmi_timer_events, cpu) = event; nmi_timer_start_cpu() 56 struct perf_event *event = per_cpu(nmi_timer_events, cpu); nmi_timer_stop_cpu() 114 event = per_cpu(nmi_timer_events, cpu); for_each_possible_cpu() 118 per_cpu(nmi_timer_events, cpu) = NULL; for_each_possible_cpu()
|
H A D | oprofile_perf.c | 42 if (per_cpu(perf_events, cpu)[id] == event) op_overflow_handler() 78 if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event]) op_create_counter() 95 per_cpu(perf_events, cpu)[event] = pevent; op_create_counter() 102 struct perf_event *pevent = per_cpu(perf_events, cpu)[event]; op_destroy_counter() 106 per_cpu(perf_events, cpu)[event] = NULL; op_destroy_counter() 261 event = per_cpu(perf_events, cpu)[id]; for_each_possible_cpu() 266 kfree(per_cpu(perf_events, cpu)); for_each_possible_cpu() 300 per_cpu(perf_events, cpu) = kcalloc(num_counters, for_each_possible_cpu() 302 if (!per_cpu(perf_events, cpu)) { for_each_possible_cpu()
|
H A D | oprofile_stats.c | 26 cpu_buf = &per_cpu(op_cpu_buffer, i); for_each_possible_cpu() 54 cpu_buf = &per_cpu(op_cpu_buffer, i); for_each_possible_cpu()
|
H A D | timer_int.c | 58 struct hrtimer *hrtimer = &per_cpu(oprofile_hrtimer, cpu); __oprofile_hrtimer_stop()
|
H A D | cpu_buffer.h | 64 struct oprofile_cpu_buffer *cpu_buf = &per_cpu(op_cpu_buffer, cpu); op_cpu_buffer_reset()
|
H A D | cpu_buffer.c | 75 struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); for_each_possible_cpu() 102 struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); for_each_online_cpu() 122 struct oprofile_cpu_buffer *b = &per_cpu(op_cpu_buffer, i); for_each_online_cpu()
|
/linux-4.4.14/arch/sparc/include/asm/ |
H A D | cpudata_32.h | 28 #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
|
H A D | cpudata_64.h | 33 #define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
|
H A D | topology_64.h | 47 #define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
|
/linux-4.4.14/drivers/cpufreq/ |
H A D | sh-cpufreq.c | 35 return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000; sh_cpufreq_get() 46 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); sh_cpufreq_target() 83 struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); sh_cpufreq_verify() 102 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); sh_cpufreq_cpu_init() 144 struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); sh_cpufreq_cpu_exit()
|
H A D | speedstep-centrino.c | 259 per_cpu(centrino_model, policy->cpu) = model; centrino_cpu_init_table() 294 if ((per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_BANIAS]) || extract_clock() 295 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_A1]) || extract_clock() 296 (per_cpu(centrino_cpu, cpu) == &cpu_ids[CPU_DOTHAN_B0])) { extract_clock() 301 if ((!per_cpu(centrino_model, cpu)) || extract_clock() 302 (!per_cpu(centrino_model, cpu)->op_points)) extract_clock() 307 per_cpu(centrino_model, cpu)->op_points[i].frequency extract_clock() 310 if (msr == per_cpu(centrino_model, cpu)->op_points[i].driver_data) extract_clock() 311 return per_cpu(centrino_model, cpu)-> extract_clock() 315 return per_cpu(centrino_model, cpu)->op_points[i-1].frequency; extract_clock() 365 per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i]; centrino_cpu_init() 367 if (!per_cpu(centrino_cpu, policy->cpu)) { centrino_cpu_init() 399 per_cpu(centrino_model, policy->cpu)->op_points); centrino_cpu_init() 406 if (!per_cpu(centrino_model, cpu)) centrino_cpu_exit() 409 per_cpu(centrino_model, cpu) = NULL; centrino_cpu_exit() 432 if (unlikely(per_cpu(centrino_model, cpu) == NULL)) { centrino_target() 438 op_points = &per_cpu(centrino_model, cpu)->op_points[index]; centrino_target()
|
H A D | cpufreq_userspace.c | 38 if (!per_cpu(cpu_is_managed, policy->cpu)) cpufreq_set() 64 per_cpu(cpu_is_managed, cpu) = 1; cpufreq_governor_userspace() 71 per_cpu(cpu_is_managed, cpu) = 0; cpufreq_governor_userspace()
|
H A D | arm_big_little.c | 88 cpu_freq = per_cpu(cpu_last_req_freq, j); for_each_online_cpu() 90 if ((cluster == per_cpu(physical_cluster, j)) && for_each_online_cpu() 103 u32 cur_cluster = per_cpu(physical_cluster, cpu); clk_get_cpu_rate() 119 pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq, bL_cpufreq_get_rate() 122 return per_cpu(cpu_last_req_freq, cpu); bL_cpufreq_get_rate() 138 prev_rate = per_cpu(cpu_last_req_freq, cpu); bL_cpufreq_set_rate() 139 per_cpu(cpu_last_req_freq, cpu) = rate; bL_cpufreq_set_rate() 140 per_cpu(physical_cluster, cpu) = new_cluster; bL_cpufreq_set_rate() 169 per_cpu(cpu_last_req_freq, cpu) = prev_rate; bL_cpufreq_set_rate() 170 per_cpu(physical_cluster, cpu) = old_cluster; bL_cpufreq_set_rate() 216 new_cluster = actual_cluster = per_cpu(physical_cluster, cpu); bL_cpufreq_set_target() 474 per_cpu(physical_cluster, cpu) = cur_cluster; bL_cpufreq_init() 477 per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER; bL_cpufreq_init() 487 per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); bL_cpufreq_init()
|
H A D | cpufreq_ondemand.c | 42 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); ondemand_powersave_bias_init_cpu() 83 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, generic_powersave_bias_target() 157 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu); od_check_cpu() 199 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, od_dbs_timer() 267 dbs_info = &per_cpu(od_cpu_dbs_info, cpu); for_each_online_cpu() 314 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, for_each_online_cpu() 353 struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, for_each_online_cpu() 384 dbs_info = &per_cpu(od_cpu_dbs_info, j); for_each_online_cpu() 550 shared = per_cpu(od_cpu_dbs_info, cpu).cdbs.shared; for_each_online_cpu()
|
H A D | cpufreq_conservative.c | 62 struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu); cs_check_cpu() 134 &per_cpu(cs_cpu_dbs_info, freq->cpu); dbs_cpufreq_notifier() 246 dbs_info = &per_cpu(cs_cpu_dbs_info, j); for_each_online_cpu()
|
H A D | amd_freq_sensitivity.c | 47 struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu); amd_powersave_bias_target()
|
H A D | cpufreq_governor.h | 114 return &per_cpu(_dbs_info, cpu).cdbs; \ 119 return &per_cpu(_dbs_info, cpu); \
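
This header builds the per-CPU accessors with a token-pasting macro, so each governor names its own _dbs_info variable once and gets both a base-struct getter and a full-struct getter. For the ondemand governor the expansion is roughly (a sketch, not a verbatim quote):

    /* roughly what define_get_cpu_dbs_routines(od_cpu_dbs_info) expands to */
    static struct cpu_dbs_info *get_cpu_cdbs(int cpu)
    {
            return &per_cpu(od_cpu_dbs_info, cpu).cdbs;  /* embedded common part */
    }

    static void *get_cpu_dbs_info_s(int cpu)
    {
            return &per_cpu(od_cpu_dbs_info, cpu);       /* whole governor struct */
    }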
|
/linux-4.4.14/arch/x86/kernel/ |
H A D | irq_32.c | 114 if (per_cpu(hardirq_stack, cpu)) irq_ctx_init() 120 per_cpu(hardirq_stack, cpu) = irqstk; irq_ctx_init() 125 per_cpu(softirq_stack, cpu) = irqstk; irq_ctx_init() 128 cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); irq_ctx_init()
|
H A D | setup_percpu.c | 223 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); for_each_possible_cpu() 224 per_cpu(cpu_number, cpu) = cpu; for_each_possible_cpu() 235 per_cpu(x86_cpu_to_apicid, cpu) = for_each_possible_cpu() 237 per_cpu(x86_bios_cpu_apicid, cpu) = for_each_possible_cpu() 241 per_cpu(x86_cpu_to_logical_apicid, cpu) = for_each_possible_cpu() 245 per_cpu(irq_stack_ptr, cpu) = for_each_possible_cpu() 246 per_cpu(irq_stack_union.irq_stack, cpu) + for_each_possible_cpu() 250 per_cpu(x86_cpu_to_node_map, cpu) = for_each_possible_cpu() 256 * MEMORY_HOTPLUG is defined, before per_cpu(numa_node) is set for_each_possible_cpu()
|
H A D | topology.c | 140 per_cpu(cpu_devices, num).cpu.hotpluggable = 1; 142 return register_cpu(&per_cpu(cpu_devices, num).cpu, num); 148 unregister_cpu(&per_cpu(cpu_devices, num).cpu); arch_unregister_cpu() 155 return register_cpu(&per_cpu(cpu_devices, num).cpu, num); arch_register_cpu()
|
H A D | dumpstack_32.c | 29 void *irq = per_cpu(hardirq_stack, cpu); is_hardirq_stack() 36 void *irq = per_cpu(softirq_stack, cpu); is_softirq_stack()
|
H A D | dumpstack_64.c | 44 unsigned long end = per_cpu(orig_ist, cpu).ist[k]; in_exception_stack() 157 unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu); dump_trace() 261 irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu)); show_stack_log_lvl() 262 irq_stack = (unsigned long *)(per_cpu(irq_stack_ptr, cpu) - IRQ_STACK_SIZE); show_stack_log_lvl()
|
H A D | irq.c | 57 #define irq_stats(x) (&per_cpu(irq_stat, x)) 134 seq_printf(p, "%10u ", per_cpu(mce_exception_count, j)); arch_show_interrupts() 138 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j)); arch_show_interrupts() 196 sum += per_cpu(mce_exception_count, cpu); arch_irq_stat_cpu() 197 sum += per_cpu(mce_poll_count, cpu); arch_irq_stat_cpu() 416 IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) for_each_online_cpu()
|
H A D | irqinit.c | 63 if (!IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) for_each_online_cpu() 97 per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = irq_to_desc(i); init_IRQ()
|
H A D | espfix_64.c | 149 if (likely(per_cpu(espfix_stack, cpu))) init_espfix_ap() 206 per_cpu(espfix_stack, cpu) = addr; init_espfix_ap() 207 per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page init_espfix_ap()
|
H A D | smpboot.c | 311 per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) && match_smt() 327 if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID && match_llc() 328 per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) match_llc() 812 per_cpu(current_task, cpu) = idle; common_cpu_up() 817 per_cpu(cpu_current_top_of_stack, cpu) = common_cpu_up() 1179 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); for_each_possible_cpu() 1180 zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); for_each_possible_cpu() 1181 zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL); for_each_possible_cpu()
|
H A D | ioport.c | 57 tss = &per_cpu(cpu_tss, get_cpu()); sys_ioperm()
|
H A D | kvm.c | 306 struct kvm_steal_time *st = &per_cpu(steal_time, cpu); kvm_register_steal_time() 408 src = &per_cpu(steal_time, cpu); kvm_steal_clock() 583 apicid = per_cpu(x86_cpu_to_apicid, cpu); kvm_kick_cpu() 843 const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu); kvm_unlock_kick()
|
/linux-4.4.14/arch/powerpc/platforms/ps3/ |
H A D | smp.c | 52 virq = per_cpu(ps3_ipi_virqs, cpu)[msg]; ps3_smp_message_pass() 66 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu); ps3_smp_probe() 107 unsigned int *virqs = per_cpu(ps3_ipi_virqs, cpu); ps3_smp_cleanup_cpu()
|
H A D | interrupt.c | 191 pd = &per_cpu(ps3_private, cpu); ps3_virq_setup() 695 struct ps3_private *pd = &per_cpu(ps3_private, cpu); ps3_register_ipi_debug_brk() 705 struct ps3_private *pd = &per_cpu(ps3_private, cpu); ps3_register_ipi_irq() 730 dump_bmp(&per_cpu(ps3_private, 0)); ps3_get_irq() 731 dump_bmp(&per_cpu(ps3_private, 1)); ps3_get_irq() 737 dump_bmp(&per_cpu(ps3_private, 0)); ps3_get_irq() 738 dump_bmp(&per_cpu(ps3_private, 1)); ps3_get_irq() 761 struct ps3_private *pd = &per_cpu(ps3_private, cpu); for_each_possible_cpu()
|
/linux-4.4.14/arch/s390/kernel/ |
H A D | idle.c | 54 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); show_idle_count() 71 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); show_idle_time() 89 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); arch_cpu_idle_time()
|
H A D | topology.c | 44 * Socket/Book linked lists and per_cpu(cpu_topology) updates are 99 topo = &per_cpu(cpu_topology, lcpu + i); add_cpus_to_mask() 256 topo = &per_cpu(cpu_topology, cpu); for_each_possible_cpu() 431 return &per_cpu(cpu_topology, cpu).thread_mask; cpu_thread_mask() 437 return &per_cpu(cpu_topology, cpu).core_mask; cpu_coregroup_mask() 442 return &per_cpu(cpu_topology, cpu).book_mask; cpu_book_mask()
|
H A D | processor.c | 83 struct cpuid *id = &per_cpu(cpu_id, n); show_cpuinfo()
|
/linux-4.4.14/arch/mn10300/kernel/ |
H A D | cevt-mn10300.c | 57 cd = &per_cpu(mn10300_clockevent_device, cpu); timer_interrupt() 83 cd = &per_cpu(mn10300_clockevent_device, cpu); init_clockevents() 108 iact = &per_cpu(timer_irq, cpu); init_clockevents()
|
/linux-4.4.14/arch/ia64/mm/ |
H A D | tlb.c | 93 per_cpu(ia64_need_tlb_flush, i) = 1; wrap_mmu_context() 368 per_cpu(ia64_tr_num, cpu) = 8; ia64_tlb_init() 371 per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1; ia64_tlb_init() 372 if (per_cpu(ia64_tr_num, cpu) > ia64_tlb_init() 374 per_cpu(ia64_tr_num, cpu) = ia64_tlb_init() 376 if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) { ia64_tlb_init() 378 per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX; ia64_tlb_init() 442 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); ia64_itr_entry() 454 for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu); ia64_itr_entry() 465 for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) { ia64_itr_entry() 486 if (i >= per_cpu(ia64_tr_num, cpu)) ia64_itr_entry() 490 if (i > per_cpu(ia64_tr_used, cpu)) ia64_itr_entry() 491 per_cpu(ia64_tr_used, cpu) = i; ia64_itr_entry() 533 if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu)) ia64_ptr_entry() 554 for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) { ia64_ptr_entry() 559 per_cpu(ia64_tr_used, cpu) = i; ia64_ptr_entry()
|
/linux-4.4.14/arch/arm/mm/ |
H A D | context.c | 70 asid = per_cpu(active_asids, cpu).counter; for_each_online_cpu() 72 asid = per_cpu(reserved_asids, cpu); for_each_online_cpu() 147 asid = atomic64_xchg(&per_cpu(active_asids, i), 0); for_each_possible_cpu() 156 asid = per_cpu(reserved_asids, i); for_each_possible_cpu() 158 per_cpu(reserved_asids, i) = asid; for_each_possible_cpu() 183 if (per_cpu(reserved_asids, cpu) == asid) { for_each_possible_cpu() 185 per_cpu(reserved_asids, cpu) = newasid; for_each_possible_cpu() 258 && atomic64_xchg(&per_cpu(active_asids, cpu), asid)) check_and_switch_context() 274 atomic64_set(&per_cpu(active_asids, cpu), asid); check_and_switch_context()
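
The ARM ASID allocator pairs an atomic per-CPU "active" slot, which the fast path xchg's against, with a plain per-CPU "reserved" snapshot taken at generation rollover. A simplified sketch of the rollover walk (the real code also handles a CPU that already rolled over, elided here):

    #include <linux/atomic.h>
    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    static DEFINE_PER_CPU(atomic64_t, my_active_asids);
    static DEFINE_PER_CPU(u64, my_reserved_asids);

    static void my_flush_context(void)
    {
            unsigned int i;
            u64 asid;

            for_each_possible_cpu(i) {
                    /* atomically steal the active ASID; 0 now means "needs a new one" */
                    asid = atomic64_xchg(&per_cpu(my_active_asids, i), 0);
                    if (asid)
                            per_cpu(my_reserved_asids, i) = asid;
            }
    }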
|
/linux-4.4.14/arch/s390/oprofile/ |
H A D | hwsampler.c | 86 cb = &per_cpu(sampler_cpu_buffer, cpu); smp_ctl_ssctl_stop() 116 cb = &per_cpu(sampler_cpu_buffer, cpu); smp_ctl_ssctl_deactivate() 142 cb = &per_cpu(sampler_cpu_buffer, cpu); smp_ctl_ssctl_enable_activate() 170 cb = &per_cpu(sampler_cpu_buffer, cpu); smp_ctl_qsi() 207 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu() 218 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu() 251 cb = &per_cpu(sampler_cpu_buffer, cpu); allocate_sdbt() 338 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu() 382 cb = &per_cpu(sampler_cpu_buffer, cpu); start_sampling() 419 cb = &per_cpu(sampler_cpu_buffer, cpu); stop_sampling() 498 cb = &per_cpu(sampler_cpu_buffer, cpu); hws_oom_callback() 555 cb = &per_cpu(sampler_cpu_buffer, cpu); hwsampler_deactivate() 599 cb = &per_cpu(sampler_cpu_buffer, cpu); hwsampler_activate() 626 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu() 657 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu() 677 cb = &per_cpu(sampler_cpu_buffer, cpu); worker_on_start() 688 cb = &per_cpu(sampler_cpu_buffer, cpu); worker_check_error() 723 cb = &per_cpu(sampler_cpu_buffer, cpu); worker_on_finish() 757 cb = &per_cpu(sampler_cpu_buffer, cpu); worker_on_interrupt() 970 cb = &per_cpu(sampler_cpu_buffer, cpu); hwsampler_get_sample_overflow_count() 1007 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu() 1159 cb = &per_cpu(sampler_cpu_buffer, cpu); for_each_online_cpu()
|
/linux-4.4.14/kernel/events/ |
H A D | hw_breakpoint.c | 246 * (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu) 247 * + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM 257 * (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *)) 258 * + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM 269 * ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu) 270 * + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM 277 * ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *)) 278 * + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM 496 * @return a set of per_cpu pointers to perf events 520 per_cpu(*cpu_events, cpu) = bp; for_each_online_cpu() 541 unregister_hw_breakpoint(per_cpu(*cpu_events, cpu)); unregister_wide_hw_breakpoint()
|
/linux-4.4.14/arch/powerpc/include/asm/ |
H A D | topology.h | 90 #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) 91 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
|
H A D | smp.h | 98 return per_cpu(cpu_sibling_map, cpu); cpu_sibling_mask() 103 return per_cpu(cpu_core_map, cpu); cpu_core_mask()
|
/linux-4.4.14/arch/parisc/kernel/ |
H A D | irq.c | 89 per_cpu(local_ack_eiem, cpu) &= ~mask; cpu_ack_irq() 92 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)); cpu_ack_irq() 104 per_cpu(local_ack_eiem, cpu) |= mask; cpu_eoi_irq() 107 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)); cpu_eoi_irq() 156 #define irq_stats(x) (&per_cpu(irq_stat, x)) 345 return per_cpu(cpu_data, cpu).txn_addr; txn_affinity_addr() 357 (!per_cpu(cpu_data, next_cpu).txn_addr || txn_alloc_addr() 423 stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack; stack_overflow_check() 426 last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu); stack_overflow_check() 442 last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu); stack_overflow_check() 472 union_ptr = &per_cpu(irq_stack_union, smp_processor_id()); execute_on_irq_stack() 519 eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu); do_cpu_irq_mask() 539 per_cpu(cpu_data, cpu).hpa); do_cpu_irq_mask() 557 set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu)); do_cpu_irq_mask()
|
H A D | topology.c | 32 register_cpu(&per_cpu(cpu_devices, num), num); for_each_present_cpu()
|
H A D | smp.c | 124 struct cpuinfo_parisc *p = &per_cpu(cpu_data, this_cpu); ipi_interrupt() 129 spinlock_t *lock = &per_cpu(ipi_lock, this_cpu); ipi_interrupt() 191 struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpu); ipi_send() 192 spinlock_t *lock = &per_cpu(ipi_lock, cpu); ipi_send() 319 const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid); smp_boot_one_cpu() 377 int bootstrap_processor = per_cpu(cpu_data, 0).cpuid; smp_prepare_boot_cpu() 397 spin_lock_init(&per_cpu(ipi_lock, cpu)); smp_prepare_cpus()
|
H A D | processor.c | 81 p = &per_cpu(cpu_data, cpunum); init_percpu_prof() 172 p = &per_cpu(cpu_data, cpuid); processor_probe() 316 per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; init_per_cpu() 317 per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; init_per_cpu() 356 const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); for_each_online_cpu()
|
H A D | time.c | 65 struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); timer_interrupt() 224 per_cpu(cpu_data, cpu).it_value = next_tick; start_cpu_itimer()
|
/linux-4.4.14/arch/x86/kernel/cpu/mcheck/ |
H A D | mce_amd.c | 283 per_cpu(bank_map, cpu) |= (1 << bank); mce_amd_feature_init() 394 if (!(per_cpu(bank_map, cpu) & (1 << bank))) amd_threshold_interrupt() 613 if (per_cpu(threshold_banks, cpu)[bank]->blocks) { allocate_threshold_blocks() 615 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj); allocate_threshold_blocks() 617 per_cpu(threshold_banks, cpu)[bank]->blocks = b; allocate_threshold_blocks() 621 per_cpu(threshold_banks, cpu)[bank]->kobj, allocate_threshold_blocks() 679 struct device *dev = per_cpu(mce_device, cpu); threshold_create_bank() 696 per_cpu(threshold_banks, cpu)[bank] = b; threshold_create_bank() 717 per_cpu(threshold_banks, cpu)[bank] = b; threshold_create_bank() 752 per_cpu(threshold_banks, cpu) = bp; threshold_create_device() 755 if (!(per_cpu(bank_map, cpu) & (1 << bank))) threshold_create_device() 770 struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank]; deallocate_threshold_block() 781 kfree(per_cpu(threshold_banks, cpu)[bank]->blocks); deallocate_threshold_block() 782 per_cpu(threshold_banks, cpu)[bank]->blocks = NULL; deallocate_threshold_block() 801 b = per_cpu(threshold_banks, cpu)[bank]; threshold_remove_bank() 811 per_cpu(threshold_banks, cpu)[bank] = NULL; threshold_remove_bank() 829 per_cpu(threshold_banks, cpu)[bank] = NULL; threshold_remove_bank() 837 if (!(per_cpu(bank_map, cpu) & (1 << bank))) threshold_remove_device() 841 kfree(per_cpu(threshold_banks, cpu)); threshold_remove_device()
|
H A D | mce-inject.c | 34 struct mce *i = &per_cpu(injectm, m->extcpu); inject_mce() 164 struct mce *mcpu = &per_cpu(injectm, cpu); for_each_online_cpu()
|
H A D | therm_throt.c | 101 per_cpu(thermal_state, cpu).event.name); \ 157 struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu); therm_throt_process() 214 struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu); thresh_event_valid()
|
/linux-4.4.14/arch/arm64/mm/ |
H A D | context.c | 58 asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0); for_each_possible_cpu() 67 asid = per_cpu(reserved_asids, i); for_each_possible_cpu() 69 per_cpu(reserved_asids, i) = asid; for_each_possible_cpu() 94 if (per_cpu(reserved_asids, cpu) == asid) { for_each_possible_cpu() 96 per_cpu(reserved_asids, cpu) = newasid; for_each_possible_cpu() 167 && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid)) check_and_switch_context() 181 atomic64_set(&per_cpu(active_asids, cpu), asid); check_and_switch_context()
|
/linux-4.4.14/drivers/xen/events/ |
H A D | events_2l.c | 51 clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu))); evtchn_2l_bind_to_cpu() 52 set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu))); evtchn_2l_bind_to_cpu() 149 per_cpu(cpu_evtchn_mask, cpu)[idx] & active_evtchns() 268 xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu); xen_debug_interrupt() 280 v = per_cpu(xen_vcpu, i); for_each_online_cpu() 289 v = per_cpu(xen_vcpu, cpu); 353 memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) * evtchn_2l_resume()
|
H A D | events_fifo.c | 105 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); init_control_block() 287 struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu); consume_one_event() 333 control_block = per_cpu(cpu_control_block, cpu); __evtchn_fifo_handle_events() 354 void *control_block = per_cpu(cpu_control_block, cpu); for_each_possible_cpu() 367 per_cpu(cpu_control_block, cpu) = NULL; for_each_possible_cpu() 412 per_cpu(cpu_control_block, cpu) = control_block; evtchn_fifo_alloc_control_block() 430 if (!per_cpu(cpu_control_block, cpu)) evtchn_fifo_cpu_notification()
|
H A D | events_base.c | 203 per_cpu(ipi_to_irq, cpu)[ipi] = irq; xen_irq_info_ipi_setup() 217 per_cpu(virq_to_irq, cpu)[virq] = irq; xen_irq_info_virq_setup() 264 return per_cpu(virq_to_irq, cpu)[virq]; irq_from_virq() 628 per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1; __unbind_from_irq() 631 per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1; __unbind_from_irq() 888 irq = per_cpu(ipi_to_irq, cpu)[ipi]; bind_ipi_to_irq() 979 irq = per_cpu(virq_to_irq, cpu)[virq]; bind_virq_to_irq() 1220 irq = per_cpu(ipi_to_irq, cpu)[vector]; xen_send_IPI_one() 1454 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) restore_cpu_virqs() 1479 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) restore_cpu_ipis()
|
/linux-4.4.14/arch/x86/include/asm/ |
H A D | topology.h | 64 /* Same function but used if called before per_cpu areas are setup */ early_cpu_to_node() 126 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) setup_node_to_cpumask_map() 127 #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) setup_node_to_cpumask_map()
|
H A D | smp.h | 42 return per_cpu(cpu_llc_shared_map, cpu); cpu_llc_shared_mask() 162 #define cpu_physical_id(cpu) per_cpu(x86_cpu_to_apicid, cpu)
|
H A D | preempt.h | 36 per_cpu(__preempt_count, (cpu)) = PREEMPT_ENABLED; \
|
H A D | percpu.h | 561 * Define the "EARLY_PER_CPU" macros. These are used for some per_cpu 562 * variables that are initialized and accessed before there are per_cpu 596 &per_cpu(_name, _cpu)) 614 #define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
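
The EARLY_PER_CPU machinery exists because maps like x86_cpu_to_apicid are read before the per-CPU areas are set up: until then, accesses are redirected to a boot-time array, and afterwards they collapse to a plain per_cpu(). The SMP-side accessor is built roughly like this (quoted approximately, so treat it as a sketch):

    /* If the early pointer is still set, per-CPU areas aren't up yet:
     * index the static boot-time array. Otherwise use the real copy. */
    #define early_per_cpu(_name, _cpu)                      \
            *(early_per_cpu_ptr(_name) ?                    \
              &early_per_cpu_ptr(_name)[_cpu] :             \
              &per_cpu(_name, _cpu))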
|
H A D | stackprotector.h | 89 unsigned long canary = (unsigned long)&per_cpu(stack_canary, cpu); setup_stack_canary_segment()
|
/linux-4.4.14/scripts/gdb/linux/ |
H A D | cpus.py | 36 def per_cpu(var_ptr, cpu): function 115 return per_cpu(var_ptr, cpu) 132 return per_cpu(var_ptr, cpu).dereference()
|
/linux-4.4.14/arch/tile/include/asm/ |
H A D | hardirq.h | 41 #define __IRQ_STAT(cpu, member) (per_cpu(irq_stat, cpu).member)
|
/linux-4.4.14/include/linux/ |
H A D | topology.h | 84 return per_cpu(numa_node, cpu); cpu_to_node() 98 per_cpu(numa_node, cpu) = node; set_cpu_numa_node() 150 return per_cpu(_numa_mem_, cpu); cpu_to_mem() 157 per_cpu(_numa_mem_, cpu) = node; set_cpu_numa_mem()
|
H A D | kernel_stat.h | 49 #define kstat_cpu(cpu) per_cpu(kstat, cpu) 50 #define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
|
/linux-4.4.14/arch/sh/kernel/ |
H A D | localtimer.c | 44 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); local_timer_setup()
|
H A D | smp.c | 80 per_cpu(cpu_state, cpu) = CPU_ONLINE; smp_prepare_boot_cpu() 90 if (per_cpu(cpu_state, cpu) == CPU_DEAD) { native_cpu_die() 204 per_cpu(cpu_state, cpu) = CPU_ONLINE; start_secondary() 222 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; __cpu_up()
|
H A D | topology.c | 58 struct cpu *c = &per_cpu(cpu_devices, i); topology_init()
|
/linux-4.4.14/arch/sparc/kernel/ |
H A D | nmi.c | 60 if (per_cpu(nmi_touch, cpu) != 1) for_each_present_cpu() 61 per_cpu(nmi_touch, cpu) = 1; for_each_present_cpu() 150 per_cpu(wd_enabled, cpu) = 0; report_broken_nmi() 185 if (!per_cpu(wd_enabled, cpu)) for_each_online_cpu()
|
H A D | sysfs.c | 22 struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \ 135 ra = __pa(&per_cpu(mmu_stats, smp_processor_id())); write_mmustat_enable() 226 struct cpu *c = &per_cpu(cpu_devices, cpu); register_cpu_online() 239 struct cpu *c = &per_cpu(cpu_devices, cpu); unregister_cpu_online() 306 struct cpu *c = &per_cpu(cpu_devices, cpu); for_each_possible_cpu()
|
H A D | sun4d_smp.c | 200 work = &per_cpu(sun4d_ipi_work, cpu); for_each_possible_cpu() 238 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); sun4d_ipi_single() 249 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); sun4d_ipi_mask_one() 260 struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); sun4d_ipi_resched() 383 ce = &per_cpu(sparc32_clockevent, cpu); smp4d_percpu_timer_interrupt()
|
H A D | leon_smp.c | 299 work = &per_cpu(leon_ipi_work, cpu); for_each_possible_cpu() 313 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); leon_ipi_single() 324 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); leon_ipi_mask_one() 335 struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); leon_ipi_resched()
|
H A D | time_64.c | 623 struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu); sparc64_get_clock_tick() 638 struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu); sparc64_cpufreq_notifier() 698 struct clock_event_device *evt = &per_cpu(sparc64_events, cpu); timer_interrupt()
|
/linux-4.4.14/arch/mips/math-emu/ |
H A D | me-debugfs.c | 22 ps = &per_cpu(fpuemustats, cpu); for_each_online_cpu()
|
/linux-4.4.14/arch/arm/mach-alpine/ |
H A D | alpine_cpu_resume.h | 31 struct al_cpu_resume_regs_per_cpu per_cpu[]; member in struct:al_cpu_resume_regs
|
H A D | alpine_cpu_pm.c | 45 &al_cpu_resume_regs->per_cpu[phys_cpu].resume_addr); alpine_cpu_wakeup()
|
/linux-4.4.14/mm/ |
H A D | kmemleak-test.c | 89 per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL); for_each_possible_cpu() 91 per_cpu(kmemleak_test_pointer, i)); for_each_possible_cpu()
|
H A D | quicklist.c | 96 ql = per_cpu(quicklist, cpu); for_each_online_cpu()
|
H A D | swap.c | 525 struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu); activate_page_drain() 533 return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0; need_activate_page_drain() 809 struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu); lru_add_drain_cpu() 814 pvec = &per_cpu(lru_rotate_pvecs, cpu); lru_add_drain_cpu() 824 pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); lru_add_drain_cpu() 881 struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); for_each_online_cpu() 883 if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) || for_each_online_cpu() 884 pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) || for_each_online_cpu() 885 pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) || for_each_online_cpu() 894 flush_work(&per_cpu(lru_add_drain_work, cpu));
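
lru_add_drain_all() fans out one per-CPU work item, but only to CPUs whose pagevecs actually hold pages, then flushes the queued items. A sketch of that fan-out shape; my_cpu_has_work() is a hypothetical predicate standing in for the pagevec_count() checks above, and each work item is assumed to have been INIT_WORK()'d with the drain callback (flush_work() on a never-queued item is a no-op):

    #include <linux/cpu.h>
    #include <linux/percpu.h>
    #include <linux/workqueue.h>

    static DEFINE_PER_CPU(struct work_struct, my_drain_work);

    static void my_drain_all(void)
    {
            int cpu;

            get_online_cpus();                    /* keep the CPU set stable */
            for_each_online_cpu(cpu) {
                    if (my_cpu_has_work(cpu))     /* skip idle CPUs cheaply */
                            schedule_work_on(cpu, &per_cpu(my_drain_work, cpu));
            }
            for_each_online_cpu(cpu)
                    flush_work(&per_cpu(my_drain_work, cpu));
            put_online_cpus();
    }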
|
/linux-4.4.14/kernel/trace/ |
H A D | trace_stack.c | 211 if (per_cpu(trace_active, cpu)++ != 0) stack_trace_call() 219 per_cpu(trace_active, cpu)--; stack_trace_call() 265 per_cpu(trace_active, cpu)++; stack_max_size_write() 271 per_cpu(trace_active, cpu)--; stack_max_size_write() 310 per_cpu(trace_active, cpu)++; t_start() 327 per_cpu(trace_active, cpu)--; t_stop()
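
The stack tracer's per-CPU depth counter doubles as a recursion guard: the post-increment test admits only the outermost entry on each CPU, and the decrement on the way out keeps the counter balanced even on the early-exit path. Sketch with illustrative names:

    #include <linux/percpu.h>
    #include <linux/preempt.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(int, my_trace_active);

    static void my_traced_hook(void)
    {
            int cpu;

            preempt_disable_notrace();            /* pin ourselves to one CPU */
            cpu = raw_smp_processor_id();
            if (per_cpu(my_trace_active, cpu)++ != 0)
                    goto out;                     /* already inside on this CPU */

            /* ... do the actual (possibly re-entrant) work here ... */
     out:
            per_cpu(my_trace_active, cpu)--;
            preempt_enable_notrace();
    }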
|
H A D | trace_irqsoff.c | 108 if (likely(!per_cpu(tracing_cpu, cpu))) func_prolog_dec() 162 per_cpu(tracing_cpu, cpu) = 0; irqsoff_display_graph() 366 if (per_cpu(tracing_cpu, cpu)) start_critical_timing() 384 per_cpu(tracing_cpu, cpu) = 1; start_critical_timing() 399 if (unlikely(per_cpu(tracing_cpu, cpu))) stop_critical_timing() 400 per_cpu(tracing_cpu, cpu) = 0; stop_critical_timing()
|
/linux-4.4.14/arch/powerpc/kernel/ |
H A D | irq.c | 361 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event); arch_show_interrupts() 366 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others); arch_show_interrupts() 371 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs); arch_show_interrupts() 376 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs); arch_show_interrupts() 381 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions); arch_show_interrupts() 388 per_cpu(irq_stat, j).hmi_exceptions); arch_show_interrupts() 396 seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs); arch_show_interrupts() 409 u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event; arch_irq_stat_cpu() 411 sum += per_cpu(irq_stat, cpu).pmu_irqs; arch_irq_stat_cpu() 412 sum += per_cpu(irq_stat, cpu).mce_exceptions; arch_irq_stat_cpu() 413 sum += per_cpu(irq_stat, cpu).spurious_irqs; arch_irq_stat_cpu() 414 sum += per_cpu(irq_stat, cpu).timer_irqs_others; arch_irq_stat_cpu() 415 sum += per_cpu(irq_stat, cpu).hmi_exceptions; arch_irq_stat_cpu() 417 sum += per_cpu(irq_stat, cpu).doorbell_irqs; arch_irq_stat_cpu()
|
H A D | smp.c | 216 struct cpu_messages *info = &per_cpu(ipi_message, cpu); smp_muxed_ipi_set_data() 223 struct cpu_messages *info = &per_cpu(ipi_message, cpu); smp_muxed_ipi_message_pass() 352 per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); smp_store_cpu_info() 354 per_cpu(next_tlbcam_idx, id) smp_store_cpu_info() 376 zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), for_each_possible_cpu() 378 zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), for_each_possible_cpu() 430 if (per_cpu(cpu_state, cpu) == CPU_DEAD) generic_cpu_die() 439 per_cpu(cpu_state, cpu) = CPU_DEAD; generic_set_cpu_dead() 449 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; generic_set_cpu_up() 454 return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; generic_check_cpu_restart()
|
H A D | sysfs.c | 53 per_cpu(smt_snooze_delay, cpu->dev.id) = snooze; store_smt_snooze_delay() 63 return sprintf(buf, "%ld\n", per_cpu(smt_snooze_delay, cpu->dev.id)); show_smt_snooze_delay() 79 per_cpu(smt_snooze_delay, cpu) = snooze; setup_smt_snooze_delay() 708 struct cpu *c = &per_cpu(cpu_devices, cpu); register_cpu_online() 790 struct cpu *c = &per_cpu(cpu_devices, cpu); unregister_cpu_online() 1033 struct cpu *c = &per_cpu(cpu_devices, cpu); for_each_possible_cpu()
|
/linux-4.4.14/arch/arm/mach-omap2/ |
H A D | omap-mpuss-lowpower.c | 121 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); set_cpu_wakeup_addr() 132 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); scu_pwrst_prepare() 186 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id); l2x0_pwrst_prepare() 229 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); omap4_enter_lowpower() 312 struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu); omap4_hotplug_cpu() 373 pm_info = &per_cpu(omap4_pm_info, 0x0); omap4_mpuss_init() 393 pm_info = &per_cpu(omap4_pm_info, 0x1); omap4_mpuss_init()
|
/linux-4.4.14/arch/metag/kernel/ |
H A D | smp.c | 329 struct cpuinfo_metag *cpu_info = &per_cpu(cpu_data, cpuid); smp_store_cpu_info() 364 per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT); secondary_start_kernel() 366 if (!per_cpu(pTBI, cpu)) secondary_start_kernel() 408 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; smp_cpus_done() 431 per_cpu(pTBI, cpu) = __TBI(TBID_ISTAT_BIT); smp_prepare_boot_cpu() 433 if (!per_cpu(pTBI, cpu)) smp_prepare_boot_cpu() 449 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); for_each_cpu() 505 seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count); show_ipi_list() 524 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); do_IPI()
|
H A D | topology.c | 54 struct cpuinfo_metag *cpuinfo = &per_cpu(cpu_data, i); topology_init()
|
H A D | setup.c | 290 per_cpu(pTBI, cpu) = _pTBI; setup_arch() 292 if (!per_cpu(pTBI, cpu)) setup_arch() 481 lpj = per_cpu(cpu_data, i).loops_per_jiffy; for_each_online_cpu() 578 return per_cpu(pTBI, cpu); pTBI_get()
|
H A D | traps.c | 754 PTBI _pTBI = per_cpu(pTBI, cpu); traps_save_context() 770 PTBI _pTBI = per_cpu(pTBI, cpu); traps_restore_context() 786 return per_cpu(trigger_mask, cpu); _get_trigger_mask() 798 per_cpu(trigger_mask, cpu) = mask; set_trigger_mask() 837 PTBI _pTBI = per_cpu(pTBI, cpu); trap_init() 857 PTBI _pTBI = per_cpu(pTBI, cpu); tbi_startup_interrupt() 873 PTBI _pTBI = per_cpu(pTBI, cpu); tbi_shutdown_interrupt()
|
/linux-4.4.14/drivers/soc/qcom/ |
H A D | spm.c | 185 struct spm_driver_data *drv = per_cpu(cpu_spm_drv, cpu); qcom_cpu_spc() 202 return per_cpu(qcom_idle_ops, cpu)[index](cpu); qcom_idle_enter() 266 per_cpu(qcom_idle_ops, cpu) = fns; qcom_cpuidle_init() 274 return per_cpu(cpu_spm_drv, cpu) ? 0 : -ENXIO; qcom_cpuidle_init() 369 per_cpu(cpu_spm_drv, cpu) = drv; spm_dev_probe()
|
/linux-4.4.14/arch/tile/kernel/ |
H A D | smpboot.c | 97 per_cpu(boot_sp, cpu) = 0; smp_prepare_cpus() 98 per_cpu(boot_pc, cpu) = (unsigned long) smp_nap; smp_prepare_cpus() 109 per_cpu(boot_sp, cpu) = task_ksp0(idle); smp_prepare_cpus() 110 per_cpu(boot_pc, cpu) = idle->thread.pc; smp_prepare_cpus() 228 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; __cpu_up()
|
H A D | tlb.c | 42 asid->asid = per_cpu(current_asid, cpu); for_each_cpu()
|
/linux-4.4.14/lib/ |
H A D | nmi_backtrace.c | 74 * Set up per_cpu seq_buf buffers that the NMIs running on the other nmi_trigger_all_cpu_backtrace() 78 s = &per_cpu(nmi_print_seq, cpu); for_each_cpu() 103 s = &per_cpu(nmi_print_seq, cpu);
|
H A D | random32.c | 185 struct rnd_state *state = &per_cpu(net_rand_state, i); for_each_possible_cpu() 204 struct rnd_state *state = &per_cpu(net_rand_state, i); for_each_possible_cpu()
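random32.c walks every possible CPU to reseed the per-CPU PRNG state. A sketch using prandom_seed_state(), with the CPU number mixed into the seed so the streams differ (demo_rand_state is hypothetical):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/random.h>

static DEFINE_PER_CPU(struct rnd_state, demo_rand_state);

static void demo_reseed_all(u64 entropy)
{
	int i;

	for_each_possible_cpu(i) {
		struct rnd_state *state = &per_cpu(demo_rand_state, i);

		/* Identical seeds would make every CPU emit the same
		 * sequence, so perturb the seed per CPU. */
		prandom_seed_state(state, entropy + i);
	}
}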
|
/linux-4.4.14/arch/powerpc/platforms/cell/ |
H A D | cpufreq_spudemand.c | 95 info = &per_cpu(spu_gov_info, cpu); spu_gov_govern() 113 affected_info = &per_cpu(spu_gov_info, i); spu_gov_govern() 130 info = &per_cpu(spu_gov_info, i); spu_gov_govern()
|
H A D | interrupt.c | 172 return per_cpu(cpu_iic, cpu).target_id; iic_get_target_id() 187 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); iic_message_pass() 302 struct iic *iic = &per_cpu(cpu_iic, hw_cpu); init_one_iic()
|
/linux-4.4.14/arch/mips/sgi-ip27/ |
H A D | ip27-timer.c | 75 struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu); hub_rt_counter_handler() 107 struct clock_event_device *cd = &per_cpu(hub_rt_clockevent, cpu); hub_rt_clock_event_init() 108 unsigned char *name = per_cpu(hub_rt_name, cpu); hub_rt_clock_event_init()
|
/linux-4.4.14/arch/hexagon/kernel/ |
H A D | smp.c | 41 * up the... per_cpu areas. 97 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); handle_ipi() 114 struct ipi_data *ipi = &per_cpu(ipi_data, cpu); for_each_cpu()
|
H A D | time.c | 128 &per_cpu(clock_events, cpu); setup_percpu_clockdev() 143 struct clock_event_device *ce_dev = &per_cpu(clock_events, cpu); ipi_timer()
|
/linux-4.4.14/arch/powerpc/platforms/powernv/ |
H A D | subcore.c | 156 while(per_cpu(split_state, i).step < step) wait_for_sync_step() 187 per_cpu(split_state, cpu).step = SYNC_STEP_UNSPLIT; unsplit_core() 221 split_core_secondary_loop(&per_cpu(split_state, cpu).step); split_core() 253 per_cpu(split_state, smp_processor_id()).step = SYNC_STEP_FINISHED; cpu_do_split() 311 while(per_cpu(split_state, cpu).step < SYNC_STEP_FINISHED) for_each_present_cpu() 346 state = &per_cpu(split_state, cpu); for_each_present_cpu()
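subcore.c synchronizes the split by having each CPU publish its progress in a per-CPU step counter that one waiter polls. The core of that idiom, reduced (demo names are illustrative):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/compiler.h>

struct demo_split_state {
	u8 step;
};

static DEFINE_PER_CPU(struct demo_split_state, demo_state);

/* Spin until every online CPU has advanced past @step; the real
 * code does the same with split_state.step plus barrier(). */
static void demo_wait_for_sync_step(int step)
{
	int i;

	for_each_online_cpu(i)
		while (READ_ONCE(per_cpu(demo_state, i).step) < step)
			cpu_relax();
}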
|
H A D | rng.c | 94 if (per_cpu(powernv_rng, cpu) == NULL || for_each_possible_cpu() 96 per_cpu(powernv_rng, cpu) = rng; for_each_possible_cpu()
|
/linux-4.4.14/drivers/leds/trigger/ |
H A D | ledtrig-cpu.c | 129 struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); for_each_possible_cpu() 152 struct led_trigger_cpu *trig = &per_cpu(cpu_trig, cpu); for_each_possible_cpu()
|
/linux-4.4.14/drivers/cpuidle/ |
H A D | cpuidle-cps.c | 112 device = &per_cpu(cpuidle_dev, cpu); for_each_possible_cpu() 164 device = &per_cpu(cpuidle_dev, cpu); for_each_possible_cpu()
|
H A D | driver.c | 35 return per_cpu(cpuidle_drivers, cpu); __cpuidle_get_cpu_driver() 55 per_cpu(cpuidle_drivers, cpu) = NULL; __cpuidle_unset_driver() 79 per_cpu(cpuidle_drivers, cpu) = drv; __cpuidle_set_driver()
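cpuidle's driver.c keeps the active driver as one pointer per CPU, so a lookup is a plain per_cpu read and registration is a loop over the CPUs the driver covers. A sketch of that registry shape (names are hypothetical; the real code also refcounts and locks):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/cpuidle.h>

static DEFINE_PER_CPU(struct cpuidle_driver *, demo_drivers);

static struct cpuidle_driver *demo_get_driver(int cpu)
{
	return per_cpu(demo_drivers, cpu);
}

static void demo_set_driver(struct cpuidle_driver *drv)
{
	int cpu;

	/* One slot per covered CPU; caller serializes registration. */
	for_each_cpu(cpu, drv->cpumask)
		per_cpu(demo_drivers, cpu) = drv;
}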
|
H A D | cpuidle-arm.c | 154 dev = per_cpu(cpuidle_devices, cpu);
|
H A D | cpuidle.c | 434 per_cpu(cpuidle_devices, dev->cpu) = NULL; __cpuidle_unregister_device() 461 per_cpu(cpuidle_devices, dev->cpu) = dev; __cpuidle_register_device() 556 device = &per_cpu(cpuidle_dev, cpu); cpuidle_unregister() 588 device = &per_cpu(cpuidle_dev, cpu); cpuidle_register()
|
H A D | cpuidle-powernv.c | 113 per_cpu(cpuidle_devices, hotcpu); powernv_cpuidle_add_cpu_notifier()
|
H A D | cpuidle-pseries.c | 179 per_cpu(cpuidle_devices, hotcpu); pseries_cpuidle_add_cpu_notifier()
|
/linux-4.4.14/arch/m32r/kernel/ |
H A D | smp.c | 653 * 2003-06-24 hy use per_cpu structure. 669 if (--per_cpu(prof_counter, cpu_id) <= 0) { smp_local_timer_interrupt() 678 per_cpu(prof_counter, cpu_id) smp_local_timer_interrupt() 679 = per_cpu(prof_multiplier, cpu_id); smp_local_timer_interrupt() 680 if (per_cpu(prof_counter, cpu_id) smp_local_timer_interrupt() 681 != per_cpu(prof_old_multiplier, cpu_id)) smp_local_timer_interrupt() 683 per_cpu(prof_old_multiplier, cpu_id) smp_local_timer_interrupt() 684 = per_cpu(prof_counter, cpu_id); smp_local_timer_interrupt()
|
/linux-4.4.14/arch/mips/cavium-octeon/ |
H A D | octeon-irq.c | 262 raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); octeon_irq_ciu_enable() 268 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); octeon_irq_ciu_enable() 277 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); octeon_irq_ciu_enable() 365 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); for_each_online_cpu() 367 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); for_each_online_cpu() 369 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); for_each_online_cpu() 398 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); for_each_online_cpu() 400 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); for_each_online_cpu() 402 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); for_each_online_cpu() 438 set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); octeon_irq_ciu_enable_v2() 442 set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); octeon_irq_ciu_enable_v2() 586 &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); for_each_online_cpu() 593 &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); for_each_online_cpu() 616 &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); for_each_online_cpu() 623 &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); for_each_online_cpu() 758 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); for_each_online_cpu() 762 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); for_each_online_cpu() 764 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); for_each_online_cpu() 809 unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); for_each_online_cpu() 822 unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); for_each_online_cpu() 1012 raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); octeon_irq_ciu_wd_enable() 1015 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); octeon_irq_ciu_wd_enable() 1035 set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); octeon_irq_ciu1_wd_enable_v2()
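octeon-irq.c pairs a per-CPU raw spinlock with a per-CPU mirror of each hardware enable mask: the mirror is edited under the lock and then, in the real driver, written back to the CIU register. A minimal sketch with the register write elided:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

static DEFINE_PER_CPU(raw_spinlock_t, demo_lock);
static DEFINE_PER_CPU(unsigned long, demo_en_mirror);

static void __init demo_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		raw_spin_lock_init(&per_cpu(demo_lock, cpu));
}

static void demo_enable_bit(int cpu, int bit)
{
	unsigned long flags;
	raw_spinlock_t *lock = &per_cpu(demo_lock, cpu);

	raw_spin_lock_irqsave(lock, flags);
	__set_bit(bit, &per_cpu(demo_en_mirror, cpu));
	/* ... write the updated mirror to the CPU's enable register ... */
	raw_spin_unlock_irqrestore(lock, flags);
}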
|
H A D | smp.c | 257 while (per_cpu(cpu_state, cpu) != CPU_DEAD) octeon_cpu_die() 295 per_cpu(cpu_state, cpu) = CPU_DEAD; play_dead()
|
/linux-4.4.14/arch/ia64/kernel/ |
H A D | smpboot.c | 388 per_cpu(cpu_state, cpuid) = CPU_ONLINE; smp_callin() 570 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; smp_prepare_boot_cpu() 579 for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) clear_cpu_sibling_map() 580 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); clear_cpu_sibling_map() 584 per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE; clear_cpu_sibling_map() 595 cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu)); remove_siblinginfo() 685 if (per_cpu(cpu_state, cpu) == CPU_DEAD) __cpu_die() 724 &per_cpu(cpu_sibling_map, cpu)); for_each_online_cpu() 726 &per_cpu(cpu_sibling_map, i)); for_each_online_cpu() 749 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; __cpu_up() 757 cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu)); __cpu_up()
|
H A D | irq_ia64.c | 142 per_cpu(vector_irq, cpu)[vector] = irq; __bind_irq_vector() 172 per_cpu(vector_irq, cpu)[vector] = -1; __clear_irq_vector() 242 per_cpu(vector_irq, cpu)[vector] = -1; __setup_vector_irq() 248 per_cpu(vector_irq, cpu)[vector] = irq; __setup_vector_irq()
|
/linux-4.4.14/arch/arm/kernel/ |
H A D | smp.c | 339 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid); smp_store_cpu_info() 417 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy; smp_cpus_done() 574 per_cpu(cpu_completion, cpu) = completion; register_ipi_completion() 580 complete(per_cpu(cpu_completion, cpu)); ipi_complete() 714 if (!per_cpu(l_p_j_ref, cpu)) { cpufreq_callback() 715 per_cpu(l_p_j_ref, cpu) = cpufreq_callback() 716 per_cpu(cpu_data, cpu).loops_per_jiffy; cpufreq_callback() 717 per_cpu(l_p_j_ref_freq, cpu) = freq->old; cpufreq_callback() 729 per_cpu(cpu_data, cpu).loops_per_jiffy = cpufreq_callback() 730 cpufreq_scale(per_cpu(l_p_j_ref, cpu), cpufreq_callback() 731 per_cpu(l_p_j_ref_freq, cpu), cpufreq_callback()
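The cpufreq_callback() hunks above capture, per CPU, a reference loops_per_jiffy and the frequency it was measured at, then rescale on every later transition. A sketch of that lazily-captured-reference pattern (the demo_lpj* names are hypothetical; cpufreq_scale() is the real helper):

#include <linux/percpu.h>
#include <linux/cpufreq.h>

static DEFINE_PER_CPU(unsigned long, demo_lpj);
static DEFINE_PER_CPU(unsigned long, demo_lpj_ref);
static DEFINE_PER_CPU(unsigned long, demo_lpj_ref_freq);

static void demo_scale_lpj(int cpu, unsigned int old_khz, unsigned int new_khz)
{
	/* Record the reference point once, on the first transition. */
	if (!per_cpu(demo_lpj_ref, cpu)) {
		per_cpu(demo_lpj_ref, cpu) = per_cpu(demo_lpj, cpu);
		per_cpu(demo_lpj_ref_freq, cpu) = old_khz;
	}
	per_cpu(demo_lpj, cpu) = cpufreq_scale(per_cpu(demo_lpj_ref, cpu),
					       per_cpu(demo_lpj_ref_freq, cpu),
					       new_khz);
}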
|
H A D | smp_twd.c | 281 if (per_cpu(percpu_setup_called, cpu)) { twd_timer_setup() 287 per_cpu(percpu_setup_called, cpu) = true; twd_timer_setup()
|
H A D | topology.c | 47 return per_cpu(cpu_scale, cpu); arch_scale_cpu_capacity() 52 per_cpu(cpu_scale, cpu) = capacity; set_capacity_scale()
|
/linux-4.4.14/arch/powerpc/platforms/pseries/ |
H A D | dtl.c | 104 struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu); dtl_start() 125 struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu); dtl_stop() 141 return per_cpu(dtl_rings, dtl->cpu).write_index; dtl_current_index() 379 struct dtl *dtl = &per_cpu(cpu_dtl, i); for_each_possible_cpu()
|
H A D | hotplug-cpu.c | 66 return per_cpu(current_state, cpu); get_cpu_current_state() 71 per_cpu(current_state, cpu) = state; set_cpu_current_state() 76 return per_cpu(preferred_offline_state, cpu); get_preferred_offline_state() 81 per_cpu(preferred_offline_state, cpu) = state; set_preferred_offline_state() 86 per_cpu(preferred_offline_state, cpu) = default_offline_state; set_default_offline_state()
|
H A D | hvCall_inst.c | 158 per_cpu(hcall_stats, cpu), for_each_possible_cpu()
|
/linux-4.4.14/arch/ia64/include/asm/ |
H A D | topology.h | 56 #define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
|
/linux-4.4.14/drivers/acpi/ |
H A D | acpi_processor.c | 402 if (per_cpu(processor_device_array, pr->id) != NULL && acpi_processor_add() 403 per_cpu(processor_device_array, pr->id) != device) { acpi_processor_add() 414 per_cpu(processor_device_array, pr->id) = device; acpi_processor_add() 415 per_cpu(processors, pr->id) = pr; acpi_processor_add() 439 per_cpu(processors, pr->id) = NULL; acpi_processor_add() 473 per_cpu(processor_device_array, pr->id) = NULL; acpi_processor_remove() 474 per_cpu(processors, pr->id) = NULL; acpi_processor_remove()
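acpi_processor_add() uses its per-CPU pointer arrays both as a registry and as a duplicate detector: a slot already holding a different device means the firmware tables declared the same CPU twice. Sketch with hypothetical names:

#include <linux/percpu.h>
#include <linux/device.h>

static DEFINE_PER_CPU(struct device *, demo_dev_array);

static int demo_register_cpu_dev(struct device *dev, unsigned int id)
{
	/* Occupied by someone else: a duplicate declaration. */
	if (per_cpu(demo_dev_array, id) != NULL &&
	    per_cpu(demo_dev_array, id) != dev)
		return -EEXIST;

	per_cpu(demo_dev_array, id) = dev;
	return 0;
}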
|
H A D | processor_perflib.c | 91 pr = per_cpu(processors, policy->cpu); acpi_processor_ppc_notifier() 195 pr = per_cpu(processors, cpu); acpi_processor_get_bios_limit() 616 pr = per_cpu(processors, i); for_each_possible_cpu() 635 pr = per_cpu(processors, i); for_each_possible_cpu() 654 pr = per_cpu(processors, i); for_each_possible_cpu() 680 match_pr = per_cpu(processors, j); for_each_possible_cpu() 708 match_pr = per_cpu(processors, j); for_each_possible_cpu() 725 pr = per_cpu(processors, i); for_each_possible_cpu() 756 pr = per_cpu(processors, cpu); acpi_processor_register_performance() 789 pr = per_cpu(processors, cpu); acpi_processor_unregister_performance()
|
H A D | processor_idle.c | 707 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); acpi_idle_play_dead() 789 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); acpi_idle_enter() 799 cx = per_cpu(acpi_cstate[index], dev->cpu); acpi_idle_enter() 806 cx = per_cpu(acpi_cstate[index], dev->cpu); acpi_idle_enter() 829 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); acpi_idle_enter_freeze() 886 per_cpu(acpi_cstate[count], dev->cpu) = cx; acpi_processor_setup_cpuidle_cx() 982 dev = per_cpu(acpi_cpuidle_device, pr->id); acpi_processor_hotplug() 1024 _pr = per_cpu(processors, cpu); for_each_online_cpu() 1027 dev = per_cpu(acpi_cpuidle_device, cpu); for_each_online_cpu() 1037 _pr = per_cpu(processors, cpu); for_each_online_cpu() 1042 dev = per_cpu(acpi_cpuidle_device, cpu); for_each_online_cpu() 1107 per_cpu(acpi_cpuidle_device, pr->id) = dev; acpi_processor_power_init() 1127 struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id); acpi_processor_power_exit()
|
H A D | cppc_acpi.c | 218 cpc_ptr = per_cpu(cpc_desc_ptr, i); for_each_possible_cpu() 241 match_cpc_ptr = per_cpu(cpc_desc_ptr, j); for_each_possible_cpu() 272 match_cpc_ptr = per_cpu(cpc_desc_ptr, j); for_each_possible_cpu() 506 per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr; acpi_cppc_processor_probe() 544 cpc_ptr = per_cpu(cpc_desc_ptr, pr->id); acpi_cppc_processor_exit() 583 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); cppc_get_perf_caps() 646 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); cppc_get_perf_ctrs() 703 struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu); cppc_set_perf()
|
H A D | processor_throttling.c | 86 pr = per_cpu(processors, i); for_each_possible_cpu() 107 pr = per_cpu(processors, i); for_each_possible_cpu() 133 match_pr = per_cpu(processors, j); for_each_possible_cpu() 166 match_pr = per_cpu(processors, j); for_each_possible_cpu() 188 pr = per_cpu(processors, i); for_each_possible_cpu() 231 pr = per_cpu(processors, cpu); acpi_processor_throttling_notifier() 1133 match_pr = per_cpu(processors, i); acpi_processor_set_throttling()
|
H A D | processor_thermal.c | 54 per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))
|
/linux-4.4.14/arch/x86/kernel/cpu/ |
H A D | perf_event_intel_rapl.c | 535 struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); rapl_cpu_exit() 593 struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); rapl_cpu_prepare() 629 per_cpu(rapl_pmu, cpu) = pmu; rapl_cpu_prepare() 630 per_cpu(rapl_pmu_to_free, cpu) = NULL; rapl_cpu_prepare() 637 struct rapl_pmu *pmu = per_cpu(rapl_pmu_to_free, cpu); rapl_cpu_kfree() 641 per_cpu(rapl_pmu_to_free, cpu) = NULL; rapl_cpu_kfree() 646 struct rapl_pmu *pmu = per_cpu(rapl_pmu, cpu); rapl_cpu_dying() 651 per_cpu(rapl_pmu, cpu) = NULL; rapl_cpu_dying() 653 per_cpu(rapl_pmu_to_free, cpu) = pmu; rapl_cpu_dying()
|
H A D | perf_event_intel_ds.c | 251 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; init_debug_store_on_cpu() 263 if (!per_cpu(cpu_hw_events, cpu).ds) fini_debug_store_on_cpu() 273 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; alloc_pebs_buffer() 295 per_cpu(insn_buffer, cpu) = ibuffer; alloc_pebs_buffer() 310 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; release_pebs_buffer() 315 kfree(per_cpu(insn_buffer, cpu)); release_pebs_buffer() 316 per_cpu(insn_buffer, cpu) = NULL; release_pebs_buffer() 324 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; alloc_bts_buffer() 353 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; release_bts_buffer() 371 per_cpu(cpu_hw_events, cpu).ds = ds; alloc_ds_buffer() 378 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds; release_ds_buffer() 383 per_cpu(cpu_hw_events, cpu).ds = NULL; release_ds_buffer()
|
H A D | perf_event_amd.c | 368 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); amd_pmu_cpu_prepare() 384 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); amd_pmu_cpu_starting() 398 nb = per_cpu(cpu_hw_events, i).amd_nb; for_each_online_cpu() 420 cpuhw = &per_cpu(cpu_hw_events, cpu); amd_pmu_cpu_dead()
|
H A D | amd.c | 337 per_cpu(cpu_llc_id, cpu) = node_id; amd_get_topology() 363 per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; amd_detect_cmp() 376 per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id; amd_detect_cmp() 384 id = per_cpu(cpu_llc_id, cpu); amd_get_nb_id() 405 node = per_cpu(cpu_llc_id, cpu); srat_detect_node()
|
H A D | common.c | 377 wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu)); load_percpu_segment() 996 tss = &per_cpu(cpu_tss, cpu); enable_sep_cpu() 1347 t = &per_cpu(cpu_tss, cpu); cpu_init() 1348 oist = &per_cpu(orig_ist, cpu); cpu_init() 1386 char *estacks = per_cpu(exception_stacks, cpu); cpu_init() 1393 per_cpu(debug_stack_addr, cpu) = (unsigned long)estacks; cpu_init() 1431 struct tss_struct *t = &per_cpu(cpu_tss, cpu); cpu_init()
|
/linux-4.4.14/arch/ia64/sn/kernel/sn2/ |
H A D | sn2_smp.c | 494 stat = &per_cpu(ptcstats, cpu); sn2_ptc_seq_show() 498 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, sn2_ptc_seq_show() 499 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, sn2_ptc_seq_show() 500 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec, sn2_ptc_seq_show() 504 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec); sn2_ptc_seq_show() 522 memset(&per_cpu(ptcstats, cpu), 0, sizeof(struct ptc_stats)); sn2_ptc_proc_write()
|
/linux-4.4.14/drivers/clocksource/ |
H A D | metag_generic.c | 96 struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); arch_timer_setup() 97 char *name = per_cpu(local_clockevent_name, cpu); arch_timer_setup()
|
/linux-4.4.14/block/ |
H A D | blk-iopoll.c | 200 list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), blk_iopoll_cpu_notify() 218 INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i)); blk_iopoll_setup()
|
H A D | blk-softirq.c | 92 list_splice_init(&per_cpu(blk_cpu_done, cpu), blk_cpu_notify() 180 INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i)); blk_softirq_init()
|
/linux-4.4.14/kernel/locking/ |
H A D | percpu-rwsem.c | 117 sum += per_cpu(*brw->fast_read_ctr, cpu); for_each_possible_cpu() 118 per_cpu(*brw->fast_read_ctr, cpu) = 0; for_each_possible_cpu()
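percpu-rwsem's slow path folds the distributed fast-path counters into a single value and zeroes them in the same pass. The core of that, with a hypothetical demo_ctr (the real code first blocks fast-path updates and waits out an RCU grace period):

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(int, demo_ctr);

static int demo_clear_and_sum(void)
{
	int cpu, sum = 0;

	for_each_possible_cpu(cpu) {
		sum += per_cpu(demo_ctr, cpu);
		per_cpu(demo_ctr, cpu) = 0;
	}
	return sum;
}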
|
H A D | lockdep_internals.h | 160 __cpu_lockdep_stats = &per_cpu(lockdep_stats, __cpu); \
|
/linux-4.4.14/arch/microblaze/kernel/ |
H A D | setup.c | 189 per_cpu(KM, 0) = 0x1; /* We start in kernel mode */ machine_early_init() 190 per_cpu(CURRENT_SAVE, 0) = (unsigned long)current; machine_early_init()
|
/linux-4.4.14/arch/mips/loongson64/loongson-3/ |
H A D | hpet.c | 182 cd = &per_cpu(hpet_clockevent_device, cpu); hpet_irq_handler() 231 cd = &per_cpu(hpet_clockevent_device, cpu); setup_hpet_timer()
|
H A D | smp.c | 303 per_cpu(cpu_state, cpu) = CPU_ONLINE; loongson3_init_secondary() 378 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; loongson3_prepare_cpus() 433 while (per_cpu(cpu_state, cpu) != CPU_DEAD) loongson3_cpu_die() 587 state_addr = &per_cpu(cpu_state, cpu); play_dead()
|
/linux-4.4.14/arch/x86/platform/uv/ |
H A D | tlb_uv.c | 136 bcp = &per_cpu(bau_control, cpu); for_each_present_cpu() 151 bcp = &per_cpu(bau_control, cpu); for_each_present_cpu() 183 return per_cpu(x86_cpu_to_apicid, cpu); uvhub_to_first_apicid() 361 struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id()); do_reset() 767 tbcp = &per_cpu(bau_control, tcpu); for_each_present_cpu() 975 tbcp = &per_cpu(bau_control, tcpu); for_each_present_cpu() 1094 bcp = &per_cpu(bau_control, cpu); uv_flush_tlb_others() 1125 flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu); uv_flush_tlb_others() 1254 bcp = &per_cpu(bau_control, smp_processor_id()); uv_bau_message_interrupt() 1384 bcp = &per_cpu(bau_control, cpu); ptc_seq_show() 1497 stat = &per_cpu(ptcstats, cpu); for_each_present_cpu() 1599 bcp = &per_cpu(bau_control, cpu); tunables_write() 1606 bcp = &per_cpu(bau_control, cpu); for_each_present_cpu() 1761 bcp = &per_cpu(bau_control, cpu); for_each_present_cpu() 1797 bcp = &per_cpu(bau_control, cpu); for_each_present_cpu() 1886 bcp = &per_cpu(bau_control, cpu); for_each_present_cpu() 1890 bcp->statp = &per_cpu(ptcstats, cpu); for_each_present_cpu() 1926 bcp = &per_cpu(bau_control, cpu); for_each_present_cpu() 1992 * Initialize all the per_cpu information for the cpu's on a given socket, 2006 bcp = &per_cpu(bau_control, cpu); scan_sock() 2039 * Summarize the blade and socket topology into the per_cpu structures. 2126 mask = &per_cpu(uv_flush_tlb_mask, cur_cpu); for_each_possible_cpu()
|
/linux-4.4.14/drivers/base/ |
H A D | cpu.c | 79 per_cpu(cpu_sys_devices, logical_cpu) = NULL; unregister_cpu() 375 per_cpu(cpu_sys_devices, num) = &cpu->dev; register_cpu() 385 return per_cpu(cpu_sys_devices, cpu); get_cpu_device() 495 if (register_cpu(&per_cpu(cpu_devices, i), i)) for_each_possible_cpu()
|
H A D | cacheinfo.c | 33 #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu)) 219 #define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu)) 225 #define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu))
|
/linux-4.4.14/arch/alpha/kernel/ |
H A D | irq.c | 80 seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); arch_show_interrupts()
|
H A D | time.c | 94 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); rtc_timer_interrupt() 119 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); init_rtc_clockevent() 177 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); qemu_timer_interrupt() 187 struct clock_event_device *ce = &per_cpu(cpu_ce, cpu); init_qemu_clockevent()
|
/linux-4.4.14/arch/cris/arch-v10/mm/ |
H A D | fault.c | 48 pgd_t* pgd = (pgd_t*)per_cpu(current_pgd, smp_processor_id()); handle_mmu_bus_fault()
|
H A D | tlb.c | 165 per_cpu(current_pgd, smp_processor_id()) = next->pgd; switch_mm()
|
/linux-4.4.14/kernel/rcu/ |
H A D | tree_trace.c | 125 rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu), print_one_rcu_data() 147 per_cpu(rcu_cpu_has_work, rdp->cpu), print_one_rcu_data() 148 convert_kthread_status(per_cpu(rcu_cpu_kthread_status, print_one_rcu_data() 150 per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff); print_one_rcu_data()
|
/linux-4.4.14/arch/ia64/include/asm/sn/ |
H A D | pda.h | 66 #define pdacpu(cpu) (&per_cpu(pda_percpu, cpu))
|
/linux-4.4.14/arch/arm/include/asm/ |
H A D | smp_plat.h | 37 struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu); smp_cpuid_part()
|
/linux-4.4.14/net/rds/ |
H A D | ib_stats.c | 93 src = (uint64_t *)&(per_cpu(rds_ib_stats, cpu)); for_each_online_cpu()
|
H A D | iw_stats.c | 85 src = (uint64_t *)&(per_cpu(rds_iw_stats, cpu)); for_each_online_cpu()
|
H A D | tcp_stats.c | 64 src = (uint64_t *)&(per_cpu(rds_tcp_stats, cpu)); for_each_online_cpu()
|
H A D | page.c | 119 rem = &per_cpu(rds_page_remainders, get_cpu()); rds_page_remainder_alloc() 153 rem = &per_cpu(rds_page_remainders, get_cpu()); rds_page_remainder_alloc() 188 rem = &per_cpu(rds_page_remainders, cpu); rds_page_remainder_cpu_notify()
|
/linux-4.4.14/arch/blackfin/mach-common/ |
H A D | smp.c | 135 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); ipi_timer() 183 bfin_ipi_data = &per_cpu(bfin_ipi, cpu); for_each_possible_cpu() 197 bfin_ipi_data = &per_cpu(bfin_ipi, cpu); for_each_cpu()
|
/linux-4.4.14/init/ |
H A D | calibrate.c | 280 if (per_cpu(cpu_loops_per_jiffy, this_cpu)) { calibrate_delay() 281 lpj = per_cpu(cpu_loops_per_jiffy, this_cpu); calibrate_delay() 305 per_cpu(cpu_loops_per_jiffy, this_cpu) = lpj; calibrate_delay()
|
/linux-4.4.14/drivers/crypto/ |
H A D | padlock-aes.c | 157 if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) || aes_set_key() 158 &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu)) aes_set_key() 159 per_cpu(paes_last_cword, cpu) = NULL; aes_set_key() 171 if (cword != per_cpu(paes_last_cword, cpu)) padlock_reset_key() 181 per_cpu(paes_last_cword, raw_smp_processor_id()) = cword; padlock_store_cword()
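padlock-aes.c remembers, per CPU, which control word the crypto engine last loaded, so back-to-back operations with the same key skip the reload, and key teardown scrubs the cache. A sketch of the idiom (demo names are illustrative):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(const void *, demo_last_key);

static void demo_load_key(const void *key)
{
	int cpu = raw_smp_processor_id();

	if (key == per_cpu(demo_last_key, cpu))
		return;	/* this CPU's engine already holds the key */

	/* ... reprogram the engine on this CPU ... */
	per_cpu(demo_last_key, cpu) = key;
}

/* On key destruction, forget any cached match so a stale pointer
 * can never be confused with a new key at the same address. */
static void demo_forget_key(const void *key)
{
	int cpu;

	for_each_online_cpu(cpu)
		if (per_cpu(demo_last_key, cpu) == key)
			per_cpu(demo_last_key, cpu) = NULL;
}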
|
/linux-4.4.14/arch/x86/mm/ |
H A D | numa.c | 98 per_cpu(x86_cpu_to_node_map, cpu) = node; numa_set_node() 737 * Called before the per_cpu areas are setup. 781 return per_cpu(x86_cpu_to_node_map, cpu); __cpu_to_node() 787 * per_cpu areas are setup. 796 "early_cpu_to_node(%d): no per_cpu area!\n", cpu); early_cpu_to_node() 800 return per_cpu(x86_cpu_to_node_map, cpu); early_cpu_to_node()
|
/linux-4.4.14/kernel/time/ |
H A D | timer_stats.c | 248 lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id()); timer_stats_update_stats() 351 raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu); for_each_online_cpu() 413 raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu)); init_timer_stats()
|
H A D | tick-common.c | 59 return &per_cpu(tick_cpu_device, cpu); tick_get_device() 307 td = &per_cpu(tick_cpu_device, cpu); tick_check_new_device() 391 struct tick_device *td = &per_cpu(tick_cpu_device, cpu); tick_shutdown()
|
/linux-4.4.14/drivers/thermal/ |
H A D | x86_pkg_temp_thermal.c | 379 &per_cpu(pkg_temp_thermal_threshold_work, cpu), pkg_temp_thermal_platform_thermal_notify() 536 INIT_DELAYED_WORK(&per_cpu(pkg_temp_thermal_threshold_work, cpu), get_core_online() 548 &per_cpu(pkg_temp_thermal_threshold_work, cpu)); put_core_offline() 638 &per_cpu(pkg_temp_thermal_threshold_work, i)); pkg_temp_thermal_exit()
|
/linux-4.4.14/arch/xtensa/include/asm/ |
H A D | mmu_context.h | 34 #define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
|
/linux-4.4.14/arch/xtensa/kernel/ |
H A D | irq.c | 67 seq_printf(p, " %10lu", per_cpu(nmi_count, cpu)); arch_show_interrupts()
|
H A D | time.c | 117 struct ccount_timer *timer = &per_cpu(ccount_timer, cpu); local_timer_setup()
|
/linux-4.4.14/fs/fscache/ |
H A D | main.c | 130 init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu)); fscache_init()
|
/linux-4.4.14/net/core/ |
H A D | drop_monitor.c | 386 data = &per_cpu(dm_cpu_data, cpu); for_each_possible_cpu() 419 data = &per_cpu(dm_cpu_data, cpu); for_each_possible_cpu()
|
H A D | sysctl_net_core.c | 121 sd = &per_cpu(softnet_data, i); for_each_possible_cpu() 153 sd = &per_cpu(softnet_data, i); for_each_possible_cpu()
|
/linux-4.4.14/arch/arm/xen/ |
H A D | enlighten.c | 94 if (per_cpu(xen_vcpu, cpu) != NULL) xen_percpu_init() 105 per_cpu(xen_vcpu, cpu) = vcpup; xen_percpu_init()
|
/linux-4.4.14/arch/arm64/kernel/ |
H A D | cpuinfo.c | 110 struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); for_each_online_cpu() 255 struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0); cpuinfo_store_boot_cpu()
|
/linux-4.4.14/arch/blackfin/kernel/ |
H A D | time-ts.c | 308 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); bfin_coretmr_interrupt() 328 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); bfin_coretmr_clockevent_init()
|
/linux-4.4.14/arch/arm/mach-qcom/ |
H A D | platsmp.c | 277 if (!per_cpu(cold_boot_done, cpu)) { qcom_boot_secondary() 280 per_cpu(cold_boot_done, cpu) = true; qcom_boot_secondary()
|
/linux-4.4.14/arch/arm/mach-bcm/ |
H A D | platsmp-brcmstb.c | 70 return per_cpu(per_cpu_sw_state, cpu); per_cpu_sw_state_rd() 76 per_cpu(per_cpu_sw_state, cpu) = val; per_cpu_sw_state_wr()
|
/linux-4.4.14/drivers/scsi/bnx2i/ |
H A D | bnx2i_init.c | 425 p = &per_cpu(bnx2i_percpu, cpu); bnx2i_percpu_thread_create() 446 p = &per_cpu(bnx2i_percpu, cpu); bnx2i_percpu_thread_destroy() 536 p = &per_cpu(bnx2i_percpu, cpu); for_each_possible_cpu()
|
/linux-4.4.14/drivers/cpuidle/governors/ |
H A D | ladder.c | 136 struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu); ladder_enable_device()
|
/linux-4.4.14/arch/cris/arch-v32/mm/ |
H A D | init.c | 41 per_cpu(current_pgd, smp_processor_id()) = init_mm.pgd; cris_mmu_init()
|