Searched refs:per_cpu_ptr (Results 1 – 157 of 157) sorted by relevance

/linux-4.1.27/arch/x86/kernel/cpu/
perf_event_amd_uncore.c
67 return *per_cpu_ptr(amd_uncore_nb, event->cpu); in event_to_amd_uncore()
69 return *per_cpu_ptr(amd_uncore_l2, event->cpu); in event_to_amd_uncore()
307 *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb; in amd_uncore_cpu_up_prepare()
320 *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2; in amd_uncore_cpu_up_prepare()
338 that = *per_cpu_ptr(uncores, cpu); in amd_uncore_find_online_sibling()
363 uncore = *per_cpu_ptr(amd_uncore_nb, cpu); in amd_uncore_cpu_starting()
368 *per_cpu_ptr(amd_uncore_nb, cpu) = uncore; in amd_uncore_cpu_starting()
375 uncore = *per_cpu_ptr(amd_uncore_l2, cpu); in amd_uncore_cpu_starting()
381 *per_cpu_ptr(amd_uncore_l2, cpu) = uncore; in amd_uncore_cpu_starting()
388 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); in uncore_online()
[all …]
perf_event_intel_uncore.c
40 box = *per_cpu_ptr(pmu->box, cpu); in uncore_pmu_to_box()
46 if (*per_cpu_ptr(pmu->box, cpu)) in uncore_pmu_to_box()
51 *per_cpu_ptr(pmu->box, cpu) = box; in uncore_pmu_to_box()
58 return *per_cpu_ptr(pmu->box, cpu); in uncore_pmu_to_box()
888 if (*per_cpu_ptr(pmu->box, cpu) == box) { in uncore_pci_remove()
889 *per_cpu_ptr(pmu->box, cpu) = NULL; in uncore_pci_remove()
983 box = *per_cpu_ptr(pmu->box, cpu); in uncore_cpu_dying()
984 *per_cpu_ptr(pmu->box, cpu) = NULL; in uncore_cpu_dying()
1004 box = *per_cpu_ptr(pmu->box, cpu); in uncore_cpu_starting()
1012 exist = *per_cpu_ptr(pmu->box, k); in uncore_cpu_starting()
[all …]
/linux-4.1.27/kernel/locking/
lglock.c
47 lock = per_cpu_ptr(lg->lock, cpu); in lg_local_lock_cpu()
57 lock = per_cpu_ptr(lg->lock, cpu); in lg_local_unlock_cpu()
71 lock = per_cpu_ptr(lg->lock, i); in lg_global_lock()
84 lock = per_cpu_ptr(lg->lock, i); in lg_global_unlock()
osq_lock.c
28 return per_cpu_ptr(&osq_node, cpu_nr); in decode_cpu()
/linux-4.1.27/kernel/sched/
cpuacct.c
101 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_cpuusage_read()
120 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_cpuusage_write()
192 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu); in cpuacct_stats_show()
201 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu); in cpuacct_stats_show()
247 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_charge()
core.c
5859 sibling = *per_cpu_ptr(sdd->sd, i); in build_group_mask()
5894 sibling = *per_cpu_ptr(sdd->sd, i); in build_overlap_sched_groups()
5914 sg->sgc = *per_cpu_ptr(sdd->sgc, i); in build_overlap_sched_groups()
5953 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group()
5960 *sg = *per_cpu_ptr(sdd->sg, cpu); in get_group()
5961 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
6134 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
6135 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
6137 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) in claim_allocations()
6138 *per_cpu_ptr(sdd->sg, cpu) = NULL; in claim_allocations()
[all …]
/linux-4.1.27/fs/squashfs/
decompressor_multi_percpu.c
40 stream = per_cpu_ptr(percpu, cpu); in squashfs_decompressor_create()
53 stream = per_cpu_ptr(percpu, cpu); in squashfs_decompressor_create()
70 stream = per_cpu_ptr(percpu, cpu); in squashfs_decompressor_destroy()
/linux-4.1.27/kernel/
smpboot.c
170 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in __smpboot_create_thread()
189 *per_cpu_ptr(ht->store, cpu) = tsk; in __smpboot_create_thread()
222 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in smpboot_unpark_thread()
241 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in smpboot_park_thread()
263 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in smpboot_destroy_threads()
268 *per_cpu_ptr(ht->store, cpu) = NULL; in smpboot_destroy_threads()
padata.c
137 queue = per_cpu_ptr(pd->pqueue, target_cpu); in padata_do_parallel()
186 next_queue = per_cpu_ptr(pd->pqueue, cpu); in padata_get_next()
260 squeue = per_cpu_ptr(pd->squeue, cb_cpu); in padata_reorder()
337 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_do_serial()
380 squeue = per_cpu_ptr(pd->squeue, cpu); in padata_init_squeues()
395 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_init_pqueues()
466 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_flush_queues()
476 squeue = per_cpu_ptr(pd->squeue, cpu); in padata_flush_queues()
smp.c
449 struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many()
466 csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many()
workqueue.c
1370 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in __queue_work()
2986 struct work_struct *work = per_cpu_ptr(works, cpu); in schedule_on_each_cpu()
2993 flush_work(per_cpu_ptr(works, cpu)); in schedule_on_each_cpu()
3811 per_cpu_ptr(wq->cpu_pwqs, cpu); in alloc_and_link_pwqs()
4105 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in workqueue_congested()
module.c
525 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); in percpu_modcopy()
550 void *start = per_cpu_ptr(mod->percpu, cpu); in is_module_percpu_address()
kexec.c
1598 buf = (u32 *)per_cpu_ptr(crash_notes, cpu); in crash_save_cpu()
/linux-4.1.27/net/xfrm/
xfrm_ipcomp.c
49 u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); in ipcomp_decompress()
50 struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); in ipcomp_decompress()
216 vfree(*per_cpu_ptr(scratches, i)); in ipcomp_free_scratches()
241 *per_cpu_ptr(scratches, i) = scratch; in ipcomp_alloc_scratches()
269 struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); in ipcomp_free_tfms()
311 *per_cpu_ptr(tfms, cpu) = tfm; in ipcomp_alloc_tfms()
/linux-4.1.27/kernel/trace/
trace_functions_graph.c
340 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in trace_graph_entry()
425 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in trace_graph_return()
544 last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); in verif_pid()
780 cpu_data = per_cpu_ptr(data->cpu_data, cpu); in print_graph_entry_leaf()
819 cpu_data = per_cpu_ptr(data->cpu_data, cpu); in print_graph_entry_nested()
913 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); in check_irq_entry()
959 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); in check_irq_return()
1042 cpu_data = per_cpu_ptr(data->cpu_data, cpu); in print_graph_return()
1102 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; in print_graph_comment()
1163 if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) { in print_graph_function_flags()
[all …]
trace_sched_wakeup.c
94 *data = per_cpu_ptr(tr->trace_buffer.data, cpu); in func_prolog_preempt_disable()
454 disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled); in probe_wakeup_sched_switch()
466 data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu); in probe_wakeup_sched_switch()
488 atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled); in probe_wakeup_sched_switch()
545 disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled); in probe_wakeup()
578 data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu); in probe_wakeup()
592 atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled); in probe_wakeup()
trace_kdb.c
31 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump_buf()
88 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump_buf()
trace_irqsoff.c
123 *data = per_cpu_ptr(tr->trace_buffer.data, cpu); in func_prolog_dec()
378 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in start_critical_timing()
416 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in stop_critical_timing()
trace.c
1021 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); in __update_max_tr()
1022 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); in __update_max_tr()
2427 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; in tracing_iter_reset()
2447 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; in tracing_iter_reset()
2548 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { in get_total_entries()
2549 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; in get_total_entries()
2606 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); in print_trace_header()
2676 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) in test_cpu_buff_start()
3434 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); in tracing_cpumask_write()
3439 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); in tracing_cpumask_write()
[all …]
trace_mmiotrace.c
323 struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id()); in mmio_trace_rw()
356 data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id()); in mmio_trace_mapping()
trace_functions.c
145 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in function_trace_call()
176 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in function_stack_trace_call()
trace_uprobe.c
700 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p); in uprobe_buffer_init()
701 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex); in uprobe_buffer_init()
710 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf); in uprobe_buffer_init()
740 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, in uprobe_buffer_disable()
754 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu); in uprobe_buffer_get()
trace_event_perf.c
102 INIT_HLIST_HEAD(per_cpu_ptr(list, cpu)); in perf_trace_event_reg()
blktrace.c
255 sequence = per_cpu_ptr(bt->sequence, cpu); in __blk_add_trace()
ftrace.c
211 *per_cpu_ptr(ops->disabled, cpu) = 1; in control_ops_disable_all()
/linux-4.1.27/drivers/infiniband/hw/ehca/
ehca_irq.c
671 } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active); in find_next_online_cpu()
709 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); in queue_comp_task()
710 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id); in queue_comp_task()
718 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); in queue_comp_task()
719 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id); in queue_comp_task()
750 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); in comp_task_park()
763 target = per_cpu_ptr(pool->cpu_comp_tasks, cpu); in comp_task_park()
764 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu); in comp_task_park()
775 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); in comp_task_stop()
786 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); in comp_task_should_run()
[all …]
/linux-4.1.27/lib/
percpu_ida.c
77 remote = per_cpu_ptr(pool->tag_cpu, cpu); in steal_tags()
322 spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock); in __percpu_ida_init()
350 remote = per_cpu_ptr(pool->tag_cpu, cpu); in percpu_ida_for_each_free()
387 remote = per_cpu_ptr(pool->tag_cpu, cpu); in percpu_ida_free_tags()
percpu_counter.c
67 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_set()
107 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in __percpu_counter_sum()
186 pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_hotcpu_callback()
percpu-refcount.c
130 count += *per_cpu_ptr(percpu_count, cpu); in percpu_ref_switch_to_atomic_rcu()
247 *per_cpu_ptr(percpu_count, cpu) = 0; in __percpu_ref_switch_to_percpu()
/linux-4.1.27/arch/x86/kernel/
kgdb.c
224 bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu); in kgdb_correct_hw_break()
253 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); in hw_break_reserve_slot()
265 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); in hw_break_reserve_slot()
280 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); in hw_break_release_slot()
320 bp = *per_cpu_ptr(breakinfo[i].pev, cpu); in kgdb_remove_all_hw_break()
413 bp = *per_cpu_ptr(breakinfo[i].pev, cpu); in kgdb_disable_hw_debug()
680 pevent = per_cpu_ptr(breakinfo[i].pev, cpu); in kgdb_arch_late()
crash.c
458 notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu)); in prepare_elf64_headers()
/linux-4.1.27/include/net/
gro_cells.h
78 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); in gro_cells_init()
94 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); in gro_cells_destroy()
/linux-4.1.27/net/core/
flow.c
59 per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1; in flow_cache_new_hashrnd()
323 fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_percpu_empty()
395 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_cpu_prepare()
418 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_cpu()
480 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i); in flow_cache_init()
503 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i); in flow_cache_fini()
gen_stats.c
110 struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i); in __gnet_stats_copy_basic_cpu()
228 const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i); in __gnet_stats_copy_queue_cpu()
neighbour.c
1838 st = per_cpu_ptr(tbl->stats, cpu); in neightbl_fill_info()
2687 return per_cpu_ptr(tbl->stats, cpu); in neigh_stat_seq_start()
2701 return per_cpu_ptr(tbl->stats, cpu); in neigh_stat_seq_next()
sock.c
2656 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx]; in sock_prot_inuse_get()
dev.c
6536 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); in netdev_refcnt_read()
/linux-4.1.27/kernel/rcu/
srcu.c
154 t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]); in srcu_readers_seq_idx()
171 t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]); in srcu_readers_active_idx()
268 sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]); in srcu_readers_active()
269 sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]); in srcu_readers_active()
tree.c
1208 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; in print_other_cpu_stall()
1255 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; in print_cpu_stall()
2511 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_cleanup_dying_idle_cpu()
2532 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_cleanup_dead_cpu()
2768 if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj)) in force_qs_rnp()
2996 rdp = per_cpu_ptr(rsp->rda, cpu); in __call_rcu()
3635 rdp = per_cpu_ptr(rsp->rda, cpu); in _rcu_barrier()
3727 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_boot_init_percpu_data()
3753 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_init_percpu_data()
3804 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); in rcu_cpu_notify()
[all …]
tree_plugin.h
1315 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); in rcu_prepare_kthreads()
1745 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in print_cpu_stall_info()
1916 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_nocb_cpu_needs_barrier()
2393 init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu)); in rcu_init_nohz()
2417 struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu); in rcu_spawn_one_nocb_kthread()
2509 rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_organize_nocb_kthreads()
2960 rdp = per_cpu_ptr(rcu_state_p->rda, cpu); in rcu_sys_is_idle()
tree_trace.c
67 return per_cpu_ptr(rsp->rda, *pos); in r_start()
rcutorture.c
543 c0 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx]; in srcu_torture_stats()
544 c1 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]; in srcu_torture_stats()
/linux-4.1.27/arch/x86/kernel/acpi/
cstate.c
130 percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); in acpi_processor_ffh_cstate_probe()
160 percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); in acpi_processor_ffh_cstate_enter()
/linux-4.1.27/drivers/xen/
xen-acpi-processor.c
465 free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) in free_acpi_perf_data()
530 &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, in xen_acpi_processor_init()
545 perf = per_cpu_ptr(acpi_perf_data, i); in xen_acpi_processor_init()
565 perf = per_cpu_ptr(acpi_perf_data, i); in xen_acpi_processor_init()
584 perf = per_cpu_ptr(acpi_perf_data, i); in xen_acpi_processor_exit()
/linux-4.1.27/include/linux/
percpu-defs.h
220 #define per_cpu_ptr(ptr, cpu) \ macro
250 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) macro
251 #define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
256 #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
genhd.h
311 (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
318 res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
327 memset(per_cpu_ptr(part->dkstats, i), value, in part_stat_set_all()
blk-mq.h
250 ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)
vmstat.h
155 x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item]; in zone_page_state_snapshot()
netdevice.h
2030 stat = per_cpu_ptr(pcpu_stats, __cpu); \
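
The percpu-defs.h entries above are the definition site of the searched macro: on SMP, per_cpu_ptr() relocates a per-cpu pointer into the target CPU's per-cpu area, the UP fallback merely type-checks the pointer, and raw_cpu_ptr()/per_cpu() are thin wrappers over it. The pattern that dominates the hits in this listing is: allocate a per-cpu object, index each CPU's slot, fold the slots into one total. A minimal hypothetical sketch of that pattern follows; it is not code from this tree, and hit_count/demo_sum are illustrative names only.

/*
 * Hypothetical sketch of the alloc/index/fold pattern behind most of
 * the references above (cf. percpu_counter.c, netdev_refcnt_read()).
 */
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

static unsigned long __percpu *hit_count;	/* illustrative name */

static int demo_sum(unsigned long *total)
{
	int cpu;
	unsigned long sum = 0;

	hit_count = alloc_percpu(unsigned long);
	if (!hit_count)
		return -ENOMEM;

	/* Remote access: per_cpu_ptr() addresses any CPU's copy. */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(hit_count, cpu) = 0;

	/* Fold every CPU's private slot into a single value. */
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(hit_count, cpu);

	*total = sum;
	free_percpu(hit_count);
	return 0;
}

Note the extra dereference in many hits above (e.g. *per_cpu_ptr(pmu->box, cpu) or *per_cpu_ptr(net->ipv4.icmp_sk, i)): there the per-cpu slot itself holds a pointer, so it is dereferenced once more to reach the real object.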
/linux-4.1.27/arch/arc/kernel/
smp.c
211 unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu); in ipi_send_msg_one()
337 int *dev = per_cpu_ptr(&ipi_dev, cpu); in smp_ipi_irq_setup()
/linux-4.1.27/crypto/
mcryptd.c
56 flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); in mcryptd_arm_flusher()
79 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in mcryptd_init_queue()
93 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in mcryptd_fini_queue()
128 flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); in mcryptd_opportunistic_flush()
210 flist = per_cpu_ptr(mcryptd_flist, cpu); in mcryptd_flusher()
695 flist = per_cpu_ptr(mcryptd_flist, cpu); in mcryptd_init()
cryptd.c
97 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in cryptd_init_queue()
110 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in cryptd_fini_queue()
/linux-4.1.27/arch/arm/kernel/
perf_event_cpu.c
104 free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); in cpu_pmu_free_irq()
159 per_cpu_ptr(&hw_events->percpu_pmu, cpu)); in cpu_pmu_request_irq()
211 struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu); in cpu_pmu_init()
/linux-4.1.27/mm/
zswap.c
117 tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, get_cpu()); in zswap_comp_op()
353 *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = tfm; in __zswap_cpu_notifier()
358 *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL; in __zswap_cpu_notifier()
365 tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu); in __zswap_cpu_notifier()
368 *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL; in __zswap_cpu_notifier()
vmstat.c
180 per_cpu_ptr(zone->pageset, cpu)->stat_threshold in refresh_zone_stat_thresholds()
211 per_cpu_ptr(zone->pageset, cpu)->stat_threshold in set_pgdat_percpu_threshold()
535 p = per_cpu_ptr(zone->pageset, cpu); in cpu_vm_stats_fold()
1221 pageset = per_cpu_ptr(zone->pageset, i); in zoneinfo_show_print()
1405 struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu); in need_update()
1455 INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu), in start_shepherd_timer()
kmemleak.c
877 log->ptr = per_cpu_ptr(ptr, cpu); in early_alloc_percpu()
930 create_object((unsigned long)per_cpu_ptr(ptr, cpu), in kmemleak_alloc_percpu()
990 delete_object_full((unsigned long)per_cpu_ptr(ptr, in kmemleak_free_percpu()
page_alloc.c
1426 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
1501 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
1506 pcp = per_cpu_ptr(z->pageset, cpu); in drain_all_pages()
3289 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
3326 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
4399 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); in zone_pageset_init()
6039 per_cpu_ptr(zone->pageset, cpu)); in percpu_pagelist_fraction_sysctl_handler()
6566 per_cpu_ptr(zone->pageset, cpu)); in zone_pcp_update()
6581 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
slub.c
1811 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); in init_kmem_cache_cpus()
2105 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab()
2125 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in has_cpu_slab()
4275 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, in show_slab_objects()
4523 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial; in slabs_cpu_partial_show()
4535 struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial; in slabs_cpu_partial_show()
4813 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; in show_stat()
4836 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; in clear_stat()
percpu.c
1289 void *start = per_cpu_ptr(base, cpu); in is_kernel_percpu_address()
1340 void *start = per_cpu_ptr(base, cpu); in per_cpu_ptr_to_phys()
slab.c
1118 nc = per_cpu_ptr(cachep->cpu_cache, cpu); in cpuup_canceled()
2015 init_arraycache(per_cpu_ptr(cpu_cache, cpu), in alloc_kmem_cache_cpus()
3713 struct array_cache *ac = per_cpu_ptr(prev, cpu); in __do_tune_cpucache()
swapfile.c
2456 cluster = per_cpu_ptr(p->percpu_cluster, i); in SYSCALL_DEFINE2()
memcontrol.c
5788 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, in mem_cgroup_init()
/linux-4.1.27/drivers/net/team/
team_mode_loadbalance.c
473 pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); in lb_stats_refresh()
487 pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); in lb_stats_refresh()
488 stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i); in lb_stats_refresh()
597 team_lb_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); in lb_init()
team.c
1788 p = per_cpu_ptr(team->pcpu_stats, i); in team_get_stats64()
/linux-4.1.27/arch/x86/lib/
msr-smp.c
13 reg = per_cpu_ptr(rv->msrs, this_cpu); in __rdmsr_on_cpu()
27 reg = per_cpu_ptr(rv->msrs, this_cpu); in __wrmsr_on_cpu()
/linux-4.1.27/kernel/irq/
irqdesc.c
93 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; in desc_set_defaults()
607 *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; in kstat_irqs_cpu()
627 sum += *per_cpu_ptr(desc->kstat_irqs, cpu); in kstat_irqs()
/linux-4.1.27/block/
blk-mq.h
79 return per_cpu_ptr(q->queue_ctx, cpu); in __blk_mq_get_ctx()
blk-throttle.c
287 tg_stats_init(per_cpu_ptr(stats_cpu, cpu)); in tg_stats_alloc_fn()
505 struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); in throtl_pd_reset_stats()
1299 struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); in tg_prfill_cpu_rwstat()
blk-mq.c
1749 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); in blk_mq_init_cpu_queues()
/linux-4.1.27/drivers/cpufreq/
pcc-cpufreq.c
152 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); in pcc_get_freq()
208 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); in pcc_cpufreq_target()
258 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); in pcc_get_offset()
acpi-cpufreq.c
125 struct msr *reg = per_cpu_ptr(msrs, cpu); in boost_set_msrs()
516 free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) in free_acpi_perf_data()
579 &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, in acpi_cpufreq_early_init()
675 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); in acpi_cpufreq_cpu_init()
/linux-4.1.27/drivers/thermal/
intel_powerclamp.c
534 per_cpu_ptr(powerclamp_thread, cpu); in start_power_clamp()
568 thread = *per_cpu_ptr(powerclamp_thread, i); in end_power_clamp()
580 per_cpu_ptr(powerclamp_thread, cpu); in powerclamp_cpu_callback()
/linux-4.1.27/net/netfilter/
nf_synproxy_core.c
256 return per_cpu_ptr(snet->stats, cpu); in synproxy_cpu_seq_start()
271 return per_cpu_ptr(snet->stats, cpu); in synproxy_cpu_seq_next()
nf_conntrack_standalone.c
282 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_start()
297 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_next()
nf_conntrack_core.c
253 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); in nf_ct_add_to_dying_list()
268 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); in nf_ct_add_to_unconfirmed_list()
282 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); in nf_ct_del_from_dying_or_unconfirmed_list()
555 pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu); in nf_conntrack_tmpl_insert()
1375 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in get_next_corpse()
1750 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in nf_conntrack_init_net()
nf_conntrack_ecache.c
92 pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu); in ecache_work()
nf_conntrack_helper.c
419 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in __nf_conntrack_helper_unregister()
nf_conntrack_netlink.c
1215 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in ctnetlink_dump_list()
1951 st = per_cpu_ptr(net->ct.stat, cpu); in ctnetlink_ct_stat_cpu_dump()
3100 st = per_cpu_ptr(net->ct.stat, cpu); in ctnetlink_exp_stat_cpu_dump()
nf_tables_api.c
896 cpu_stats = per_cpu_ptr(stats, cpu); in nft_dump_stats()
/linux-4.1.27/drivers/net/
loopback.c
112 lb_stats = per_cpu_ptr(dev->lstats, i); in loopback_get_stats64()
nlmon.c
90 nl_stats = per_cpu_ptr(dev->lstats, i); in nlmon_get_stats64()
dummy.c
67 dstats = per_cpu_ptr(dev->dstats, i); in dummy_get_stats64()
veth.c
148 struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu); in veth_stats_one()
virtio_net.c
1059 struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); in virtnet_stats()
1808 virtnet_stats = per_cpu_ptr(vi->stats, i); in virtnet_probe()
xen-netfront.c
1026 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); in xennet_get_stats64()
1027 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); in xennet_get_stats64()
macvlan.c
841 p = per_cpu_ptr(vlan->pcpu_stats, i); in macvlan_dev_get_stats64()
/linux-4.1.27/arch/s390/kernel/
machine_kexec.c
46 ptr = (u64 *) per_cpu_ptr(crash_notes, cpu); in add_elf_notes()
/linux-4.1.27/drivers/dma/
dmaengine.c
99 count += per_cpu_ptr(chan->local, i)->memcpy_count; in memcpy_count_show()
121 count += per_cpu_ptr(chan->local, i)->bytes_transferred; in bytes_transferred_show()
443 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; in dma_channel_rebalance()
460 per_cpu_ptr(channel_table[cap], cpu)->chan = chan; in dma_channel_rebalance()
/linux-4.1.27/drivers/idle/
intel_idle.c
731 dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu); in cpu_hotplug_notify()
911 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); in intel_idle_cpuidle_devices_uninit()
1023 dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu); in intel_idle_cpu_init()
/linux-4.1.27/arch/ia64/kernel/
crash.c
76 buf = (u64 *) per_cpu_ptr(crash_notes, cpu); in crash_save_this_cpu()
/linux-4.1.27/net/ipv4/netfilter/
nf_conntrack_l3proto_ipv4_compat.c
341 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_start()
356 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_next()
ip_tables.c
336 stackptr = per_cpu_ptr(private->stackptr, cpu); in ipt_do_table()
/linux-4.1.27/arch/x86/crypto/sha-mb/
sha1_mb.c
881 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); in sha1_mb_mod_init()
910 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); in sha1_mb_mod_init()
925 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); in sha1_mb_mod_fini()
/linux-4.1.27/net/ipv4/
ip_tunnel_core.c
170 per_cpu_ptr(dev->tstats, i); in ip_tunnel_get_stats64()
icmp.c
1143 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i)); in icmp_sk_exit()
1164 *per_cpu_ptr(net->ipv4.icmp_sk, i) = sk; in icmp_sk_init()
1205 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i)); in icmp_sk_init()
af_inet.c
1461 res += *(((unsigned long *) per_cpu_ptr(mib, i)) + offt); in snmp_fold_field()
1479 bhptr = per_cpu_ptr(mib, cpu); in snmp_fold_field64()
1537 af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i); in ipv4_mib_init_net()
tcp_ipv4.c
2395 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); in tcp_sk_exit()
2414 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; in tcp_sk_init()
fib_semantics.c
196 rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1); in rt_fibinfo_free_cpus()
ip_tunnel.c
99 __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0); in ip_tunnel_dst_reset_all()
route.c
344 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i); in rt_acct_proc_show()
690 prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i); in update_or_create_fnhe()
fib_trie.c
2159 const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu); in trie_show_usage()
/linux-4.1.27/net/caif/
cffrml.c
195 refcnt += *per_cpu_ptr(this->pcpu_refcnt, i); in cffrml_refcnt_read()
caif_dev.c
87 refcnt += *per_cpu_ptr(e->pcpu_refcnt, i); in caifd_refcnt_read()
/linux-4.1.27/arch/arm/xen/
enlighten.c
94 vcpup = per_cpu_ptr(xen_vcpu_info, cpu); in xen_percpu_init()
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/
ixgbe_fcoe.c
205 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); in ixgbe_fcoe_ddp_setup()
622 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in ixgbe_fcoe_dma_pool_free()
643 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in ixgbe_fcoe_dma_pool_alloc()
ixgbe_main.c
6090 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in ixgbe_update_stats()
/linux-4.1.27/net/netfilter/ipvs/
ip_vs_est.c
67 struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i); in ip_vs_read_cpu_stats()
ip_vs_ctl.c
897 ip_vs_dest_stats = per_cpu_ptr(dest->stats.cpustats, i); in ip_vs_new_dest()
1224 ip_vs_stats = per_cpu_ptr(svc->stats.cpustats, i); in ip_vs_add_service()
2141 struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i); in ip_vs_stats_percpu_show()
3885 ipvs_tot_stats = per_cpu_ptr(ipvs->tot_stats.cpustats, i); in ip_vs_control_net_init()
/linux-4.1.27/drivers/acpi/
processor_perflib.c
631 if (!performance || !per_cpu_ptr(performance, i)) { in acpi_processor_preregister_performance()
643 pr->performance = per_cpu_ptr(performance, i); in acpi_processor_preregister_performance()
/linux-4.1.27/drivers/scsi/libfc/
fc_fcp.c
162 per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++; in fc_fcp_pkt_alloc()
270 per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++; in fc_fcp_send_abort()
429 per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++; in fc_fcp_frame_alloc()
507 stats = per_cpu_ptr(lport->stats, get_cpu()); in fc_fcp_recv_data()
1862 stats = per_cpu_ptr(lport->stats, get_cpu()); in fc_queuecommand()
fc_exch.c
818 pool = per_cpu_ptr(mp->pool, cpu); in fc_exch_em_alloc()
913 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); in fc_exch_find()
1917 per_cpu_ptr(ema->mp->pool, cpu), in fc_exch_mgr_reset()
2415 pool = per_cpu_ptr(mp->pool, cpu); in fc_exch_mgr_alloc()
fc_lport.c
319 stats = per_cpu_ptr(lport->stats, cpu); in fc_get_host_stats()
/linux-4.1.27/Documentation/
this_cpu_ops.txt
102 y = per_cpu_ptr(&x, cpu);
281 To access per-cpu data structure remotely, typically the per_cpu_ptr()
287 struct data *p = per_cpu_ptr(&datap, cpu);
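
The this_cpu_ops.txt lines above describe the remote-access half of the API: this_cpu_*() operations act on the executing CPU's copy, while per_cpu_ptr() computes a pointer to an arbitrary CPU's copy. A short sketch of that remote read on a statically defined per-cpu variable, reusing the documentation's illustrative struct data/datap names (synchronizing with the owning CPU is the caller's responsibility and is omitted here):

#include <linux/percpu.h>

struct data {			/* illustrative type from the doc excerpt */
	int field;
};

static DEFINE_PER_CPU(struct data, datap);

static int read_remote_field(int cpu)
{
	/*
	 * &datap is the canonical per-cpu address; per_cpu_ptr()
	 * relocates it into the per-cpu area of the given CPU.
	 */
	struct data *p = per_cpu_ptr(&datap, cpu);

	return p->field;
}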
/linux-4.1.27/net/batman-adv/
main.h
339 counters = per_cpu_ptr(bat_priv->bat_counters, cpu); in batadv_sum_counter()
/linux-4.1.27/fs/
seq_file.c
930 hlist_for_each(node, per_cpu_ptr(head, *cpu)) { in seq_hlist_start_percpu()
961 struct hlist_head *bucket = per_cpu_ptr(head, *cpu); in seq_hlist_next_percpu()
namespace.c
184 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count; in mnt_get_count()
309 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers; in mnt_get_writers()
locks.c
2705 INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i)); in filelock_init()
buffer.c
1434 struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); in has_bh_in_lru()
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
i40e_fcoe.c
487 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in i40e_fcoe_dma_pool_free()
513 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in i40e_fcoe_dma_pool_create()
859 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); in i40e_fcoe_ddp_setup()
/linux-4.1.27/drivers/scsi/fcoe/
fcoe.c
1553 per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++; in fcoe_rcv()
1705 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_xmit()
1770 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_filter_frames()
1818 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_recv_frame()
2063 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_device_notification()
fcoe_ctlr.c
834 stats = per_cpu_ptr(fip->lp->stats, get_cpu()); in fcoe_ctlr_age_fcfs()
1291 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_ctlr_recv_els()
1403 per_cpu_ptr(lport->stats, in fcoe_ctlr_recv_clr_vlink()
1433 per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++; in fcoe_ctlr_recv_clr_vlink()
fcoe_transport.c
173 stats = per_cpu_ptr(lport->stats, cpu); in __fcoe_get_lesb()
/linux-4.1.27/drivers/base/
cpu.c
154 addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum)); in show_crash_notes()
/linux-4.1.27/net/bridge/
br_device.c
158 = per_cpu_ptr(br->stats, cpu); in br_get_stats64()
/linux-4.1.27/net/rds/
ib_recv.c
110 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_recv_alloc_cache()
141 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_cache_splice_all_lists()
/linux-4.1.27/net/openvswitch/
vport.c
316 percpu_stats = per_cpu_ptr(vport->percpu_stats, i); in ovs_vport_get_stats()
datapath.c
649 percpu_stats = per_cpu_ptr(dp->stats_percpu, i); in get_dp_stats()
/linux-4.1.27/kernel/events/
hw_breakpoint.c
66 return per_cpu_ptr(bp_cpuinfo + type, cpu); in get_bp_info()
core.c
400 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
626 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_set_shadow_time()
3354 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in find_get_context()
7158 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in update_pmu_context()
7229 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_mux_interval_ms_store()
7333 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_pmu_register()
8310 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; in perf_pmu_migrate_context()
8311 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; in perf_pmu_migrate_context()
8922 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; in perf_event_exit_cpu_context()
/linux-4.1.27/drivers/irqchip/
irq-gic.c
923 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; in gic_init_bases()
924 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; in gic_init_bases()
/linux-4.1.27/drivers/bus/
mips_cdmm.c
302 bus_p = per_cpu_ptr(&mips_cdmm_buses, cpu); in mips_cdmm_get_bus()
/linux-4.1.27/drivers/clocksource/
exynos_mct.c
554 per_cpu_ptr(&percpu_mct_tick, cpu); in exynos4_timer_resources()
/linux-4.1.27/net/sched/
cls_u32.c
993 __u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum); in u32_dump()
1023 struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu); in u32_dump()
/linux-4.1.27/arch/arm/kvm/
arm.c
1031 cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu); in init_hyp_mode()
/linux-4.1.27/drivers/scsi/bnx2fc/
bnx2fc_fcoe.c
381 stats = per_cpu_ptr(lport->stats, get_cpu()); in bnx2fc_xmit()
578 stats = per_cpu_ptr(lport->stats, smp_processor_id()); in bnx2fc_recv_frame()
909 per_cpu_ptr(lport->stats, in bnx2fc_indicate_netevent()
bnx2fc_io.c
2022 stats = per_cpu_ptr(lport->stats, get_cpu()); in bnx2fc_post_io_req()
/linux-4.1.27/drivers/net/ipvlan/
ipvlan_main.c
261 pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx); in ipvlan_get_stats64()
/linux-4.1.27/net/8021q/
vlan_dev.c
666 p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i); in vlan_dev_get_stats64()
/linux-4.1.27/drivers/net/ethernet/marvell/
mvpp2.c
4217 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); in mvpp2_txq_reserved_desc_num_proc()
4647 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); in mvpp2_txq_init()
4676 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); in mvpp2_txq_deinit()
4732 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); in mvpp2_txq_clean()
5704 cpu_stats = per_cpu_ptr(port->stats, cpu); in mvpp2_get_stats64()
5960 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); in mvpp2_port_init()
mvneta.c
538 cpu_stats = per_cpu_ptr(pp->stats, cpu); in mvneta_get_stats64()
/linux-4.1.27/net/ipv6/
af_inet6.c
735 af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics, i); in ipv6_init_mibs()
ip6_tunnel.c
103 per_cpu_ptr(dev->tstats, i); in ip6_get_stats()
addrconf.c
304 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i); in snmp6_alloc_dev()
/linux-4.1.27/drivers/md/
dm-stats.c
196 last = per_cpu_ptr(stats->last, cpu); in dm_stats_init()
raid5.c
1922 percpu = per_cpu_ptr(conf->percpu, cpu); in raid_run_ops()
2097 percpu = per_cpu_ptr(conf->percpu, cpu); in resize_chunks()
6307 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid5_free_percpu()
6331 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); in raid456_cpu_notify()
6344 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid456_cpu_notify()
6372 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid5_alloc_percpu()
/linux-4.1.27/kernel/time/
timer.c
1653 base = per_cpu_ptr(&__tvec_bases, cpu); in init_timer_cpus()
/linux-4.1.27/drivers/edac/
amd64_edac.c
2499 struct msr *reg = per_cpu_ptr(msrs, cpu); in nb_mce_bank_enabled_on_node()
2532 struct msr *reg = per_cpu_ptr(msrs, cpu); in toggle_ecc_err_reporting()
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_init.c
1093 int_counter += *per_cpu_ptr(dd->int_counter, cpu); in qib_int_counter()
qib_mad.c
1646 p = per_cpu_ptr(ibp->pmastats, cpu); in qib_snapshot_pmacounters()
/linux-4.1.27/fs/gfs2/
glock.c
1792 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); in gfs2_sbstats_seq_show()
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb/
sge.c
984 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); in t1_sge_get_port_stats()
/linux-4.1.27/net/ipv6/netfilter/
ip6_tables.c
362 stackptr = per_cpu_ptr(private->stackptr, cpu); in ip6t_do_table()
/linux-4.1.27/arch/x86/kvm/
x86.c
206 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); in shared_msr_update()
241 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); in kvm_set_shared_msr()
263 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); in drop_user_return_notifiers()
/linux-4.1.27/fs/nfs/
super.c
860 stats = per_cpu_ptr(nfss->io_stats, cpu); in nfs_show_stats()
/linux-4.1.27/drivers/scsi/
hpsa.c
2017 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); in lockup_detected()
6639 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); in set_lockup_detected_for_all_cpus()
/linux-4.1.27/net/packet/
af_packet.c
1216 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); in packet_read_pending()
/linux-4.1.27/fs/ext4/
mballoc.c
2640 lg = per_cpu_ptr(sbi->s_locality_groups, i); in ext4_mb_init()