Searched refs:per_cpu_ptr (Results 1 – 174 of 174) sorted by relevance
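Nearly every hit below is one of a handful of idioms. The most common is the aggregation loop: allocate a per-cpu object with alloc_percpu(), update the local instance cheaply on the hot path, and have a reader walk all CPUs with per_cpu_ptr() to fold the instances together. A minimal sketch of that pattern (the foo_* names are illustrative, not taken from any file listed here):

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

struct foo_stats {
	u64 packets;
	u64 bytes;
};

static struct foo_stats __percpu *foo_stats;

static int foo_stats_init(void)
{
	foo_stats = alloc_percpu(struct foo_stats);
	return foo_stats ? 0 : -ENOMEM;
}

/* Fold every CPU's counter; readers tolerate slight staleness. */
static u64 foo_total_packets(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(foo_stats, cpu)->packets;
	return sum;
}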

/linux-4.4.14/kernel/locking/
Dlglock.c47 lock = per_cpu_ptr(lg->lock, cpu); in lg_local_lock_cpu()
57 lock = per_cpu_ptr(lg->lock, cpu); in lg_local_unlock_cpu()
73 arch_spin_lock(per_cpu_ptr(lg->lock, cpu1)); in lg_double_lock()
74 arch_spin_lock(per_cpu_ptr(lg->lock, cpu2)); in lg_double_lock()
80 arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1)); in lg_double_unlock()
81 arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2)); in lg_double_unlock()
93 lock = per_cpu_ptr(lg->lock, i); in lg_global_lock()
106 lock = per_cpu_ptr(lg->lock, i); in lg_global_unlock()
Dosq_lock.c28 return per_cpu_ptr(&osq_node, cpu_nr); in decode_cpu()
Dqspinlock.c110 return per_cpu_ptr(&mcs_nodes[idx], cpu); in decode_tail()
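The lglock.c hits above show the per-cpu lock idiom: one spinlock per CPU, where the common path takes only the local CPU's lock and a rare "global" path takes every CPU's lock in order. A minimal sketch with ordinary spinlocks (lglock itself uses arch_spin_lock(), in part because lockdep cannot track nr_cpu_ids held instances of one lock class; all names here are illustrative):

#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_PER_CPU(spinlock_t, foo_locks);

static void foo_locks_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(per_cpu_ptr(&foo_locks, cpu));
}

/* Rare path: exclude all per-cpu holders, as lg_global_lock() does. */
static void foo_global_lock(void)
{
	int cpu;

	preempt_disable();
	for_each_possible_cpu(cpu)
		spin_lock(per_cpu_ptr(&foo_locks, cpu));
}

static void foo_global_unlock(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_unlock(per_cpu_ptr(&foo_locks, cpu));
	preempt_enable();
}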
/linux-4.4.14/fs/xfs/
Dxfs_stats.c28 val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx)); in counter_val()
81 xs_xstrat_bytes += per_cpu_ptr(stats, i)->xs_xstrat_bytes; in xfs_stats_format()
82 xs_write_bytes += per_cpu_ptr(stats, i)->xs_write_bytes; in xfs_stats_format()
83 xs_read_bytes += per_cpu_ptr(stats, i)->xs_read_bytes; in xfs_stats_format()
107 vn_active = per_cpu_ptr(stats, c)->vn_active; in xfs_stats_clearall()
108 memset(per_cpu_ptr(stats, c), 0, sizeof(*stats)); in xfs_stats_clearall()
109 per_cpu_ptr(stats, c)->vn_active = vn_active; in xfs_stats_clearall()
Dxfs_stats.h222 per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v++; \
223 per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->v++; \
228 per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v--; \
229 per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->v--; \
234 per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v += (inc); \
235 per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->v += (inc); \
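The xfs_stats.c hits show a wrinkle on the aggregation idiom: clearing per-cpu statistics while preserving a live counter, since vn_active tracks currently-active vnodes and must survive a stats reset. A sketch of that clear-but-preserve loop (struct and function names are illustrative):

#include <linux/percpu.h>
#include <linux/string.h>

struct foo_xstats {
	u32 vn_active;	/* live count: must survive a reset */
	u32 reads;
	u32 writes;
};

static void foo_stats_clearall(struct foo_xstats __percpu *stats)
{
	int c;
	u32 vn_active;

	for_each_possible_cpu(c) {
		vn_active = per_cpu_ptr(stats, c)->vn_active;
		memset(per_cpu_ptr(stats, c), 0, sizeof(struct foo_xstats));
		per_cpu_ptr(stats, c)->vn_active = vn_active;
	}
}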
/linux-4.4.14/arch/x86/kernel/cpu/
Dperf_event_amd_uncore.c67 return *per_cpu_ptr(amd_uncore_nb, event->cpu); in event_to_amd_uncore()
69 return *per_cpu_ptr(amd_uncore_l2, event->cpu); in event_to_amd_uncore()
307 *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb; in amd_uncore_cpu_up_prepare()
320 *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2; in amd_uncore_cpu_up_prepare()
338 that = *per_cpu_ptr(uncores, cpu); in amd_uncore_find_online_sibling()
363 uncore = *per_cpu_ptr(amd_uncore_nb, cpu); in amd_uncore_cpu_starting()
368 *per_cpu_ptr(amd_uncore_nb, cpu) = uncore; in amd_uncore_cpu_starting()
375 uncore = *per_cpu_ptr(amd_uncore_l2, cpu); in amd_uncore_cpu_starting()
381 *per_cpu_ptr(amd_uncore_l2, cpu) = uncore; in amd_uncore_cpu_starting()
388 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); in uncore_online()
[all …]
Dperf_event_intel_uncore.c94 box = *per_cpu_ptr(pmu->box, cpu); in uncore_pmu_to_box()
100 if (*per_cpu_ptr(pmu->box, cpu)) in uncore_pmu_to_box()
105 *per_cpu_ptr(pmu->box, cpu) = box; in uncore_pmu_to_box()
112 return *per_cpu_ptr(pmu->box, cpu); in uncore_pmu_to_box()
942 if (*per_cpu_ptr(pmu->box, cpu) == box) { in uncore_pci_remove()
943 *per_cpu_ptr(pmu->box, cpu) = NULL; in uncore_pci_remove()
1043 box = *per_cpu_ptr(pmu->box, cpu); in uncore_cpu_dying()
1044 *per_cpu_ptr(pmu->box, cpu) = NULL; in uncore_cpu_dying()
1064 box = *per_cpu_ptr(pmu->box, cpu); in uncore_cpu_starting()
1072 exist = *per_cpu_ptr(pmu->box, k); in uncore_cpu_starting()
[all …]
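The double dereference *per_cpu_ptr(...) throughout the uncore drivers above is a distinct idiom: the per-cpu area holds a pointer per CPU (alloc_percpu(struct amd_uncore *)), so per_cpu_ptr() returns a struct ** that needs one more dereference. This lets several CPUs share one dynamically allocated object by installing the same pointer in each slot. A sketch with illustrative names:

#include <linux/percpu.h>
#include <linux/slab.h>

struct foo_box {
	int id;
};

static struct foo_box * __percpu *foo_boxes;

static int foo_boxes_init(void)
{
	foo_boxes = alloc_percpu(struct foo_box *);
	return foo_boxes ? 0 : -ENOMEM;
}

/* Install one box per CPU, as the uncore hotplug callbacks do. */
static int foo_box_install(int cpu)
{
	struct foo_box *box = kzalloc(sizeof(*box), GFP_KERNEL);

	if (!box)
		return -ENOMEM;
	box->id = cpu;
	*per_cpu_ptr(foo_boxes, cpu) = box;
	return 0;
}

static struct foo_box *foo_box_lookup(int cpu)
{
	return *per_cpu_ptr(foo_boxes, cpu);
}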
/linux-4.4.14/kernel/sched/
Dcpuacct.c101 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_cpuusage_read()
120 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_cpuusage_write()
192 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu); in cpuacct_stats_show()
201 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu); in cpuacct_stats_show()
247 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_charge()
Dcore.c6116 sibling = *per_cpu_ptr(sdd->sd, i); in build_group_mask()
6151 sibling = *per_cpu_ptr(sdd->sd, i); in build_overlap_sched_groups()
6171 sg->sgc = *per_cpu_ptr(sdd->sgc, i); in build_overlap_sched_groups()
6210 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu); in get_group()
6217 *sg = *per_cpu_ptr(sdd->sg, cpu); in get_group()
6218 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu); in get_group()
6391 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); in claim_allocations()
6392 *per_cpu_ptr(sdd->sd, cpu) = NULL; in claim_allocations()
6394 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref)) in claim_allocations()
6395 *per_cpu_ptr(sdd->sg, cpu) = NULL; in claim_allocations()
[all …]
/linux-4.4.14/fs/squashfs/
Ddecompressor_multi_percpu.c40 stream = per_cpu_ptr(percpu, cpu); in squashfs_decompressor_create()
53 stream = per_cpu_ptr(percpu, cpu); in squashfs_decompressor_create()
70 stream = per_cpu_ptr(percpu, cpu); in squashfs_decompressor_destroy()
/linux-4.4.14/net/xfrm/
Dxfrm_ipcomp.c49 u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); in ipcomp_decompress()
50 struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); in ipcomp_decompress()
216 vfree(*per_cpu_ptr(scratches, i)); in ipcomp_free_scratches()
241 *per_cpu_ptr(scratches, i) = scratch; in ipcomp_alloc_scratches()
269 struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); in ipcomp_free_tfms()
311 *per_cpu_ptr(tfms, cpu) = tfm; in ipcomp_alloc_tfms()
/linux-4.4.14/kernel/
Dsmpboot.c171 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in __smpboot_create_thread()
190 *per_cpu_ptr(ht->store, cpu) = tsk; in __smpboot_create_thread()
223 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in smpboot_unpark_thread()
242 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in smpboot_park_thread()
264 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in smpboot_destroy_threads()
269 *per_cpu_ptr(ht->store, cpu) = NULL; in smpboot_destroy_threads()
Dpadata.c137 queue = per_cpu_ptr(pd->pqueue, target_cpu); in padata_do_parallel()
186 next_queue = per_cpu_ptr(pd->pqueue, cpu); in padata_get_next()
260 squeue = per_cpu_ptr(pd->squeue, cb_cpu); in padata_reorder()
337 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_do_serial()
380 squeue = per_cpu_ptr(pd->squeue, cpu); in padata_init_squeues()
395 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_init_pqueues()
466 pqueue = per_cpu_ptr(pd->pqueue, cpu); in padata_flush_queues()
476 squeue = per_cpu_ptr(pd->squeue, cpu); in padata_flush_queues()
Dstop_machine.c222 struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1); in cpu_stop_queue_two_works()
223 struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2); in cpu_stop_queue_two_works()
Dsmp.c449 struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many()
466 csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many()
Dworkqueue.c1365 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in __queue_work()
2979 struct work_struct *work = per_cpu_ptr(works, cpu); in schedule_on_each_cpu()
2986 flush_work(per_cpu_ptr(works, cpu)); in schedule_on_each_cpu()
3791 per_cpu_ptr(wq->cpu_pwqs, cpu); in alloc_and_link_pwqs()
4085 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in workqueue_congested()
Dkexec_core.c993 buf = (u32 *)per_cpu_ptr(crash_notes, cpu); in crash_save_cpu()
Dmodule.c682 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); in percpu_modcopy()
707 void *start = per_cpu_ptr(mod->percpu, cpu); in is_module_percpu_address()
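The workqueue.c hits at lines 2979 and 2986 are the run-on-every-CPU idiom: a per-cpu array of work items, one queued to each CPU and then flushed. A simplified sketch of what schedule_on_each_cpu() does (the foo_* names are illustrative; get/put_online_cpus() pin the CPU set across the loop, per the 4.4-era hotplug API):

#include <linux/percpu.h>
#include <linux/workqueue.h>
#include <linux/cpu.h>

static void foo_work_fn(struct work_struct *work)
{
	/* runs once on each online CPU */
}

static int foo_on_each_cpu(void)
{
	struct work_struct __percpu *works;
	int cpu;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, foo_work_fn);
		schedule_work_on(cpu, work);
	}
	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));
	put_online_cpus();

	free_percpu(works);
	return 0;
}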
/linux-4.4.14/kernel/trace/
Dtrace_functions_graph.c342 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in trace_graph_entry()
424 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in trace_graph_return()
543 last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); in verif_pid()
781 cpu_data = per_cpu_ptr(data->cpu_data, cpu); in print_graph_entry_leaf()
821 cpu_data = per_cpu_ptr(data->cpu_data, cpu); in print_graph_entry_nested()
916 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); in check_irq_entry()
962 depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq); in check_irq_return()
1046 cpu_data = per_cpu_ptr(data->cpu_data, cpu); in print_graph_return()
1107 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; in print_graph_comment()
1168 if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) { in print_graph_function_flags()
[all …]
Dtrace_sched_wakeup.c91 *data = per_cpu_ptr(tr->trace_buffer.data, cpu); in func_prolog_preempt_disable()
458 disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled); in probe_wakeup_sched_switch()
470 data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu); in probe_wakeup_sched_switch()
492 atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled); in probe_wakeup_sched_switch()
549 disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled); in probe_wakeup()
582 data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu); in probe_wakeup()
596 atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled); in probe_wakeup()
Dtrace_kdb.c33 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump_buf()
90 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump_buf()
Dtrace_irqsoff.c120 *data = per_cpu_ptr(tr->trace_buffer.data, cpu); in func_prolog_dec()
369 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in start_critical_timing()
407 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in stop_critical_timing()
Dtrace.c1035 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu); in __update_max_tr()
1036 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu); in __update_max_tr()
2424 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0; in tracing_iter_reset()
2444 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries; in tracing_iter_reset()
2545 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) { in get_total_entries()
2546 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries; in get_total_entries()
2603 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu); in print_trace_header()
2674 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries) in test_cpu_buff_start()
3442 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); in tracing_cpumask_write()
3447 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); in tracing_cpumask_write()
[all …]
Dtrace_mmiotrace.c323 struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id()); in mmio_trace_rw()
356 data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id()); in mmio_trace_mapping()
Dtrace_functions.c145 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in function_trace_call()
176 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in function_stack_trace_call()
Dtrace_uprobe.c715 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p); in uprobe_buffer_init()
716 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex); in uprobe_buffer_init()
725 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf); in uprobe_buffer_init()
755 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, in uprobe_buffer_disable()
769 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu); in uprobe_buffer_get()
Dtrace_event_perf.c102 INIT_HLIST_HEAD(per_cpu_ptr(list, cpu)); in perf_trace_event_reg()
Dblktrace.c255 sequence = per_cpu_ptr(bt->sequence, cpu); in __blk_add_trace()
Dtrace_events.c598 per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false; in __ftrace_clear_event_pids()
Dftrace.c211 *per_cpu_ptr(ops->disabled, cpu) = 1; in control_ops_disable_all()
/linux-4.4.14/drivers/staging/rdma/ehca/
Dehca_irq.c671 } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active); in find_next_online_cpu()
709 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); in queue_comp_task()
710 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id); in queue_comp_task()
718 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); in queue_comp_task()
719 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id); in queue_comp_task()
750 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); in comp_task_park()
763 target = per_cpu_ptr(pool->cpu_comp_tasks, cpu); in comp_task_park()
764 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu); in comp_task_park()
775 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); in comp_task_stop()
786 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); in comp_task_should_run()
[all …]
/linux-4.4.14/lib/
Dpercpu_ida.c77 remote = per_cpu_ptr(pool->tag_cpu, cpu); in steal_tags()
322 spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock); in __percpu_ida_init()
350 remote = per_cpu_ptr(pool->tag_cpu, cpu); in percpu_ida_for_each_free()
387 remote = per_cpu_ptr(pool->tag_cpu, cpu); in percpu_ida_free_tags()
Dpercpu_counter.c67 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_set()
107 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in __percpu_counter_sum()
186 pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_hotcpu_callback()
Dpercpu-refcount.c130 count += *per_cpu_ptr(percpu_count, cpu); in percpu_ref_switch_to_atomic_rcu()
247 *per_cpu_ptr(percpu_count, cpu) = 0; in __percpu_ref_switch_to_percpu()
Drandom32.c246 struct rnd_state *state = per_cpu_ptr(pcpu_state, i); in prandom_seed_full_state()
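The percpu_counter.c hits show the batched-counter idiom: each CPU accumulates small deltas in a private s32 and only folds them into a shared s64 when the batch overflows, so an exact read must walk every CPU's delta under the lock. A simplified sketch of that summation (field names mirror struct percpu_counter; the body is illustrative):

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct foo_counter {
	raw_spinlock_t lock;
	s64 count;		/* folded total */
	s32 __percpu *counters;	/* per-CPU unfolded deltas */
};

static s64 foo_counter_sum(struct foo_counter *fbc)
{
	unsigned long flags;
	s64 ret;
	int cpu;

	raw_spin_lock_irqsave(&fbc->lock, flags);
	ret = fbc->count;
	for_each_online_cpu(cpu)
		ret += *per_cpu_ptr(fbc->counters, cpu);
	raw_spin_unlock_irqrestore(&fbc->lock, flags);
	return ret;
}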
/linux-4.4.14/include/net/
Dgro_cells.h69 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); in gro_cells_init()
85 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); in gro_cells_destroy()
/linux-4.4.14/arch/x86/kernel/
Dkgdb.c224 bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu); in kgdb_correct_hw_break()
253 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); in hw_break_reserve_slot()
265 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); in hw_break_reserve_slot()
280 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); in hw_break_release_slot()
320 bp = *per_cpu_ptr(breakinfo[i].pev, cpu); in kgdb_remove_all_hw_break()
413 bp = *per_cpu_ptr(breakinfo[i].pev, cpu); in kgdb_disable_hw_debug()
685 pevent = per_cpu_ptr(breakinfo[i].pev, cpu); in kgdb_arch_late()
Dcrash.c456 notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu)); in prepare_elf64_headers()
/linux-4.4.14/net/core/
Dflow.c59 per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1; in flow_cache_new_hashrnd()
323 fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_percpu_empty()
395 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_cpu_prepare()
418 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_cpu()
480 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i); in flow_cache_init()
503 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i); in flow_cache_fini()
Dgen_stats.c110 struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i); in __gnet_stats_copy_basic_cpu()
228 const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i); in __gnet_stats_copy_queue_cpu()
Ddst.c409 __metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen); in metadata_dst_alloc_percpu()
Dneighbour.c1845 st = per_cpu_ptr(tbl->stats, cpu); in neightbl_fill_info()
2738 return per_cpu_ptr(tbl->stats, cpu); in neigh_stat_seq_start()
2752 return per_cpu_ptr(tbl->stats, cpu); in neigh_stat_seq_next()
Dsock.c2723 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx]; in sock_prot_inuse_get()
Ddev.c6815 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); in netdev_refcnt_read()
/linux-4.4.14/kernel/rcu/
Dsrcu.c154 t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]); in srcu_readers_seq_idx()
171 t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]); in srcu_readers_active_idx()
269 sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]); in srcu_readers_active()
270 sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]); in srcu_readers_active()
Dtree.c1274 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; in print_other_cpu_stall()
1321 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen; in print_cpu_stall()
2606 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_cleanup_dying_idle_cpu()
2630 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_cleanup_dead_cpu()
2843 if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj)) in force_qs_rnp()
3070 rdp = per_cpu_ptr(rsp->rda, cpu); in __call_rcu()
3640 rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id()); in exp_funnel_lock()
3686 rdp = per_cpu_ptr(rsp->rda, cpu); in sync_sched_exp_online_cleanup()
3717 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in sync_rcu_exp_select_cpus()
3806 rdp = per_cpu_ptr(rsp->rda, cpu); in synchronize_sched_expedited_wait()
[all …]
Dtree_plugin.h1305 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); in rcu_prepare_kthreads()
1733 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in print_cpu_stall_info()
1881 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_nocb_cpu_needs_barrier()
2358 init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu)); in rcu_init_nohz()
2382 struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu); in rcu_spawn_one_nocb_kthread()
2474 rdp = per_cpu_ptr(rsp->rda, cpu); in rcu_organize_nocb_kthreads()
2925 rdp = per_cpu_ptr(rcu_state_p->rda, cpu); in rcu_sys_is_idle()
Dtree_trace.c67 return per_cpu_ptr(rsp->rda, *pos); in r_start()
Drcutorture.c546 c0 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[!idx]; in srcu_torture_stats()
547 c1 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[idx]; in srcu_torture_stats()
/linux-4.4.14/arch/x86/kernel/acpi/
Dcstate.c130 percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); in acpi_processor_ffh_cstate_probe()
160 percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); in acpi_processor_ffh_cstate_enter()
/linux-4.4.14/include/linux/
Dpercpu-defs.h220 #define per_cpu_ptr(ptr, cpu) \ macro
250 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) macro
251 #define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
256 #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
Dgenhd.h324 (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd))
331 res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
340 memset(per_cpu_ptr(part->dkstats, i), value, in part_stat_set_all()
Dvmstat.h155 x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item]; in zone_page_state_snapshot()
Dblk-mq.h260 ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)
Dnetdevice.h2079 stat = per_cpu_ptr(pcpu_stats, __cpu); \
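The percpu-defs.h lines near the top of this group are the definition itself: two variants of the per_cpu_ptr() macro (SMP at line 220, UP at line 250), plus per_cpu() built on top of it at line 256. Conceptually (simplified; the real SMP version goes through SHIFT_PERCPU_PTR() and sparse-checking helpers):

/*
 *   SMP: per_cpu_ptr(ptr, cpu) ~= (typeof(ptr))((char *)(ptr) +
 *                                   per_cpu_offset(cpu))
 *   UP:  per_cpu_ptr(ptr, cpu) ~= (ptr)      (cpu is only evaluated)
 *
 * and per_cpu(var, cpu) is just *per_cpu_ptr(&(var), cpu):
 */
static DEFINE_PER_CPU(int, foo_var);	/* illustrative variable */

static int read_foo(int cpu)
{
	return per_cpu(foo_var, cpu);	/* == *per_cpu_ptr(&foo_var, cpu) */
}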
/linux-4.4.14/crypto/
Dmcryptd.c56 flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); in mcryptd_arm_flusher()
79 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in mcryptd_init_queue()
93 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in mcryptd_fini_queue()
128 flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); in mcryptd_opportunistic_flush()
210 flist = per_cpu_ptr(mcryptd_flist, cpu); in mcryptd_flusher()
695 flist = per_cpu_ptr(mcryptd_flist, cpu); in mcryptd_init()
Dcryptd.c97 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in cryptd_init_queue()
110 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in cryptd_fini_queue()
/linux-4.4.14/arch/arc/kernel/
Dsmp.c219 unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu); in ipi_send_msg_one()
357 int *dev = per_cpu_ptr(&ipi_dev, cpu); in smp_ipi_irq_setup()
/linux-4.4.14/drivers/nvdimm/
Dregion_devs.c600 ndl_count = per_cpu_ptr(nd_region->lane, cpu); in nd_region_acquire_lane()
601 ndl_lock = per_cpu_ptr(nd_region->lane, lane); in nd_region_acquire_lane()
617 ndl_count = per_cpu_ptr(nd_region->lane, cpu); in nd_region_release_lane()
618 ndl_lock = per_cpu_ptr(nd_region->lane, lane); in nd_region_release_lane()
688 ndl = per_cpu_ptr(nd_region->lane, i); in nd_region_create()
/linux-4.4.14/drivers/cpufreq/
Dacpi-cpufreq.c80 return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu); in to_perf_data()
128 struct msr *reg = per_cpu_ptr(msrs, cpu); in boost_set_msrs()
527 free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) in free_acpi_perf_data()
590 &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, in acpi_cpufreq_early_init()
686 perf = per_cpu_ptr(acpi_perf_data, cpu); in acpi_cpufreq_cpu_init()
Dpcc-cpufreq.c152 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); in pcc_get_freq()
208 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); in pcc_cpufreq_target()
258 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); in pcc_get_offset()
/linux-4.4.14/drivers/net/team/
Dteam_mode_loadbalance.c473 pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); in lb_stats_refresh()
487 pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); in lb_stats_refresh()
488 stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i); in lb_stats_refresh()
597 team_lb_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); in lb_init()
Dteam.c1793 p = per_cpu_ptr(team->pcpu_stats, i); in team_get_stats64()
/linux-4.4.14/arch/x86/lib/
Dmsr-smp.c13 reg = per_cpu_ptr(rv->msrs, this_cpu); in __rdmsr_on_cpu()
27 reg = per_cpu_ptr(rv->msrs, this_cpu); in __wrmsr_on_cpu()
/linux-4.4.14/kernel/irq/
Dirqdesc.c92 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; in desc_set_defaults()
606 *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; in kstat_irqs_cpu()
626 sum += *per_cpu_ptr(desc->kstat_irqs, cpu); in kstat_irqs()
/linux-4.4.14/block/
Dblk-mq.h77 return per_cpu_ptr(q->queue_ctx, cpu); in __blk_mq_get_ctx()
Dblk-mq.c1777 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); in blk_mq_init_cpu_queues()
/linux-4.4.14/drivers/xen/
Dxen-acpi-processor.c465 free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) in free_acpi_perf_data()
530 &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, in xen_acpi_processor_init()
545 perf = per_cpu_ptr(acpi_perf_data, i); in xen_acpi_processor_init()
/linux-4.4.14/drivers/thermal/
Dintel_powerclamp.c534 per_cpu_ptr(powerclamp_thread, cpu); in start_power_clamp()
568 thread = *per_cpu_ptr(powerclamp_thread, i); in end_power_clamp()
580 per_cpu_ptr(powerclamp_thread, cpu); in powerclamp_cpu_callback()
/linux-4.4.14/mm/
Dvmstat.c180 per_cpu_ptr(zone->pageset, cpu)->stat_threshold in refresh_zone_stat_thresholds()
211 per_cpu_ptr(zone->pageset, cpu)->stat_threshold in set_pgdat_percpu_threshold()
535 p = per_cpu_ptr(zone->pageset, cpu); in cpu_vm_stats_fold()
1243 pageset = per_cpu_ptr(zone->pageset, i); in zoneinfo_show_print()
1429 struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu); in need_update()
1479 INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu), in start_shepherd_timer()
Dzswap.c428 if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu))) in __zswap_cpu_comp_notifier()
436 *per_cpu_ptr(pool->tfm, cpu) = tfm; in __zswap_cpu_comp_notifier()
440 tfm = *per_cpu_ptr(pool->tfm, cpu); in __zswap_cpu_comp_notifier()
443 *per_cpu_ptr(pool->tfm, cpu) = NULL; in __zswap_cpu_comp_notifier()
Dkmemleak.c890 log->ptr = per_cpu_ptr(ptr, cpu); in early_alloc_percpu()
943 create_object((unsigned long)per_cpu_ptr(ptr, cpu), in kmemleak_alloc_percpu()
1003 delete_object_full((unsigned long)per_cpu_ptr(ptr, in kmemleak_free_percpu()
Dpage_alloc.c1907 pset = per_cpu_ptr(zone->pageset, cpu); in drain_pages_zone()
1982 pcp = per_cpu_ptr(zone->pageset, cpu); in drain_all_pages()
1987 pcp = per_cpu_ptr(z->pageset, cpu); in drain_all_pages()
3713 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
3750 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count; in show_free_areas()
4683 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu); in zone_pageset_init()
6306 per_cpu_ptr(zone->pageset, cpu)); in percpu_pagelist_fraction_sysctl_handler()
6833 per_cpu_ptr(zone->pageset, cpu)); in zone_pcp_update()
6848 pset = per_cpu_ptr(zone->pageset, cpu); in zone_pcp_reset()
Dslub.c1854 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu); in init_kmem_cache_cpus()
2148 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in __flush_cpu_slab()
2168 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); in has_cpu_slab()
4490 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, in show_slab_objects()
4738 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial; in slabs_cpu_partial_show()
4750 struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial; in slabs_cpu_partial_show()
5028 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si]; in show_stat()
5051 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0; in clear_stat()
Dpercpu.c1289 void *start = per_cpu_ptr(base, cpu); in is_kernel_percpu_address()
1340 void *start = per_cpu_ptr(base, cpu); in per_cpu_ptr_to_phys()
Dslab.c1119 nc = per_cpu_ptr(cachep->cpu_cache, cpu); in cpuup_canceled()
2006 init_arraycache(per_cpu_ptr(cpu_cache, cpu), in alloc_kmem_cache_cpus()
3717 struct array_cache *ac = per_cpu_ptr(prev, cpu); in __do_tune_cpucache()
Dswapfile.c2487 cluster = per_cpu_ptr(p->percpu_cluster, cpu); in SYSCALL_DEFINE2()
Dmemcontrol.c5639 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, in mem_cgroup_init()
/linux-4.4.14/net/netfilter/
Dnf_synproxy_core.c259 return per_cpu_ptr(snet->stats, cpu); in synproxy_cpu_seq_start()
274 return per_cpu_ptr(snet->stats, cpu); in synproxy_cpu_seq_next()
Dnft_counter.c60 cpu_stats = per_cpu_ptr(counter, cpu); in nft_counter_fetch()
Dnf_conntrack_standalone.c311 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_start()
326 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_next()
Dnf_conntrack_ecache.c92 pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu); in ecache_work()
Dnf_conntrack_core.c255 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); in nf_ct_add_to_dying_list()
270 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); in nf_ct_add_to_unconfirmed_list()
284 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); in nf_ct_del_from_dying_or_unconfirmed_list()
1400 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in get_next_corpse()
1772 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in nf_conntrack_init_net()
Dnf_conntrack_helper.c419 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in __nf_conntrack_helper_unregister()
Dnf_conntrack_netlink.c1285 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in ctnetlink_dump_list()
2025 st = per_cpu_ptr(net->ct.stat, cpu); in ctnetlink_ct_stat_cpu_dump()
3234 st = per_cpu_ptr(net->ct.stat, cpu); in ctnetlink_exp_stat_cpu_dump()
Dnf_tables_api.c936 cpu_stats = per_cpu_ptr(stats, cpu); in nft_dump_stats()
/linux-4.4.14/kernel/time/
Dtimer.c142 return per_cpu_ptr(&tvec_bases, get_nohz_timer_target()); in get_target_base()
771 base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK); in lock_timer_base()
980 struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu); in add_timer_on()
1591 old_base = per_cpu_ptr(&tvec_bases, cpu); in migrate_timers()
1644 struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu); in init_timer_cpu()
/linux-4.4.14/arch/s390/kernel/
Dmachine_kexec.c46 ptr = (u64 *) per_cpu_ptr(crash_notes, cpu); in add_elf_notes()
/linux-4.4.14/drivers/net/
Dnlmon.c90 nl_stats = per_cpu_ptr(dev->lstats, i); in nlmon_get_stats64()
Dloopback.c112 lb_stats = per_cpu_ptr(dev->lstats, i); in loopback_get_stats64()
Ddummy.c67 dstats = per_cpu_ptr(dev->dstats, i); in dummy_get_stats64()
Dvrf.c203 dstats = per_cpu_ptr(dev->dstats, i); in vrf_get_stats64()
472 struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu); in vrf_rt6_create()
Dveth.c148 struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu); in veth_stats_one()
Dvirtio_net.c1064 struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); in virtnet_stats()
1812 virtnet_stats = per_cpu_ptr(vi->stats, i); in virtnet_probe()
Dxen-netfront.c1077 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); in xennet_get_stats64()
1078 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); in xennet_get_stats64()
Dmacvlan.c843 p = per_cpu_ptr(vlan->pcpu_stats, i); in macvlan_dev_get_stats64()
/linux-4.4.14/drivers/dma/
Ddmaengine.c99 count += per_cpu_ptr(chan->local, i)->memcpy_count; in memcpy_count_show()
121 count += per_cpu_ptr(chan->local, i)->bytes_transferred; in bytes_transferred_show()
450 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; in dma_channel_rebalance()
467 per_cpu_ptr(channel_table[cap], cpu)->chan = chan; in dma_channel_rebalance()
/linux-4.4.14/drivers/perf/
Darm_pmu.c626 free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); in cpu_pmu_free_irq()
681 per_cpu_ptr(&hw_events->percpu_pmu, cpu)); in cpu_pmu_request_irq()
737 struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu); in cpu_pmu_init()
/linux-4.4.14/arch/ia64/kernel/
Dcrash.c76 buf = (u64 *) per_cpu_ptr(crash_notes, cpu); in crash_save_this_cpu()
/linux-4.4.14/net/ipv4/netfilter/
Dnf_conntrack_l3proto_ipv4_compat.c341 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_start()
356 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_next()
/linux-4.4.14/arch/x86/crypto/sha-mb/
Dsha1_mb.c878 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); in sha1_mb_mod_init()
908 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); in sha1_mb_mod_init()
923 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); in sha1_mb_mod_fini()
/linux-4.4.14/drivers/idle/
Dintel_idle.c800 dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu); in cpu_hotplug_notify()
988 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); in intel_idle_cpuidle_devices_uninit()
1165 dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu); in intel_idle_cpu_init()
/linux-4.4.14/net/caif/
Dcffrml.c195 refcnt += *per_cpu_ptr(this->pcpu_refcnt, i); in cffrml_refcnt_read()
Dcaif_dev.c87 refcnt += *per_cpu_ptr(e->pcpu_refcnt, i); in caifd_refcnt_read()
/linux-4.4.14/drivers/net/ethernet/mellanox/mlxsw/
Dcore.c612 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); in mlxsw_core_rx_stats_dbg_read()
628 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); in mlxsw_core_rx_stats_dbg_read()
639 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); in mlxsw_core_rx_stats_dbg_read()
655 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); in mlxsw_core_rx_stats_dbg_read()
Dswitchx2.c361 p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i); in mlxsw_sx_port_get_stats64()
Dspectrum.c460 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); in mlxsw_sp_port_get_stats64()
/linux-4.4.14/arch/arm/xen/
Denlighten.c98 vcpup = per_cpu_ptr(xen_vcpu_info, cpu); in xen_percpu_init()
/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/
Dixgbe_fcoe.c205 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); in ixgbe_fcoe_ddp_setup()
622 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in ixgbe_fcoe_dma_pool_free()
643 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in ixgbe_fcoe_dma_pool_alloc()
Dixgbe_main.c6204 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in ixgbe_update_stats()
/linux-4.4.14/net/netfilter/ipvs/
Dip_vs_est.c67 struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i); in ip_vs_read_cpu_stats()
Dip_vs_ctl.c894 ip_vs_dest_stats = per_cpu_ptr(dest->stats.cpustats, i); in ip_vs_new_dest()
1217 ip_vs_stats = per_cpu_ptr(svc->stats.cpustats, i); in ip_vs_add_service()
2147 struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i); in ip_vs_stats_percpu_show()
3971 ipvs_tot_stats = per_cpu_ptr(ipvs->tot_stats.cpustats, i); in ip_vs_control_net_init()
/linux-4.4.14/net/ipv4/
Dicmp.c1167 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i)); in icmp_sk_exit()
1188 *per_cpu_ptr(net->ipv4.icmp_sk, i) = sk; in icmp_sk_init()
1229 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i)); in icmp_sk_init()
Dip_tunnel_core.c201 per_cpu_ptr(dev->tstats, i); in ip_tunnel_get_stats64()
Daf_inet.c1450 return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt); in snmp_get_cpu_field()
1475 bhptr = per_cpu_ptr(mib, cpu); in snmp_get_cpu_field64()
1543 af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i); in ipv4_mib_init_net()
Dtcp_ipv4.c2360 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); in tcp_sk_exit()
2379 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; in tcp_sk_init()
Dip_tunnel.c99 __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0); in ip_tunnel_dst_reset_all()
Droute.c348 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i); in rt_acct_proc_show()
692 prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i); in update_or_create_fnhe()
Dfib_semantics.c196 rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1); in rt_fibinfo_free_cpus()
Dfib_trie.c2179 const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu); in trie_show_usage()
/linux-4.4.14/drivers/acpi/
Dprocessor_perflib.c627 if (!performance || !per_cpu_ptr(performance, i)) { in acpi_processor_preregister_performance()
639 pr->performance = per_cpu_ptr(performance, i); in acpi_processor_preregister_performance()
/linux-4.4.14/drivers/scsi/libfc/
Dfc_fcp.c162 per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++; in fc_fcp_pkt_alloc()
270 per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++; in fc_fcp_send_abort()
429 per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++; in fc_fcp_frame_alloc()
507 stats = per_cpu_ptr(lport->stats, get_cpu()); in fc_fcp_recv_data()
1862 stats = per_cpu_ptr(lport->stats, get_cpu()); in fc_queuecommand()
Dfc_exch.c818 pool = per_cpu_ptr(mp->pool, cpu); in fc_exch_em_alloc()
913 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); in fc_exch_find()
1917 per_cpu_ptr(ema->mp->pool, cpu), in fc_exch_mgr_reset()
2415 pool = per_cpu_ptr(mp->pool, cpu); in fc_exch_mgr_alloc()
Dfc_lport.c319 stats = per_cpu_ptr(lport->stats, cpu); in fc_get_host_stats()
/linux-4.4.14/Documentation/
Dthis_cpu_ops.txt102 y = per_cpu_ptr(&x, cpu);
281 To access per-cpu data structure remotely, typically the per_cpu_ptr()
287 struct data *p = per_cpu_ptr(&datap, cpu);
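The this_cpu_ops.txt snippets above document the remote-access case that per_cpu_ptr() exists for: reaching another CPU's instance of a per-cpu variable. A fleshed-out sketch of those two documentation lines (foo_state and its field are illustrative; as that document cautions, a remote read or write is not atomic with respect to the owning CPU's this_cpu_*() updates):

#include <linux/percpu.h>

struct foo_state {
	unsigned long events;
};

static DEFINE_PER_CPU(struct foo_state, foo_state);

/* Read CPU 'cpu' instance from any context. */
static unsigned long foo_events_on(int cpu)
{
	struct foo_state *p = per_cpu_ptr(&foo_state, cpu);

	return p->events;
}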
/linux-4.4.14/net/openvswitch/
Dvport-internal_dev.c125 percpu_stats = per_cpu_ptr(dev->tstats, i); in internal_get_stats()
Ddatapath.c692 percpu_stats = per_cpu_ptr(dp->stats_percpu, i); in get_dp_stats()
/linux-4.4.14/net/batman-adv/
Dmain.h336 counters = per_cpu_ptr(bat_priv->bat_counters, cpu); in batadv_sum_counter()
/linux-4.4.14/include/linux/netfilter/
Dx_tables.h413 return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu); in xt_get_per_cpu_counter()
/linux-4.4.14/fs/
Dseq_file.c973 hlist_for_each(node, per_cpu_ptr(head, *cpu)) { in seq_hlist_start_percpu()
1004 struct hlist_head *bucket = per_cpu_ptr(head, *cpu); in seq_hlist_next_percpu()
Dnamespace.c184 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count; in mnt_get_count()
309 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers; in mnt_get_writers()
Dlocks.c2734 INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i)); in filelock_init()
Dbuffer.c1458 struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); in has_bh_in_lru()
/linux-4.4.14/drivers/net/ethernet/intel/i40e/
Di40e_fcoe.c487 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in i40e_fcoe_dma_pool_free()
513 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in i40e_fcoe_dma_pool_create()
859 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); in i40e_fcoe_ddp_setup()
/linux-4.4.14/drivers/scsi/fcoe/
Dfcoe.c1552 per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++; in fcoe_rcv()
1704 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_xmit()
1769 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_filter_frames()
1817 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_recv_frame()
2061 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_device_notification()
Dfcoe_ctlr.c834 stats = per_cpu_ptr(fip->lp->stats, get_cpu()); in fcoe_ctlr_age_fcfs()
1291 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_ctlr_recv_els()
1403 per_cpu_ptr(lport->stats, in fcoe_ctlr_recv_clr_vlink()
1433 per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++; in fcoe_ctlr_recv_clr_vlink()
Dfcoe_transport.c173 stats = per_cpu_ptr(lport->stats, cpu); in __fcoe_get_lesb()
/linux-4.4.14/net/bridge/
Dbr_device.c158 = per_cpu_ptr(br->stats, cpu); in br_get_stats64()
/linux-4.4.14/net/rds/
Dib_recv.c110 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_recv_alloc_cache()
141 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_cache_splice_all_lists()
/linux-4.4.14/kernel/events/
Dhw_breakpoint.c66 return per_cpu_ptr(bp_cpuinfo + type, cpu); in get_bp_info()
Dcore.c403 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
638 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_set_shadow_time()
3465 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in find_get_context()
7477 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in update_pmu_context()
7552 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_event_mux_interval_ms_store()
7658 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); in perf_pmu_register()
8688 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx; in perf_pmu_migrate_context()
8689 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx; in perf_pmu_migrate_context()
9333 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx; in perf_event_exit_cpu_context()
/linux-4.4.14/drivers/base/
Dcpu.c155 addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum)); in show_crash_notes()
/linux-4.4.14/drivers/net/ethernet/marvell/
Dmvneta.c602 cpu_stats = per_cpu_ptr(pp->stats, cpu); in mvneta_get_stats64()
2450 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_start_dev()
2476 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_stop_dev()
2760 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_percpu_notifier()
2773 per_cpu_ptr(pp->ports, other_cpu); in mvneta_percpu_notifier()
3398 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_probe()
Dmvpp2.c4230 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu); in mvpp2_txq_reserved_desc_num_proc()
4665 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); in mvpp2_txq_init()
4688 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); in mvpp2_txq_init()
4708 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); in mvpp2_txq_deinit()
4765 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); in mvpp2_txq_clean()
5649 port_pcpu = per_cpu_ptr(port->pcpu, cpu); in mvpp2_stop()
5787 cpu_stats = per_cpu_ptr(port->stats, cpu); in mvpp2_get_stats64()
6042 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); in mvpp2_port_init()
6228 port_pcpu = per_cpu_ptr(port->pcpu, cpu); in mvpp2_port_probe()
/linux-4.4.14/drivers/bus/
Dmips_cdmm.c302 bus_p = per_cpu_ptr(&mips_cdmm_buses, cpu); in mips_cdmm_get_bus()
/linux-4.4.14/drivers/clocksource/
Dexynos_mct.c549 per_cpu_ptr(&percpu_mct_tick, cpu); in exynos4_timer_resources()
/linux-4.4.14/drivers/irqchip/
Dirq-gic.c1073 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; in __gic_init_bases()
1074 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; in __gic_init_bases()
/linux-4.4.14/net/sched/
Dcls_u32.c993 __u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum); in u32_dump()
1023 struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu); in u32_dump()
/linux-4.4.14/drivers/net/hyperv/
Dnetvsc_drv.c920 struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats, in netvsc_get_stats64()
922 struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats, in netvsc_get_stats64()
/linux-4.4.14/net/ipv6/
Dip6_tunnel.c103 per_cpu_ptr(dev->tstats, i); in ip6_get_stats()
180 ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL); in ip6_tnl_dst_reset()
210 seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock); in ip6_tnl_dst_init()
Daf_inet6.c738 af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics, i); in ipv6_init_mibs()
Droute.c150 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); in rt6_uncached_list_flush_dev()
354 p = per_cpu_ptr(rt->rt6i_pcpu, cpu); in ip6_dst_alloc()
3739 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); in ip6_route_init()
Dip6_fib.c174 ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu); in rt6_free_pcpu()
Daddrconf.c309 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i); in snmp6_alloc_dev()
/linux-4.4.14/arch/arm/kvm/
Darm.c1096 cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu); in init_hyp_mode()
/linux-4.4.14/drivers/scsi/bnx2fc/
Dbnx2fc_fcoe.c381 stats = per_cpu_ptr(lport->stats, get_cpu()); in bnx2fc_xmit()
578 stats = per_cpu_ptr(lport->stats, smp_processor_id()); in bnx2fc_recv_frame()
908 per_cpu_ptr(lport->stats, in bnx2fc_indicate_netevent()
Dbnx2fc_io.c1964 stats = per_cpu_ptr(lport->stats, get_cpu()); in bnx2fc_post_io_req()
/linux-4.4.14/net/8021q/
Dvlan_dev.c666 p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i); in vlan_dev_get_stats64()
/linux-4.4.14/drivers/net/ipvlan/
Dipvlan_main.c259 pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx); in ipvlan_get_stats64()
/linux-4.4.14/drivers/edac/
Damd64_edac.c2514 struct msr *reg = per_cpu_ptr(msrs, cpu); in nb_mce_bank_enabled_on_node()
2547 struct msr *reg = per_cpu_ptr(msrs, cpu); in toggle_ecc_err_reporting()
/linux-4.4.14/drivers/md/
Ddm-stats.c203 last = per_cpu_ptr(stats->last, cpu); in dm_stats_init()
Draid5.c1924 percpu = per_cpu_ptr(conf->percpu, cpu); in raid_run_ops()
2099 percpu = per_cpu_ptr(conf->percpu, cpu); in resize_chunks()
6343 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid5_free_percpu()
6370 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); in raid456_cpu_notify()
6383 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid456_cpu_notify()
6411 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid5_alloc_percpu()
/linux-4.4.14/drivers/infiniband/hw/qib/
Dqib_init.c1093 int_counter += *per_cpu_ptr(dd->int_counter, cpu); in qib_int_counter()
Dqib_mad.c1647 p = per_cpu_ptr(ibp->pmastats, cpu); in qib_snapshot_pmacounters()
/linux-4.4.14/net/mac80211/
Diface.c1121 tstats = per_cpu_ptr(dev->tstats, i); in ieee80211_get_stats64()
/linux-4.4.14/fs/gfs2/
Dglock.c1739 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); in gfs2_sbstats_seq_show()
Drgrp.c1875 st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP]; in gfs2_rgrp_congested()
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb/
Dsge.c984 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); in t1_sge_get_port_stats()
/linux-4.4.14/fs/nfs/
Dsuper.c863 stats = per_cpu_ptr(nfss->io_stats, cpu); in nfs_show_stats()
/linux-4.4.14/arch/x86/kvm/
Dx86.c218 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); in shared_msr_update()
251 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); in kvm_set_shared_msr()
273 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); in drop_user_return_notifiers()
/linux-4.4.14/net/packet/
Daf_packet.c1213 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); in packet_read_pending()
/linux-4.4.14/drivers/scsi/
Dhpsa.c2665 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); in lockup_detected()
8075 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); in set_lockup_detected_for_all_cpus()
/linux-4.4.14/fs/ext4/
Dmballoc.c2663 lg = per_cpu_ptr(sbi->s_locality_groups, i); in ext4_mb_init()
/linux-4.4.14/drivers/staging/rdma/hfi1/
Dchip.c1461 counter += *per_cpu_ptr(cntr, cpu); in get_all_cpu_total()