/linux-4.4.14/kernel/locking/ |
D | lglock.c |
    47  lock = per_cpu_ptr(lg->lock, cpu);  in lg_local_lock_cpu()
    57  lock = per_cpu_ptr(lg->lock, cpu);  in lg_local_unlock_cpu()
    73  arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));  in lg_double_lock()
    74  arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));  in lg_double_lock()
    80  arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));  in lg_double_unlock()
    81  arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));  in lg_double_unlock()
    93  lock = per_cpu_ptr(lg->lock, i);  in lg_global_lock()
    106 lock = per_cpu_ptr(lg->lock, i);  in lg_global_unlock()
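lglock is the clearest small example in this listing: one arch_spinlock_t per CPU, where a "local" section takes only the caller CPU's lock and a "global" section walks all of them. A minimal sketch of the global path under that assumption (hypothetical my_lglock struct, locks assumed already initialized; the real lglock.c also handles lockdep annotations):

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/preempt.h>
    #include <linux/spinlock.h>

    /* Hypothetical wrapper: one arch_spinlock_t per CPU. */
    struct my_lglock {
            arch_spinlock_t __percpu *lock;
    };

    /* Global section: take every possible CPU's lock, in CPU order. */
    static void my_lg_global_lock(struct my_lglock *lg)
    {
            int i;

            preempt_disable();
            for_each_possible_cpu(i)
                    arch_spin_lock(per_cpu_ptr(lg->lock, i));
    }

    static void my_lg_global_unlock(struct my_lglock *lg)
    {
            int i;

            for_each_possible_cpu(i)
                    arch_spin_unlock(per_cpu_ptr(lg->lock, i));
            preempt_enable();
    }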
|
D | osq_lock.c | 28 return per_cpu_ptr(&osq_node, cpu_nr); in decode_cpu()
|
D | qspinlock.c | 110 return per_cpu_ptr(&mcs_nodes[idx], cpu); in decode_tail()
|
/linux-4.4.14/fs/xfs/ |
D | xfs_stats.c |
    28  val += *(((__u32 *)per_cpu_ptr(stats, cpu) + idx));  in counter_val()
    81  xs_xstrat_bytes += per_cpu_ptr(stats, i)->xs_xstrat_bytes;  in xfs_stats_format()
    82  xs_write_bytes += per_cpu_ptr(stats, i)->xs_write_bytes;  in xfs_stats_format()
    83  xs_read_bytes += per_cpu_ptr(stats, i)->xs_read_bytes;  in xfs_stats_format()
    107 vn_active = per_cpu_ptr(stats, c)->vn_active;  in xfs_stats_clearall()
    108 memset(per_cpu_ptr(stats, c), 0, sizeof(*stats));  in xfs_stats_clearall()
    109 per_cpu_ptr(stats, c)->vn_active = vn_active;  in xfs_stats_clearall()
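xfs_stats shows the canonical read side of per-CPU statistics: each CPU updates its own copy locklessly, and a reader folds all copies into one total. A minimal sketch of that fold (hypothetical foo_stats layout, not the XFS struct):

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/types.h>

    struct foo_stats {                      /* hypothetical layout */
            u64 read_bytes;
            u64 write_bytes;
    };

    static u64 foo_total_read_bytes(struct foo_stats __percpu *stats)
    {
            u64 sum = 0;
            int cpu;

            /* Fold every CPU's private copy into one total. */
            for_each_possible_cpu(cpu)
                    sum += per_cpu_ptr(stats, cpu)->read_bytes;
            return sum;
    }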
|
D | xfs_stats.h | 222 per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v++; \ 223 per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->v++; \ 228 per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v--; \ 229 per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->v--; \ 234 per_cpu_ptr(xfsstats.xs_stats, current_cpu())->v += (inc); \ 235 per_cpu_ptr(mp->m_stats.xs_stats, current_cpu())->v += (inc); \
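The xfs_stats.h macros are the matching update side: bump the current CPU's copy of a counter. A sketch of the same move with an explicit get_cpu()/put_cpu() pair (hypothetical stat_add helper; newer code would usually write this with a this_cpu_add() operation instead):

    #include <linux/percpu.h>
    #include <linux/smp.h>
    #include <linux/types.h>

    static void stat_add(u64 __percpu *counter, u64 inc)
    {
            int cpu = get_cpu();    /* disables preemption, returns this CPU */

            *per_cpu_ptr(counter, cpu) += inc;
            put_cpu();
    }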
|
/linux-4.4.14/arch/x86/kernel/cpu/ |
D | perf_event_amd_uncore.c |
    67  return *per_cpu_ptr(amd_uncore_nb, event->cpu);  in event_to_amd_uncore()
    69  return *per_cpu_ptr(amd_uncore_l2, event->cpu);  in event_to_amd_uncore()
    307 *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;  in amd_uncore_cpu_up_prepare()
    320 *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;  in amd_uncore_cpu_up_prepare()
    338 that = *per_cpu_ptr(uncores, cpu);  in amd_uncore_find_online_sibling()
    363 uncore = *per_cpu_ptr(amd_uncore_nb, cpu);  in amd_uncore_cpu_starting()
    368 *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;  in amd_uncore_cpu_starting()
    375 uncore = *per_cpu_ptr(amd_uncore_l2, cpu);  in amd_uncore_cpu_starting()
    381 *per_cpu_ptr(amd_uncore_l2, cpu) = uncore;  in amd_uncore_cpu_starting()
    388 struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);  in uncore_online()
    [all …]
|
D | perf_event_intel_uncore.c |
    94   box = *per_cpu_ptr(pmu->box, cpu);  in uncore_pmu_to_box()
    100  if (*per_cpu_ptr(pmu->box, cpu))  in uncore_pmu_to_box()
    105  *per_cpu_ptr(pmu->box, cpu) = box;  in uncore_pmu_to_box()
    112  return *per_cpu_ptr(pmu->box, cpu);  in uncore_pmu_to_box()
    942  if (*per_cpu_ptr(pmu->box, cpu) == box) {  in uncore_pci_remove()
    943  *per_cpu_ptr(pmu->box, cpu) = NULL;  in uncore_pci_remove()
    1043 box = *per_cpu_ptr(pmu->box, cpu);  in uncore_cpu_dying()
    1044 *per_cpu_ptr(pmu->box, cpu) = NULL;  in uncore_cpu_dying()
    1064 box = *per_cpu_ptr(pmu->box, cpu);  in uncore_cpu_starting()
    1072 exist = *per_cpu_ptr(pmu->box, k);  in uncore_cpu_starting()
    [all …]
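Both uncore drivers store a pointer per CPU rather than the object itself, which is why every access above is written *per_cpu_ptr(...): per_cpu_ptr() yields the per-CPU slot, and a second dereference yields the shared object, so several CPUs can alias one instance. A sketch of that idiom (hypothetical struct box and helpers):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/percpu.h>

    /* Hypothetical shared object; several CPUs may point at one instance. */
    struct box {
            int id;
    };

    static struct box * __percpu *box_slots;    /* one pointer slot per CPU */

    static int __init boxes_init(void)
    {
            box_slots = alloc_percpu(struct box *); /* slots start out NULL */
            return box_slots ? 0 : -ENOMEM;
    }

    static struct box *box_of(int cpu)
    {
            return *per_cpu_ptr(box_slots, cpu);    /* slot first, object second */
    }

    static void share_box(struct box *b, int cpu1, int cpu2)
    {
            /*
             * Alias two CPUs' slots to one object, much as the uncore
             * drivers do for CPUs that share a northbridge or package.
             */
            *per_cpu_ptr(box_slots, cpu1) = b;
            *per_cpu_ptr(box_slots, cpu2) = b;
    }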
|
/linux-4.4.14/kernel/sched/ |
D | cpuacct.c | 101 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_cpuusage_read() 120 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_cpuusage_write() 192 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu); in cpuacct_stats_show() 201 struct kernel_cpustat *kcpustat = per_cpu_ptr(ca->cpustat, cpu); in cpuacct_stats_show() 247 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu); in cpuacct_charge()
|
D | core.c |
    6116 sibling = *per_cpu_ptr(sdd->sd, i);  in build_group_mask()
    6151 sibling = *per_cpu_ptr(sdd->sd, i);  in build_overlap_sched_groups()
    6171 sg->sgc = *per_cpu_ptr(sdd->sgc, i);  in build_overlap_sched_groups()
    6210 struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);  in get_group()
    6217 *sg = *per_cpu_ptr(sdd->sg, cpu);  in get_group()
    6218 (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);  in get_group()
    6391 WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd);  in claim_allocations()
    6392 *per_cpu_ptr(sdd->sd, cpu) = NULL;  in claim_allocations()
    6394 if (atomic_read(&(*per_cpu_ptr(sdd->sg, cpu))->ref))  in claim_allocations()
    6395 *per_cpu_ptr(sdd->sg, cpu) = NULL;  in claim_allocations()
    [all …]
|
/linux-4.4.14/fs/squashfs/ |
D | decompressor_multi_percpu.c | 40 stream = per_cpu_ptr(percpu, cpu); in squashfs_decompressor_create() 53 stream = per_cpu_ptr(percpu, cpu); in squashfs_decompressor_create() 70 stream = per_cpu_ptr(percpu, cpu); in squashfs_decompressor_destroy()
|
/linux-4.4.14/net/xfrm/ |
D | xfrm_ipcomp.c | 49 u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu); in ipcomp_decompress() 50 struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu); in ipcomp_decompress() 216 vfree(*per_cpu_ptr(scratches, i)); in ipcomp_free_scratches() 241 *per_cpu_ptr(scratches, i) = scratch; in ipcomp_alloc_scratches() 269 struct crypto_comp *tfm = *per_cpu_ptr(tfms, cpu); in ipcomp_free_tfms() 311 *per_cpu_ptr(tfms, cpu) = tfm; in ipcomp_alloc_tfms()
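The ipcomp lines show a complete per-CPU buffer lifecycle: allocate a per-CPU pointer array, vmalloc one scratch buffer per possible CPU, and unwind everything on failure. A sketch under those assumptions (buffer size and names hypothetical; the real code also reference-counts the shared scratches):

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/vmalloc.h>

    #define SCRATCH_SIZE 65400              /* hypothetical per-CPU buffer size */

    static void * __percpu *alloc_scratches(void)
    {
            void * __percpu *scratches = alloc_percpu(void *);
            int i;

            if (!scratches)
                    return NULL;

            for_each_possible_cpu(i) {
                    void *scratch = vmalloc(SCRATCH_SIZE);

                    if (!scratch)
                            goto err;
                    *per_cpu_ptr(scratches, i) = scratch;
            }
            return scratches;

    err:
            /* alloc_percpu() zeroes the slots, so vfree(NULL) here is safe. */
            for_each_possible_cpu(i)
                    vfree(*per_cpu_ptr(scratches, i));
            free_percpu(scratches);
            return NULL;
    }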
|
/linux-4.4.14/kernel/ |
D | smpboot.c | 171 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in __smpboot_create_thread() 190 *per_cpu_ptr(ht->store, cpu) = tsk; in __smpboot_create_thread() 223 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in smpboot_unpark_thread() 242 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in smpboot_park_thread() 264 struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); in smpboot_destroy_threads() 269 *per_cpu_ptr(ht->store, cpu) = NULL; in smpboot_destroy_threads()
|
D | padata.c |
    137 queue = per_cpu_ptr(pd->pqueue, target_cpu);  in padata_do_parallel()
    186 next_queue = per_cpu_ptr(pd->pqueue, cpu);  in padata_get_next()
    260 squeue = per_cpu_ptr(pd->squeue, cb_cpu);  in padata_reorder()
    337 pqueue = per_cpu_ptr(pd->pqueue, cpu);  in padata_do_serial()
    380 squeue = per_cpu_ptr(pd->squeue, cpu);  in padata_init_squeues()
    395 pqueue = per_cpu_ptr(pd->pqueue, cpu);  in padata_init_pqueues()
    466 pqueue = per_cpu_ptr(pd->pqueue, cpu);  in padata_flush_queues()
    476 squeue = per_cpu_ptr(pd->squeue, cpu);  in padata_flush_queues()
|
D | stop_machine.c | 222 struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1); in cpu_stop_queue_two_works() 223 struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2); in cpu_stop_queue_two_works()
|
D | smp.c | 449 struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many() 466 csd = per_cpu_ptr(cfd->csd, cpu); in smp_call_function_many()
|
D | workqueue.c | 1365 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in __queue_work() 2979 struct work_struct *work = per_cpu_ptr(works, cpu); in schedule_on_each_cpu() 2986 flush_work(per_cpu_ptr(works, cpu)); in schedule_on_each_cpu() 3791 per_cpu_ptr(wq->cpu_pwqs, cpu); in alloc_and_link_pwqs() 4085 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); in workqueue_congested()
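The schedule_on_each_cpu() hits (lines 2979/2986) are themselves a compact recipe: allocate a per-CPU work_struct, queue each CPU's copy on that CPU, then flush them all. A sketch of the same recipe using only the stock workqueue API (run_on_each_cpu and my_func are hypothetical):

    #include <linux/cpu.h>
    #include <linux/errno.h>
    #include <linux/percpu.h>
    #include <linux/printk.h>
    #include <linux/smp.h>
    #include <linux/workqueue.h>

    static void my_func(struct work_struct *work)
    {
            pr_info("running on cpu %d\n", smp_processor_id());
    }

    static int run_on_each_cpu(void)
    {
            struct work_struct __percpu *works = alloc_percpu(struct work_struct);
            int cpu;

            if (!works)
                    return -ENOMEM;

            get_online_cpus();              /* keep the online set stable */
            for_each_online_cpu(cpu) {
                    struct work_struct *work = per_cpu_ptr(works, cpu);

                    INIT_WORK(work, my_func);
                    schedule_work_on(cpu, work);
            }
            for_each_online_cpu(cpu)
                    flush_work(per_cpu_ptr(works, cpu));
            put_online_cpus();

            free_percpu(works);
            return 0;
    }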
|
D | kexec_core.c | 993 buf = (u32 *)per_cpu_ptr(crash_notes, cpu); in crash_save_cpu()
|
D | module.c | 682 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); in percpu_modcopy() 707 void *start = per_cpu_ptr(mod->percpu, cpu); in is_module_percpu_address()
|
/linux-4.4.14/kernel/trace/ |
D | trace_functions_graph.c |
    342  data = per_cpu_ptr(tr->trace_buffer.data, cpu);  in trace_graph_entry()
    424  data = per_cpu_ptr(tr->trace_buffer.data, cpu);  in trace_graph_return()
    543  last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);  in verif_pid()
    781  cpu_data = per_cpu_ptr(data->cpu_data, cpu);  in print_graph_entry_leaf()
    821  cpu_data = per_cpu_ptr(data->cpu_data, cpu);  in print_graph_entry_nested()
    916  depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);  in check_irq_entry()
    962  depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);  in check_irq_return()
    1046 cpu_data = per_cpu_ptr(data->cpu_data, cpu);  in print_graph_return()
    1107 depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;  in print_graph_comment()
    1168 if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {  in print_graph_function_flags()
    [all …]
|
D | trace_sched_wakeup.c |
    91  *data = per_cpu_ptr(tr->trace_buffer.data, cpu);  in func_prolog_preempt_disable()
    458 disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);  in probe_wakeup_sched_switch()
    470 data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);  in probe_wakeup_sched_switch()
    492 atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);  in probe_wakeup_sched_switch()
    549 disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);  in probe_wakeup()
    582 data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);  in probe_wakeup()
    596 atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);  in probe_wakeup()
|
D | trace_kdb.c | 33 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump_buf() 90 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); in ftrace_dump_buf()
|
D | trace_irqsoff.c | 120 *data = per_cpu_ptr(tr->trace_buffer.data, cpu); in func_prolog_dec() 369 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in start_critical_timing() 407 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in stop_critical_timing()
|
D | trace.c |
    1035 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);  in __update_max_tr()
    1036 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);  in __update_max_tr()
    2424 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;  in tracing_iter_reset()
    2444 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;  in tracing_iter_reset()
    2545 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {  in get_total_entries()
    2546 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;  in get_total_entries()
    2603 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);  in print_trace_header()
    2674 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)  in test_cpu_buff_start()
    3442 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);  in tracing_cpumask_write()
    3447 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);  in tracing_cpumask_write()
    [all …]
|
D | trace_mmiotrace.c | 323 struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id()); in mmio_trace_rw() 356 data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id()); in mmio_trace_mapping()
|
D | trace_functions.c | 145 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in function_trace_call() 176 data = per_cpu_ptr(tr->trace_buffer.data, cpu); in function_stack_trace_call()
|
D | trace_uprobe.c | 715 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p); in uprobe_buffer_init() 716 mutex_init(&per_cpu_ptr(uprobe_cpu_buffer, cpu)->mutex); in uprobe_buffer_init() 725 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf); in uprobe_buffer_init() 755 free_page((unsigned long)per_cpu_ptr(uprobe_cpu_buffer, in uprobe_buffer_disable() 769 ucb = per_cpu_ptr(uprobe_cpu_buffer, cpu); in uprobe_buffer_get()
|
D | trace_event_perf.c | 102 INIT_HLIST_HEAD(per_cpu_ptr(list, cpu)); in perf_trace_event_reg()
|
D | blktrace.c | 255 sequence = per_cpu_ptr(bt->sequence, cpu); in __blk_add_trace()
|
D | trace_events.c | 598 per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false; in __ftrace_clear_event_pids()
|
D | ftrace.c | 211 *per_cpu_ptr(ops->disabled, cpu) = 1; in control_ops_disable_all()
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ehca_irq.c |
    671 } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active);  in find_next_online_cpu()
    709 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);  in queue_comp_task()
    710 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);  in queue_comp_task()
    718 cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);  in queue_comp_task()
    719 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);  in queue_comp_task()
    750 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);  in comp_task_park()
    763 target = per_cpu_ptr(pool->cpu_comp_tasks, cpu);  in comp_task_park()
    764 thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu);  in comp_task_park()
    775 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);  in comp_task_stop()
    786 struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);  in comp_task_should_run()
    [all …]
|
/linux-4.4.14/lib/ |
D | percpu_ida.c | 77 remote = per_cpu_ptr(pool->tag_cpu, cpu); in steal_tags() 322 spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock); in __percpu_ida_init() 350 remote = per_cpu_ptr(pool->tag_cpu, cpu); in percpu_ida_for_each_free() 387 remote = per_cpu_ptr(pool->tag_cpu, cpu); in percpu_ida_free_tags()
|
D | percpu_counter.c | 67 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_set() 107 s32 *pcount = per_cpu_ptr(fbc->counters, cpu); in __percpu_counter_sum() 186 pcount = per_cpu_ptr(fbc->counters, cpu); in percpu_counter_hotcpu_callback()
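percpu_counter wraps the fold-across-CPUs pattern into a reusable type: updates touch only a local per-CPU delta, spilling into the central count past a batch threshold, while percpu_counter_sum() performs the per_cpu_ptr() fold seen at line 107. A small usage sketch of the public API (nr_things and things_demo are hypothetical):

    #include <linux/gfp.h>
    #include <linux/percpu_counter.h>
    #include <linux/printk.h>

    static struct percpu_counter nr_things;

    static int things_demo(void)
    {
            int err = percpu_counter_init(&nr_things, 0, GFP_KERNEL);

            if (err)
                    return err;

            percpu_counter_inc(&nr_things);         /* cheap: local delta only */

            /* Exact read: folds every CPU's delta, as at line 107 above. */
            pr_info("things: %lld\n", percpu_counter_sum(&nr_things));

            percpu_counter_destroy(&nr_things);
            return 0;
    }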
|
D | percpu-refcount.c | 130 count += *per_cpu_ptr(percpu_count, cpu); in percpu_ref_switch_to_atomic_rcu() 247 *per_cpu_ptr(percpu_count, cpu) = 0; in __percpu_ref_switch_to_percpu()
|
D | random32.c | 246 struct rnd_state *state = per_cpu_ptr(pcpu_state, i); in prandom_seed_full_state()
|
/linux-4.4.14/include/net/ |
D | gro_cells.h | 69 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); in gro_cells_init() 85 struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); in gro_cells_destroy()
|
/linux-4.4.14/arch/x86/kernel/ |
D | kgdb.c |
    224 bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);  in kgdb_correct_hw_break()
    253 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);  in hw_break_reserve_slot()
    265 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);  in hw_break_reserve_slot()
    280 pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);  in hw_break_release_slot()
    320 bp = *per_cpu_ptr(breakinfo[i].pev, cpu);  in kgdb_remove_all_hw_break()
    413 bp = *per_cpu_ptr(breakinfo[i].pev, cpu);  in kgdb_disable_hw_debug()
    685 pevent = per_cpu_ptr(breakinfo[i].pev, cpu);  in kgdb_arch_late()
|
D | crash.c | 456 notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu)); in prepare_elf64_headers()
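The crash/kexec path needs physical addresses because the dump tooling reads the notes without the crashed kernel's page tables; per_cpu_ptr() picks one CPU's copy and per_cpu_ptr_to_phys() (implemented in mm/percpu.c, listed further down) translates it. A sketch with a hypothetical note buffer:

    #include <linux/percpu.h>
    #include <linux/types.h>

    struct crash_note {                     /* hypothetical note layout */
            u32 data[64];
    };

    static struct crash_note __percpu *notes;   /* assumed alloc_percpu()'d */

    static phys_addr_t note_phys(int cpu)
    {
            /* per-CPU virtual address -> physical address for the dump tool */
            return per_cpu_ptr_to_phys(per_cpu_ptr(notes, cpu));
    }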
|
/linux-4.4.14/net/core/ |
D | flow.c | 59 per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1; in flow_cache_new_hashrnd() 323 fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_percpu_empty() 395 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_cpu_prepare() 418 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu); in flow_cache_cpu() 480 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i); in flow_cache_init() 503 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i); in flow_cache_fini()
|
D | gen_stats.c | 110 struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i); in __gnet_stats_copy_basic_cpu() 228 const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i); in __gnet_stats_copy_queue_cpu()
|
D | dst.c | 409 __metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen); in metadata_dst_alloc_percpu()
|
D | neighbour.c | 1845 st = per_cpu_ptr(tbl->stats, cpu); in neightbl_fill_info() 2738 return per_cpu_ptr(tbl->stats, cpu); in neigh_stat_seq_start() 2752 return per_cpu_ptr(tbl->stats, cpu); in neigh_stat_seq_next()
|
D | sock.c | 2723 res += per_cpu_ptr(net->core.inuse, cpu)->val[idx]; in sock_prot_inuse_get()
|
D | dev.c | 6815 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i); in netdev_refcnt_read()
|
/linux-4.4.14/kernel/rcu/ |
D | srcu.c | 154 t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]); in srcu_readers_seq_idx() 171 t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]); in srcu_readers_active_idx() 269 sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]); in srcu_readers_active() 270 sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]); in srcu_readers_active()
|
D | tree.c |
    1274 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;  in print_other_cpu_stall()
    1321 totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;  in print_cpu_stall()
    2606 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);  in rcu_cleanup_dying_idle_cpu()
    2630 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);  in rcu_cleanup_dead_cpu()
    2843 if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))  in force_qs_rnp()
    3070 rdp = per_cpu_ptr(rsp->rda, cpu);  in __call_rcu()
    3640 rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());  in exp_funnel_lock()
    3686 rdp = per_cpu_ptr(rsp->rda, cpu);  in sync_sched_exp_online_cleanup()
    3717 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);  in sync_rcu_exp_select_cpus()
    3806 rdp = per_cpu_ptr(rsp->rda, cpu);  in synchronize_sched_expedited_wait()
    [all …]
|
D | tree_plugin.h |
    1305 struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);  in rcu_prepare_kthreads()
    1733 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);  in print_cpu_stall_info()
    1881 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);  in rcu_nocb_cpu_needs_barrier()
    2358 init_nocb_callback_list(per_cpu_ptr(rsp->rda, cpu));  in rcu_init_nohz()
    2382 struct rcu_data *rdp_spawn = per_cpu_ptr(rsp->rda, cpu);  in rcu_spawn_one_nocb_kthread()
    2474 rdp = per_cpu_ptr(rsp->rda, cpu);  in rcu_organize_nocb_kthreads()
    2925 rdp = per_cpu_ptr(rcu_state_p->rda, cpu);  in rcu_sys_is_idle()
|
D | tree_trace.c | 67 return per_cpu_ptr(rsp->rda, *pos); in r_start()
|
D | rcutorture.c | 546 c0 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[!idx]; in srcu_torture_stats() 547 c1 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[idx]; in srcu_torture_stats()
|
/linux-4.4.14/arch/x86/kernel/acpi/ |
D | cstate.c | 130 percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); in acpi_processor_ffh_cstate_probe() 160 percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu); in acpi_processor_ffh_cstate_enter()
|
/linux-4.4.14/include/linux/ |
D | percpu-defs.h |
    220 #define per_cpu_ptr(ptr, cpu) \    (macro, SMP definition)
    250 #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })    (macro, !SMP definition)
    251 #define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
    256 #define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu))
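Line 256 is the identity that ties this whole listing together: per_cpu(var, cpu) is just *per_cpu_ptr(&var, cpu). On SMP the macro at line 220 offsets the base pointer by the target CPU's per-CPU area; the !SMP variant at line 250 merely sanity-checks the pointer. A minimal self-contained use of both forms (stock API only; the hits variable is hypothetical):

    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, hits);

    static unsigned long hits_total(void)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu(hits, cpu);  /* == *per_cpu_ptr(&hits, cpu) */
            return sum;
    }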
|
D | genhd.h | 324 (per_cpu_ptr((part)->dkstats, (cpu))->field += (addnd)) 331 res += per_cpu_ptr((part)->dkstats, _cpu)->field; \ 340 memset(per_cpu_ptr(part->dkstats, i), value, in part_stat_set_all()
|
D | vmstat.h | 155 x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item]; in zone_page_state_snapshot()
|
D | blk-mq.h | 260 ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)
|
D | netdevice.h | 2079 stat = per_cpu_ptr(pcpu_stats, __cpu); \
|
/linux-4.4.14/crypto/ |
D | mcryptd.c | 56 flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); in mcryptd_arm_flusher() 79 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in mcryptd_init_queue() 93 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in mcryptd_fini_queue() 128 flist = per_cpu_ptr(mcryptd_flist, smp_processor_id()); in mcryptd_opportunistic_flush() 210 flist = per_cpu_ptr(mcryptd_flist, cpu); in mcryptd_flusher() 695 flist = per_cpu_ptr(mcryptd_flist, cpu); in mcryptd_init()
|
D | cryptd.c | 97 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in cryptd_init_queue() 110 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in cryptd_fini_queue()
|
/linux-4.4.14/arch/arc/kernel/ |
D | smp.c | 219 unsigned long __percpu *ipi_data_ptr = per_cpu_ptr(&ipi_data, cpu); in ipi_send_msg_one() 357 int *dev = per_cpu_ptr(&ipi_dev, cpu); in smp_ipi_irq_setup()
|
/linux-4.4.14/drivers/nvdimm/ |
D | region_devs.c | 600 ndl_count = per_cpu_ptr(nd_region->lane, cpu); in nd_region_acquire_lane() 601 ndl_lock = per_cpu_ptr(nd_region->lane, lane); in nd_region_acquire_lane() 617 ndl_count = per_cpu_ptr(nd_region->lane, cpu); in nd_region_release_lane() 618 ndl_lock = per_cpu_ptr(nd_region->lane, lane); in nd_region_release_lane() 688 ndl = per_cpu_ptr(nd_region->lane, i); in nd_region_create()
|
/linux-4.4.14/drivers/cpufreq/ |
D | acpi-cpufreq.c | 80 return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu); in to_perf_data() 128 struct msr *reg = per_cpu_ptr(msrs, cpu); in boost_set_msrs() 527 free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) in free_acpi_perf_data() 590 &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, in acpi_cpufreq_early_init() 686 perf = per_cpu_ptr(acpi_perf_data, cpu); in acpi_cpufreq_cpu_init()
|
D | pcc-cpufreq.c | 152 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); in pcc_get_freq() 208 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); in pcc_cpufreq_target() 258 pcc_cpu_data = per_cpu_ptr(pcc_cpu_info, cpu); in pcc_get_offset()
|
/linux-4.4.14/drivers/net/team/ |
D | team_mode_loadbalance.c | 473 pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); in lb_stats_refresh() 487 pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); in lb_stats_refresh() 488 stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i); in lb_stats_refresh() 597 team_lb_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); in lb_init()
|
D | team.c | 1793 p = per_cpu_ptr(team->pcpu_stats, i); in team_get_stats64()
|
/linux-4.4.14/arch/x86/lib/ |
D | msr-smp.c | 13 reg = per_cpu_ptr(rv->msrs, this_cpu); in __rdmsr_on_cpu() 27 reg = per_cpu_ptr(rv->msrs, this_cpu); in __wrmsr_on_cpu()
|
/linux-4.4.14/kernel/irq/ |
D | irqdesc.c | 92 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; in desc_set_defaults() 606 *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; in kstat_irqs_cpu() 626 sum += *per_cpu_ptr(desc->kstat_irqs, cpu); in kstat_irqs()
|
/linux-4.4.14/block/ |
D | blk-mq.h | 77 return per_cpu_ptr(q->queue_ctx, cpu); in __blk_mq_get_ctx()
|
D | blk-mq.c | 1777 struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); in blk_mq_init_cpu_queues()
|
/linux-4.4.14/drivers/xen/ |
D | xen-acpi-processor.c | 465 free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) in free_acpi_perf_data() 530 &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, in xen_acpi_processor_init() 545 perf = per_cpu_ptr(acpi_perf_data, i); in xen_acpi_processor_init()
|
/linux-4.4.14/drivers/thermal/ |
D | intel_powerclamp.c | 534 per_cpu_ptr(powerclamp_thread, cpu); in start_power_clamp() 568 thread = *per_cpu_ptr(powerclamp_thread, i); in end_power_clamp() 580 per_cpu_ptr(powerclamp_thread, cpu); in powerclamp_cpu_callback()
|
/linux-4.4.14/mm/ |
D | vmstat.c | 180 per_cpu_ptr(zone->pageset, cpu)->stat_threshold in refresh_zone_stat_thresholds() 211 per_cpu_ptr(zone->pageset, cpu)->stat_threshold in set_pgdat_percpu_threshold() 535 p = per_cpu_ptr(zone->pageset, cpu); in cpu_vm_stats_fold() 1243 pageset = per_cpu_ptr(zone->pageset, i); in zoneinfo_show_print() 1429 struct per_cpu_pageset *p = per_cpu_ptr(zone->pageset, cpu); in need_update() 1479 INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu), in start_shepherd_timer()
|
D | zswap.c | 428 if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu))) in __zswap_cpu_comp_notifier() 436 *per_cpu_ptr(pool->tfm, cpu) = tfm; in __zswap_cpu_comp_notifier() 440 tfm = *per_cpu_ptr(pool->tfm, cpu); in __zswap_cpu_comp_notifier() 443 *per_cpu_ptr(pool->tfm, cpu) = NULL; in __zswap_cpu_comp_notifier()
|
D | kmemleak.c | 890 log->ptr = per_cpu_ptr(ptr, cpu); in early_alloc_percpu() 943 create_object((unsigned long)per_cpu_ptr(ptr, cpu), in kmemleak_alloc_percpu() 1003 delete_object_full((unsigned long)per_cpu_ptr(ptr, in kmemleak_free_percpu()
|
D | page_alloc.c |
    1907 pset = per_cpu_ptr(zone->pageset, cpu);  in drain_pages_zone()
    1982 pcp = per_cpu_ptr(zone->pageset, cpu);  in drain_all_pages()
    1987 pcp = per_cpu_ptr(z->pageset, cpu);  in drain_all_pages()
    3713 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;  in show_free_areas()
    3750 free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;  in show_free_areas()
    4683 struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);  in zone_pageset_init()
    6306 per_cpu_ptr(zone->pageset, cpu));  in percpu_pagelist_fraction_sysctl_handler()
    6833 per_cpu_ptr(zone->pageset, cpu));  in zone_pcp_update()
    6848 pset = per_cpu_ptr(zone->pageset, cpu);  in zone_pcp_reset()
|
D | slub.c |
    1854 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);  in init_kmem_cache_cpus()
    2148 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);  in __flush_cpu_slab()
    2168 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);  in has_cpu_slab()
    4490 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);  in show_slab_objects()
    4738 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;  in slabs_cpu_partial_show()
    4750 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;  in slabs_cpu_partial_show()
    5028 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];  in show_stat()
    5051 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;  in clear_stat()
|
D | percpu.c | 1289 void *start = per_cpu_ptr(base, cpu); in is_kernel_percpu_address() 1340 void *start = per_cpu_ptr(base, cpu); in per_cpu_ptr_to_phys()
|
D | slab.c | 1119 nc = per_cpu_ptr(cachep->cpu_cache, cpu); in cpuup_canceled() 2006 init_arraycache(per_cpu_ptr(cpu_cache, cpu), in alloc_kmem_cache_cpus() 3717 struct array_cache *ac = per_cpu_ptr(prev, cpu); in __do_tune_cpucache()
|
D | swapfile.c | 2487 cluster = per_cpu_ptr(p->percpu_cluster, cpu); in SYSCALL_DEFINE2()
|
D | memcontrol.c | 5639 INIT_WORK(&per_cpu_ptr(&memcg_stock, cpu)->work, in mem_cgroup_init()
|
/linux-4.4.14/net/netfilter/ |
D | nf_synproxy_core.c | 259 return per_cpu_ptr(snet->stats, cpu); in synproxy_cpu_seq_start() 274 return per_cpu_ptr(snet->stats, cpu); in synproxy_cpu_seq_next()
|
D | nft_counter.c | 60 cpu_stats = per_cpu_ptr(counter, cpu); in nft_counter_fetch()
|
D | nf_conntrack_standalone.c | 311 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_start() 326 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_next()
|
D | nf_conntrack_ecache.c | 92 pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu); in ecache_work()
|
D | nf_conntrack_core.c | 255 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); in nf_ct_add_to_dying_list() 270 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); in nf_ct_add_to_unconfirmed_list() 284 pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu); in nf_ct_del_from_dying_or_unconfirmed_list() 1400 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in get_next_corpse() 1772 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in nf_conntrack_init_net()
|
D | nf_conntrack_helper.c | 419 struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in __nf_conntrack_helper_unregister()
|
D | nf_conntrack_netlink.c | 1285 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); in ctnetlink_dump_list() 2025 st = per_cpu_ptr(net->ct.stat, cpu); in ctnetlink_ct_stat_cpu_dump() 3234 st = per_cpu_ptr(net->ct.stat, cpu); in ctnetlink_exp_stat_cpu_dump()
|
D | nf_tables_api.c | 936 cpu_stats = per_cpu_ptr(stats, cpu); in nft_dump_stats()
|
/linux-4.4.14/kernel/time/ |
D | timer.c | 142 return per_cpu_ptr(&tvec_bases, get_nohz_timer_target()); in get_target_base() 771 base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK); in lock_timer_base() 980 struct tvec_base *new_base = per_cpu_ptr(&tvec_bases, cpu); in add_timer_on() 1591 old_base = per_cpu_ptr(&tvec_bases, cpu); in migrate_timers() 1644 struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu); in init_timer_cpu()
|
/linux-4.4.14/arch/s390/kernel/ |
D | machine_kexec.c | 46 ptr = (u64 *) per_cpu_ptr(crash_notes, cpu); in add_elf_notes()
|
/linux-4.4.14/drivers/net/ |
D | nlmon.c | 90 nl_stats = per_cpu_ptr(dev->lstats, i); in nlmon_get_stats64()
|
D | loopback.c | 112 lb_stats = per_cpu_ptr(dev->lstats, i); in loopback_get_stats64()
|
D | dummy.c | 67 dstats = per_cpu_ptr(dev->dstats, i); in dummy_get_stats64()
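nlmon, loopback and dummy (like most of the stats handlers in this section) share one idiom: per-CPU packet/byte counters guarded by u64_stats_sync, folded with a begin/retry loop so 64-bit reads stay consistent on 32-bit machines. A sketch of that fold (lstats_sketch is a hypothetical struct modeled on pcpu_lstats):

    #include <linux/cpumask.h>
    #include <linux/percpu.h>
    #include <linux/u64_stats_sync.h>

    struct lstats_sketch {
            u64                     packets;
            u64                     bytes;
            struct u64_stats_sync   syncp;
    };

    static void fold_stats(struct lstats_sketch __percpu *lstats,
                           u64 *packets, u64 *bytes)
    {
            int i;

            *packets = 0;
            *bytes = 0;
            for_each_possible_cpu(i) {
                    const struct lstats_sketch *s = per_cpu_ptr(lstats, i);
                    unsigned int start;
                    u64 tp, tb;

                    do {    /* re-read if a writer raced with us */
                            start = u64_stats_fetch_begin_irq(&s->syncp);
                            tp = s->packets;
                            tb = s->bytes;
                    } while (u64_stats_fetch_retry_irq(&s->syncp, start));

                    *packets += tp;
                    *bytes += tb;
            }
    }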
|
D | vrf.c | 203 dstats = per_cpu_ptr(dev->dstats, i); in vrf_get_stats64() 472 struct rt6_info **p = per_cpu_ptr(rt6->rt6i_pcpu, cpu); in vrf_rt6_create()
|
D | veth.c | 148 struct pcpu_vstats *stats = per_cpu_ptr(dev->vstats, cpu); in veth_stats_one()
|
D | virtio_net.c | 1064 struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu); in virtnet_stats() 1812 virtnet_stats = per_cpu_ptr(vi->stats, i); in virtnet_probe()
|
D | xen-netfront.c | 1077 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); in xennet_get_stats64() 1078 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); in xennet_get_stats64()
|
D | macvlan.c | 843 p = per_cpu_ptr(vlan->pcpu_stats, i); in macvlan_dev_get_stats64()
|
/linux-4.4.14/drivers/dma/ |
D | dmaengine.c | 99 count += per_cpu_ptr(chan->local, i)->memcpy_count; in memcpy_count_show() 121 count += per_cpu_ptr(chan->local, i)->bytes_transferred; in bytes_transferred_show() 450 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; in dma_channel_rebalance() 467 per_cpu_ptr(channel_table[cap], cpu)->chan = chan; in dma_channel_rebalance()
|
/linux-4.4.14/drivers/perf/ |
D | arm_pmu.c | 626 free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu)); in cpu_pmu_free_irq() 681 per_cpu_ptr(&hw_events->percpu_pmu, cpu)); in cpu_pmu_request_irq() 737 struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu); in cpu_pmu_init()
|
/linux-4.4.14/arch/ia64/kernel/ |
D | crash.c | 76 buf = (u64 *) per_cpu_ptr(crash_notes, cpu); in crash_save_this_cpu()
|
/linux-4.4.14/net/ipv4/netfilter/ |
D | nf_conntrack_l3proto_ipv4_compat.c | 341 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_start() 356 return per_cpu_ptr(net->ct.stat, cpu); in ct_cpu_seq_next()
|
/linux-4.4.14/arch/x86/crypto/sha-mb/ |
D | sha1_mb.c | 878 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); in sha1_mb_mod_init() 908 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); in sha1_mb_mod_init() 923 cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu); in sha1_mb_mod_fini()
|
/linux-4.4.14/drivers/idle/ |
D | intel_idle.c | 800 dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu); in cpu_hotplug_notify() 988 dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); in intel_idle_cpuidle_devices_uninit() 1165 dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu); in intel_idle_cpu_init()
|
/linux-4.4.14/net/caif/ |
D | cffrml.c | 195 refcnt += *per_cpu_ptr(this->pcpu_refcnt, i); in cffrml_refcnt_read()
|
D | caif_dev.c | 87 refcnt += *per_cpu_ptr(e->pcpu_refcnt, i); in caifd_refcnt_read()
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlxsw/ |
D | core.c | 612 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); in mlxsw_core_rx_stats_dbg_read() 628 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); in mlxsw_core_rx_stats_dbg_read() 639 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); in mlxsw_core_rx_stats_dbg_read() 655 p = per_cpu_ptr(mlxsw_core->pcpu_stats, j); in mlxsw_core_rx_stats_dbg_read()
|
D | switchx2.c | 361 p = per_cpu_ptr(mlxsw_sx_port->pcpu_stats, i); in mlxsw_sx_port_get_stats64()
|
D | spectrum.c | 460 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i); in mlxsw_sp_port_get_stats64()
|
/linux-4.4.14/arch/arm/xen/ |
D | enlighten.c | 98 vcpup = per_cpu_ptr(xen_vcpu_info, cpu); in xen_percpu_init()
|
/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_fcoe.c | 205 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); in ixgbe_fcoe_ddp_setup() 622 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in ixgbe_fcoe_dma_pool_free() 643 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in ixgbe_fcoe_dma_pool_alloc()
|
D | ixgbe_main.c | 6204 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in ixgbe_update_stats()
|
/linux-4.4.14/net/netfilter/ipvs/ |
D | ip_vs_est.c | 67 struct ip_vs_cpu_stats *s = per_cpu_ptr(stats, i); in ip_vs_read_cpu_stats()
|
D | ip_vs_ctl.c | 894 ip_vs_dest_stats = per_cpu_ptr(dest->stats.cpustats, i); in ip_vs_new_dest() 1217 ip_vs_stats = per_cpu_ptr(svc->stats.cpustats, i); in ip_vs_add_service() 2147 struct ip_vs_cpu_stats *u = per_cpu_ptr(cpustats, i); in ip_vs_stats_percpu_show() 3971 ipvs_tot_stats = per_cpu_ptr(ipvs->tot_stats.cpustats, i); in ip_vs_control_net_init()
|
/linux-4.4.14/net/ipv4/ |
D | icmp.c | 1167 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i)); in icmp_sk_exit() 1188 *per_cpu_ptr(net->ipv4.icmp_sk, i) = sk; in icmp_sk_init() 1229 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.icmp_sk, i)); in icmp_sk_init()
|
D | ip_tunnel_core.c | 201 per_cpu_ptr(dev->tstats, i); in ip_tunnel_get_stats64()
|
D | af_inet.c | 1450 return *(((unsigned long *)per_cpu_ptr(mib, cpu)) + offt); in snmp_get_cpu_field() 1475 bhptr = per_cpu_ptr(mib, cpu); in snmp_get_cpu_field64() 1543 af_inet_stats = per_cpu_ptr(net->mib.ip_statistics, i); in ipv4_mib_init_net()
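snmp_get_cpu_field (line 1450) reads a MIB counter by offset rather than by field name: the per-CPU block is treated as a flat array of unsigned long, the same trick as counter_val() in xfs_stats.c above. A sketch of the fold (mib_fold_field is a hypothetical helper):

    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    /* Fold one counter, identified by its word offset, across all CPUs. */
    static unsigned long mib_fold_field(void __percpu *mib, int offt)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += *((unsigned long *)per_cpu_ptr(mib, cpu) + offt);
            return sum;
    }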
|
D | tcp_ipv4.c | 2360 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); in tcp_sk_exit() 2379 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; in tcp_sk_init()
|
D | ip_tunnel.c | 99 __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0); in ip_tunnel_dst_reset_all()
|
D | route.c | 348 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i); in rt_acct_proc_show() 692 prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i); in update_or_create_fnhe()
|
D | fib_semantics.c | 196 rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1); in rt_fibinfo_free_cpus()
|
D | fib_trie.c | 2179 const struct trie_use_stats *pcpu = per_cpu_ptr(stats, cpu); in trie_show_usage()
|
/linux-4.4.14/drivers/acpi/ |
D | processor_perflib.c | 627 if (!performance || !per_cpu_ptr(performance, i)) { in acpi_processor_preregister_performance() 639 pr->performance = per_cpu_ptr(performance, i); in acpi_processor_preregister_performance()
|
/linux-4.4.14/drivers/scsi/libfc/ |
D | fc_fcp.c | 162 per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++; in fc_fcp_pkt_alloc() 270 per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++; in fc_fcp_send_abort() 429 per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++; in fc_fcp_frame_alloc() 507 stats = per_cpu_ptr(lport->stats, get_cpu()); in fc_fcp_recv_data() 1862 stats = per_cpu_ptr(lport->stats, get_cpu()); in fc_queuecommand()
|
D | fc_exch.c | 818 pool = per_cpu_ptr(mp->pool, cpu); in fc_exch_em_alloc() 913 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask); in fc_exch_find() 1917 per_cpu_ptr(ema->mp->pool, cpu), in fc_exch_mgr_reset() 2415 pool = per_cpu_ptr(mp->pool, cpu); in fc_exch_mgr_alloc()
|
D | fc_lport.c | 319 stats = per_cpu_ptr(lport->stats, cpu); in fc_get_host_stats()
|
/linux-4.4.14/Documentation/ |
D | this_cpu_ops.txt | 102 y = per_cpu_ptr(&x, cpu); 281 To access per-cpu data structure remotely, typically the per_cpu_ptr() 287 struct data *p = per_cpu_ptr(&datap, cpu);
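The two documentation lines quoted here state the whole contract: this_cpu_ptr() for the running CPU's copy (with preemption off so the CPU cannot change underneath), per_cpu_ptr() for a named, possibly remote CPU. A self-contained sketch of both sides (bump_local and peek_remote are hypothetical):

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    struct data {
            int count;
    };

    static DEFINE_PER_CPU(struct data, datap);

    static void bump_local(void)
    {
            preempt_disable();              /* pin to this CPU */
            this_cpu_ptr(&datap)->count++;
            preempt_enable();
    }

    static int peek_remote(int cpu)
    {
            /* remote access: fine here only if a racy read is acceptable */
            return per_cpu_ptr(&datap, cpu)->count;
    }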
|
/linux-4.4.14/net/openvswitch/ |
D | vport-internal_dev.c | 125 percpu_stats = per_cpu_ptr(dev->tstats, i); in internal_get_stats()
|
D | datapath.c | 692 percpu_stats = per_cpu_ptr(dp->stats_percpu, i); in get_dp_stats()
|
/linux-4.4.14/net/batman-adv/ |
D | main.h | 336 counters = per_cpu_ptr(bat_priv->bat_counters, cpu); in batadv_sum_counter()
|
/linux-4.4.14/include/linux/netfilter/ |
D | x_tables.h | 413 return per_cpu_ptr((void __percpu *) (unsigned long) cnt->pcnt, cpu); in xt_get_per_cpu_counter()
|
/linux-4.4.14/fs/ |
D | seq_file.c | 973 hlist_for_each(node, per_cpu_ptr(head, *cpu)) { in seq_hlist_start_percpu() 1004 struct hlist_head *bucket = per_cpu_ptr(head, *cpu); in seq_hlist_next_percpu()
|
D | namespace.c | 184 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count; in mnt_get_count() 309 count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers; in mnt_get_writers()
|
D | locks.c | 2734 INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i)); in filelock_init()
|
D | buffer.c | 1458 struct bh_lru *b = per_cpu_ptr(&bh_lrus, cpu); in has_bh_in_lru()
|
/linux-4.4.14/drivers/net/ethernet/intel/i40e/ |
D | i40e_fcoe.c | 487 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in i40e_fcoe_dma_pool_free() 513 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu); in i40e_fcoe_dma_pool_create() 859 ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu()); in i40e_fcoe_ddp_setup()
|
/linux-4.4.14/drivers/scsi/fcoe/ |
D | fcoe.c | 1552 per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++; in fcoe_rcv() 1704 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_xmit() 1769 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_filter_frames() 1817 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_recv_frame() 2061 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_device_notification()
|
D | fcoe_ctlr.c | 834 stats = per_cpu_ptr(fip->lp->stats, get_cpu()); in fcoe_ctlr_age_fcfs() 1291 stats = per_cpu_ptr(lport->stats, get_cpu()); in fcoe_ctlr_recv_els() 1403 per_cpu_ptr(lport->stats, in fcoe_ctlr_recv_clr_vlink() 1433 per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++; in fcoe_ctlr_recv_clr_vlink()
|
D | fcoe_transport.c | 173 stats = per_cpu_ptr(lport->stats, cpu); in __fcoe_get_lesb()
|
/linux-4.4.14/net/bridge/ |
D | br_device.c | 158 = per_cpu_ptr(br->stats, cpu); in br_get_stats64()
|
/linux-4.4.14/net/rds/ |
D | ib_recv.c | 110 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_recv_alloc_cache() 141 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_cache_splice_all_lists()
|
/linux-4.4.14/kernel/events/ |
D | hw_breakpoint.c | 66 return per_cpu_ptr(bp_cpuinfo + type, cpu); in get_bp_info()
|
D | core.c |
    403  t = per_cpu_ptr(event->cgrp->info, event->cpu);  in perf_cgroup_event_time()
    638  t = per_cpu_ptr(event->cgrp->info, event->cpu);  in perf_cgroup_set_shadow_time()
    3465 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);  in find_get_context()
    7477 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);  in update_pmu_context()
    7552 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);  in perf_event_mux_interval_ms_store()
    7658 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);  in perf_pmu_register()
    8688 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;  in perf_pmu_migrate_context()
    8689 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;  in perf_pmu_migrate_context()
    9333 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;  in perf_event_exit_cpu_context()
|
/linux-4.4.14/drivers/base/ |
D | cpu.c | 155 addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum)); in show_crash_notes()
|
/linux-4.4.14/drivers/net/ethernet/marvell/ |
D | mvneta.c | 602 cpu_stats = per_cpu_ptr(pp->stats, cpu); in mvneta_get_stats64() 2450 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_start_dev() 2476 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_stop_dev() 2760 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_percpu_notifier() 2773 per_cpu_ptr(pp->ports, other_cpu); in mvneta_percpu_notifier() 3398 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); in mvneta_probe()
|
D | mvpp2.c |
    4230 txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);  in mvpp2_txq_reserved_desc_num_proc()
    4665 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);  in mvpp2_txq_init()
    4688 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);  in mvpp2_txq_init()
    4708 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);  in mvpp2_txq_deinit()
    4765 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);  in mvpp2_txq_clean()
    5649 port_pcpu = per_cpu_ptr(port->pcpu, cpu);  in mvpp2_stop()
    5787 cpu_stats = per_cpu_ptr(port->stats, cpu);  in mvpp2_get_stats64()
    6042 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);  in mvpp2_port_init()
    6228 port_pcpu = per_cpu_ptr(port->pcpu, cpu);  in mvpp2_port_probe()
|
/linux-4.4.14/drivers/bus/ |
D | mips_cdmm.c | 302 bus_p = per_cpu_ptr(&mips_cdmm_buses, cpu); in mips_cdmm_get_bus()
|
/linux-4.4.14/drivers/clocksource/ |
D | exynos_mct.c | 549 per_cpu_ptr(&percpu_mct_tick, cpu); in exynos4_timer_resources()
|
/linux-4.4.14/drivers/irqchip/ |
D | irq-gic.c | 1073 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; in __gic_init_bases() 1074 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; in __gic_init_bases()
|
/linux-4.4.14/net/sched/ |
D | cls_u32.c | 993 __u32 cnt = *per_cpu_ptr(n->pcpu_success, cpum); in u32_dump() 1023 struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu); in u32_dump()
|
/linux-4.4.14/drivers/net/hyperv/ |
D | netvsc_drv.c | 920 struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats, in netvsc_get_stats64() 922 struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats, in netvsc_get_stats64()
|
/linux-4.4.14/net/ipv6/ |
D | ip6_tunnel.c | 103 per_cpu_ptr(dev->tstats, i); in ip6_get_stats() 180 ip6_tnl_per_cpu_dst_set(per_cpu_ptr(t->dst_cache, i), NULL); in ip6_tnl_dst_reset() 210 seqlock_init(&per_cpu_ptr(t->dst_cache, i)->lock); in ip6_tnl_dst_init()
|
D | af_inet6.c | 738 af_inet6_stats = per_cpu_ptr(net->mib.ipv6_statistics, i); in ipv6_init_mibs()
|
D | route.c | 150 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); in rt6_uncached_list_flush_dev() 354 p = per_cpu_ptr(rt->rt6i_pcpu, cpu); in ip6_dst_alloc() 3739 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); in ip6_route_init()
|
D | ip6_fib.c | 174 ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt6i_pcpu, cpu); in rt6_free_pcpu()
|
D | addrconf.c | 309 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i); in snmp6_alloc_dev()
|
/linux-4.4.14/arch/arm/kvm/ |
D | arm.c | 1096 cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu); in init_hyp_mode()
|
/linux-4.4.14/drivers/scsi/bnx2fc/ |
D | bnx2fc_fcoe.c | 381 stats = per_cpu_ptr(lport->stats, get_cpu()); in bnx2fc_xmit() 578 stats = per_cpu_ptr(lport->stats, smp_processor_id()); in bnx2fc_recv_frame() 908 per_cpu_ptr(lport->stats, in bnx2fc_indicate_netevent()
|
D | bnx2fc_io.c | 1964 stats = per_cpu_ptr(lport->stats, get_cpu()); in bnx2fc_post_io_req()
|
/linux-4.4.14/net/8021q/ |
D | vlan_dev.c | 666 p = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i); in vlan_dev_get_stats64()
|
/linux-4.4.14/drivers/net/ipvlan/ |
D | ipvlan_main.c | 259 pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx); in ipvlan_get_stats64()
|
/linux-4.4.14/drivers/edac/ |
D | amd64_edac.c | 2514 struct msr *reg = per_cpu_ptr(msrs, cpu); in nb_mce_bank_enabled_on_node() 2547 struct msr *reg = per_cpu_ptr(msrs, cpu); in toggle_ecc_err_reporting()
|
/linux-4.4.14/drivers/md/ |
D | dm-stats.c | 203 last = per_cpu_ptr(stats->last, cpu); in dm_stats_init()
|
D | raid5.c | 1924 percpu = per_cpu_ptr(conf->percpu, cpu); in raid_run_ops() 2099 percpu = per_cpu_ptr(conf->percpu, cpu); in resize_chunks() 6343 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid5_free_percpu() 6370 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); in raid456_cpu_notify() 6383 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid456_cpu_notify() 6411 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); in raid5_alloc_percpu()
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_init.c | 1093 int_counter += *per_cpu_ptr(dd->int_counter, cpu); in qib_int_counter()
|
D | qib_mad.c | 1647 p = per_cpu_ptr(ibp->pmastats, cpu); in qib_snapshot_pmacounters()
|
/linux-4.4.14/net/mac80211/ |
D | iface.c | 1121 tstats = per_cpu_ptr(dev->tstats, i); in ieee80211_get_stats64()
|
/linux-4.4.14/fs/gfs2/ |
D | glock.c | 1739 const struct gfs2_pcpu_lkstats *lkstats = per_cpu_ptr(sdp->sd_lkstats, i); in gfs2_sbstats_seq_show()
|
D | rgrp.c | 1875 st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP]; in gfs2_rgrp_congested()
|
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb/ |
D | sge.c | 984 struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu); in t1_sge_get_port_stats()
|
/linux-4.4.14/fs/nfs/ |
D | super.c | 863 stats = per_cpu_ptr(nfss->io_stats, cpu); in nfs_show_stats()
|
/linux-4.4.14/arch/x86/kvm/ |
D | x86.c | 218 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); in shared_msr_update() 251 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); in kvm_set_shared_msr() 273 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); in drop_user_return_notifiers()
|
/linux-4.4.14/net/packet/ |
D | af_packet.c | 1213 refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu); in packet_read_pending()
|
/linux-4.4.14/drivers/scsi/ |
D | hpsa.c | 2665 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); in lockup_detected() 8075 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); in set_lockup_detected_for_all_cpus()
|
/linux-4.4.14/fs/ext4/ |
D | mballoc.c | 2663 lg = per_cpu_ptr(sbi->s_locality_groups, i); in ext4_mb_init()
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | chip.c | 1461 counter += *per_cpu_ptr(cntr, cpu); in get_all_cpu_total()
|