Home
last modified time | relevance | path

Searched refs:__this_cpu_read (Results 1 – 114 of 114) sorted by relevance

/linux-4.4.14/arch/mips/kernel/
Dmips-r2-to-r6-emul.c2237 (unsigned long)__this_cpu_read(mipsr2emustats.movs), in mipsr2_stats_show()
2238 (unsigned long)__this_cpu_read(mipsr2bdemustats.movs)); in mipsr2_stats_show()
2240 (unsigned long)__this_cpu_read(mipsr2emustats.hilo), in mipsr2_stats_show()
2241 (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo)); in mipsr2_stats_show()
2243 (unsigned long)__this_cpu_read(mipsr2emustats.muls), in mipsr2_stats_show()
2244 (unsigned long)__this_cpu_read(mipsr2bdemustats.muls)); in mipsr2_stats_show()
2246 (unsigned long)__this_cpu_read(mipsr2emustats.divs), in mipsr2_stats_show()
2247 (unsigned long)__this_cpu_read(mipsr2bdemustats.divs)); in mipsr2_stats_show()
2249 (unsigned long)__this_cpu_read(mipsr2emustats.dsps), in mipsr2_stats_show()
2250 (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps)); in mipsr2_stats_show()
[all …]
Dsmp-bmips.c331 action = __this_cpu_read(ipi_action_mask); in bmips43xx_ipi_interrupt()
Dkprobes.c388 p = __this_cpu_read(current_kprobe); in kprobe_handler()
/linux-4.4.14/kernel/
Dcontext_tracking.c69 if (__this_cpu_read(context_tracking.state) != state) { in __context_tracking_enter()
70 if (__this_cpu_read(context_tracking.active)) { in __context_tracking_enter()
149 if (__this_cpu_read(context_tracking.state) == state) { in __context_tracking_exit()
150 if (__this_cpu_read(context_tracking.active)) { in __context_tracking_exit()
Dwatchdog.c278 unsigned long hrint = __this_cpu_read(hrtimer_interrupts); in is_hardlockup()
280 if (__this_cpu_read(hrtimer_interrupts_saved) == hrint) in is_hardlockup()
318 if (__this_cpu_read(watchdog_nmi_touch) == true) { in watchdog_overflow_callback()
334 if (__this_cpu_read(hard_watchdog_warn) == true) in watchdog_overflow_callback()
379 unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts); in watchdog_timer_fn()
388 wake_up_process(__this_cpu_read(softlockup_watchdog)); in watchdog_timer_fn()
394 if (unlikely(__this_cpu_read(softlockup_touch_sync))) { in watchdog_timer_fn()
426 if (__this_cpu_read(soft_watchdog_warn) == true) { in watchdog_timer_fn()
435 if (__this_cpu_read(softlockup_task_ptr_saved) != in watchdog_timer_fn()
530 return __this_cpu_read(hrtimer_interrupts) != in watchdog_should_run()
[all …]
Dsoftirq.c74 struct task_struct *tsk = __this_cpu_read(ksoftirqd); in wakeup_softirqd()
455 *__this_cpu_read(tasklet_vec.tail) = t; in __tasklet_schedule()
468 *__this_cpu_read(tasklet_hi_vec.tail) = t; in __tasklet_hi_schedule()
479 t->next = __this_cpu_read(tasklet_hi_vec.head); in __tasklet_hi_schedule_first()
490 list = __this_cpu_read(tasklet_vec.head); in tasklet_action()
514 *__this_cpu_read(tasklet_vec.tail) = t; in tasklet_action()
526 list = __this_cpu_read(tasklet_hi_vec.head); in tasklet_hi_action()
550 *__this_cpu_read(tasklet_hi_vec.tail) = t; in tasklet_hi_action()
710 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; in takeover_tasklets()
718 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head; in takeover_tasklets()
Dpadata.c206 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { in padata_get_next()
Dkprobes.c1035 struct kprobe *cur = __this_cpu_read(kprobe_instance); in aggr_fault_handler()
1051 struct kprobe *cur = __this_cpu_read(kprobe_instance); in aggr_break_handler()
/linux-4.4.14/arch/sparc/kernel/
Dnmi.c103 if (__this_cpu_read(nmi_touch)) { in perfctr_irq()
107 if (!touched && __this_cpu_read(last_irq_sum) == sum) { in perfctr_irq()
109 if (__this_cpu_read(alert_counter) == 30 * nmi_hz) in perfctr_irq()
116 if (__this_cpu_read(wd_enabled)) { in perfctr_irq()
221 if (!__this_cpu_read(wd_enabled)) in nmi_adjust_hz_one()
Dkprobes.c158 p = __this_cpu_read(current_kprobe); in kprobe_handler()
/linux-4.4.14/kernel/time/
Dtick-oneshot.c29 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); in tick_program_event()
55 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); in tick_resume_oneshot()
116 ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT; in tick_oneshot_mode_active()
Dtick-common.c67 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); in tick_is_oneshot_available()
Dtick-sched.c410 return __this_cpu_read(tick_cpu_sched.tick_stopped); in tick_nohz_tick_stopped()
574 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); in tick_nohz_stop_sched_tick()
/linux-4.4.14/arch/x86/kernel/cpu/mcheck/
Dmce_intel.c126 if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) in mce_intel_cmci_poll()
173 (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) { in cmci_intel_adjust_timer()
178 switch (__this_cpu_read(cmci_storm_state)) { in cmci_intel_adjust_timer()
212 unsigned int cnt = __this_cpu_read(cmci_storm_cnt); in cmci_storm_detect()
213 unsigned long ts = __this_cpu_read(cmci_time_stamp); in cmci_storm_detect()
217 if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE) in cmci_storm_detect()
Dmce.c356 unsigned bank = __this_cpu_read(injectm.bank); in msr_to_offset()
376 if (__this_cpu_read(injectm.finished)) { in mce_rdmsrl()
399 if (__this_cpu_read(injectm.finished)) { in mce_wrmsrl()
1274 iv = __this_cpu_read(mce_next_interval); in mce_timer_fn()
1305 unsigned long iv = __this_cpu_read(mce_next_interval); in mce_timer_kick()
/linux-4.4.14/include/asm-generic/
Dirq_regs.h25 return __this_cpu_read(__irq_regs); in get_irq_regs()
32 old_regs = __this_cpu_read(__irq_regs); in set_irq_regs()
/linux-4.4.14/include/linux/
Dcontext_tracking_state.h35 return __this_cpu_read(context_tracking.active); in context_tracking_cpu_is_enabled()
40 return __this_cpu_read(context_tracking.state) == CONTEXT_USER; in context_tracking_in_user()
Dhighmem.h104 return __this_cpu_read(__kmap_atomic_idx) - 1; in kmap_atomic_idx()
Dkprobes.h353 return (__this_cpu_read(current_kprobe)); in kprobe_running()
Dpercpu-defs.h430 #define __this_cpu_read(pcp) \ macro
/linux-4.4.14/lib/
Dpercpu_test.c10 WARN(__this_cpu_read(pcp) != (expected), \
12 __this_cpu_read(pcp), __this_cpu_read(pcp), \
Dpercpu_counter.c80 count = __this_cpu_read(*fbc->counters) + amount; in __percpu_counter_add()
Didr.c119 new = __this_cpu_read(idr_preload_head); in idr_layer_alloc()
413 while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { in idr_preload()
423 new->ary[0] = __this_cpu_read(idr_preload_head); in idr_preload()
Diommu-common.c104 unsigned int pool_hash = __this_cpu_read(iommu_hash_common); in iommu_tbl_range_alloc()
/linux-4.4.14/arch/x86/oprofile/
Dop_model_ppro.c87 __this_cpu_read(cpu_info.x86) == 6 && in ppro_setup_ctrs()
88 __this_cpu_read(cpu_info.x86_model) == 15)) { in ppro_setup_ctrs()
215 if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 && in arch_perfmon_setup_counters()
216 __this_cpu_read(cpu_info.x86_model) == 15) { in arch_perfmon_setup_counters()
Dnmi_int.c140 return __this_cpu_read(switch_index) + phys; in op_x86_phys_to_virt()
/linux-4.4.14/arch/x86/kernel/
Dhw_breakpoint.c419 set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0); in hw_breakpoint_restore()
420 set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1); in hw_breakpoint_restore()
421 set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2); in hw_breakpoint_restore()
422 set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3); in hw_breakpoint_restore()
424 set_debugreg(__this_cpu_read(cpu_dr7), 7); in hw_breakpoint_restore()
Dirq.c238 desc = __this_cpu_read(vector_irq[vector]); in do_IRQ()
361 desc = __this_cpu_read(vector_irq[vector]); in check_irq_vectors_for_cpu_disable()
530 if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector]))) in fixup_irqs()
535 desc = __this_cpu_read(vector_irq[vector]); in fixup_irqs()
546 if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED) in fixup_irqs()
Dirq_32.c77 irqstk = __this_cpu_read(hardirq_stack); in execute_on_irq_stack()
138 irqstk = __this_cpu_read(softirq_stack); in do_softirq_own_stack()
Dirq_64.c51 irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr); in stack_overflow_check()
Dnmi.c328 if (regs->ip == __this_cpu_read(last_nmi_rip)) in default_do_nmi()
403 if (b2b && __this_cpu_read(swallow_nmi)) in default_do_nmi()
Dkvm.c247 if (__this_cpu_read(apf_reason.enabled)) { in kvm_read_and_reset_pf_reason()
248 reason = __this_cpu_read(apf_reason.reason); in kvm_read_and_reset_pf_reason()
367 if (!__this_cpu_read(apf_reason.enabled)) in kvm_pv_disable_apf()
Dsmpboot.c1458 if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF) in mwait_play_dead()
1516 if (__this_cpu_read(cpu_info.x86) >= 4) in hlt_play_dead()
/linux-4.4.14/arch/tile/kernel/
Dirq.c133 unmask_irqs(~__this_cpu_read(irq_disable_mask)); in tile_dev_intr()
153 if (__this_cpu_read(irq_depth) == 0) in tile_irq_chip_enable()
199 if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq))) in tile_irq_chip_eoi()
Dkprobes.c230 p = __this_cpu_read(current_kprobe); in kprobe_handler()
/linux-4.4.14/arch/tile/include/asm/
Dmmu_context.h87 install_page_table(mm->pgd, __this_cpu_read(current_asid)); in enter_lazy_tlb()
99 int asid = __this_cpu_read(current_asid) + 1; in switch_mm()
/linux-4.4.14/arch/powerpc/include/asm/
Dcputime.h61 __this_cpu_read(cputime_last_delta)) in cputime_to_scaled()
63 __this_cpu_read(cputime_scaled_last_delta) / in cputime_to_scaled()
64 __this_cpu_read(cputime_last_delta); in cputime_to_scaled()
Dhardirq.h24 #define local_softirq_pending() __this_cpu_read(irq_stat.__softirq_pending)
/linux-4.4.14/drivers/xen/events/
Devents_2l.c120 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); in evtchn_2l_unmask()
170 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); in evtchn_2l_handle_events()
189 start_word_idx = __this_cpu_read(current_word_idx); in evtchn_2l_handle_events()
190 start_bit_idx = __this_cpu_read(current_bit_idx); in evtchn_2l_handle_events()
Devents_base.c1229 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); in __xen_evtchn_do_upcall()
1243 count = __this_cpu_read(xed_nesting_count); in __xen_evtchn_do_upcall()
/linux-4.4.14/arch/s390/kernel/
Dvtime.c132 u64 mult = __this_cpu_read(mt_scaling_mult); in do_account_vtime()
133 u64 div = __this_cpu_read(mt_scaling_div); in do_account_vtime()
198 u64 mult = __this_cpu_read(mt_scaling_mult); in vtime_account_irq_enter()
199 u64 div = __this_cpu_read(mt_scaling_div); in vtime_account_irq_enter()
Dkprobes.c252 kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe); in push_kprobe()
347 p = __this_cpu_read(current_kprobe); in kprobe_handler()
/linux-4.4.14/arch/arm64/kernel/
Dpsci.c183 u32 *state = __this_cpu_read(psci_power_state); in psci_suspend_finisher()
192 u32 *state = __this_cpu_read(psci_power_state); in cpu_psci_cpu_suspend()
Dfpsimd.c148 if (__this_cpu_read(fpsimd_last_state) == st in fpsimd_thread_switch()
/linux-4.4.14/drivers/irqchip/
Dirq-xtensa-mx.c77 mask = __this_cpu_read(cached_irq_mask) & ~mask; in xtensa_mx_irq_mask()
92 mask |= __this_cpu_read(cached_irq_mask); in xtensa_mx_irq_unmask()
/linux-4.4.14/arch/powerpc/kernel/
Dmce.c146 int index = __this_cpu_read(mce_nest_count) - 1; in get_mce_event()
211 while (__this_cpu_read(mce_queue_count) > 0) { in machine_check_process_queued_event()
212 index = __this_cpu_read(mce_queue_count) - 1; in machine_check_process_queued_event()
Dhw_breakpoint.c229 bp = __this_cpu_read(bp_per_reg); in hw_breakpoint_handler()
Dkprobes.c195 p = __this_cpu_read(current_kprobe); in kprobe_handler()
Dtime.c464 #define test_irq_work_pending() __this_cpu_read(irq_work_pending)
Dsysfs.c397 if (__this_cpu_read(pmcs_enabled)) in ppc_enable_pmcs()
Diommu.c211 pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc()
/linux-4.4.14/drivers/xen/
Dpreempt.c33 if (unlikely(__this_cpu_read(xen_in_preemptible_hcall) in xen_maybe_preempt_hcall()
/linux-4.4.14/arch/x86/kernel/cpu/
Dperf_event_intel_rapl.c216 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); in rapl_hrtimer_handle()
263 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); in rapl_pmu_event_start()
273 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); in rapl_pmu_event_stop()
307 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); in rapl_pmu_event_add()
777 pmu = __this_cpu_read(rapl_pmu); in rapl_pmu_init()
Dcommon.c1226 return __this_cpu_read(debug_stack_usage) || in is_debug_stack()
1227 (addr <= __this_cpu_read(debug_stack_addr) && in is_debug_stack()
1228 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ)); in is_debug_stack()
Dintel_cacheinfo.c260 size_in_kb = __this_cpu_read(cpu_info.x86_cache_size); in amd_cpuid4()
282 eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1; in amd_cpuid4()
Dperf_event.c1157 if (__this_cpu_read(cpu_hw_events.enabled)) in x86_pmu_enable_event()
1804 __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn)); in x86_pmu_cancel_txn()
1805 __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn)); in x86_pmu_cancel_txn()
Dperf_event.h739 u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); in __x86_pmu_enable_event()
Dperf_event_intel_ds.c1357 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); in perf_restore_debug_store()
Dperf_event_intel.c1699 if (!__this_cpu_read(cpu_hw_events.enabled)) in intel_pmu_enable_event()
1754 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); in intel_pmu_reset()
/linux-4.4.14/arch/ia64/include/asm/sn/
Dnodepda.h73 #define sn_nodepda __this_cpu_read(__sn_nodepda)
/linux-4.4.14/arch/x86/include/asm/
Ddebugreg.h89 return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK; in hw_breakpoint_active()
/linux-4.4.14/drivers/clocksource/
Dnumachip.c66 unsigned local_apicid = __this_cpu_read(x86_cpu_to_apicid) & 0xff; in numachip_timer_each()
/linux-4.4.14/arch/x86/xen/
Dspinlock.c38 int irq = __this_cpu_read(lock_kicker_irq); in xen_qlock_wait()
158 int irq = __this_cpu_read(lock_kicker_irq); in xen_lock_spinning()
Dtime.c136 stolen = runnable + offline + __this_cpu_read(xen_residual_stolen); in do_stolen_accounting()
161 src = &__this_cpu_read(xen_vcpu)->time; in xen_clocksource_read()
Denlighten.c833 start = __this_cpu_read(idt_desc.address); in xen_write_idt_entry()
834 end = start + __this_cpu_read(idt_desc.size) + 1; in xen_write_idt_entry()
/linux-4.4.14/arch/ia64/include/asm/
Dswitch_to.h35 # define PERFMON_IS_SYSWIDE() (__this_cpu_read(pfm_syst_info) & 0x1)
Dhw_irq.h154 return __this_cpu_read(vector_irq[vec]); in __ia64_local_vector_to_irq()
/linux-4.4.14/mm/
Dvmstat.c229 x = delta + __this_cpu_read(*p); in __mod_zone_page_state()
231 t = __this_cpu_read(pcp->stat_threshold); in __mod_zone_page_state()
271 t = __this_cpu_read(pcp->stat_threshold); in __inc_zone_state()
293 t = __this_cpu_read(pcp->stat_threshold); in __dec_zone_state()
496 if (!__this_cpu_read(p->expire) || in refresh_cpu_vm_stats()
497 !__this_cpu_read(p->pcp.count)) in refresh_cpu_vm_stats()
511 if (__this_cpu_read(p->pcp.count)) { in refresh_cpu_vm_stats()
Dslab.c624 int node = __this_cpu_read(slab_reap_node); in next_reap_node()
947 int node = __this_cpu_read(slab_reap_node); in reap_alien()
Dmemcontrol.c767 val = __this_cpu_read(memcg->stat->nr_page_events); in mem_cgroup_event_ratelimit()
768 next = __this_cpu_read(memcg->stat->targets[target]); in mem_cgroup_event_ratelimit()
Dslub.c1829 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); in note_cmpxchg_failure()
/linux-4.4.14/kernel/rcu/
Dtree_plugin.h268 if (__this_cpu_read(rcu_data_p->cpu_no_qs.s)) { in rcu_preempt_qs()
270 __this_cpu_read(rcu_data_p->gpnum), in rcu_preempt_qs()
643 __this_cpu_read(rcu_data_p->core_needs_qs) && in rcu_preempt_check_callbacks()
644 __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm)) in rcu_preempt_check_callbacks()
1126 if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && in invoke_rcu_callbacks_kthread()
1127 current != __this_cpu_read(rcu_cpu_kthread_task)) { in invoke_rcu_callbacks_kthread()
1128 rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), in invoke_rcu_callbacks_kthread()
1129 __this_cpu_read(rcu_cpu_kthread_status)); in invoke_rcu_callbacks_kthread()
1140 return __this_cpu_read(rcu_cpu_kthread_task) == current; in rcu_is_callbacks_kthread()
1211 return __this_cpu_read(rcu_cpu_has_work); in rcu_cpu_kthread_should_run()
Dtree.c251 if (__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) { in rcu_sched_qs()
253 __this_cpu_read(rcu_sched_data.gpnum), in rcu_sched_qs()
256 if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) in rcu_sched_qs()
259 if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) { in rcu_sched_qs()
271 if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) { in rcu_bh_qs()
273 __this_cpu_read(rcu_bh_data.gpnum), in rcu_bh_qs()
1059 return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1; in rcu_is_cpu_rrupt_from_idle()
1770 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); in __note_gp_changes()
2361 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) || in rcu_report_qs_rdp()
2372 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); in rcu_report_qs_rdp()
[all …]
/linux-4.4.14/arch/sh/kernel/
Dkprobes.c252 p = __this_cpu_read(current_kprobe); in kprobe_handler()
392 addr = __this_cpu_read(saved_current_opcode.addr); in post_kprobe_handler()
514 p = __this_cpu_read(current_kprobe); in kprobe_exceptions_notify()
/linux-4.4.14/arch/x86/kernel/acpi/
Dsleep.c81 if (__this_cpu_read(cpu_info.cpuid_level) >= 0) { in x86_acpi_suspend_lowlevel()
/linux-4.4.14/arch/ia64/kernel/
Dirq.c45 return __this_cpu_read(vector_irq[vec]); in __ia64_local_vector_to_irq()
Dprocess.c276 info = __this_cpu_read(pfm_syst_info); in ia64_save_extra()
296 info = __this_cpu_read(pfm_syst_info); in ia64_load_extra()
Dirq_ia64.c331 irq = __this_cpu_read(vector_irq[vector]); in smp_irq_move_cleanup_interrupt()
Dkprobes.c826 p = __this_cpu_read(current_kprobe); in pre_kprobes_handler()
Dmca.c1344 if (__this_cpu_read(ia64_mca_tr_reload)) { in ia64_mca_handler()
/linux-4.4.14/arch/powerpc/perf/
Dhv-24x7.c1247 txn_flags = __this_cpu_read(hv_24x7_txn_flags); in h_24x7_event_read()
1260 if (__this_cpu_read(hv_24x7_txn_err)) in h_24x7_event_read()
1318 WARN_ON_ONCE(__this_cpu_read(hv_24x7_txn_flags)); in h_24x7_event_start_txn()
1364 txn_flags = __this_cpu_read(hv_24x7_txn_flags); in h_24x7_event_commit_txn()
1371 ret = __this_cpu_read(hv_24x7_txn_err); in h_24x7_event_commit_txn()
1413 WARN_ON_ONCE(!__this_cpu_read(hv_24x7_txn_flags)); in h_24x7_event_cancel_txn()
/linux-4.4.14/kernel/sched/
Didle.c135 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); in cpuidle_idle_call()
Dcputime.c59 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); in irqtime_account_irq()
/linux-4.4.14/arch/arm64/kvm/
Ddebug.c113 vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK; in kvm_arm_setup_debug()
/linux-4.4.14/arch/x86/kernel/apic/
Dapic_numachip.c103 local_apicid = __this_cpu_read(x86_cpu_to_apicid); in numachip_send_IPI_one()
Dvector.c576 desc = __this_cpu_read(vector_irq[vector]); in smp_irq_move_cleanup_interrupt()
Dx2apic_uv_x.c342 id = x | __this_cpu_read(x2apic_extra_bits); in x2apic_get_apic_id()
/linux-4.4.14/arch/alpha/kernel/
Dtime.c60 #define test_irq_work_pending() __this_cpu_read(irq_work_pending)
/linux-4.4.14/arch/tile/mm/
Dinit.c592 __this_cpu_read(current_asid), in kernel_physical_mapping_init()
599 __install_page_table(pgd_base, __this_cpu_read(current_asid), in kernel_physical_mapping_init()
/linux-4.4.14/drivers/staging/media/lirc/
Dlirc_serial.c330 loops_per_sec = __this_cpu_read(cpu_info.loops_per_jiffy); in init_timing_params()
348 freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy), in init_timing_params()
/linux-4.4.14/drivers/acpi/
Dprocessor_idle.c792 pr = __this_cpu_read(processors); in acpi_idle_enter()
832 struct acpi_processor *pr = __this_cpu_read(processors); in acpi_idle_enter_freeze()
/linux-4.4.14/arch/arm/kvm/
Darm.c77 return __this_cpu_read(kvm_arm_running_vcpu); in kvm_arm_get_running_vcpu()
970 stack_page = __this_cpu_read(kvm_arm_hyp_stack_page); in cpu_init_hyp_mode()
/linux-4.4.14/include/linux/netfilter/
Dx_tables.h316 addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1; in xt_write_recseq_begin()
/linux-4.4.14/net/xfrm/
Dxfrm_ipcomp.c286 tfm = __this_cpu_read(*pos->tfms); in ipcomp_alloc_tfms()
/linux-4.4.14/arch/arc/kernel/
Dkprobes.c240 p = __this_cpu_read(current_kprobe); in arc_kprobe_handler()
/linux-4.4.14/drivers/cpuidle/
Dcpuidle.c60 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); in cpuidle_play_dead()
/linux-4.4.14/drivers/lguest/x86/
Dcore.c93 if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) { in copy_in_guest_info()
/linux-4.4.14/net/rds/
Dib_recv.c472 chpfirst = __this_cpu_read(cache->percpu->first); in rds_ib_recv_cache_put()
481 if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT) in rds_ib_recv_cache_put()
/linux-4.4.14/arch/ia64/sn/kernel/sn2/
Dsn2_smp.c303 if (itc2 > __this_cpu_read(ptcstats.shub_itc_clocks_max)) in sn2_global_tlb_purge()
/linux-4.4.14/arch/powerpc/kvm/
De500mc.c147 __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) { in kvmppc_core_vcpu_load_e500mc()
De500.c111 __this_cpu_read(pcpu_sids.entry[entry->val]) == entry && in local_sid_lookup()
/linux-4.4.14/fs/
Dbuffer.c1294 if (__this_cpu_read(bh_lrus.bhs[0]) != bh) { in bh_lru_install()
1303 __this_cpu_read(bh_lrus.bhs[in]); in bh_lru_install()
1338 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru()
1345 __this_cpu_read(bh_lrus.bhs[i - 1])); in lookup_bh_lru()
/linux-4.4.14/arch/powerpc/platforms/pseries/
Diommu.c272 tcep = __this_cpu_read(tce_page); in tce_buildmulti_pSeriesLP()
474 tcep = __this_cpu_read(tce_page); in tce_setrange_multi_pSeriesLP()
/linux-4.4.14/arch/x86/kernel/kprobes/
Dcore.c657 p = __this_cpu_read(current_kprobe); in kprobe_int3_handler()
/linux-4.4.14/kernel/events/
Dcore.c257 local_samples_len = __this_cpu_read(running_sample_length); in perf_duration_warn()
279 local_samples_len = __this_cpu_read(running_sample_length); in perf_sample_event_took()
2681 if (__this_cpu_read(perf_sched_cb_usages)) in __perf_event_task_sched_out()
2880 if (__this_cpu_read(perf_sched_cb_usages)) in __perf_event_task_sched_in()
3117 __this_cpu_read(perf_throttled_count)) in perf_event_can_stop_tick()
6412 seq = __this_cpu_read(perf_throttled_seq); in __perf_event_overflow()
7423 unsigned int flags = __this_cpu_read(nop_txn_flags); in perf_pmu_commit_txn()
7436 unsigned int flags = __this_cpu_read(nop_txn_flags); in perf_pmu_cancel_txn()
/linux-4.4.14/arch/mips/cavium-octeon/
Docteon-irq.c1254 ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror); in octeon_irq_ip2_ciu()
1271 ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror); in octeon_irq_ip3_ciu()
/linux-4.4.14/Documentation/
Dthis_cpu_ops.txt227 __this_cpu_read(pcp)
/linux-4.4.14/drivers/cpufreq/
Dpowernow-k8.c1135 struct powernow_k8_data *data = __this_cpu_read(powernow_data); in query_values_on_cpu()
/linux-4.4.14/arch/x86/kvm/
Dx86.c1736 this_tsc_khz = __this_cpu_read(cpu_tsc_khz); in kvm_guest_time_update()
5652 return __this_cpu_read(current_vcpu) != NULL; in kvm_is_in_guest()
5659 if (__this_cpu_read(current_vcpu)) in kvm_is_user_mode()
5660 user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu)); in kvm_is_user_mode()
5669 if (__this_cpu_read(current_vcpu)) in kvm_get_guest_ip()
5670 ip = kvm_rip_read(__this_cpu_read(current_vcpu)); in kvm_get_guest_ip()
Dsvm.c1236 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) { in svm_vcpu_load()
/linux-4.4.14/net/ipv4/netfilter/
Dip_tables.c345 jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); in ipt_do_table()
/linux-4.4.14/net/ipv6/netfilter/
Dip6_tables.c371 jumpstack += private->stacksize * __this_cpu_read(nf_skb_duplicated); in ip6t_do_table()
/linux-4.4.14/net/core/
Ddev.c2326 skb->next = __this_cpu_read(softnet_data.completion_queue); in __dev_kfree_skb_irq()
3141 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) in __dev_queue_xmit()
/linux-4.4.14/kernel/trace/
Dtrace.c1641 if (!__this_cpu_read(trace_cmdline_save)) in tracing_record_cmdline()
1943 if (__this_cpu_read(user_stack_count)) in ftrace_trace_userstack()