/linux-4.1.27/kernel/ |
D | watchdog.c |
    241 unsigned long hrint = __this_cpu_read(hrtimer_interrupts); in is_hardlockup()
    243 if (__this_cpu_read(hrtimer_interrupts_saved) == hrint) in is_hardlockup()
    281 if (__this_cpu_read(watchdog_nmi_touch) == true) { in watchdog_overflow_callback()
    296 if (__this_cpu_read(hard_watchdog_warn) == true) in watchdog_overflow_callback()
    326 unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts); in watchdog_timer_fn()
    335 wake_up_process(__this_cpu_read(softlockup_watchdog)); in watchdog_timer_fn()
    341 if (unlikely(__this_cpu_read(softlockup_touch_sync))) { in watchdog_timer_fn()
    373 if (__this_cpu_read(soft_watchdog_warn) == true) { in watchdog_timer_fn()
    382 if (__this_cpu_read(softlockup_task_ptr_saved) != in watchdog_timer_fn()
    477 return __this_cpu_read(hrtimer_interrupts) != in watchdog_should_run()
    [all …]
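Note: hits 241 and 243 are the entire hard-lockup heuristic. A reconstructed sketch around them (per-CPU declarations as in kernel/watchdog.c; abridged, not the verbatim file): the per-CPU hrtimer callback increments hrtimer_interrupts, and the NMI-driven perf callback declares a lockup when that counter has not moved since its last visit.

    static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
    static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);

    /* Called from NMI context; plain __this_cpu_* ops are safe here
     * because an NMI cannot migrate and only this CPU writes these. */
    static int is_hardlockup(void)
    {
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
            return 1;   /* timer interrupts stalled: CPU is stuck */

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
    }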
|
D | context_tracking.c |
    78 if ( __this_cpu_read(context_tracking.state) != state) { in context_tracking_enter()
    79 if (__this_cpu_read(context_tracking.active)) { in context_tracking_enter()
    142 if (__this_cpu_read(context_tracking.state) == state) { in context_tracking_exit()
    143 if (__this_cpu_read(context_tracking.active)) { in context_tracking_exit()
|
D | softirq.c |
    74 struct task_struct *tsk = __this_cpu_read(ksoftirqd); in wakeup_softirqd()
    455 *__this_cpu_read(tasklet_vec.tail) = t; in __tasklet_schedule()
    468 *__this_cpu_read(tasklet_hi_vec.tail) = t; in __tasklet_hi_schedule()
    479 t->next = __this_cpu_read(tasklet_hi_vec.head); in __tasklet_hi_schedule_first()
    490 list = __this_cpu_read(tasklet_vec.head); in tasklet_action()
    514 *__this_cpu_read(tasklet_vec.tail) = t; in tasklet_action()
    526 list = __this_cpu_read(tasklet_hi_vec.head); in tasklet_hi_action()
    550 *__this_cpu_read(tasklet_hi_vec.tail) = t; in tasklet_hi_action()
    710 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; in takeover_tasklets()
    718 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head; in takeover_tasklets()
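Note: the tasklet hits show the per-CPU intrusive-list idiom: tail holds the address of the last node's next field, so append is O(1) using plain per-CPU reads/writes once local interrupts are off. A sketch of __tasklet_schedule() under that layout (close to kernel/softirq.c, abridged):

    struct tasklet_head {
        struct tasklet_struct *head;
        struct tasklet_struct **tail;   /* points at last node's ->next */
    };
    static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);

    void __tasklet_schedule(struct tasklet_struct *t)
    {
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__this_cpu_read(tasklet_vec.tail) = t;         /* link after last node */
        __this_cpu_write(tasklet_vec.tail, &(t->next)); /* tail now points at t->next */
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
    }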
|
D | padata.c | 206 if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) { in padata_get_next()
|
D | kprobes.c | 1035 struct kprobe *cur = __this_cpu_read(kprobe_instance); in aggr_fault_handler() 1051 struct kprobe *cur = __this_cpu_read(kprobe_instance); in aggr_break_handler()
|
/linux-4.1.27/arch/mips/kernel/ |
D | mips-r2-to-r6-emul.c |
    2236 (unsigned long)__this_cpu_read(mipsr2emustats.movs), in mipsr2_stats_show()
    2237 (unsigned long)__this_cpu_read(mipsr2bdemustats.movs)); in mipsr2_stats_show()
    2239 (unsigned long)__this_cpu_read(mipsr2emustats.hilo), in mipsr2_stats_show()
    2240 (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo)); in mipsr2_stats_show()
    2242 (unsigned long)__this_cpu_read(mipsr2emustats.muls), in mipsr2_stats_show()
    2243 (unsigned long)__this_cpu_read(mipsr2bdemustats.muls)); in mipsr2_stats_show()
    2245 (unsigned long)__this_cpu_read(mipsr2emustats.divs), in mipsr2_stats_show()
    2246 (unsigned long)__this_cpu_read(mipsr2bdemustats.divs)); in mipsr2_stats_show()
    2248 (unsigned long)__this_cpu_read(mipsr2emustats.dsps), in mipsr2_stats_show()
    2249 (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps)); in mipsr2_stats_show()
    [all …]
|
D | smp-bmips.c | 331 action = __this_cpu_read(ipi_action_mask); in bmips43xx_ipi_interrupt()
|
D | kprobes.c | 388 p = __this_cpu_read(current_kprobe); in kprobe_handler()
|
/linux-4.1.27/arch/x86/kernel/cpu/mcheck/ |
D | mce_intel.c |
    96 if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) in mce_intel_cmci_poll()
    143 (__this_cpu_read(cmci_storm_state) == CMCI_STORM_ACTIVE)) { in cmci_intel_adjust_timer()
    148 switch (__this_cpu_read(cmci_storm_state)) { in cmci_intel_adjust_timer()
    182 unsigned int cnt = __this_cpu_read(cmci_storm_cnt); in cmci_storm_detect()
    183 unsigned long ts = __this_cpu_read(cmci_time_stamp); in cmci_storm_detect()
    187 if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE) in cmci_storm_detect()
|
D | mce.c |
    375 unsigned bank = __this_cpu_read(injectm.bank); in msr_to_offset()
    395 if (__this_cpu_read(injectm.finished)) { in mce_rdmsrl()
    418 if (__this_cpu_read(injectm.finished)) { in mce_wrmsrl()
    1305 iv = __this_cpu_read(mce_next_interval); in mce_timer_fn()
    1336 unsigned long iv = __this_cpu_read(mce_next_interval); in mce_timer_kick()
|
/linux-4.1.27/arch/sparc/kernel/ |
D | nmi.c |
    103 if (__this_cpu_read(nmi_touch)) { in perfctr_irq()
    107 if (!touched && __this_cpu_read(last_irq_sum) == sum) { in perfctr_irq()
    109 if (__this_cpu_read(alert_counter) == 30 * nmi_hz) in perfctr_irq()
    116 if (__this_cpu_read(wd_enabled)) { in perfctr_irq()
    221 if (!__this_cpu_read(wd_enabled)) in nmi_adjust_hz_one()
|
D | kprobes.c | 158 p = __this_cpu_read(current_kprobe); in kprobe_handler()
|
/linux-4.1.27/kernel/time/ |
D | tick-oneshot.c | 29 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); in tick_program_event() 39 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); in tick_resume_oneshot() 100 ret = __this_cpu_read(tick_cpu_device.mode) == TICKDEV_MODE_ONESHOT; in tick_oneshot_mode_active()
|
D | tick-common.c | 66 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); in tick_is_oneshot_available()
|
D | tick-sched.c | 421 return __this_cpu_read(tick_cpu_sched.tick_stopped); in tick_nohz_tick_stopped() 574 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); in tick_nohz_stop_sched_tick()
|
D | timer.c | 1360 struct tvec_base *base = __this_cpu_read(tvec_bases); in get_next_timer_interrupt() 1410 struct tvec_base *base = __this_cpu_read(tvec_bases); in run_timer_softirq()
|
D | hrtimer.c | 513 return __this_cpu_read(hrtimer_bases.hres_active); in hrtimer_hres_active()
|
/linux-4.1.27/include/asm-generic/ |
D | irq_regs.h | 25 return __this_cpu_read(__irq_regs); in get_irq_regs() 32 old_regs = __this_cpu_read(__irq_regs); in set_irq_regs()
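Note: those two hits are nearly the whole of asm-generic/irq_regs.h; abridged copy for context. The per-CPU __irq_regs slot lets nested code ask which register frame the current interrupt saved:

    DECLARE_PER_CPU(struct pt_regs *, __irq_regs);

    static inline struct pt_regs *get_irq_regs(void)
    {
        return __this_cpu_read(__irq_regs);
    }

    static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
    {
        struct pt_regs *old_regs;

        old_regs = __this_cpu_read(__irq_regs);
        __this_cpu_write(__irq_regs, new_regs);
        return old_regs;    /* caller restores this on the way out */
    }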
|
/linux-4.1.27/include/linux/ |
D | context_tracking_state.h | 33 return __this_cpu_read(context_tracking.active); in context_tracking_cpu_is_enabled() 38 return __this_cpu_read(context_tracking.state) == CONTEXT_USER; in context_tracking_in_user()
|
D | highmem.h | 103 return __this_cpu_read(__kmap_atomic_idx) - 1; in kmap_atomic_idx()
|
D | kprobes.h | 351 return (__this_cpu_read(current_kprobe)); in kprobe_running()
|
D | percpu-defs.h | 430 #define __this_cpu_read(pcp) \ macro
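Note: this is the definition every hit above resolves to. In 4.1 it is, approximately, raw_cpu_read() plus a debug-only check that the caller already has preemption disabled (abridged, quoted from memory):

    #define __this_cpu_read(pcp)                    \
    ({                                              \
        __this_cpu_preempt_check("read");           \
        raw_cpu_read(pcp);                          \
    })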
|
/linux-4.1.27/lib/ |
D | percpu_test.c | 10 WARN(__this_cpu_read(pcp) != (expected), \ 12 __this_cpu_read(pcp), __this_cpu_read(pcp), \
|
D | percpu_counter.c | 80 count = __this_cpu_read(*fbc->counters) + amount; in __percpu_counter_add()
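Note: hit 80 is the fast path of the batched counter: accumulate per CPU and fold into the shared count only when the local value reaches the batch size, so the lock is taken rarely. Close-to-source sketch (lib/percpu_counter.c, abridged):

    void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
    {
        s64 count;

        preempt_disable();
        count = __this_cpu_read(*fbc->counters) + amount;
        if (count >= batch || count <= -batch) {
            unsigned long flags;

            raw_spin_lock_irqsave(&fbc->lock, flags);
            fbc->count += count;                     /* fold into shared total */
            __this_cpu_sub(*fbc->counters, count - amount);
            raw_spin_unlock_irqrestore(&fbc->lock, flags);
        } else {
            this_cpu_add(*fbc->counters, amount);    /* stay CPU-local */
        }
        preempt_enable();
    }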
|
D | idr.c | 119 new = __this_cpu_read(idr_preload_head); in idr_layer_alloc() 413 while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { in idr_preload() 423 new->ary[0] = __this_cpu_read(idr_preload_head); in idr_preload()
|
D | iommu-common.c | 108 unsigned int pool_hash = __this_cpu_read(iommu_hash_common); in iommu_tbl_range_alloc()
|
/linux-4.1.27/arch/x86/oprofile/ |
D | op_model_ppro.c |
    87 __this_cpu_read(cpu_info.x86) == 6 && in ppro_setup_ctrs()
    88 __this_cpu_read(cpu_info.x86_model) == 15)) { in ppro_setup_ctrs()
    215 if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 && in arch_perfmon_setup_counters()
    216 __this_cpu_read(cpu_info.x86_model) == 15) { in arch_perfmon_setup_counters()
|
D | nmi_int.c | 140 return __this_cpu_read(switch_index) + phys; in op_x86_phys_to_virt()
|
/linux-4.1.27/arch/x86/kernel/ |
D | hw_breakpoint.c |
    390 set_debugreg(__this_cpu_read(cpu_debugreg[0]), 0); in hw_breakpoint_restore()
    391 set_debugreg(__this_cpu_read(cpu_debugreg[1]), 1); in hw_breakpoint_restore()
    392 set_debugreg(__this_cpu_read(cpu_debugreg[2]), 2); in hw_breakpoint_restore()
    393 set_debugreg(__this_cpu_read(cpu_debugreg[3]), 3); in hw_breakpoint_restore()
    395 set_debugreg(__this_cpu_read(cpu_dr7), 7); in hw_breakpoint_restore()
|
D | irq.c |
    198 irq = __this_cpu_read(vector_irq[vector]); in do_IRQ()
    302 irq = __this_cpu_read(vector_irq[vector]); in check_irq_vectors_for_cpu_disable()
    450 if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED) in fixup_irqs()
    455 irq = __this_cpu_read(vector_irq[vector]); in fixup_irqs()
    467 if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED) in fixup_irqs()
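Note: hit 198 is the per-CPU vector-to-irq translation done on every external interrupt. Heavily abridged sketch of do_IRQ() (idle-exit and spurious-vector handling elided):

    __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
    {
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned vector = ~regs->orig_ax;   /* entry code stores ~vector */
        unsigned irq;

        irq_enter();
        /* vector_irq[] is this CPU's private vector table */
        irq = __this_cpu_read(vector_irq[vector]);
        if (!handle_irq(irq, regs)) {
            /* spurious interrupt handling elided */
        }
        irq_exit();

        set_irq_regs(old_regs);
        return 1;
    }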
|
D | irq_32.c | 84 irqstk = __this_cpu_read(hardirq_stack); in execute_on_irq_stack() 145 irqstk = __this_cpu_read(softirq_stack); in do_softirq_own_stack()
|
D | irq_64.c | 57 irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr); in stack_overflow_check()
|
D | nmi.c | 328 if (regs->ip == __this_cpu_read(last_nmi_rip)) in default_do_nmi() 403 if (b2b && __this_cpu_read(swallow_nmi)) in default_do_nmi()
|
D | kvm.c | 247 if (__this_cpu_read(apf_reason.enabled)) { in kvm_read_and_reset_pf_reason() 248 reason = __this_cpu_read(apf_reason.reason); in kvm_read_and_reset_pf_reason() 367 if (!__this_cpu_read(apf_reason.enabled)) in kvm_pv_disable_apf()
|
D | smpboot.c | 1416 if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF) in mwait_play_dead() 1474 if (__this_cpu_read(cpu_info.x86) >= 4) in hlt_play_dead()
|
/linux-4.1.27/arch/tile/kernel/ |
D | irq.c | 133 unmask_irqs(~__this_cpu_read(irq_disable_mask)); in tile_dev_intr() 153 if (__this_cpu_read(irq_depth) == 0) in tile_irq_chip_enable() 199 if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq))) in tile_irq_chip_eoi()
|
D | kprobes.c | 230 p = __this_cpu_read(current_kprobe); in kprobe_handler()
|
/linux-4.1.27/arch/tile/include/asm/ |
D | mmu_context.h | 87 install_page_table(mm->pgd, __this_cpu_read(current_asid)); in enter_lazy_tlb() 99 int asid = __this_cpu_read(current_asid) + 1; in switch_mm()
|
/linux-4.1.27/arch/powerpc/include/asm/ |
D | cputime.h | 61 __this_cpu_read(cputime_last_delta)) in cputime_to_scaled() 63 __this_cpu_read(cputime_scaled_last_delta) / in cputime_to_scaled() 64 __this_cpu_read(cputime_last_delta); in cputime_to_scaled()
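Note: hits 61-64 are a single expression. Reassembled, the helper scales a cputime delta by the most recently observed scaled/raw ratio and falls back to the raw value when no delta has been recorded (sketch; the CPU_FTR_SPURR guard is from the surrounding powerpc code, quoted from memory):

    static inline cputime_t cputime_to_scaled(const cputime_t ct)
    {
        if (cpu_has_feature(CPU_FTR_SPURR) &&
            __this_cpu_read(cputime_last_delta))
            return (__force u64) ct *
                __this_cpu_read(cputime_scaled_last_delta) /
                __this_cpu_read(cputime_last_delta);
        return ct;  /* no ratio recorded yet: unscaled */
    }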
|
D | hardirq.h | 24 #define local_softirq_pending() __this_cpu_read(irq_stat.__softirq_pending)
|
/linux-4.1.27/drivers/xen/events/ |
D | events_2l.c |
    120 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); in evtchn_2l_unmask()
    170 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); in evtchn_2l_handle_events()
    189 start_word_idx = __this_cpu_read(current_word_idx); in evtchn_2l_handle_events()
    190 start_bit_idx = __this_cpu_read(current_bit_idx); in evtchn_2l_handle_events()
|
D | events_base.c | 1228 struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); in __xen_evtchn_do_upcall() 1242 count = __this_cpu_read(xed_nesting_count); in __xen_evtchn_do_upcall()
|
/linux-4.1.27/arch/s390/kernel/ |
D | vtime.c |
    122 u64 mult = __this_cpu_read(mt_scaling_mult); in do_account_vtime()
    123 u64 div = __this_cpu_read(mt_scaling_div); in do_account_vtime()
    183 u64 mult = __this_cpu_read(mt_scaling_mult); in vtime_account_irq_enter()
    184 u64 div = __this_cpu_read(mt_scaling_div); in vtime_account_irq_enter()
|
D | kprobes.c | 252 kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe); in push_kprobe() 347 p = __this_cpu_read(current_kprobe); in kprobe_handler()
|
/linux-4.1.27/drivers/irqchip/ |
D | irq-xtensa-mx.c | 78 mask = __this_cpu_read(cached_irq_mask) & ~mask; in xtensa_mx_irq_mask() 93 mask |= __this_cpu_read(cached_irq_mask); in xtensa_mx_irq_unmask()
|
/linux-4.1.27/arch/powerpc/kernel/ |
D | mce.c | 146 int index = __this_cpu_read(mce_nest_count) - 1; in get_mce_event() 211 while (__this_cpu_read(mce_queue_count) > 0) { in machine_check_process_queued_event() 212 index = __this_cpu_read(mce_queue_count) - 1; in machine_check_process_queued_event()
|
D | hw_breakpoint.c | 229 bp = __this_cpu_read(bp_per_reg); in hw_breakpoint_handler()
|
D | kprobes.c | 195 p = __this_cpu_read(current_kprobe); in kprobe_handler()
|
D | time.c | 463 #define test_irq_work_pending() __this_cpu_read(irq_work_pending)
|
D | sysfs.c | 397 if (__this_cpu_read(pmcs_enabled)) in ppc_enable_pmcs()
|
D | iommu.c | 211 pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc()
|
/linux-4.1.27/drivers/xen/ |
D | preempt.c | 33 if (unlikely(__this_cpu_read(xen_in_preemptible_hcall) in xen_maybe_preempt_hcall()
|
/linux-4.1.27/arch/x86/kernel/cpu/ |
D | perf_event_intel_rapl.c |
    219 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); in rapl_hrtimer_handle()
    266 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); in rapl_pmu_event_start()
    276 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); in rapl_pmu_event_stop()
    310 struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu); in rapl_pmu_event_add()
    764 pmu = __this_cpu_read(rapl_pmu); in rapl_pmu_init()
|
D | common.c | 1247 return __this_cpu_read(debug_stack_usage) || in is_debug_stack() 1248 (addr <= __this_cpu_read(debug_stack_addr) && in is_debug_stack() 1249 addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ)); in is_debug_stack()
|
D | intel_cacheinfo.c | 260 size_in_kb = __this_cpu_read(cpu_info.x86_cache_size); in amd_cpuid4() 282 eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1; in amd_cpuid4()
|
D | perf_event.c | 1139 if (__this_cpu_read(cpu_hw_events.enabled)) in x86_pmu_enable_event() 1762 __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn)); in x86_pmu_cancel_txn() 1763 __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn)); in x86_pmu_cancel_txn()
|
D | perf_event.h | 721 u64 disable_mask = __this_cpu_read(cpu_hw_events.perf_ctr_virt_mask); in __x86_pmu_enable_event()
|
D | perf_event_intel_ds.c | 1110 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); in perf_restore_debug_store()
|
D | perf_event_intel.c | 1477 if (!__this_cpu_read(cpu_hw_events.enabled)) in intel_pmu_enable_event() 1532 struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds); in intel_pmu_reset()
|
/linux-4.1.27/arch/ia64/include/asm/sn/ |
D | nodepda.h | 73 #define sn_nodepda __this_cpu_read(__sn_nodepda)
|
/linux-4.1.27/arch/x86/include/asm/ |
D | debugreg.h | 89 return __this_cpu_read(cpu_dr7) & DR_GLOBAL_ENABLE_MASK; in hw_breakpoint_active()
|
/linux-4.1.27/mm/ |
D | vmstat.c |
    229 x = delta + __this_cpu_read(*p); in __mod_zone_page_state()
    231 t = __this_cpu_read(pcp->stat_threshold); in __mod_zone_page_state()
    271 t = __this_cpu_read(pcp->stat_threshold); in __inc_zone_state()
    293 t = __this_cpu_read(pcp->stat_threshold); in __dec_zone_state()
    496 if (!__this_cpu_read(p->expire) || in refresh_cpu_vm_stats()
    497 !__this_cpu_read(p->pcp.count)) in refresh_cpu_vm_stats()
    511 if (__this_cpu_read(p->pcp.count)) { in refresh_cpu_vm_stats()
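Note: hits 229/231 show the canonical threshold-fold pattern for VM statistics: deltas accumulate in a small per-CPU diff and spill to the zone-wide counter only when a per-CPU threshold is crossed. Sketch close to mm/vmstat.c:

    void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
                               long delta)
    {
        struct per_cpu_pageset __percpu *pcp = zone->pageset;
        s8 __percpu *p = pcp->vm_stat_diff + item;
        long x;
        long t;

        x = delta + __this_cpu_read(*p);
        t = __this_cpu_read(pcp->stat_threshold);

        if (unlikely(x > t || x < -t)) {
            zone_page_state_add(x, zone, item); /* fold to global counter */
            x = 0;
        }
        __this_cpu_write(*p, x);
    }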
|
D | slab.c | 623 int node = __this_cpu_read(slab_reap_node); in next_reap_node() 946 int node = __this_cpu_read(slab_reap_node); in reap_alien()
|
D | memcontrol.c | 905 val = __this_cpu_read(memcg->stat->nr_page_events); in mem_cgroup_event_ratelimit() 906 next = __this_cpu_read(memcg->stat->targets[target]); in mem_cgroup_event_ratelimit()
|
D | slub.c | 1786 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid); in note_cmpxchg_failure()
|
/linux-4.1.27/arch/ia64/include/asm/ |
D | switch_to.h | 35 # define PERFMON_IS_SYSWIDE() (__this_cpu_read(pfm_syst_info) & 0x1)
|
D | hw_irq.h | 162 return __this_cpu_read(vector_irq[vec]); in __ia64_local_vector_to_irq()
|
/linux-4.1.27/net/netfilter/ |
D | xt_TEE.c | 91 if (__this_cpu_read(tee_active)) in tee_tg4() 172 if (__this_cpu_read(tee_active)) in tee_tg6()
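Note: both hits are the same per-CPU recursion guard. TEE re-injects a cloned skb into the output path, which can re-enter the target on the same CPU, so a CPU-local flag short-circuits the second pass. Heavily abridged sketch (routing and the IPv6 variant elided):

    static DEFINE_PER_CPU(bool, tee_active);

    static unsigned int
    tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
    {
        if (__this_cpu_read(tee_active))
            return XT_CONTINUE;         /* already inside a TEE send */

        skb = pskb_copy(skb, GFP_ATOMIC);
        if (skb == NULL)
            return XT_CONTINUE;

        /* route-to-gateway step elided */
        __this_cpu_write(tee_active, true);
        ip_local_out(skb);              /* may re-enter this target */
        __this_cpu_write(tee_active, false);
        return XT_CONTINUE;
    }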
|
/linux-4.1.27/kernel/rcu/ |
D | tree_plugin.h |
    119 if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) { in rcu_preempt_qs()
    121 __this_cpu_read(rcu_preempt_data.gpnum), in rcu_preempt_qs()
    494 __this_cpu_read(rcu_preempt_data.qs_pending) && in rcu_preempt_check_callbacks()
    495 !__this_cpu_read(rcu_preempt_data.passed_quiesce)) in rcu_preempt_check_callbacks()
    1136 if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && in invoke_rcu_callbacks_kthread()
    1137 current != __this_cpu_read(rcu_cpu_kthread_task)) { in invoke_rcu_callbacks_kthread()
    1138 rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), in invoke_rcu_callbacks_kthread()
    1139 __this_cpu_read(rcu_cpu_kthread_status)); in invoke_rcu_callbacks_kthread()
    1150 return __this_cpu_read(rcu_cpu_kthread_task) == current; in rcu_is_callbacks_kthread()
    1221 return __this_cpu_read(rcu_cpu_has_work); in rcu_cpu_kthread_should_run()
|
D | tree.c |
    215 if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) { in rcu_sched_qs()
    217 __this_cpu_read(rcu_sched_data.gpnum), in rcu_sched_qs()
    225 if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) { in rcu_bh_qs()
    227 __this_cpu_read(rcu_bh_data.gpnum), in rcu_bh_qs()
    997 return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1; in rcu_is_cpu_rrupt_from_idle()
    1704 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); in __note_gp_changes()
    2269 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) || in rcu_report_qs_rdp()
    2280 rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr); in rcu_report_qs_rdp()
    2327 rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) in rcu_check_quiescent_state()
    2794 rnp = __this_cpu_read(rsp->rda->mynode); in force_quiescent_state()
    [all …]
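Note: hits 215/217 record a quiescent state for the sched flavor, at most once per grace period per CPU. Abridged sketch close to kernel/rcu/tree.c (4.1):

    /* Called on entry to idle/userspace and from the scheduler. */
    static void rcu_sched_qs(void)
    {
        if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
            trace_rcu_grace_period(TPS("rcu_sched"),
                                   __this_cpu_read(rcu_sched_data.gpnum),
                                   TPS("cpuqs"));
            __this_cpu_write(rcu_sched_data.passed_quiesce, 1);
        }
    }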
|
/linux-4.1.27/arch/sh/kernel/ |
D | kprobes.c | 252 p = __this_cpu_read(current_kprobe); in kprobe_handler() 392 addr = __this_cpu_read(saved_current_opcode.addr); in post_kprobe_handler() 514 p = __this_cpu_read(current_kprobe); in kprobe_exceptions_notify()
|
/linux-4.1.27/arch/x86/kernel/acpi/ |
D | sleep.c | 81 if (__this_cpu_read(cpu_info.cpuid_level) >= 0) { in x86_acpi_suspend_lowlevel()
|
/linux-4.1.27/kernel/sched/ |
D | idle.c | 81 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); in cpuidle_idle_call()
|
D | cputime.c | 59 delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); in irqtime_account_irq()
|
/linux-4.1.27/arch/ia64/kernel/ |
D | irq.c | 45 return __this_cpu_read(vector_irq[vec]); in __ia64_local_vector_to_irq()
|
D | process.c | 276 info = __this_cpu_read(pfm_syst_info); in ia64_save_extra() 296 info = __this_cpu_read(pfm_syst_info); in ia64_load_extra()
|
D | irq_ia64.c | 331 irq = __this_cpu_read(vector_irq[vector]); in smp_irq_move_cleanup_interrupt()
|
D | kprobes.c | 826 p = __this_cpu_read(current_kprobe); in pre_kprobes_handler()
|
D | mca.c | 1344 if (__this_cpu_read(ia64_mca_tr_reload)) { in ia64_mca_handler()
|
/linux-4.1.27/arch/arm64/kernel/ |
D | psci.c | 529 struct psci_power_state *state = __this_cpu_read(psci_power_state); in psci_suspend_finisher() 538 struct psci_power_state *state = __this_cpu_read(psci_power_state); in cpu_psci_cpu_suspend()
|
D | fpsimd.c | 147 if (__this_cpu_read(fpsimd_last_state) == st in fpsimd_thread_switch()
|
/linux-4.1.27/arch/x86/xen/ |
D | time.c | 136 stolen = runnable + offline + __this_cpu_read(xen_residual_stolen); in do_stolen_accounting() 161 src = &__this_cpu_read(xen_vcpu)->time; in xen_clocksource_read()
|
D | spinlock.c | 111 int irq = __this_cpu_read(lock_kicker_irq); in xen_lock_spinning()
|
D | enlighten.c | 831 start = __this_cpu_read(idt_desc.address); in xen_write_idt_entry() 832 end = start + __this_cpu_read(idt_desc.size) + 1; in xen_write_idt_entry()
|
/linux-4.1.27/arch/mips/loongson/loongson-3/ |
D | smp.c | 307 while (!__this_cpu_read(core0_c0count)) { in loongson3_init_secondary() 314 initcount = __this_cpu_read(core0_c0count) + i; in loongson3_init_secondary()
|
/linux-4.1.27/arch/alpha/kernel/ |
D | time.c | 60 #define test_irq_work_pending() __this_cpu_read(irq_work_pending)
|
/linux-4.1.27/arch/tile/mm/ |
D | init.c | 592 __this_cpu_read(current_asid), in kernel_physical_mapping_init() 599 __install_page_table(pgd_base, __this_cpu_read(current_asid), in kernel_physical_mapping_init()
|
/linux-4.1.27/arch/arm/kvm/ |
D | arm.c | 77 return __this_cpu_read(kvm_arm_running_vcpu); in kvm_arm_get_running_vcpu() 907 stack_page = __this_cpu_read(kvm_arm_hyp_stack_page); in cpu_init_hyp_mode()
|
/linux-4.1.27/include/linux/netfilter/ |
D | x_tables.h | 303 addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1; in xt_write_recseq_begin()
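Note: hit 303 is the entry half of the per-CPU sequence counter that lets table replacement wait out in-flight packet processing. Close-to-source sketch of the inline:

    static inline unsigned int xt_write_recseq_begin(void)
    {
        unsigned int addend;

        /* Low-order bit of the sequence is set if this CPU already
         * called xt_write_recseq_begin() (recursive entry). */
        addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

        /* Like write_seqcount_begin(), but addend is 0 or 1; the
         * unconditional add avoids a test-and-branch. */
        __this_cpu_add(xt_recseq.sequence, addend);
        smp_wmb();

        return addend;  /* passed back to xt_write_recseq_end() */
    }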
|
/linux-4.1.27/drivers/staging/media/lirc/ |
D | lirc_serial.c | 341 loops_per_sec = __this_cpu_read(cpu_info.loops_per_jiffy); in init_timing_params() 362 freq, duty_cycle, __this_cpu_read(cpu_info.loops_per_jiffy), in init_timing_params()
|
/linux-4.1.27/drivers/acpi/ |
D | processor_idle.c | 796 pr = __this_cpu_read(processors); in acpi_idle_enter() 836 struct acpi_processor *pr = __this_cpu_read(processors); in acpi_idle_enter_freeze()
|
/linux-4.1.27/net/xfrm/ |
D | xfrm_ipcomp.c | 286 tfm = __this_cpu_read(*pos->tfms); in ipcomp_alloc_tfms()
|
/linux-4.1.27/drivers/cpuidle/ |
D | cpuidle.c | 60 struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); in cpuidle_play_dead()
|
/linux-4.1.27/arch/arc/kernel/ |
D | kprobes.c | 240 p = __this_cpu_read(current_kprobe); in arc_kprobe_handler()
|
/linux-4.1.27/drivers/lguest/x86/ |
D | core.c | 93 if (__this_cpu_read(lg_last_cpu) != cpu || cpu->last_pages != pages) { in copy_in_guest_info()
|
/linux-4.1.27/net/rds/ |
D | ib_recv.c | 428 chpfirst = __this_cpu_read(cache->percpu->first); in rds_ib_recv_cache_put() 437 if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT) in rds_ib_recv_cache_put()
|
/linux-4.1.27/arch/ia64/sn/kernel/sn2/ |
D | sn2_smp.c | 303 if (itc2 > __this_cpu_read(ptcstats.shub_itc_clocks_max)) in sn2_global_tlb_purge()
|
/linux-4.1.27/arch/powerpc/kvm/ |
D | e500mc.c | 147 __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) { in kvmppc_core_vcpu_load_e500mc()
|
D | e500.c | 111 __this_cpu_read(pcpu_sids.entry[entry->val]) == entry && in local_sid_lookup()
|
/linux-4.1.27/kernel/trace/ |
D | trace_functions_graph.c | 286 if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) in __trace_graph_entry() 401 if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) in __trace_graph_return()
|
D | trace.c | 1619 if (!__this_cpu_read(trace_cmdline_save)) in tracing_record_cmdline() 1769 if (unlikely(__this_cpu_read(ftrace_cpu_disabled))) in trace_function() 1946 if (__this_cpu_read(user_stack_count)) in ftrace_trace_userstack()
|
/linux-4.1.27/arch/powerpc/platforms/pseries/ |
D | iommu.c | 203 tcep = __this_cpu_read(tce_page); in tce_buildmulti_pSeriesLP() 402 tcep = __this_cpu_read(tce_page); in tce_setrange_multi_pSeriesLP()
|
/linux-4.1.27/fs/ |
D | buffer.c |
    1270 if (__this_cpu_read(bh_lrus.bhs[0]) != bh) { in bh_lru_install()
    1279 __this_cpu_read(bh_lrus.bhs[in]); in bh_lru_install()
    1314 struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]); in lookup_bh_lru()
    1321 __this_cpu_read(bh_lrus.bhs[i - 1])); in lookup_bh_lru()
|
/linux-4.1.27/arch/x86/kernel/apic/ |
D | vector.c | 411 irq = __this_cpu_read(vector_irq[vector]); in smp_irq_move_cleanup_interrupt()
|
D | x2apic_uv_x.c | 343 id = x | __this_cpu_read(x2apic_extra_bits); in x2apic_get_apic_id()
|
/linux-4.1.27/arch/x86/kernel/kprobes/ |
D | core.c | 657 p = __this_cpu_read(current_kprobe); in kprobe_int3_handler()
|
/linux-4.1.27/arch/mips/cavium-octeon/ |
D | octeon-irq.c | 1250 ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror); in octeon_irq_ip2_ciu() 1267 ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror); in octeon_irq_ip3_ciu()
|
/linux-4.1.27/Documentation/ |
D | this_cpu_ops.txt | 227 __this_cpu_read(pcp)
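Note: per this_cpu_ops.txt, __this_cpu_read() does not itself disable preemption; it only (in debug builds) checks that the caller already did. Minimal usage sketch with a hypothetical per-CPU variable my_hits, not taken from the listing above:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, my_hits);  /* hypothetical */

    static unsigned long read_my_hits(void)
    {
        unsigned long v;

        preempt_disable();              /* __this_cpu_read() requires this */
        v = __this_cpu_read(my_hits);
        preempt_enable();
        return v;
    }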
|
/linux-4.1.27/drivers/cpufreq/ |
D | powernow-k8.c | 1143 struct powernow_k8_data *data = __this_cpu_read(powernow_data); in query_values_on_cpu()
|
/linux-4.1.27/kernel/events/ |
D | core.c |
    254 local_samples_len = __this_cpu_read(running_sample_length); in perf_duration_warn()
    276 local_samples_len = __this_cpu_read(running_sample_length); in perf_sample_event_took()
    2659 if (__this_cpu_read(perf_sched_cb_usages)) in __perf_event_task_sched_out()
    2852 if (__this_cpu_read(perf_sched_cb_usages)) in __perf_event_task_sched_in()
    3089 __this_cpu_read(perf_throttled_count)) in perf_event_can_stop_tick()
    6111 seq = __this_cpu_read(perf_throttled_seq); in __perf_event_overflow()
|
/linux-4.1.27/arch/x86/kvm/ |
D | x86.c |
    1617 this_tsc_khz = __this_cpu_read(cpu_tsc_khz); in kvm_guest_time_update()
    5674 return __this_cpu_read(current_vcpu) != NULL; in kvm_is_in_guest()
    5681 if (__this_cpu_read(current_vcpu)) in kvm_is_user_mode()
    5682 user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu)); in kvm_is_user_mode()
    5691 if (__this_cpu_read(current_vcpu)) in kvm_get_guest_ip()
    5692 ip = kvm_rip_read(__this_cpu_read(current_vcpu)); in kvm_get_guest_ip()
|
D | svm.c | 1322 svm->tsc_ratio != __this_cpu_read(current_tsc_ratio)) { in svm_vcpu_load()
|
/linux-4.1.27/net/core/ |
D | dev.c | 2294 skb->next = __this_cpu_read(softnet_data.completion_queue); in __dev_kfree_skb_irq() 2987 if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) in __dev_queue_xmit()
|