Searched refs:READ_ONCE (Results 1 – 142 of 142) sorted by relevance
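
Every hit below boils down to the same idea: READ_ONCE() turns a plain load into an essentially volatile-qualified access, so the compiler cannot tear it, fuse it with neighbouring loads, or hoist it out of a loop while another CPU is updating the value. A minimal user-space sketch of that idea (READ_ONCE_SKETCH is an illustrative name, not the kernel macro):

    #include <stdio.h>

    /* Simplified stand-in for the kernel's READ_ONCE(): force a single
     * volatile load instead of whatever the optimizer would prefer. */
    #define READ_ONCE_SKETCH(x) (*(const volatile __typeof__(x) *)&(x))

    int shared;                 /* imagine another thread writing this */

    int main(void)
    {
            shared = 42;
            int a = READ_ONCE_SKETCH(shared);  /* fresh load from memory */
            int b = READ_ONCE_SKETCH(shared);  /* a second, separate load */
            printf("%d %d\n", a, b);
            return 0;
    }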

/linux-4.4.14/kernel/rcu/
Dtree.c228 return READ_ONCE(rnp->qsmaskinitnext); in rcu_rnp_online_cpus()
238 return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum); in rcu_gp_in_progress()
327 if (READ_ONCE(rdp->mynode->completed) != in rcu_momentary_dyntick_idle()
328 READ_ONCE(rdp->cond_resched_completed)) in rcu_momentary_dyntick_idle()
549 *flags = READ_ONCE(rsp->gp_flags); in rcutorture_get_gp_data()
550 *gpnum = READ_ONCE(rsp->gpnum); in rcutorture_get_gp_data()
551 *completed = READ_ONCE(rsp->completed); in rcutorture_get_gp_data()
597 int idx = (READ_ONCE(rnp->completed) + 1) & 0x1; in rcu_future_needs_gp()
600 return READ_ONCE(*fp); in rcu_future_needs_gp()
623 ULONG_CMP_LT(READ_ONCE(rsp->completed), in cpu_needs_another_gp()
[all …]
Dupdate.c202 if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s))) in __rcu_read_unlock()
209 int rrln = READ_ONCE(t->rcu_read_lock_nesting); in __rcu_read_unlock()
458 int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout); in rcu_jiffies_till_stall_check()
619 if (!READ_ONCE(t->rcu_tasks_holdout) || in check_holdout_task()
620 t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) || in check_holdout_task()
621 !READ_ONCE(t->on_rq) || in check_holdout_task()
708 if (t != current && READ_ONCE(t->on_rq) && in rcu_tasks_kthread()
711 t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw); in rcu_tasks_kthread()
741 rtst = READ_ONCE(rcu_task_stall_timeout); in rcu_tasks_kthread()
797 if (READ_ONCE(rcu_tasks_kthread_ptr)) { in rcu_spawn_tasks_kthread()
Dtree_plugin.h988 if (READ_ONCE(rnp->exp_tasks) == NULL && in rcu_boost()
989 READ_ONCE(rnp->boost_tasks) == NULL) in rcu_boost()
1042 return READ_ONCE(rnp->exp_tasks) != NULL || in rcu_boost()
1043 READ_ONCE(rnp->boost_tasks) != NULL; in rcu_boost()
1452 unlikely(READ_ONCE(rdp->gpwrap))) && in rcu_try_advance_all_cbs()
1531 tne = READ_ONCE(tick_nohz_active); in rcu_prepare_for_idle()
1755 READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart, in print_cpu_stall_info()
1866 if (!READ_ONCE(rdp_leader->nocb_kthread)) in wake_nocb_leader()
1868 if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) { in wake_nocb_leader()
1902 rhp = READ_ONCE(rdp->nocb_head); in rcu_nocb_cpu_needs_barrier()
[all …]
Dsrcu.c154 t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]); in srcu_readers_seq_idx()
171 t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]); in srcu_readers_active_idx()
269 sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]); in srcu_readers_active()
270 sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]); in srcu_readers_active()
300 idx = READ_ONCE(sp->completed) & 0x1; in __srcu_read_lock()
Dtree_trace.c277 READ_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen); in print_one_rcu_state()
323 completed = READ_ONCE(rsp->completed); in show_one_rcugp()
324 gpnum = READ_ONCE(rsp->gpnum); in show_one_rcugp()
Dtiny_plugin.h147 js = READ_ONCE(rcp->jiffies_stall); in check_cpu_stall()
/linux-4.4.14/kernel/locking/
Dqspinlock_paravirt.h130 if (READ_ONCE(he->lock) == lock) { in pv_unhash()
131 node = READ_ONCE(he->node); in pv_unhash()
171 if (READ_ONCE(node->locked)) in pv_wait_node()
187 if (!READ_ONCE(node->locked)) in pv_wait_node()
259 if (READ_ONCE(pn->state) == vcpu_hashed) in pv_wait_head()
264 if (!READ_ONCE(l->locked)) in pv_wait_head()
Drwsem-xadd.c282 long old, count = READ_ONCE(sem->count); in rwsem_try_write_lock_unqueued()
308 owner = READ_ONCE(sem->owner); in rwsem_can_spin_on_owner()
310 long count = READ_ONCE(sem->count); in rwsem_can_spin_on_owner()
353 if (READ_ONCE(sem->owner)) in rwsem_spin_on_owner()
361 count = READ_ONCE(sem->count); in rwsem_spin_on_owner()
380 owner = READ_ONCE(sem->owner); in rwsem_optimistic_spin()
467 count = READ_ONCE(sem->count); in rwsem_down_write_failed()
Dmcs_spinlock.h101 struct mcs_spinlock *next = READ_ONCE(node->next); in mcs_spin_unlock()
110 while (!(next = READ_ONCE(node->next))) in mcs_spin_unlock()
Dosq_lock.c118 while (!READ_ONCE(node->locked)) { in osq_lock()
157 prev = READ_ONCE(node->prev); in osq_lock()
Dmutex.c263 owner = READ_ONCE(lock->owner); in mutex_can_spin_on_owner()
337 if (READ_ONCE(ww->ctx)) in mutex_optimistic_spin()
345 owner = READ_ONCE(lock->owner); in mutex_optimistic_spin()
484 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx); in __ww_mutex_lock_check_stamp()
Dqrwlock.c129 if (!READ_ONCE(l->wmode) && in queued_write_lock_slowpath()
Dqspinlock.c439 while (!(next = READ_ONCE(node->next))) in queued_spin_lock_slowpath()
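
A large share of the locking hits above are spin-wait loops: the waiter polls a flag it does not own, so every iteration must be a real load. The shape of that idiom, as a sketch rather than a copy of any one of the files above (spin_until_set() is an illustrative name, not a kernel function):

    /* Sketch of the spin-wait idiom behind hits such as osq_lock() and
     * mcs_spin_unlock(): re-read the flag on every pass, and give the
     * CPU a pause/yield hint while spinning. */
    static void spin_until_set(int *flag)
    {
            while (!READ_ONCE(*flag))
                    cpu_relax();
    }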
/linux-4.4.14/arch/x86/include/asm/
Dspinlock.h118 inc.head = READ_ONCE(lock->tickets.head); in arch_spin_lock()
135 old.tickets = READ_ONCE(lock->tickets); in arch_spin_trylock()
166 struct __raw_tickets tmp = READ_ONCE(lock->tickets); in arch_spin_is_locked()
173 struct __raw_tickets tmp = READ_ONCE(lock->tickets); in arch_spin_is_contended()
188 __ticket_t head = READ_ONCE(lock->tickets.head); in arch_spin_unlock_wait()
191 struct __raw_tickets tmp = READ_ONCE(lock->tickets); in arch_spin_unlock_wait()
Dbarrier.h65 typeof(*p) ___p1 = READ_ONCE(*p); \
82 typeof(*p) ___p1 = READ_ONCE(*p); \
Datomic.h27 return READ_ONCE((v)->counter); in atomic_read()
Datomic64_64.h21 return READ_ONCE((v)->counter); in atomic64_read()
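
The atomic.h hits here, and the matching ones for the other architectures further down, all show the same thing: a relaxed atomic read is just READ_ONCE() of the counter field. A sketch of that recurring definition (atomic_read_sketch is an illustrative name):

    /* The pattern the atomic_read() hits keep repeating: the "atomicity"
     * of a plain read is simply a single, non-torn volatile load. */
    static inline int atomic_read_sketch(const atomic_t *v)
    {
            return READ_ONCE(v->counter);
    }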
/linux-4.4.14/kernel/
Dtorture.c412 while (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { in torture_shutdown_absorb()
483 if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP) { in torture_shutdown_notify()
527 while (READ_ONCE(stutter_pause_test) || in stutter_wait()
528 (torture_runnable && !READ_ONCE(*torture_runnable))) { in stutter_wait()
530 if (READ_ONCE(stutter_pause_test) == 1) in stutter_wait()
533 while (READ_ONCE(stutter_pause_test)) in stutter_wait()
646 if (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) { in torture_cleanup_begin()
685 return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP; in torture_must_stop_irq()
Dfork.c1104 cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); in posix_cpu_timers_init_group()
/linux-4.4.14/arch/x86/xen/
Dspinlock.c55 if (READ_ONCE(*byte) != val) in xen_qlock_wait()
94 u8 old = READ_ONCE(zero_stats); in check_zero()
217 head = READ_ONCE(lock->tickets.head); in xen_lock_spinning()
259 if (READ_ONCE(w->lock) == lock && in xen_unlock_kick()
260 READ_ONCE(w->want) == next) { in xen_unlock_kick()
Dp2m.c590 p2m_pfn = pte_pfn(READ_ONCE(*ptep)); in xen_alloc_p2m_entry()
/linux-4.4.14/arch/tile/include/asm/
Dspinlock_32.h46 int curr = READ_ONCE(lock->current_ticket); in arch_spin_is_locked()
47 int next = READ_ONCE(lock->next_ticket); in arch_spin_is_locked()
Datomic.h37 return READ_ONCE(v->counter); in atomic_read()
Dspinlock_64.h50 u32 val = READ_ONCE(lock->lock); in arch_spin_is_locked()
Datomic_64.h85 #define atomic64_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/tile/lib/
Dspinlock_32.c68 int curr = READ_ONCE(lock->current_ticket); in arch_spin_unlock_wait()
69 int next = READ_ONCE(lock->next_ticket); in arch_spin_unlock_wait()
78 } while (READ_ONCE(lock->current_ticket) == curr); in arch_spin_unlock_wait()
Dspinlock_64.c68 u32 val = READ_ONCE(lock->lock); in arch_spin_unlock_wait()
78 } while (arch_spin_current(READ_ONCE(lock->lock)) == curr); in arch_spin_unlock_wait()
/linux-4.4.14/arch/frv/include/asm/
Datomic_defs.h87 long long *__v = READ_ONCE(v); \
138 long long *__v = READ_ONCE(v); \
Datomic.h35 #define atomic_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/powerpc/kvm/
Dbook3s_hv_rm_xics.c161 old_state = new_state = READ_ONCE(icp->state); in icp_rm_try_to_deliver()
346 old_state = new_state = READ_ONCE(icp->state); in icp_rm_down_cppr()
405 old_state = new_state = READ_ONCE(icp->state); in kvmppc_rm_h_xirr()
471 old_state = new_state = READ_ONCE(icp->state); in kvmppc_rm_h_ipi()
546 old_state = new_state = READ_ONCE(icp->state); in kvmppc_rm_h_cppr()
Dbook3s_xics.c343 old_state = new_state = READ_ONCE(icp->state); in icp_try_to_deliver()
533 old_state = new_state = READ_ONCE(icp->state); in icp_down_cppr()
588 old_state = new_state = READ_ONCE(icp->state); in kvmppc_h_xirr()
655 old_state = new_state = READ_ONCE(icp->state); in kvmppc_h_ipi()
700 state = READ_ONCE(icp->state); in kvmppc_h_ipoll()
742 old_state = new_state = READ_ONCE(icp->state); in kvmppc_h_cppr()
925 state.raw = READ_ONCE(icp->state.raw); in xics_debug_show()
1134 old_state = READ_ONCE(icp->state); in kvmppc_xics_set_icp()
De500_mmu_host.c481 pte_t pte = READ_ONCE(*ptep); in kvmppc_e500_shadow_map()
/linux-4.4.14/arch/arm64/include/asm/
Dspinlock.h132 return !arch_spin_value_unlocked(READ_ONCE(*lock)); in arch_spin_is_locked()
137 arch_spinlock_t lockval = READ_ONCE(*lock); in arch_spin_is_contended()
Datomic.h56 #define atomic_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/x86/entry/
Dcommon.c260 cached_flags = READ_ONCE(pt_regs_to_thread_info(regs)->flags); in exit_to_usermode_loop()
279 cached_flags = READ_ONCE(ti->flags); in prepare_exit_to_usermode()
331 u32 cached_flags = READ_ONCE(ti->flags); in syscall_return_slowpath()
373 if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) { in do_syscall_32_irqs_on()
/linux-4.4.14/Documentation/
Dmemory-barriers.txt197 WRITE_ONCE(Q, P); smp_read_barrier_depends(); D = READ_ONCE(*Q);
204 does nothing, but it is required for DEC Alpha. The READ_ONCE()
212 a = READ_ONCE(*X); WRITE_ONCE(*X, b);
220 WRITE_ONCE(*X, c); d = READ_ONCE(*X);
232 with memory references that are not protected by READ_ONCE() and
524 Q = READ_ONCE(P);
551 Q = READ_ONCE(P);
578 Q = READ_ONCE(P);
599 q = READ_ONCE(a);
602 p = READ_ONCE(b);
[all …]
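
The memory-barriers.txt fragments above are the pointer-publication example: the writer initialises the data, issues a write barrier, then publishes the pointer with WRITE_ONCE(); the reader fetches the pointer with READ_ONCE() and issues a data dependency barrier, which does nothing except on DEC Alpha, before dereferencing it. A hedged sketch of that documented pattern, with hypothetical names for the shared state:

    struct item {
            int payload;
    };

    struct item *published;             /* shared pointer, read locklessly */

    /* Writer side: make the pointee visible before the pointer. */
    static void publish(struct item *p)
    {
            p->payload = 1;
            smp_wmb();                  /* order init before publication */
            WRITE_ONCE(published, p);
    }

    /* Reader side: one load of the pointer, then a dependent load. */
    static int consume(void)
    {
            struct item *p = READ_ONCE(published);

            if (!p)
                    return 0;
            smp_read_barrier_depends(); /* needed only on DEC Alpha */
            return p->payload;
    }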
/linux-4.4.14/include/linux/
Dseqlock.h112 ret = READ_ONCE(s->sequence); in __read_seqcount_begin()
131 unsigned ret = READ_ONCE(s->sequence); in raw_read_seqcount()
183 unsigned ret = READ_ONCE(s->sequence); in raw_seqcount_begin()
Dcompiler.h286 #define READ_ONCE(x) __READ_ONCE(x, 1) macro
525 typeof(p) _________p1 = READ_ONCE(p); \
Drcupdate.h397 if (READ_ONCE((t)->rcu_tasks_holdout)) \
602 typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
Dpage-flags.h400 return READ_ONCE(page->compound_head) & 1; in __PAGEFLAG()
415 unsigned long head = READ_ONCE(page->compound_head); in compound_head()
Dtcp.h395 int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn); in fastopen_queue_tune()
Drculist.h288 struct list_head *__next = READ_ONCE(__ptr->next); \
Dsched.h3176 return READ_ONCE(tsk->signal->rlim[limit].rlim_cur); in task_rlimit()
3182 return READ_ONCE(tsk->signal->rlim[limit].rlim_max); in task_rlimit_max()
/linux-4.4.14/arch/sparc/include/asm/
Datomic_64.h17 #define atomic_read(v) READ_ONCE((v)->counter)
18 #define atomic64_read(v) READ_ONCE((v)->counter)
Dbarrier_64.h68 typeof(*p) ___p1 = READ_ONCE(*p); \
/linux-4.4.14/arch/arm/include/asm/
Dspinlock.h123 return !arch_spin_value_unlocked(READ_ONCE(*lock)); in arch_spin_is_locked()
128 struct __raw_tickets tickets = READ_ONCE(lock->tickets); in arch_spin_is_contended()
Dbarrier.h82 typeof(*p) ___p1 = READ_ONCE(*p); \
Datomic.h30 #define atomic_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/x86/kernel/
Dkvm.c601 if (READ_ONCE(*ptr) != val) in kvm_wait()
645 old = READ_ONCE(zero_stats); in check_zero()
812 head = READ_ONCE(lock->tickets.head); in kvm_lock_spinning()
844 if (READ_ONCE(w->lock) == lock && in kvm_unlock_kick()
845 READ_ONCE(w->want) == ticket) { in kvm_unlock_kick()
Dprocess.c552 sp = READ_ONCE(p->thread.sp); in get_wchan()
/linux-4.4.14/kernel/sched/
Dcompletion.c277 if (!READ_ONCE(x->done)) in try_wait_for_completion()
300 if (!READ_ONCE(x->done)) in completion_done()
Dauto_group.h32 int enabled = READ_ONCE(sysctl_sched_autogroup_enabled); in autogroup_task_group()
Dstats.h178 if (!READ_ONCE(cputimer->running)) in cputimer_running()
Dauto_group.c142 if (!READ_ONCE(sysctl_sched_autogroup_enabled)) in autogroup_move_group()
Dwait.c604 unsigned long now = READ_ONCE(jiffies); in bit_wait_timeout()
616 unsigned long now = READ_ONCE(jiffies); in bit_wait_io_timeout()
Dfair.c843 unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size); in task_scan_min()
1824 seq = READ_ONCE(p->mm->numa_scan_seq); in task_numa_placement()
1968 tsk = READ_ONCE(cpu_rq(cpu)->curr); in task_numa_group()
2145 WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1); in reset_ptenuma_scan()
4371 unsigned long curr_jiffies = READ_ONCE(jiffies); in update_idle_cpu_load()
4393 unsigned long curr_jiffies = READ_ONCE(jiffies); in update_cpu_load_nohz()
4472 unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running); in cpu_avg_load_per_task()
6088 age_stamp = READ_ONCE(rq->age_stamp); in scale_rt_capacity()
6089 avg = READ_ONCE(rq->rt_avg); in scale_rt_capacity()
Dsched.h712 return READ_ONCE(rq->clock); in __rq_clock_broken()
Ddeadline.c1058 curr = READ_ONCE(rq->curr); /* unlocked access */ in select_task_rq_dl()
Drt.c1329 curr = READ_ONCE(rq->curr); /* unlocked access */ in select_task_rq_rt()
Dcore.c491 typeof(ti->flags) old, val = READ_ONCE(ti->flags); in set_nr_if_polling()
2902 unsigned long next, now = READ_ONCE(jiffies); in scheduler_tick_max_deferment()
/linux-4.4.14/kernel/time/
Dposix-cpu-timers.c236 if (!READ_ONCE(cputimer->running)) { in thread_group_cputimer()
609 if (READ_ONCE(tsk->signal->cputimer.running)) in posix_cpu_timers_can_stop_tick()
886 soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur); in check_thread_timers()
889 READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max); in check_thread_timers()
975 if (!READ_ONCE(tsk->signal->cputimer.running)) in check_process_timers()
1003 soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); in check_process_timers()
1007 READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max); in check_process_timers()
1167 if (READ_ONCE(sig->cputimer.running) && in fastpath_timer_check()
1168 !READ_ONCE(sig->cputimer.checking_timer)) { in fastpath_timer_check()
Dhrtimer.c1176 cpu_base = READ_ONCE(timer->base->cpu_base); in hrtimer_active()
1184 cpu_base != READ_ONCE(timer->base->cpu_base)); in hrtimer_active()
Dtimer.c470 site = READ_ONCE(timer->start_site); in timer_stats_account_timer()
/linux-4.4.14/arch/ia64/include/asm/
Dbarrier.h74 typeof(*p) ___p1 = READ_ONCE(*p); \
Datomic.h24 #define atomic_read(v) READ_ONCE((v)->counter)
25 #define atomic64_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/s390/include/asm/
Dbarrier.h50 typeof(*p) ___p1 = READ_ONCE(*p); \
/linux-4.4.14/net/sched/
Dact_gact.c129 int action = READ_ONCE(gact->tcf_action); in tcf_gact()
133 u32 ptype = READ_ONCE(gact->tcfg_ptype); in tcf_gact()
Dact_mirred.c153 retval = READ_ONCE(m->tcf_action); in tcf_mirred()
/linux-4.4.14/arch/alpha/include/asm/
Datomic.h20 #define atomic_read(v) READ_ONCE((v)->counter)
21 #define atomic64_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/s390/kvm/
Dgaccess.c279 old = READ_ONCE(*ic); in ipte_lock_simple()
282 old = READ_ONCE(*ic); in ipte_lock_simple()
301 old = READ_ONCE(*ic); in ipte_unlock_simple()
316 old = READ_ONCE(*ic); in ipte_lock_siif()
319 old = READ_ONCE(*ic); in ipte_lock_siif()
333 old = READ_ONCE(*ic); in ipte_unlock_siif()
Dinterrupt.c1369 u64 type = READ_ONCE(inti->type); in __inject_vm()
/linux-4.4.14/lib/
Dllist.c74 next = READ_ONCE(entry->next); in llist_del_first()
Dlockref.c13 old.lock_count = READ_ONCE(lockref->lock_count); \
/linux-4.4.14/include/asm-generic/
Dbarrier.h116 typeof(*p) ___p1 = READ_ONCE(*p); \
Datomic.h130 #define atomic_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/powerpc/include/asm/
Dbarrier.h84 typeof(*p) ___p1 = READ_ONCE(*p); \
Dkvm_book3s_64.h308 old_pte = READ_ONCE(*ptep); in kvmppc_read_update_linux_pte()
/linux-4.4.14/arch/metag/include/asm/
Dbarrier.h98 typeof(*p) ___p1 = READ_ONCE(*p); \
Datomic_lock1.h13 return READ_ONCE((v)->counter); in atomic_read()
/linux-4.4.14/arch/sh/include/asm/
Datomic.h17 #define atomic_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/powerpc/mm/
Dhugetlbpage.c716 pte = READ_ONCE(*ptep); in follow_huge_addr()
995 pgd = READ_ONCE(*pgdp); in __find_linux_pte_or_hugepte()
1017 pud = READ_ONCE(*pudp); in __find_linux_pte_or_hugepte()
1029 pmd = READ_ONCE(*pmdp); in __find_linux_pte_or_hugepte()
1082 pte = READ_ONCE(*ptep); in gup_hugepte()
Dhugepage-hash64.c36 pmd_t pmd = READ_ONCE(*pmdp); in __hash_page_thp()
/linux-4.4.14/arch/mips/include/asm/
Dbarrier.h141 typeof(*p) ___p1 = READ_ONCE(*p); \
Datomic.h33 #define atomic_read(v) READ_ONCE((v)->counter)
318 #define atomic64_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/hexagon/include/asm/
Datomic.h51 #define atomic_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/tools/include/linux/
Dcompiler.h112 #define READ_ONCE(x) \ macro
/linux-4.4.14/arch/h8300/include/asm/
Datomic.h14 #define atomic_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/mn10300/include/asm/
Datomic.h37 #define atomic_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/arm64/kernel/
Dalternative.c133 while (!READ_ONCE(patched)) in __apply_alternatives_multi_stop()
/linux-4.4.14/arch/xtensa/include/asm/
Datomic.h50 #define atomic_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/avr32/include/asm/
Datomic.h22 #define atomic_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/m32r/include/asm/
Datomic.h31 #define atomic_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/arc/include/asm/
Datomic.h20 #define atomic_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/arch/m68k/include/asm/
Datomic.h20 #define atomic_read(v) READ_ONCE((v)->counter)
/linux-4.4.14/mm/
Dgup.c1068 pte_t pte = READ_ONCE(*ptep); in gup_pte_range()
1259 pmd_t pmd = READ_ONCE(*pmdp); in gup_pmd_range()
1301 pud_t pud = READ_ONCE(*pudp); in gup_pud_range()
1358 pgd_t pgd = READ_ONCE(*pgdp); in __get_user_pages_fast()
Dksm.c546 kpfn = READ_ONCE(stable_node->kpfn); in get_ksm_page()
555 if (READ_ONCE(page->mapping) != expected_mapping) in get_ksm_page()
581 if (READ_ONCE(page->mapping) != expected_mapping) { in get_ksm_page()
588 if (READ_ONCE(page->mapping) != expected_mapping) { in get_ksm_page()
604 if (READ_ONCE(stable_node->kpfn) != kpfn) in get_ksm_page()
Dinternal.h255 #define page_order_unsafe(page) READ_ONCE(page_private(page))
Drmap.c464 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
508 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
515 root_anon_vma = READ_ONCE(anon_vma->root); in page_lock_anon_vma_read()
Dmemcontrol.c465 memcg = READ_ONCE(page->mem_cgroup); in page_cgroup_ino()
552 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
908 pos = READ_ONCE(iter->position); in mem_cgroup_iter()
1183 limit = READ_ONCE(memcg->memory.limit); in mem_cgroup_margin()
1189 limit = READ_ONCE(memcg->memsw.limit); in mem_cgroup_margin()
2373 kmemcg_id = READ_ONCE(memcg->kmemcg_id); in __memcg_kmem_get_cache()
4858 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); in mem_cgroup_can_attach()
5085 unsigned long low = READ_ONCE(memcg->low); in memory_low_show()
5115 unsigned long high = READ_ONCE(memcg->high); in memory_high_show()
5152 unsigned long max = READ_ONCE(memcg->memory.limit); in memory_max_show()
Dswap_state.c408 max_pages = 1 << READ_ONCE(page_cluster); in swapin_nr_pages()
Doom_kill.c497 struct mm_struct *t_mm = READ_ONCE(t->mm); in process_shares_mm()
Dmmap.c1167 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); in reusable_anon_vma()
2116 if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) in acct_stack_growth()
2124 limit = READ_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur); in acct_stack_growth()
Dslub.c2508 unlikely(tid != READ_ONCE(c->tid))); in slab_alloc_node()
2785 unlikely(tid != READ_ONCE(c->tid))); in slab_free()
4495 page = READ_ONCE(c->page); in show_slab_objects()
4510 page = READ_ONCE(c->partial); in show_slab_objects()
Dhuge_memory.c181 return READ_ONCE(huge_zero_page); in get_huge_zero_page()
200 return READ_ONCE(huge_zero_page); in get_huge_zero_page()
Dpage_alloc.c1883 batch = READ_ONCE(pcp->batch); in drain_zone_pages()
2082 unsigned long batch = READ_ONCE(pcp->batch); in free_hot_cold_page()
6507 word = READ_ONCE(bitmap[word_bitidx]); in set_pfnblock_flags_mask()
Dswapfile.c1357 count = READ_ONCE(si->swap_map[i]); in find_next_to_unuse()
Dmemory.c2944 nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT; in do_fault_around()
/linux-4.4.14/drivers/block/xen-blkback/
Dcommon.h411 dst->operation = READ_ONCE(src->operation); in blkif_get_x86_32_req()
459 dst->operation = READ_ONCE(src->operation); in blkif_get_x86_64_req()
Dblkback.c965 first_sect = READ_ONCE(segments[i].first_sect); in xen_blkbk_parse_indirect()
966 last_sect = READ_ONCE(segments[i].last_sect); in xen_blkbk_parse_indirect()
/linux-4.4.14/arch/sh/mm/
Dgup.c20 return READ_ONCE(*ptep); in gup_get_pte()
/linux-4.4.14/drivers/gpu/drm/vmwgfx/
Dvmwgfx_irq.c40 masked_status = status & READ_ONCE(dev_priv->irq_mask); in vmw_irq_handler()
Dvmwgfx_drv.h1220 return READ_ONCE(*addr); in vmw_mmio_read()
/linux-4.4.14/arch/parisc/include/asm/
Datomic.h70 return READ_ONCE((v)->counter); in atomic_read()
/linux-4.4.14/fs/fscache/
Doperation.c170 flags = READ_ONCE(object->flags); in fscache_submit_exclusive_op()
255 flags = READ_ONCE(object->flags); in fscache_submit_op()
/linux-4.4.14/net/ipv4/
Dtcp_minisocks.c373 u16 user_mss = READ_ONCE(tp->rx_opt.user_mss); in tcp_openreq_init_rwin()
382 window_clamp = READ_ONCE(tp->window_clamp); in tcp_openreq_init_rwin()
Dinet_diag.c220 ca_ops = READ_ONCE(icsk->icsk_ca_ops); in inet_sk_diag_fill()
236 ca_ops = READ_ONCE(icsk->icsk_ca_ops); in inet_sk_diag_fill()
Dinet_connection_sock.c599 defer_accept = READ_ONCE(queue->rskq_defer_accept); in reqsk_timer_handler()
Dtcp.c2706 rate = READ_ONCE(sk->sk_pacing_rate); in tcp_get_info()
2710 rate = READ_ONCE(sk->sk_max_pacing_rate); in tcp_get_info()
Dtcp_ipv4.c1464 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); in tcp_v4_early_demux()
Dudp.c2015 dst = READ_ONCE(sk->sk_rx_dst); in udp_v4_early_demux()
Dtcp_output.c2979 user_mss = READ_ONCE(tp->rx_opt.user_mss); in tcp_make_synack()
/linux-4.4.14/arch/mips/mm/
Dgup.c33 return READ_ONCE(*ptep); in gup_get_pte()
/linux-4.4.14/arch/x86/mm/
Dgup.c18 return READ_ONCE(*ptep); in gup_get_pte()
/linux-4.4.14/kernel/trace/
Dring_buffer_benchmark.c190 while (!READ_ONCE(reader_finish)) { in ring_buffer_consumer()
/linux-4.4.14/arch/powerpc/perf/
Dcallchain.c139 pte = READ_ONCE(*ptep); in read_user_stack_slow()
/linux-4.4.14/drivers/tty/
Dtty_buffer.c454 tty = READ_ONCE(port->itty); in flush_to_ldisc()
/linux-4.4.14/drivers/gpu/drm/
Ddrm_sysfs.c235 dpms = READ_ONCE(connector->dpms); in dpms_show()
/linux-4.4.14/include/net/
Dip.h283 u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc); in ip_dont_fragment()
/linux-4.4.14/kernel/events/
Dring_buffer.c144 tail = READ_ONCE(rb->user_page->data_tail); in perf_output_begin()
/linux-4.4.14/drivers/net/ethernet/sfc/
Dnet_driver.h490 unsigned long prev, old = READ_ONCE(channel->busy_poll_state); in efx_channel_lock_napi()
/linux-4.4.14/kernel/bpf/
Dcore.c459 prog = READ_ONCE(array->ptrs[index]); in __bpf_prog_run()
/linux-4.4.14/fs/
Duserfaultfd.c220 _pmd = READ_ONCE(*pmd); in userfaultfd_must_wait()
Ddcache.c279 flags = READ_ONCE(dentry->d_flags); in __d_set_inode_and_type()
287 unsigned flags = READ_ONCE(dentry->d_flags); in __d_clear_type_and_inode()
Dexec.c1305 mode = READ_ONCE(inode->i_mode); in bprm_fill_uid()
/linux-4.4.14/drivers/net/ethernet/broadcom/bnx2x/
Dbnx2x.h642 unsigned long prev, old = READ_ONCE(fp->busy_poll_state); in bnx2x_fp_lock_napi()
686 return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL; in bnx2x_fp_ll_polling()
/linux-4.4.14/fs/jbd2/
Dtransaction.c1037 jh = READ_ONCE(bh->b_private); in jbd2_write_access_granted()
/linux-4.4.14/drivers/net/ethernet/freescale/
Dfec_main.c1221 while (bdp != READ_ONCE(txq->cur_tx)) { in fec_enet_tx_queue()
1224 status = READ_ONCE(bdp->cbd_sc); in fec_enet_tx_queue()
/linux-4.4.14/arch/x86/kernel/cpu/mcheck/
Dmce.c1903 if (READ_ONCE(mcelog.next)) in mce_chrdev_poll()
/linux-4.4.14/arch/x86/kernel/cpu/
Dperf_event.c892 READ_ONCE(cpuc->excl_cntrs->exclusive_present)) in x86_schedule_events()
/linux-4.4.14/net/ipv6/
Dtcp_ipv6.c1556 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); in tcp_v6_early_demux()
/linux-4.4.14/net/packet/
Daf_packet.c1441 unsigned int num = READ_ONCE(f->num_members); in packet_rcv_fanout()
/linux-4.4.14/fs/ext4/
Dinode.c682 old_state = READ_ONCE(bh->b_state); in ext4_update_bh_state()