Searched refs: READ_ONCE (Results 1 – 52 of 52) sorted by relevance

/linux-4.1.27/arch/x86/include/asm/
spinlock.h
114 inc.head = READ_ONCE(lock->tickets.head); in arch_spin_lock()
131 old.tickets = READ_ONCE(lock->tickets); in arch_spin_trylock()
162 struct __raw_tickets tmp = READ_ONCE(lock->tickets); in arch_spin_is_locked()
169 struct __raw_tickets tmp = READ_ONCE(lock->tickets); in arch_spin_is_contended()
184 __ticket_t head = READ_ONCE(lock->tickets.head); in arch_spin_unlock_wait()
187 struct __raw_tickets tmp = READ_ONCE(lock->tickets); in arch_spin_unlock_wait()
/linux-4.1.27/kernel/locking/
rwsem-xadd.c
282 long old, count = READ_ONCE(sem->count); in rwsem_try_write_lock_unqueued()
307 owner = READ_ONCE(sem->owner); in rwsem_can_spin_on_owner()
309 long count = READ_ONCE(sem->count); in rwsem_can_spin_on_owner()
352 if (READ_ONCE(sem->owner)) in rwsem_spin_on_owner()
360 count = READ_ONCE(sem->count); in rwsem_spin_on_owner()
379 owner = READ_ONCE(sem->owner); in rwsem_optimistic_spin()
453 count = READ_ONCE(sem->count); in rwsem_down_write_failed()
mcs_spinlock.h
94 struct mcs_spinlock *next = READ_ONCE(node->next); in mcs_spin_unlock()
103 while (!(next = READ_ONCE(node->next))) in mcs_spin_unlock()
osq_lock.c
112 while (!READ_ONCE(node->locked)) { in osq_lock()
151 prev = READ_ONCE(node->prev); in osq_lock()
mutex.c
263 owner = READ_ONCE(lock->owner); in mutex_can_spin_on_owner()
337 if (READ_ONCE(ww->ctx)) in mutex_optimistic_spin()
345 owner = READ_ONCE(lock->owner); in mutex_optimistic_spin()
484 struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx); in __ww_mutex_lock_check_stamp()
516 if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) in __mutex_lock_common()
/linux-4.1.27/arch/x86/xen/
spinlock.c
44 u8 old = READ_ONCE(zero_stats); in check_zero()
170 head = READ_ONCE(lock->tickets.head); in xen_lock_spinning()
212 if (READ_ONCE(w->lock) == lock && in xen_unlock_kick()
213 READ_ONCE(w->want) == next) { in xen_unlock_kick()
p2m.c
561 p2m_pfn = pte_pfn(READ_ONCE(*ptep)); in alloc_p2m()
/linux-4.1.27/arch/arm64/include/asm/
spinlock.h
102 return !arch_spin_value_unlocked(READ_ONCE(*lock)); in arch_spin_is_locked()
107 arch_spinlock_t lockval = READ_ONCE(*lock); in arch_spin_is_contended()
/linux-4.1.27/arch/powerpc/kvm/
book3s_hv_rm_xics.c
163 old_state = new_state = READ_ONCE(icp->state); in icp_rm_try_to_deliver()
348 old_state = new_state = READ_ONCE(icp->state); in icp_rm_down_cppr()
407 old_state = new_state = READ_ONCE(icp->state); in kvmppc_rm_h_xirr()
473 old_state = new_state = READ_ONCE(icp->state); in kvmppc_rm_h_ipi()
548 old_state = new_state = READ_ONCE(icp->state); in kvmppc_rm_h_cppr()
book3s_xics.c
343 old_state = new_state = READ_ONCE(icp->state); in icp_try_to_deliver()
533 old_state = new_state = READ_ONCE(icp->state); in icp_down_cppr()
588 old_state = new_state = READ_ONCE(icp->state); in kvmppc_h_xirr()
655 old_state = new_state = READ_ONCE(icp->state); in kvmppc_h_ipi()
700 state = READ_ONCE(icp->state); in kvmppc_h_ipoll()
742 old_state = new_state = READ_ONCE(icp->state); in kvmppc_h_cppr()
925 state.raw = READ_ONCE(icp->state.raw); in xics_debug_show()
1134 old_state = READ_ONCE(icp->state); in kvmppc_xics_set_icp()
e500_mmu_host.c
481 pte_t pte = READ_ONCE(*ptep); in kvmppc_e500_shadow_map()
/linux-4.1.27/include/linux/
seqlock.h
111 ret = READ_ONCE(s->sequence); in __read_seqcount_begin()
130 unsigned ret = READ_ONCE(s->sequence); in raw_read_seqcount()
182 unsigned ret = READ_ONCE(s->sequence); in raw_seqcount_begin()
compiler.h
249 #define READ_ONCE(x) \  (macro definition)
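
(Aside: every hit in this list expands the macro defined at the compiler.h line above. As a minimal, hypothetical sketch of the idea only, and not the literal linux-4.1.27 definition, which dispatches on sizeof() through a helper, READ_ONCE() boils down to a volatile-qualified load:)

/* Simplified stand-in for illustration; the real macro also routes
 * sizes other than 1/2/4/8 bytes through a copy helper. */
#define READ_ONCE_SKETCH(x)  (*(const volatile typeof(x) *)&(x))

/* The volatile cast makes the compiler emit exactly one load at this
 * point and keeps it from caching, duplicating, or re-reading the
 * value, which is why the call sites listed here use it for lockless
 * reads of ticket counters, owners, and page-table entries, e.g.:
 *
 *     pte_t pte = READ_ONCE(*ptep);
 */
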
/linux-4.1.27/arch/arm/include/asm/
spinlock.h
123 return !arch_spin_value_unlocked(READ_ONCE(*lock)); in arch_spin_is_locked()
128 struct __raw_tickets tickets = READ_ONCE(lock->tickets); in arch_spin_is_contended()
/linux-4.1.27/kernel/sched/
completion.c
277 if (!READ_ONCE(x->done)) in try_wait_for_completion()
300 if (!READ_ONCE(x->done)) in completion_done()
/linux-4.1.27/arch/x86/kernel/
process_64.c
554 sp = READ_ONCE(p->thread.sp); in get_wchan()
558 fp = READ_ONCE(*(unsigned long *)sp); in get_wchan()
562 ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long))); in get_wchan()
565 fp = READ_ONCE(*(unsigned long *)fp); in get_wchan()
kvm.c
612 old = READ_ONCE(zero_stats); in check_zero()
779 head = READ_ONCE(lock->tickets.head); in kvm_lock_spinning()
811 if (READ_ONCE(w->lock) == lock && in kvm_unlock_kick()
812 READ_ONCE(w->want) == ticket) { in kvm_unlock_kick()
/linux-4.1.27/arch/s390/kvm/
gaccess.c
279 old = READ_ONCE(*ic); in ipte_lock_simple()
282 old = READ_ONCE(*ic); in ipte_lock_simple()
301 old = READ_ONCE(*ic); in ipte_unlock_simple()
316 old = READ_ONCE(*ic); in ipte_lock_siif()
319 old = READ_ONCE(*ic); in ipte_lock_siif()
333 old = READ_ONCE(*ic); in ipte_unlock_siif()
interrupt.c
1352 u64 type = READ_ONCE(inti->type); in __inject_vm()
/linux-4.1.27/arch/powerpc/mm/
hugetlbpage.c
701 pte = READ_ONCE(*ptep); in follow_huge_addr()
985 pgd = READ_ONCE(*pgdp); in __find_linux_pte_or_hugepte()
1007 pud = READ_ONCE(*pudp); in __find_linux_pte_or_hugepte()
1019 pmd = READ_ONCE(*pmdp); in __find_linux_pte_or_hugepte()
1065 pte = READ_ONCE(*ptep); in gup_hugepte()
hugepage-hash64.c
36 pmd_t pmd = READ_ONCE(*pmdp); in __hash_page_thp()
/linux-4.1.27/lib/
lockref.c
21 old.lock_count = READ_ONCE(lockref->lock_count); \
/linux-4.1.27/mm/
gup.c
1022 pte_t pte = READ_ONCE(*ptep); in gup_pte_range()
1213 pmd_t pmd = READ_ONCE(*pmdp); in gup_pmd_range()
1255 pud_t pud = READ_ONCE(*pudp); in gup_pud_range()
1312 pgd_t pgd = READ_ONCE(*pgdp); in __get_user_pages_fast()
ksm.c
545 kpfn = READ_ONCE(stable_node->kpfn); in get_ksm_page()
554 if (READ_ONCE(page->mapping) != expected_mapping) in get_ksm_page()
580 if (READ_ONCE(page->mapping) != expected_mapping) { in get_ksm_page()
587 if (READ_ONCE(page->mapping) != expected_mapping) { in get_ksm_page()
603 if (READ_ONCE(stable_node->kpfn) != kpfn) in get_ksm_page()
internal.h
234 #define page_order_unsafe(page) READ_ONCE(page_private(page))
swap_state.c
393 max_pages = 1 << READ_ONCE(page_cluster); in swapin_nr_pages()
rmap.c
459 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
503 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
510 root_anon_vma = READ_ONCE(anon_vma->root); in page_lock_anon_vma_read()
memcontrol.c
677 unsigned long soft_limit = READ_ONCE(memcg->soft_limit); in soft_limit_excess()
1045 pos = READ_ONCE(iter->position); in mem_cgroup_iter()
1362 limit = READ_ONCE(memcg->memory.limit); in mem_cgroup_margin()
1368 limit = READ_ONCE(memcg->memsw.limit); in mem_cgroup_margin()
2642 kmemcg_id = READ_ONCE(memcg->kmemcg_id); in __memcg_kmem_get_cache()
5014 move_flags = READ_ONCE(memcg->move_charge_at_immigrate); in mem_cgroup_can_attach()
5248 unsigned long low = READ_ONCE(memcg->low); in memory_low_show()
5278 unsigned long high = READ_ONCE(memcg->high); in memory_high_show()
5314 unsigned long max = READ_ONCE(memcg->memory.limit); in memory_max_show()
mmap.c
1154 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); in reusable_anon_vma()
2107 if (actual_size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur)) in acct_stack_growth()
2115 limit = READ_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur); in acct_stack_growth()
slub.c
2451 unlikely(tid != READ_ONCE(c->tid))); in slab_alloc_node()
2721 unlikely(tid != READ_ONCE(c->tid))); in slab_free()
4280 page = READ_ONCE(c->page); in show_slab_objects()
4295 page = READ_ONCE(c->partial); in show_slab_objects()
huge_memory.c
184 return READ_ONCE(huge_zero_page); in get_huge_zero_page()
203 return READ_ONCE(huge_zero_page); in get_huge_zero_page()
page_alloc.c
1402 batch = READ_ONCE(pcp->batch); in drain_zone_pages()
1601 unsigned long batch = READ_ONCE(pcp->batch); in free_hot_cold_page()
6240 word = READ_ONCE(bitmap[word_bitidx]); in set_pfnblock_flags_mask()
swapfile.c
1315 count = READ_ONCE(si->swap_map[i]); in find_next_to_unuse()
memory.c
2886 nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT; in do_fault_around()
/linux-4.1.27/arch/sh/mm/
gup.c
20 return READ_ONCE(*ptep); in gup_get_pte()
/linux-4.1.27/arch/powerpc/include/asm/
kvm_book3s_64.h
308 old_pte = READ_ONCE(*ptep); in kvmppc_read_update_linux_pte()
/linux-4.1.27/arch/mips/mm/
gup.c
33 return READ_ONCE(*ptep); in gup_get_pte()
/linux-4.1.27/arch/x86/mm/
gup.c
18 return READ_ONCE(*ptep); in gup_get_pte()
/linux-4.1.27/arch/powerpc/perf/
callchain.c
139 pte = READ_ONCE(*ptep); in read_user_stack_slow()
/linux-4.1.27/drivers/gpu/drm/
drm_sysfs.c
240 dpms = READ_ONCE(connector->dpms); in dpms_show()
/linux-4.1.27/net/ipv4/
inet_diag.c
216 ca_ops = READ_ONCE(icsk->icsk_ca_ops); in inet_sk_diag_fill()
232 ca_ops = READ_ONCE(icsk->icsk_ca_ops); in inet_sk_diag_fill()
inet_connection_sock.c
648 defer_accept = READ_ONCE(queue->rskq_defer_accept); in reqsk_timer_handler()
tcp.c
2669 rate = READ_ONCE(sk->sk_pacing_rate); in tcp_get_info()
2673 rate = READ_ONCE(sk->sk_max_pacing_rate); in tcp_get_info()
tcp_ipv4.c
1477 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); in tcp_v4_early_demux()
udp.c
2000 dst = READ_ONCE(sk->sk_rx_dst); in udp_v4_early_demux()
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/
bnx2x.h
631 unsigned long prev, old = READ_ONCE(fp->busy_poll_state); in bnx2x_fp_lock_napi()
675 return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL; in bnx2x_fp_ll_polling()
/linux-4.1.27/fs/
dcache.c
279 flags = READ_ONCE(dentry->d_flags); in __d_set_inode_and_type()
287 unsigned flags = READ_ONCE(dentry->d_flags); in __d_clear_type_and_inode()
exec.c
1299 mode = READ_ONCE(inode->i_mode); in bprm_fill_uid()
/linux-4.1.27/arch/x86/kernel/cpu/
perf_event.c
874 READ_ONCE(cpuc->excl_cntrs->exclusive_present)) in x86_schedule_events()
/linux-4.1.27/net/ipv6/
tcp_ipv6.c
1544 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); in tcp_v6_early_demux()
/linux-4.1.27/net/packet/
af_packet.c
1342 unsigned int num = READ_ONCE(f->num_members); in packet_rcv_fanout()
/linux-4.1.27/fs/ext4/
inode.c
681 old_state = READ_ONCE(bh->b_state); in ext4_update_bh_state()