smp_load_acquire 13 arch/arm/include/asm/mcs_spinlock.h while (!(smp_load_acquire(lock))) \
smp_load_acquire 148 arch/arm64/include/asm/barrier.h VAL = smp_load_acquire(__PTR); \
smp_load_acquire 3958 arch/x86/kvm/mmu.c if (!smp_load_acquire(&sp->unsync) &&
smp_load_acquire 3959 arch/x86/kvm/mmu.c !smp_load_acquire(&sp->unsync_children))
smp_load_acquire 92 drivers/infiniband/core/uverbs_main.c struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);
smp_load_acquire 234 drivers/infiniband/hw/mlx5/odp.c if (smp_load_acquire(&imr->live)) {
smp_load_acquire 812 drivers/infiniband/hw/mlx5/odp.c if (!smp_load_acquire(&mr->live) || !mr->ibmr.pd) {
smp_load_acquire 1680 drivers/infiniband/hw/mlx5/odp.c if (!smp_load_acquire(&mr->live)) {
smp_load_acquire 1971 drivers/infiniband/sw/rdmavt/qp.c slast = smp_load_acquire(&qp->s_last);
smp_load_acquire 1019 drivers/input/keyboard/applespi.c input = smp_load_acquire(&applespi->touchpad_input_dev);
smp_load_acquire 313 drivers/lightnvm/pblk-rb.c sync = smp_load_acquire(&rb->sync);
smp_load_acquire 747 drivers/lightnvm/pblk-rb.c flush_point = smp_load_acquire(&rb->flush_point);
smp_load_acquire 752 drivers/lightnvm/pblk-rb.c sync = smp_load_acquire(&rb->sync);
smp_load_acquire 60 drivers/media/dvb-core/dvb_ringbuffer.c return (rbuf->pread == smp_load_acquire(&rbuf->pwrite));
smp_load_acquire 90 drivers/media/dvb-core/dvb_ringbuffer.c avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread;
smp_load_acquire 106 drivers/media/dvb-core/dvb_ringbuffer.c smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite));
smp_load_acquire 334 drivers/net/ethernet/cadence/macb_ptp.c head = smp_load_acquire(&queue->tx_ts_head);
smp_load_acquire 565 drivers/net/ethernet/hisilicon/hns3/hns3_enet.h int begin = smp_load_acquire(&ring->next_to_clean);
smp_load_acquire 191 drivers/net/ethernet/intel/e1000/e1000.h unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \
smp_load_acquire 87 drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c sign = smp_load_acquire(&clock_info->sign);
smp_load_acquire 100 drivers/net/ethernet/qlogic/qed/qed_spq.c if (smp_load_acquire(&comp_done->done) == 1) { /* ^^^ */
smp_load_acquire 1721 drivers/tty/n_tty.c size_t tail = smp_load_acquire(&ldata->read_tail);
smp_load_acquire 1971 drivers/tty/n_tty.c size_t head = smp_load_acquire(&ldata->commit_head);
smp_load_acquire 2032 drivers/tty/n_tty.c n = min(*nr + 1, smp_load_acquire(&ldata->canon_head) - ldata->read_tail);
smp_load_acquire 231 drivers/tty/tty_buffer.c while ((next = smp_load_acquire(&buf->head->next)) != NULL) {
smp_load_acquire 520 drivers/tty/tty_buffer.c next = smp_load_acquire(&head->next);
smp_load_acquire 524 drivers/tty/tty_buffer.c count = smp_load_acquire(&head->commit) - head->read;
smp_load_acquire 196 drivers/tty/tty_ldsem.c if (!smp_load_acquire(&waiter.task))
smp_load_acquire 607 drivers/usb/gadget/function/f_mass_storage.c bh && smp_load_acquire(&bh->state) >=
smp_load_acquire 611 drivers/usb/gadget/function/f_mass_storage.c bh && smp_load_acquire(&bh->state) >=
smp_load_acquire 292 fs/afs/cell.c state = smp_load_acquire(&cell->state); /* vs error */
smp_load_acquire 46 fs/afs/vl_rotate.c dns_lookup_count = smp_load_acquire(&cell->dns_lookup_count);
smp_load_acquire 53 fs/afs/vl_rotate.c smp_load_acquire(&cell->dns_lookup_count)
smp_load_acquire 40 fs/d_path.c const char *dname = smp_load_acquire(&name->name); /* ^^^ */
smp_load_acquire 2528 fs/dcache.c seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
smp_load_acquire 598 fs/eventpoll.c whead = smp_load_acquire(&pwq->whead);
smp_load_acquire 759 fs/io_uring.c return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
smp_load_acquire 2661 fs/io_uring.c if (head == smp_load_acquire(&rings->sq.tail))
smp_load_acquire 254 fs/locks.c ctx = smp_load_acquire(&inode->i_flctx);
smp_load_acquire 273 fs/locks.c ctx = smp_load_acquire(&inode->i_flctx);
smp_load_acquire 783 fs/locks.c if (!smp_load_acquire(&waiter->fl_blocker) &&
smp_load_acquire 958 fs/locks.c ctx = smp_load_acquire(&inode->i_flctx);
smp_load_acquire 1414 fs/locks.c ctx = smp_load_acquire(&inode->i_flctx);
smp_load_acquire 1618 fs/locks.c ctx = smp_load_acquire(&inode->i_flctx);
smp_load_acquire 1723 fs/locks.c ctx = smp_load_acquire(&inode->i_flctx);
smp_load_acquire 1769 fs/locks.c ctx = smp_load_acquire(&inode->i_flctx);
smp_load_acquire 1955 fs/locks.c ctx = smp_load_acquire(&inode->i_flctx);
smp_load_acquire 2704 fs/locks.c ctx = smp_load_acquire(&inode->i_flctx);
smp_load_acquire 2777 fs/locks.c ctx = smp_load_acquire(&locks_inode(filp)->i_flctx);
smp_load_acquire 2946 fs/locks.c ctx = smp_load_acquire(&inode->i_flctx);
smp_load_acquire 156 include/asm-generic/barrier.h #ifndef smp_load_acquire
smp_load_acquire 183 include/asm-generic/barrier.h #ifndef smp_load_acquire
smp_load_acquire 82 include/linux/atomic-fallback.h return smp_load_acquire(&(v)->counter);
smp_load_acquire 1192 include/linux/atomic-fallback.h return smp_load_acquire(&(v)->counter);
smp_load_acquire 378 include/linux/backing-dev.h cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
smp_load_acquire 437 include/linux/key.h return smp_load_acquire(&key->state);
smp_load_acquire 318 include/net/inet_sock.h return smp_load_acquire(&sk->sk_state);
smp_load_acquire 1268 include/net/tcp.h return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
smp_load_acquire 455 include/net/tls.h (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
smp_load_acquire 68 include/rdma/rdmavt_cq.h #define RDMA_READ_UAPI_ATOMIC(member) smp_load_acquire(&(member).val)
smp_load_acquire 404 ipc/sem.c if (!smp_load_acquire(&sma->use_global_lock)) {
smp_load_acquire 6828 kernel/events/core.c if (!smp_load_acquire(&event->ctx))
smp_load_acquire 169 kernel/locking/osq_lock.c if (smp_load_acquire(&node->locked))
smp_load_acquire 67 kernel/locking/percpu-rwsem.c if (likely(!smp_load_acquire(&sem->readers_block)))
smp_load_acquire 1090 kernel/locking/rwsem.c if (!smp_load_acquire(&waiter.task)) {
smp_load_acquire 846 kernel/rcu/rcutorture.c if (!smp_load_acquire(&rbi.inflight)) {
smp_load_acquire 866 kernel/rcu/rcutorture.c if (!failed && smp_load_acquire(&rbi.inflight))
smp_load_acquire 892 kernel/rcu/rcutorture.c while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
smp_load_acquire 2022 kernel/rcu/rcutorture.c smp_load_acquire(&barrier_phase)) != lastphase ||
smp_load_acquire 232 kernel/rcu/srcutree.c if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
smp_load_acquire 1107 kernel/rcu/srcutree.c idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
smp_load_acquire 2236 kernel/rcu/tree.c if (smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
smp_load_acquire 77 kernel/rcu/tree_exp.h int ncpus = smp_load_acquire(&rcu_state.ncpus); /* Order vs. locking. */
smp_load_acquire 826 kernel/rcu/tree_plugin.h if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs))) {
smp_load_acquire 849 kernel/rcu/tree_plugin.h if (!smp_load_acquire(this_cpu_ptr(&rcu_data.rcu_urgent_qs)))
smp_load_acquire 2126 kernel/rcu/tree_plugin.h if (!smp_load_acquire(&rdp->nocb_cb_sleep)) { /* VVV */
smp_load_acquire 560 kernel/taskstats.c stats = smp_load_acquire(&sig->stats);
smp_load_acquire 57 lib/llist.c entry = smp_load_acquire(&head->first);
smp_load_acquire 82 lib/stackdepot.c if (smp_load_acquire(&next_slab_inited))
smp_load_acquire 245 lib/stackdepot.c found = find_stack(smp_load_acquire(bucket), entries,
smp_load_acquire 258 lib/stackdepot.c if (unlikely(!smp_load_acquire(&next_slab_inited))) {
smp_load_acquire 617 net/ipv4/tcp_bpf.c unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
smp_load_acquire 96 net/netfilter/nf_conntrack_core.c if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
smp_load_acquire 277 net/rxrpc/call_accept.c call_head = smp_load_acquire(&b->call_backlog_head);
smp_load_acquire 280 net/rxrpc/call_accept.c conn_head = smp_load_acquire(&b->conn_backlog_head);
smp_load_acquire 284 net/rxrpc/call_accept.c peer_head = smp_load_acquire(&b->peer_backlog_head);
smp_load_acquire 85 net/rxrpc/output.c top = smp_load_acquire(&call->rx_top);
smp_load_acquire 187 net/rxrpc/recvmsg.c top = smp_load_acquire(&call->rx_top);
smp_load_acquire 339 net/rxrpc/recvmsg.c while (top = smp_load_acquire(&call->rx_top),
smp_load_acquire 757 net/rxrpc/recvmsg.c top = smp_load_acquire(&call->rx_top);
smp_load_acquire 300 net/sunrpc/xprtmultipath.c old = smp_load_acquire(cursor);
smp_load_acquire 619 net/tls/tls_main.c unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
smp_load_acquire 629 net/tls/tls_main.c unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv4_prot))) {
smp_load_acquire 1494 net/unix/af_unix.c addr = smp_load_acquire(&unix_sk(sk)->addr);
smp_load_acquire 2034 net/unix/af_unix.c struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
smp_load_acquire 2542 net/unix/af_unix.c if (!smp_load_acquire(&unix_sk(sk)->addr))
smp_load_acquire 17 net/unix/diag.c struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
smp_load_acquire 352 security/lsm_audit.c addr = smp_load_acquire(&u->addr);
smp_load_acquire 134 security/selinux/ss/sidtab.c u32 count = smp_load_acquire(&s->count);
smp_load_acquire 256 security/selinux/ss/sidtab.c count = smp_load_acquire(&s->count);
smp_load_acquire 58 tools/include/asm/barrier.h #ifndef smp_load_acquire
smp_load_acquire 59 tools/include/linux/ring_buffer.h return smp_load_acquire(&base->data_head);
smp_load_acquire 298 virt/kvm/kvm_main.c long dirty_count = smp_load_acquire(&kvm->tlbs_dirty);
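
Most of the call sites above follow the same publish/consume idiom: a writer initializes some data and then sets a pointer, index, or flag with smp_store_release(); a reader loads that flag with smp_load_acquire() and, if it sees the published value, is guaranteed to also see the data written before the release (compare the read_tail/commit_head indices in drivers/tty/n_tty.c, waiter.task in kernel/locking/rwsem.c, or mr->live in drivers/infiniband/hw/mlx5/odp.c). The sketch below is purely illustrative and is not taken from any file listed here; the demo_msg/demo_publish/demo_try_read names are made up.

/*
 * Minimal sketch of the release/acquire pairing used by the entries above.
 * The writer fills in the payload first and only then publishes the flag
 * with release semantics; a reader that observes ready != 0 via the
 * acquire load is ordered after the payload store and reads valid data.
 */
#include <linux/types.h>
#include <asm/barrier.h>

struct demo_msg {
	int	payload;
	int	ready;		/* 0 until the payload has been published */
};

/* Writer side: store the data, then publish it. */
static void demo_publish(struct demo_msg *m, int val)
{
	m->payload = val;
	smp_store_release(&m->ready, 1);	/* pairs with the acquire below */
}

/* Reader side: the acquire load orders the subsequent payload read. */
static bool demo_try_read(struct demo_msg *m, int *val)
{
	if (!smp_load_acquire(&m->ready))
		return false;			/* nothing published yet */
	*val = m->payload;			/* guaranteed to see the writer's value */
	return true;
}

The same pairing generalizes to the ring-buffer entries in the list (e.g. drivers/media/dvb-core/dvb_ringbuffer.c or tools/include/linux/ring_buffer.h): the head index takes the role of the ready flag, and the slots written before the release of the head take the role of the payload.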