atomic_sub_return 53 arch/arm64/include/asm/atomic.h ATOMIC_FETCH_OPS(atomic_sub_return)
atomic_sub_return 185 arch/csky/include/asm/atomic.h #define atomic_sub_return atomic_sub_return
atomic_sub_return 219 arch/ia64/include/asm/atomic.h #define atomic_sub(i,v) (void)atomic_sub_return((i), (v))
atomic_sub_return 87 arch/openrisc/include/asm/atomic.h #define atomic_sub_return atomic_sub_return
atomic_sub_return 146 arch/riscv/include/asm/atomic.h #define atomic_sub_return atomic_sub_return
atomic_sub_return 190 arch/x86/kernel/cpu/mce/intel.c if (!atomic_sub_return(1, &cmci_storm_on_cpus))
atomic_sub_return 333 drivers/block/drbd/drbd_receiver.c i = atomic_sub_return(i, a);
atomic_sub_return 462 drivers/crypto/qat/qat_common/adf_dev_mgr.c if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
atomic_sub_return 152 drivers/crypto/virtio/virtio_crypto_mgr.c if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)
atomic_sub_return 1625 drivers/gpu/drm/i915/i915_gem_gtt.c if (!atomic_sub_return(count, &pt->used))
atomic_sub_return 2743 drivers/infiniband/ulp/srpt/ib_srpt.c if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
atomic_sub_return 2847 drivers/infiniband/ulp/srpt/ib_srpt.c if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
atomic_sub_return 179 drivers/lightnvm/pblk-rl.c free_blocks = atomic_sub_return(blk_in_line,
atomic_sub_return 340 drivers/md/bcache/btree.c !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
atomic_sub_return 46 drivers/md/bcache/closure.c closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
atomic_sub_return 207 drivers/md/bcache/request.c if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
atomic_sub_return 390 drivers/misc/mic/scif/scif_fence.c if (!atomic_sub_return(1, &ep->rma_info.fence_refcount))
atomic_sub_return 322 drivers/misc/mic/scif/scif_rma.c j = atomic_sub_return(1, &pinned_pages->ref_count);
atomic_sub_return 1438 drivers/misc/mic/scif/scif_rma.c ret = atomic_sub_return(1, &pinned_pages->ref_count);
atomic_sub_return 752 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
atomic_sub_return 2955 drivers/net/ethernet/ibm/ibmvnic.c if (atomic_sub_return(num_entries, &scrq->used) <=
atomic_sub_return 199 drivers/net/wireless/ath/carl9170/tx.c if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
atomic_sub_return 1228 drivers/net/wireless/st/cw1200/wsm.c tx_lock = atomic_sub_return(1, &priv->tx_lock);
atomic_sub_return 735 drivers/nvme/target/rdma.c if (unlikely(atomic_sub_return(1 + rsp->n_rdma,
atomic_sub_return 526 drivers/s390/cio/qdio_main.c if (atomic_sub_return(count, &q->nr_buf_used) == 0)
atomic_sub_return 533 drivers/s390/cio/qdio_main.c if (atomic_sub_return(count, &q->nr_buf_used) == 0)
atomic_sub_return 2308 drivers/staging/isdn/gigaset/capi.c while (atomic_sub_return(1, &iif->sendqlen) > 0) {
atomic_sub_return 199 drivers/tty/tty_buffer.c WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0);
atomic_sub_return 251 drivers/vhost/net.c int r = atomic_sub_return(1, &ubufs->refcount);
atomic_sub_return 303 drivers/vhost/vsock.c new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
atomic_sub_return 352 drivers/video/fbdev/sh_mobile_lcdcfb.c if (atomic_sub_return(1, &priv->hw_usecnt) == -1) {
atomic_sub_return 71 drivers/w1/slaves/w1_therm.c int refcnt = atomic_sub_return(1, THERM_REFCNT(sl->family_data));
atomic_sub_return 789 drivers/w1/w1.c refcnt = atomic_sub_return(1, &sl->refcnt);
atomic_sub_return 64 drivers/w1/w1_netlink.c if (atomic_sub_return(1, &block->refcnt) == 0) {
atomic_sub_return 1180 fs/btrfs/inode.c if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
atomic_sub_return 162 include/asm-generic/atomic-instrumented.h #define atomic_sub_return atomic_sub_return
atomic_sub_return 606 include/asm-generic/atomic-long.h return atomic_sub_return(i, v);
atomic_sub_return 117 include/asm-generic/atomic.h #ifndef atomic_sub_return
atomic_sub_return 192 include/asm-generic/atomic.h atomic_sub_return(i, v);
atomic_sub_return 181 include/linux/atomic-fallback.h #define atomic_sub_return_acquire atomic_sub_return
atomic_sub_return 182 include/linux/atomic-fallback.h #define atomic_sub_return_release atomic_sub_return
atomic_sub_return 183 include/linux/atomic-fallback.h #define atomic_sub_return_relaxed atomic_sub_return
atomic_sub_return 207 include/linux/atomic-fallback.h #ifndef atomic_sub_return
atomic_sub_return 217 include/linux/atomic-fallback.h #define atomic_sub_return atomic_sub_return
atomic_sub_return 455 include/linux/atomic-fallback.h return atomic_sub_return(1, v);
atomic_sub_return 1016 include/linux/atomic-fallback.h return atomic_sub_return(i, v) == 0;
atomic_sub_return 1478 kernel/bpf/syscall.c WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
atomic_sub_return 928 kernel/module.c ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
atomic_sub_return 431 net/9p/trans_rdma.c if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {
atomic_sub_return 600 net/core/skbuff.c atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
atomic_sub_return 229 net/rds/ib_send.c if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
atomic_sub_return 587 net/sched/sch_taprio.c atomic_sub_return(len, &entry->budget) < 0) {
atomic_sub_return 323 net/sunrpc/xprtrdma/svc_rdma_rw.c if (atomic_sub_return(cc->cc_sqecount,
atomic_sub_return 276 net/vmw_vsock/virtio_transport.c new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
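Most of the call sites above follow one pattern: atomic_sub_return() subtracts and returns the new (post-subtraction) value, so callers can both drop a count and test it in a single atomic step, typically releasing a resource when the result reaches zero (the ref_count and refcnt hits) or detecting exhaustion when it goes negative (the budget and credit hits). The sketch below is a minimal userspace C11 analogue of the refcount-drop shape, not kernel code: <stdatomic.h> stands in for the kernel's atomic_t API, and struct block, block_put(), and the reference counts chosen in main() are hypothetical.

/*
 * Userspace sketch of the dominant pattern above, e.g.
 * drivers/w1/w1_netlink.c line 64:
 *     if (atomic_sub_return(1, &block->refcnt) == 0) { ... }
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct block {
	atomic_int refcnt;
	/* ... payload ... */
};

/*
 * Analogue of the kernel's atomic_sub_return(i, v): subtract i and
 * return the resulting value. C11's atomic_fetch_sub() returns the
 * *old* value, so subtract i again to match the kernel semantics.
 */
static int atomic_sub_return(int i, atomic_int *v)
{
	return atomic_fetch_sub(v, i) - i;
}

static void block_put(struct block *b)
{
	/* Drop one reference; free only on the final put. */
	if (atomic_sub_return(1, &b->refcnt) == 0) {
		printf("last reference dropped, freeing\n");
		free(b);
	}
}

int main(void)
{
	struct block *b = malloc(sizeof(*b));

	atomic_init(&b->refcnt, 2);	/* two hypothetical owners  */
	block_put(b);			/* refcnt 2 -> 1: no free   */
	block_put(b);			/* refcnt 1 -> 0: freed     */
	return 0;
}

The subtract-and-test must be a single atomic operation: a separate atomic_sub() followed by a read would let two CPUs each observe a nonzero count and skip the free, or both observe zero and double-free. Returning the new value is what makes exactly one caller see the transition to zero.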