nreq 87 arch/ia64/include/asm/perfmon.h extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
nreq 88 arch/ia64/include/asm/perfmon.h extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *regs);
nreq 89 arch/ia64/include/asm/perfmon.h extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
nreq 90 arch/ia64/include/asm/perfmon.h extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs);
nreq 3333 arch/ia64/kernel/perfmon.c pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
nreq 3349 arch/ia64/kernel/perfmon.c return pfm_write_pmcs(ctx, req, nreq, regs);
nreq 3354 arch/ia64/kernel/perfmon.c pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
nreq 3370 arch/ia64/kernel/perfmon.c return pfm_read_pmds(ctx, req, nreq, regs);
nreq 3834 arch/ia64/kernel/perfmon.c pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
nreq 3850 arch/ia64/kernel/perfmon.c return pfm_write_ibrs(ctx, req, nreq, regs);
nreq 3855 arch/ia64/kernel/perfmon.c pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)
nreq 3871 arch/ia64/kernel/perfmon.c return pfm_write_dbrs(ctx, req, nreq, regs);
nreq 45 crypto/echainiv.c SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
nreq 47 crypto/echainiv.c skcipher_request_set_sync_tfm(nreq, ctx->sknull);
nreq 48 crypto/echainiv.c skcipher_request_set_callback(nreq, req->base.flags,
nreq 50 crypto/echainiv.c skcipher_request_set_crypt(nreq, req->src, req->dst,
nreq 54 crypto/echainiv.c err = crypto_skcipher_encrypt(nreq);
nreq 1026 crypto/gcm.c SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->null);
nreq 1028 crypto/gcm.c skcipher_request_set_sync_tfm(nreq, ctx->null);
nreq 1029 crypto/gcm.c skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
nreq 1030 crypto/gcm.c skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);
nreq 1032 crypto/gcm.c return crypto_skcipher_encrypt(nreq);
nreq 71 crypto/seqiv.c SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, ctx->sknull);
nreq 73 crypto/seqiv.c skcipher_request_set_sync_tfm(nreq, ctx->sknull);
nreq 74 crypto/seqiv.c skcipher_request_set_callback(nreq, req->base.flags,
nreq 76 crypto/seqiv.c skcipher_request_set_crypt(nreq, req->src, req->dst,
nreq 80 crypto/seqiv.c err = crypto_skcipher_encrypt(nreq);
nreq 773 drivers/crypto/inside-secure/safexcel.c int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;
nreq 813 drivers/crypto/inside-secure/safexcel.c nreq++;
nreq 824 drivers/crypto/inside-secure/safexcel.c if (!nreq)
nreq 829 drivers/crypto/inside-secure/safexcel.c priv->ring[ring].requests += nreq;
nreq 969 drivers/crypto/inside-secure/safexcel.c int ret, i, nreq, ndesc, tot_descs, handled = 0;
nreq 975 drivers/crypto/inside-secure/safexcel.c nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
nreq 976 drivers/crypto/inside-secure/safexcel.c nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
nreq 977 drivers/crypto/inside-secure/safexcel.c nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
nreq 978 drivers/crypto/inside-secure/safexcel.c if (!nreq)
nreq 981 drivers/crypto/inside-secure/safexcel.c for (i = 0; i < nreq; i++) {
nreq 1012 drivers/crypto/inside-secure/safexcel.c if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
nreq 297 drivers/dma/bcm-sba-raid.c struct sba_request *nreq;
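The crypto/ entries above (echainiv, seqiv, gcm) all use nreq as an on-stack request for the default null skcipher, i.e. as a scatterlist-to-scatterlist copy of req->src to req->dst. A minimal sketch of that shared idiom, assuming the sync handle was obtained with crypto_get_default_null_skcipher() as in those files (null_copy itself is a hypothetical name):

#include <crypto/aead.h>
#include <crypto/skcipher.h>

/* Copy nbytes from req->src to req->dst through the null cipher; a
 * sketch of the idiom shared by the echainiv/seqiv/gcm sites above.
 * sknull is assumed to be crypto_get_default_null_skcipher().
 */
static int null_copy(struct aead_request *req,
		     struct crypto_sync_skcipher *sknull,
		     unsigned int nbytes)
{
	SYNC_SKCIPHER_REQUEST_ON_STACK(nreq, sknull);

	skcipher_request_set_sync_tfm(nreq, sknull);
	skcipher_request_set_callback(nreq, req->base.flags, NULL, NULL);
	skcipher_request_set_crypt(nreq, req->src, req->dst, nbytes, NULL);

	/* ecb(cipher_null) just moves bytes between the scatterlists. */
	return crypto_skcipher_encrypt(nreq);
}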
nreq 303 drivers/dma/bcm-sba-raid.c list_for_each_entry(nreq, &req->next, next)
nreq 304 drivers/dma/bcm-sba-raid.c _sba_free_request(sba, nreq);
nreq 420 drivers/dma/bcm-sba-raid.c struct sba_request *nreq, *first = req->first;
nreq 442 drivers/dma/bcm-sba-raid.c list_for_each_entry(nreq, &first->next, next)
nreq 443 drivers/dma/bcm-sba-raid.c _sba_free_request(sba, nreq);
nreq 528 drivers/dma/bcm-sba-raid.c struct sba_request *req, *nreq;
nreq 540 drivers/dma/bcm-sba-raid.c list_for_each_entry(nreq, &req->next, next)
nreq 541 drivers/dma/bcm-sba-raid.c _sba_pending_request(sba, nreq);
nreq 5185 drivers/infiniband/hw/hfi1/tid_rdma.c struct tid_rdma_request *req, *nreq;
nreq 5259 drivers/infiniband/hw/hfi1/tid_rdma.c nreq = ack_to_tid_req(&qp->s_ack_queue[next]);
nreq 5260 drivers/infiniband/hw/hfi1/tid_rdma.c if (!nreq->comp_seg || nreq->ack_seg == nreq->comp_seg)
nreq 1245 drivers/infiniband/hw/hns/hns_roce_device.h bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
nreq 78 drivers/infiniband/hw/hns/hns_roce_hw_v1.c int nreq = 0;
nreq 92 drivers/infiniband/hw/hns/hns_roce_hw_v1.c for (nreq = 0; wr; ++nreq, wr = wr->next) {
nreq 93 drivers/infiniband/hw/hns/hns_roce_hw_v1.c if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
nreq 99 drivers/infiniband/hw/hns/hns_roce_hw_v1.c wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
nreq 316 drivers/infiniband/hw/hns/hns_roce_hw_v1.c if (likely(nreq)) {
nreq 317 drivers/infiniband/hw/hns/hns_roce_hw_v1.c qp->sq.head += nreq;
nreq 359 drivers/infiniband/hw/hns/hns_roce_hw_v1.c int nreq = 0;
nreq 365 drivers/infiniband/hw/hns/hns_roce_hw_v1.c for (nreq = 0; wr; ++nreq, wr = wr->next) {
nreq 366 drivers/infiniband/hw/hns/hns_roce_hw_v1.c if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
nreq 373 drivers/infiniband/hw/hns/hns_roce_hw_v1.c wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
nreq 399 drivers/infiniband/hw/hns/hns_roce_hw_v1.c if (likely(nreq)) {
nreq 400 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->rq.head += nreq;
nreq 258 drivers/infiniband/hw/hns/hns_roce_hw_v2.c int nreq;
nreq 279 drivers/infiniband/hw/hns/hns_roce_hw_v2.c for (nreq = 0; wr; ++nreq, wr = wr->next) {
nreq 280 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
nreq 286 drivers/infiniband/hw/hns/hns_roce_hw_v2.c wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
nreq 299 drivers/infiniband/hw/hns/hns_roce_hw_v2.c ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
nreq 576 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (likely(nreq)) {
nreq 577 drivers/infiniband/hw/hns/hns_roce_hw_v2.c qp->sq.head += nreq;
nreq 632 drivers/infiniband/hw/hns/hns_roce_hw_v2.c int nreq;
nreq 643 drivers/infiniband/hw/hns/hns_roce_hw_v2.c for (nreq = 0; wr; ++nreq, wr = wr->next) {
nreq 644 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
nreq 651 drivers/infiniband/hw/hns/hns_roce_hw_v2.c wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
nreq 691 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (likely(nreq)) {
nreq 692 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->rq.head += nreq;
nreq 6241 drivers/infiniband/hw/hns/hns_roce_hw_v2.c int nreq;
nreq 6249 drivers/infiniband/hw/hns/hns_roce_hw_v2.c for (nreq = 0; wr; ++nreq, wr = wr->next) {
nreq 6289 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (likely(nreq)) {
nreq 6290 drivers/infiniband/hw/hns/hns_roce_hw_v2.c srq->head += nreq;
nreq 1287 drivers/infiniband/hw/hns/hns_roce_qp.c bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
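In the hns_roce entries, nreq is the running count of work requests accepted by one ib_post_send()/ib_post_recv() call: it offsets the WQE slot from the ring head, feeds the overflow check, and is added to the head exactly once before the doorbell. A condensed sketch of that loop shape; hypo_qp, wq_overflow(), write_wqe() and ring_doorbell() are hypothetical stand-ins for the driver types and helpers:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

static int post_recv(struct hypo_qp *qp, const struct ib_recv_wr *wr,
		     const struct ib_recv_wr **bad_wr)
{
	u32 wqe_idx;
	int nreq;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (wq_overflow(&qp->rq, nreq, qp->recv_cq)) {
			*bad_wr = wr;		/* first WR that did not fit */
			return -ENOMEM;		/* earlier WRs stay posted */
		}
		/* wqe_cnt is a power of two, so the mask wraps the ring */
		wqe_idx = (qp->rq.head + nreq) & (qp->rq.wqe_cnt - 1);
		write_wqe(qp, wqe_idx, wr);
	}

	if (likely(nreq)) {
		qp->rq.head += nreq;	/* publish the whole batch */
		ring_doorbell(qp);	/* one doorbell per call, not per WR */
	}
	return 0;
}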
nreq 1294 drivers/infiniband/hw/hns/hns_roce_qp.c if (likely(cur + nreq < hr_wq->max_post))
nreq 1302 drivers/infiniband/hw/hns/hns_roce_qp.c return cur + nreq >= hr_wq->max_post;
nreq 3289 drivers/infiniband/hw/mlx4/qp.c static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
nreq 3295 drivers/infiniband/hw/mlx4/qp.c if (likely(cur + nreq < wq->max_post))
nreq 3303 drivers/infiniband/hw/mlx4/qp.c return cur + nreq >= wq->max_post;
nreq 3541 drivers/infiniband/hw/mlx4/qp.c int nreq;
nreq 3577 drivers/infiniband/hw/mlx4/qp.c nreq = 0;
nreq 3583 drivers/infiniband/hw/mlx4/qp.c for (nreq = 0; wr; ++nreq, wr = wr->next) {
nreq 3587 drivers/infiniband/hw/mlx4/qp.c if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
nreq 3600 drivers/infiniband/hw/mlx4/qp.c qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
nreq 3827 drivers/infiniband/hw/mlx4/qp.c if (likely(nreq)) {
nreq 3828 drivers/infiniband/hw/mlx4/qp.c qp->sq.head += nreq;
nreq 3862 drivers/infiniband/hw/mlx4/qp.c int nreq;
nreq 3875 drivers/infiniband/hw/mlx4/qp.c nreq = 0;
nreq 3881 drivers/infiniband/hw/mlx4/qp.c for (nreq = 0; wr; ++nreq, wr = wr->next) {
nreq 3882 drivers/infiniband/hw/mlx4/qp.c if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
nreq 3926 drivers/infiniband/hw/mlx4/qp.c if (likely(nreq)) {
nreq 3927 drivers/infiniband/hw/mlx4/qp.c qp->rq.head += nreq;
nreq 308 drivers/infiniband/hw/mlx4/srq.c int nreq;
nreq 316 drivers/infiniband/hw/mlx4/srq.c nreq = 0;
nreq 320 drivers/infiniband/hw/mlx4/srq.c for (nreq = 0; wr; ++nreq, wr = wr->next) {
nreq 352 drivers/infiniband/hw/mlx4/srq.c if (likely(nreq)) {
nreq 353 drivers/infiniband/hw/mlx4/srq.c srq->wqe_ctr += nreq;
nreq 4070 drivers/infiniband/hw/mlx5/qp.c static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
nreq 4076 drivers/infiniband/hw/mlx5/qp.c if (likely(cur + nreq < wq->max_post))
nreq 4084 drivers/infiniband/hw/mlx5/qp.c return cur + nreq >= wq->max_post;
nreq 4899 drivers/infiniband/hw/mlx5/qp.c int *size, void **cur_edge, int nreq,
nreq 4902 drivers/infiniband/hw/mlx5/qp.c if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
nreq 4924 drivers/infiniband/hw/mlx5/qp.c int *size, void **cur_edge, int nreq)
nreq 4926 drivers/infiniband/hw/mlx5/qp.c return __begin_wqe(qp, seg, ctrl, wr, idx, size, cur_edge, nreq,
nreq 4934 drivers/infiniband/hw/mlx5/qp.c unsigned int idx, u64 wr_id, int nreq, u8 fence,
nreq 4948 drivers/infiniband/hw/mlx5/qp.c qp->sq.wqe_head[idx] = qp->sq.head + nreq;
nreq 4983 drivers/infiniband/hw/mlx5/qp.c int nreq;
nreq 5002 drivers/infiniband/hw/mlx5/qp.c for (nreq = 0; wr; nreq++, wr = wr->next) {
nreq 5019 drivers/infiniband/hw/mlx5/qp.c nreq);
nreq 5111 drivers/infiniband/hw/mlx5/qp.c nreq, fence,
nreq 5116 drivers/infiniband/hw/mlx5/qp.c nreq);
nreq 5154 drivers/infiniband/hw/mlx5/qp.c wr->wr_id, nreq, fence,
nreq 5163 drivers/infiniband/hw/mlx5/qp.c &size, &cur_edge, nreq, false,
nreq 5180 drivers/infiniband/hw/mlx5/qp.c wr->wr_id, nreq, next_fence,
nreq 5184 drivers/infiniband/hw/mlx5/qp.c &size, &cur_edge, nreq, false,
nreq 5201 drivers/infiniband/hw/mlx5/qp.c wr->wr_id, nreq, next_fence,
nreq 5309 drivers/infiniband/hw/mlx5/qp.c finish_wqe(qp, ctrl, seg, size, cur_edge, idx, wr->wr_id, nreq,
nreq 5317 drivers/infiniband/hw/mlx5/qp.c if (likely(nreq)) {
nreq 5318 drivers/infiniband/hw/mlx5/qp.c qp->sq.head += nreq;
nreq 5365 drivers/infiniband/hw/mlx5/qp.c int nreq;
nreq 5382 drivers/infiniband/hw/mlx5/qp.c for (nreq = 0; wr; nreq++, wr = wr->next) {
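The *_wq_overflow() helpers indexed above (hns_roce, mlx4, mlx5, and mthca's variant further down) share a two-phase test: an unlocked comparison first, and only on apparent overflow a re-read of the tail under the CQ lock, because polling the completion queue is what advances the tail. A sketch along the lines of the mlx4 version, with hypothetical ring/cq types:

#include <linux/spinlock.h>
#include <linux/types.h>

struct hypo_cq { spinlock_t lock; };
struct hypo_wq { u32 head, tail, max_post; };

static int wq_overflow(struct hypo_wq *wq, int nreq, struct hypo_cq *cq)
{
	u32 cur;

	/* head and tail free-run, so head - tail survives wraparound */
	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	/* Apparent overflow: re-read the tail under the CQ lock in case
	 * a concurrent poller just retired some completions.
	 */
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}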
nreq 5383 drivers/infiniband/hw/mlx5/qp.c if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
nreq 5419 drivers/infiniband/hw/mlx5/qp.c if (likely(nreq)) {
nreq 5420 drivers/infiniband/hw/mlx5/qp.c qp->rq.head += nreq;
nreq 430 drivers/infiniband/hw/mlx5/srq.c int nreq;
nreq 441 drivers/infiniband/hw/mlx5/srq.c for (nreq = 0; wr; nreq++, wr = wr->next) {
nreq 473 drivers/infiniband/hw/mlx5/srq.c if (likely(nreq)) {
nreq 474 drivers/infiniband/hw/mlx5/srq.c srq->wqe_ctr += nreq;
nreq 1566 drivers/infiniband/hw/mthca/mthca_qp.c static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
nreq 1573 drivers/infiniband/hw/mthca/mthca_qp.c if (likely(cur + nreq < wq->max))
nreq 1581 drivers/infiniband/hw/mthca/mthca_qp.c return cur + nreq >= wq->max;
nreq 1632 drivers/infiniband/hw/mthca/mthca_qp.c int nreq;
nreq 1653 drivers/infiniband/hw/mthca/mthca_qp.c for (nreq = 0; wr; ++nreq, wr = wr->next) {
nreq 1654 drivers/infiniband/hw/mthca/mthca_qp.c if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
nreq 1658 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.max, nreq);
nreq 1787 drivers/infiniband/hw/mthca/mthca_qp.c cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
nreq 1791 drivers/infiniband/hw/mthca/mthca_qp.c if (!nreq) {
nreq 1804 drivers/infiniband/hw/mthca/mthca_qp.c if (likely(nreq)) {
nreq 1815 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.head += nreq;
nreq 1828 drivers/infiniband/hw/mthca/mthca_qp.c int nreq;
nreq 1849 drivers/infiniband/hw/mthca/mthca_qp.c for (nreq = 0; wr; wr = wr->next) {
nreq 1850 drivers/infiniband/hw/mthca/mthca_qp.c if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
nreq 1854 drivers/infiniband/hw/mthca/mthca_qp.c qp->rq.max, nreq);
nreq 1888 drivers/infiniband/hw/mthca/mthca_qp.c if (!nreq)
nreq 1895 drivers/infiniband/hw/mthca/mthca_qp.c ++nreq;
nreq 1896 drivers/infiniband/hw/mthca/mthca_qp.c if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
nreq 1897 drivers/infiniband/hw/mthca/mthca_qp.c nreq = 0;
nreq 1911 drivers/infiniband/hw/mthca/mthca_qp.c if (likely(nreq)) {
nreq 1915 drivers/infiniband/hw/mthca/mthca_qp.c qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL,
nreq 1920 drivers/infiniband/hw/mthca/mthca_qp.c qp->rq.head += nreq;
nreq 1936 drivers/infiniband/hw/mthca/mthca_qp.c int nreq;
nreq 1957 drivers/infiniband/hw/mthca/mthca_qp.c for (nreq = 0; wr; ++nreq, wr = wr->next) {
nreq 1958 drivers/infiniband/hw/mthca/mthca_qp.c if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
nreq 1959 drivers/infiniband/hw/mthca/mthca_qp.c nreq = 0;
nreq 1984 drivers/infiniband/hw/mthca/mthca_qp.c if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
nreq 1988 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.max, nreq);
nreq 2121 drivers/infiniband/hw/mthca/mthca_qp.c if (!nreq) {
nreq 2134 drivers/infiniband/hw/mthca/mthca_qp.c if (likely(nreq)) {
nreq 2135 drivers/infiniband/hw/mthca/mthca_qp.c dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0;
nreq 2137 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.head += nreq;
nreq 2167 drivers/infiniband/hw/mthca/mthca_qp.c int nreq;
nreq 2178 drivers/infiniband/hw/mthca/mthca_qp.c for (nreq = 0; wr; ++nreq, wr = wr->next) {
nreq 2179 drivers/infiniband/hw/mthca/mthca_qp.c if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
nreq 2183 drivers/infiniband/hw/mthca/mthca_qp.c qp->rq.max, nreq);
nreq 2216 drivers/infiniband/hw/mthca/mthca_qp.c if (likely(nreq)) {
nreq 2217 drivers/infiniband/hw/mthca/mthca_qp.c qp->rq.head += nreq;
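The mthca Tavor paths add a twist: a single doorbell can only announce a bounded number of new WQEs, so when nreq hits MTHCA_TAVOR_MAX_WQES_PER_RECV_DB the loop rings an intermediate doorbell and restarts the count at zero (the ++nreq / nreq = 0 pairs above). Schematically, with MAX_WQES_PER_DB, hypo_qp and the helpers as hypothetical stand-ins for the driver internals:

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

static int tavor_post_recv(struct hypo_qp *qp, const struct ib_recv_wr *wr,
			   const struct ib_recv_wr **bad_wr)
{
	int nreq, err = 0;

	for (nreq = 0; wr; wr = wr->next) {
		if (wq_overflow(&qp->rq, nreq, qp->recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		build_recv_wqe(qp, wr);		/* hypothetical helper */

		++nreq;
		if (unlikely(nreq == MAX_WQES_PER_DB)) {
			ring_recv_doorbell(qp, nreq);	/* flush a full chunk */
			nreq = 0;			/* count the next one */
		}
	}

	if (likely(nreq))
		ring_recv_doorbell(qp, nreq);		/* flush the remainder */

	return err;
}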
nreq 493 drivers/infiniband/hw/mthca/mthca_srq.c int nreq;
nreq 502 drivers/infiniband/hw/mthca/mthca_srq.c for (nreq = 0; wr; wr = wr->next) {
nreq 543 drivers/infiniband/hw/mthca/mthca_srq.c ++nreq;
nreq 544 drivers/infiniband/hw/mthca/mthca_srq.c if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
nreq 545 drivers/infiniband/hw/mthca/mthca_srq.c nreq = 0;
nreq 561 drivers/infiniband/hw/mthca/mthca_srq.c if (likely(nreq)) {
nreq 568 drivers/infiniband/hw/mthca/mthca_srq.c mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
nreq 586 drivers/infiniband/hw/mthca/mthca_srq.c int nreq;
nreq 592 drivers/infiniband/hw/mthca/mthca_srq.c for (nreq = 0; wr; ++nreq, wr = wr->next) {
nreq 627 drivers/infiniband/hw/mthca/mthca_srq.c if (likely(nreq)) {
nreq 628 drivers/infiniband/hw/mthca/mthca_srq.c srq->counter += nreq;
nreq 2189 drivers/infiniband/sw/rdmavt/qp.c unsigned nreq = 0;
nreq 2216 drivers/infiniband/sw/rdmavt/qp.c nreq++;
nreq 2220 drivers/infiniband/sw/rdmavt/qp.c if (nreq) {
nreq 2225 drivers/infiniband/sw/rdmavt/qp.c if (nreq == 1 && call_send)
nreq 2939 drivers/net/ethernet/mellanox/mlx4/main.c int nreq = min3(dev->caps.num_ports *
nreq 2945 drivers/net/ethernet/mellanox/mlx4/main.c nreq = min_t(int, nreq, msi_x);
nreq 2947 drivers/net/ethernet/mellanox/mlx4/main.c entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
nreq 2951 drivers/net/ethernet/mellanox/mlx4/main.c for (i = 0; i < nreq; ++i)
nreq 2954 drivers/net/ethernet/mellanox/mlx4/main.c nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
nreq 2955 drivers/net/ethernet/mellanox/mlx4/main.c nreq);
nreq 2957 drivers/net/ethernet/mellanox/mlx4/main.c if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
nreq 2962 drivers/net/ethernet/mellanox/mlx4/main.c dev->caps.num_comp_vectors = nreq - 1;
nreq 226 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c u32 opcode, int nreq)
nreq 258 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c if (nreq)
nreq 2325 drivers/net/ethernet/neterion/vxge/vxge-config.c u32 nreq = 0, i;
nreq 2329 drivers/net/ethernet/neterion/vxge/vxge-config.c nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
nreq 2330 drivers/net/ethernet/neterion/vxge/vxge-config.c blockpool->req_out += nreq;
nreq 2333 drivers/net/ethernet/neterion/vxge/vxge-config.c for (i = 0; i < nreq; i++)
nreq 73 drivers/nvme/host/fc.c struct nvme_request nreq; /*
nreq 772 drivers/usb/isp1760/isp1760-udc.c struct isp1760_request *req, *nreq;
nreq 798 drivers/usb/isp1760/isp1760-udc.c list_for_each_entry_safe(req, nreq, &req_list, queue) {
nreq 3212 fs/ceph/mds_client.c struct ceph_mds_request *req, *nreq;
nreq 3219 fs/ceph/mds_client.c list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
nreq 194 fs/nfs/pnfs_nfs.c unsigned int nreq = 0;
nreq 206 fs/nfs/pnfs_nfs.c nreq++;
nreq 211 fs/nfs/pnfs_nfs.c return nreq;
nreq 267 fs/nfs/pnfs_nfs.c unsigned int nreq = 0;
nreq 273 fs/nfs/pnfs_nfs.c nreq++;
nreq 276 fs/nfs/pnfs_nfs.c nreq += pnfs_generic_alloc_ds_commits(cinfo, &list);
nreq 278 fs/nfs/pnfs_nfs.c if (nreq == 0)
nreq 281 fs/nfs/pnfs_nfs.c atomic_add(nreq, &cinfo->mds->rpcs_out);
nreq 1728 fs/nilfs2/btree.c union nilfs_bmap_ptr_req *nreq,
nreq 1751 fs/nilfs2/btree.c if (nreq != NULL) {
nreq 1752 fs/nilfs2/btree.c nreq->bpr_ptr = dreq->bpr_ptr + 1;
nreq 1753 fs/nilfs2/btree.c ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat);
nreq 1757 fs/nilfs2/btree.c ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);
nreq 1770 fs/nilfs2/btree.c nilfs_bmap_abort_alloc_ptr(btree, nreq, dat);
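mlx4/main.c reuses the name for interrupt setup: nreq is the number of MSI-X vectors to request, clamped from an ideal count, negotiated downward by pci_enable_msix_range(), and then split into one async vector plus completion vectors. A sketch of that negotiation; want_vecs() is a hypothetical stand-in for the driver's min3()/min_t() clamping, and unlike the driver this sketch does not keep the per-vector IRQ numbers around:

#include <linux/pci.h>
#include <linux/slab.h>

static int enable_msix(struct pci_dev *pdev, int *num_comp_vectors)
{
	struct msix_entry *entries;
	int nreq = want_vecs(), i;

	entries = kcalloc(nreq, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < nreq; ++i)
		entries[i].entry = i;

	/* Accept anything from 2 vectors up to the full request. */
	nreq = pci_enable_msix_range(pdev, entries, 2, nreq);
	if (nreq < 0) {
		kfree(entries);
		return nreq;	/* not even the minimum was available */
	}

	/* One vector is reserved for async events; the rest complete I/O. */
	*num_comp_vectors = nreq - 1;
	kfree(entries);
	return 0;
}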
nreq 1784 fs/nilfs2/btree.c union nilfs_bmap_ptr_req *nreq,
nreq 1802 fs/nilfs2/btree.c if (nreq != NULL) {
nreq 1804 fs/nilfs2/btree.c nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
nreq 1820 fs/nilfs2/btree.c tmpptr = nreq->bpr_ptr;
nreq 1856 fs/nilfs2/btree.c union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;
nreq 1866 fs/nilfs2/btree.c ni = &nreq;
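The nilfs2 entries close the list with a two-phase allocation: nreq is the pointer-allocation request for the new B-tree node block, prepared alongside dreq (the data block, with nreq->bpr_ptr hinted to dreq->bpr_ptr + 1), committed with it on success, and aborted on failure. The prepare/commit/abort shape, with hypothetical ptr_req/prepare_alloc()/commit_alloc()/abort_alloc()/do_convert() names standing in for the nilfs_bmap_*_alloc_ptr() machinery above:

static int convert(struct btree *bt)
{
	struct ptr_req dreq, nreq;
	int ret;

	ret = prepare_alloc(bt, &dreq);		/* reserve the data block */
	if (ret)
		return ret;

	nreq.ptr = dreq.ptr + 1;		/* hint: node block right after it */
	ret = prepare_alloc(bt, &nreq);		/* reserve the node block */
	if (ret)
		goto abort_dreq;

	ret = do_convert(bt, &dreq, &nreq);	/* may still fail */
	if (ret)
		goto abort_nreq;

	commit_alloc(bt, &dreq);		/* past this point, no undo */
	commit_alloc(bt, &nreq);
	return 0;

abort_nreq:
	abort_alloc(bt, &nreq);			/* unwind in reverse order */
abort_dreq:
	abort_alloc(bt, &dreq);
	return ret;
}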