/linux-4.4.14/arch/ia64/include/asm/
D | perfmon.h |
    86  extern int pfm_mod_read_pmds(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *re…
    87  extern int pfm_mod_write_pmcs(struct task_struct *, void *req, unsigned int nreq, struct pt_regs *r…
    88  extern int pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_reg…
    89  extern int pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_reg…
/linux-4.4.14/drivers/infiniband/hw/mthca/
D | mthca_qp.c |
    1545  static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,  in mthca_wq_overflow() argument
    1552  if (likely(cur + nreq < wq->max))  in mthca_wq_overflow()
    1560  return cur + nreq >= wq->max;  in mthca_wq_overflow()
    1611  int nreq;  in mthca_tavor_post_send() local
    1632  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mthca_tavor_post_send()
    1633  if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {  in mthca_tavor_post_send()
    1637  qp->sq.max, nreq);  in mthca_tavor_post_send()
    1766  cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |  in mthca_tavor_post_send()
    1770  if (!nreq) {  in mthca_tavor_post_send()
    1783  if (likely(nreq)) {  in mthca_tavor_post_send()
    [all …]
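The mthca_qp.c hits above sketch the usual verbs post path: walk the caller's chain of work requests counting them in nreq, refuse the request that would overflow the send ring, and touch the doorbell once at the end only if nreq is non-zero. Below is a minimal userspace model of that shape, assuming made-up types and a ring_doorbell() stub rather than the driver's real structures:

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's work-request chain and ring state. */
struct work_request {
    int id;
    struct work_request *next;
};

struct work_queue {
    unsigned int head;  /* producer index                  */
    unsigned int tail;  /* consumer index (completed WQEs) */
    unsigned int max;   /* ring capacity                   */
};

/* Same idea as mthca_wq_overflow(): with nreq requests already accepted
 * in this call, would one more overflow the ring? */
static int wq_overflow(const struct work_queue *wq, int nreq)
{
    unsigned int cur = wq->head - wq->tail;   /* entries in flight */

    return cur + nreq >= wq->max;
}

static void ring_doorbell(int nreq)
{
    printf("doorbell: %d new WQEs\n", nreq);
}

/* Post as many chained requests as fit, then notify the hardware once
 * for the whole batch. */
static int post_send(struct work_queue *wq, struct work_request *wr)
{
    int nreq, err = 0;

    for (nreq = 0; wr; ++nreq, wr = wr->next) {
        if (wq_overflow(wq, nreq)) {
            err = -ENOMEM;   /* the real driver also reports *bad_wr */
            break;
        }
        /* ... build the WQE for wr at slot (wq->head + nreq) ... */
    }

    if (nreq) {              /* only touch hardware if something was queued */
        wq->head += nreq;
        ring_doorbell(nreq);
    }
    return err;
}

int main(void)
{
    struct work_queue wq = { .head = 0, .tail = 0, .max = 4 };
    struct work_request w3 = { 3, NULL }, w2 = { 2, &w3 }, w1 = { 1, &w2 };

    return post_send(&wq, &w1) ? 1 : 0;
}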
D | mthca_srq.c |
    485  int nreq;  in mthca_tavor_post_srq_recv() local
    494  for (nreq = 0; wr; wr = wr->next) {  in mthca_tavor_post_srq_recv()
    535  ++nreq;  in mthca_tavor_post_srq_recv()
    536  if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {  in mthca_tavor_post_srq_recv()
    537  nreq = 0;  in mthca_tavor_post_srq_recv()
    553  if (likely(nreq)) {  in mthca_tavor_post_srq_recv()
    560  mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,  in mthca_tavor_post_srq_recv()
    584  int nreq;  in mthca_arbel_post_srq_recv() local
    590  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mthca_arbel_post_srq_recv()
    625  if (likely(nreq)) {  in mthca_arbel_post_srq_recv()
    [all …]
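mthca_tavor_post_srq_recv() adds a wrinkle: one doorbell can only describe a limited number of receive WQEs, so nreq is rung out and reset every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests, with a final doorbell for whatever remains. A small model of that chunking, with an illustrative MAX_WQES_PER_DB constant standing in for the driver's limit:

#include <stdio.h>

#define MAX_WQES_PER_DB 256  /* illustrative; mthca uses MTHCA_TAVOR_MAX_WQES_PER_RECV_DB */

static void ring_recv_doorbell(int nreq)
{
    printf("recv doorbell covers %d WQEs\n", nreq);
}

/* Post 'total' receive requests, notifying hardware in chunks so a
 * single doorbell never has to describe more than MAX_WQES_PER_DB WQEs. */
static void post_recv_batch(int total)
{
    int i, nreq = 0;

    for (i = 0; i < total; i++) {
        /* ... link the WQE into the receive ring ... */
        ++nreq;
        if (nreq == MAX_WQES_PER_DB) {
            ring_recv_doorbell(nreq);
            nreq = 0;    /* start counting the next chunk */
        }
    }

    if (nreq)                /* remainder that never filled a chunk */
        ring_recv_doorbell(nreq);
}

int main(void)
{
    post_recv_batch(600);    /* rings for 256, 256, then 88 WQEs */
    return 0;
}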
/linux-4.4.14/drivers/infiniband/hw/mlx4/
D | srq.c |
    322  int nreq;  in mlx4_ib_post_srq_recv() local
    330  nreq = 0;  in mlx4_ib_post_srq_recv()
    334  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mlx4_ib_post_srq_recv()
    366  if (likely(nreq)) {  in mlx4_ib_post_srq_recv()
    367  srq->wqe_ctr += nreq;  in mlx4_ib_post_srq_recv()
D | qp.c |
    2486  static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)  in mlx4_wq_overflow() argument
    2492  if (likely(cur + nreq < wq->max_post))  in mlx4_wq_overflow()
    2500  return cur + nreq >= wq->max_post;  in mlx4_wq_overflow()
    2756  int nreq;  in mlx4_ib_post_send() local
    2773  nreq = 0;  in mlx4_ib_post_send()
    2779  for (nreq = 0; wr; ++nreq, wr = wr->next) {  in mlx4_ib_post_send()
    2783  if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {  in mlx4_ib_post_send()
    2796  qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;  in mlx4_ib_post_send()
    3038  if (likely(nreq)) {  in mlx4_ib_post_send()
    3039  qp->sq.head += nreq;  in mlx4_ib_post_send()
    [all …]
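The mlx4 send path records each caller-supplied wr_id at slot (head + nreq) & (wqe_cnt - 1); because wqe_cnt is a power of two, the mask is a cheap modulo, and head is only advanced by nreq once the whole batch has been written. A compile-and-run sketch of that bookkeeping (types and names are illustrative, not the driver's):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WQE_CNT 8    /* must be a power of two for the mask trick */

/* Illustrative stand-in for the driver's send-queue bookkeeping. */
struct send_queue {
    uint64_t wrid[WQE_CNT];  /* caller cookies, indexed by WQE slot */
    unsigned int head;       /* total WQEs ever posted              */
};

/* Store the cookies for a batch of nreq requests, then publish the
 * batch by bumping head once, mirroring mlx4_ib_post_send(). */
static void post_batch(struct send_queue *sq, const uint64_t *ids, int nreq)
{
    int i;

    assert((WQE_CNT & (WQE_CNT - 1)) == 0);  /* power of two */

    for (i = 0; i < nreq; i++)
        sq->wrid[(sq->head + i) & (WQE_CNT - 1)] = ids[i];

    sq->head += nreq;    /* only after every WQE of the batch is written */
}

int main(void)
{
    struct send_queue sq = { .head = 6 };    /* near the wrap point */
    uint64_t ids[] = { 100, 101, 102, 103 };

    post_batch(&sq, ids, 4);
    printf("slot 6=%llu slot 7=%llu slot 0=%llu slot 1=%llu\n",
           (unsigned long long)sq.wrid[6], (unsigned long long)sq.wrid[7],
           (unsigned long long)sq.wrid[0], (unsigned long long)sq.wrid[1]);
    return 0;
}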
/linux-4.4.14/fs/nfs/
D | pnfs_nfs.c |
    213  unsigned int nreq = 0;  in pnfs_generic_alloc_ds_commits() local
    225  nreq++;  in pnfs_generic_alloc_ds_commits()
    230  return nreq;  in pnfs_generic_alloc_ds_commits()
    258  unsigned int nreq = 0;  in pnfs_generic_commit_pagelist() local
    265  nreq++;  in pnfs_generic_commit_pagelist()
    274  nreq += pnfs_generic_alloc_ds_commits(cinfo, &list);  in pnfs_generic_commit_pagelist()
    276  if (nreq == 0) {  in pnfs_generic_commit_pagelist()
    281  atomic_add(nreq, &cinfo->mds->rpcs_out);  in pnfs_generic_commit_pagelist()
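In pnfs_generic_commit_pagelist() nreq counts how many commit RPCs were actually set up in this call (the increments at lines 265 and 274 above), the zero case returns early, and the total is added to cinfo->mds->rpcs_out before anything is sent so completions cannot race the accounting. A userspace model of that pattern using C11 atomics (the function and variable names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint rpcs_out;    /* stand-in for cinfo->mds->rpcs_out */

/* Pretend to build per-data-server commit requests; return how many. */
static unsigned int alloc_ds_commits(void)
{
    unsigned int nreq = 0;

    /* ... one request per bucket that has pages to commit ... */
    nreq += 2;              /* say two data servers need a commit */
    return nreq;
}

static int commit_pagelist(int mds_has_pages)
{
    unsigned int nreq = 0;

    if (mds_has_pages)
        nreq++;                     /* one commit handled directly */

    nreq += alloc_ds_commits();     /* commits aimed at data servers */

    if (nreq == 0)
        return 0;                   /* nothing to send, nothing to wait for */

    /* Account for every request up front, before any reply can arrive;
     * each completion would do atomic_fetch_sub(&rpcs_out, 1). */
    atomic_fetch_add(&rpcs_out, nreq);

    printf("issued %u commit RPCs, rpcs_out=%u\n", nreq,
           (unsigned int)atomic_load(&rpcs_out));
    return 0;
}

int main(void)
{
    return commit_pagelist(1);
}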
/linux-4.4.14/drivers/infiniband/hw/mlx5/
D | srq.c |
    433  int nreq;  in mlx5_ib_post_srq_recv() local
    438  for (nreq = 0; wr; nreq++, wr = wr->next) {  in mlx5_ib_post_srq_recv()
    470  if (likely(nreq)) {  in mlx5_ib_post_srq_recv()
    471  srq->wqe_ctr += nreq;  in mlx5_ib_post_srq_recv()
D | qp.c |
    1813  static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)  in mlx5_wq_overflow() argument
    1819  if (likely(cur + nreq < wq->max_post))  in mlx5_wq_overflow()
    1827  return cur + nreq >= wq->max_post;  in mlx5_wq_overflow()
    2537  int *size, int nreq)  in begin_wqe() argument
    2541  if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {  in begin_wqe()
    2566  int nreq, u8 fence, u8 next_fence,  in finish_wqe() argument
    2581  qp->sq.wqe_head[idx] = qp->sq.head + nreq;  in finish_wqe()
    2605  int nreq;  in mlx5_ib_post_send() local
    2612  for (nreq = 0; wr; nreq++, wr = wr->next) {  in mlx5_ib_post_send()
    2629  err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);  in mlx5_ib_post_send()
    [all …]
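mlx5 splits the per-request work into begin_wqe(), which performs the overflow check and hands back a slot index, and finish_wqe(), which records per-slot bookkeeping such as the logical position sq.head + nreq of the WQE. A compact model of that begin/finish bracketing (all names are stand-ins for the driver's internals):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define WQE_CNT 8    /* power of two */

/* Stand-in for the driver's send-queue state. */
struct sq_state {
    uint64_t wrid[WQE_CNT];
    unsigned int wqe_head[WQE_CNT];  /* logical position of the WQE in each slot */
    unsigned int head, tail;
};

/* begin: refuse the request if the ring is full, otherwise pick a slot. */
static int begin_wqe(struct sq_state *sq, int nreq, unsigned int *idx)
{
    if (sq->head + nreq - sq->tail >= WQE_CNT)
        return -ENOMEM;
    *idx = (sq->head + nreq) & (WQE_CNT - 1);
    return 0;
}

/* finish: record per-slot bookkeeping once the WQE has been built. */
static void finish_wqe(struct sq_state *sq, unsigned int idx, int nreq,
                       uint64_t wr_id)
{
    sq->wrid[idx] = wr_id;
    sq->wqe_head[idx] = sq->head + nreq;
}

int main(void)
{
    struct sq_state sq = { .head = 0, .tail = 0 };
    unsigned int idx;
    int nreq;

    for (nreq = 0; nreq < 3; nreq++) {
        if (begin_wqe(&sq, nreq, &idx))
            break;
        /* ... write the data segments for this WQE ... */
        finish_wqe(&sq, idx, nreq, 1000 + nreq);
    }
    sq.head += nreq;    /* publish the whole batch */
    printf("posted %d WQEs\n", nreq);
    return 0;
}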
/linux-4.4.14/fs/nilfs2/
D | btree.c |
    1727  union nilfs_bmap_ptr_req *nreq,  in nilfs_btree_prepare_convert_and_insert() argument
    1750  if (nreq != NULL) {  in nilfs_btree_prepare_convert_and_insert()
    1751  nreq->bpr_ptr = dreq->bpr_ptr + 1;  in nilfs_btree_prepare_convert_and_insert()
    1752  ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat);  in nilfs_btree_prepare_convert_and_insert()
    1756  ret = nilfs_btree_get_new_block(btree, nreq->bpr_ptr, &bh);  in nilfs_btree_prepare_convert_and_insert()
    1769  nilfs_bmap_abort_alloc_ptr(btree, nreq, dat);  in nilfs_btree_prepare_convert_and_insert()
    1783  union nilfs_bmap_ptr_req *nreq,  in nilfs_btree_commit_convert_and_insert() argument
    1801  if (nreq != NULL) {  in nilfs_btree_commit_convert_and_insert()
    1803  nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);  in nilfs_btree_commit_convert_and_insert()
    1819  tmpptr = nreq->bpr_ptr;  in nilfs_btree_commit_convert_and_insert()
    [all …]
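The nilfs2 hits show a prepare/commit/abort protocol: here nreq is an optional pointer-allocation request for the new btree node, its pointer is derived from the data request (nreq->bpr_ptr = dreq->bpr_ptr + 1), the allocation is only prepared up front, and it is either committed later or aborted if an intermediate step fails. A small model of that two-phase idiom (the resource type and helper names are made up for illustration):

#include <stdio.h>

/* Toy stand-in for union nilfs_bmap_ptr_req. */
struct ptr_req {
    long ptr;       /* which block/pointer this request covers */
    int prepared;   /* reservation made but not yet final      */
};

static int prepare_alloc(struct ptr_req *req)
{
    /* Reserve the resource without making the change visible yet. */
    req->prepared = 1;
    return 0;
}

static void commit_alloc(struct ptr_req *req) { req->prepared = 0; }
static void abort_alloc(struct ptr_req *req)  { req->prepared = 0; }

/* Prepare the data request and (optionally) the node request; if any
 * later step fails, everything prepared so far is aborted. */
static int prepare_convert(struct ptr_req *dreq, struct ptr_req *nreq)
{
    int ret;

    ret = prepare_alloc(dreq);
    if (ret)
        return ret;

    if (nreq != NULL) {
        nreq->ptr = dreq->ptr + 1;   /* node block follows the data block */
        ret = prepare_alloc(nreq);
        if (ret)
            goto err_abort_dreq;
        /* ... the real code also fetches the new block here ... */
    }
    return 0;

err_abort_dreq:
    abort_alloc(dreq);
    return ret;
}

static void commit_convert(struct ptr_req *dreq, struct ptr_req *nreq)
{
    commit_alloc(dreq);
    if (nreq != NULL)
        commit_alloc(nreq);   /* both reservations become permanent together */
}

int main(void)
{
    struct ptr_req dreq = { .ptr = 42 }, nreq = { 0 };

    if (prepare_convert(&dreq, &nreq) == 0)
        commit_convert(&dreq, &nreq);
    printf("node request uses ptr %ld\n", nreq.ptr);
    return 0;
}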
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
D | main.c |
    2675  int nreq = dev->caps.num_ports * num_online_cpus() + 1;  in mlx4_enable_msi_x() local
    2677  nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,  in mlx4_enable_msi_x()
    2678  nreq);  in mlx4_enable_msi_x()
    2679  if (nreq > MAX_MSIX)  in mlx4_enable_msi_x()
    2680  nreq = MAX_MSIX;  in mlx4_enable_msi_x()
    2682  entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);  in mlx4_enable_msi_x()
    2686  for (i = 0; i < nreq; ++i)  in mlx4_enable_msi_x()
    2689  nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,  in mlx4_enable_msi_x()
    2690  nreq);  in mlx4_enable_msi_x()
    2692  if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {  in mlx4_enable_msi_x()
    [all …]
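mlx4_enable_msi_x() sizes its MSI-X request from the topology (one vector per port per online CPU, plus one), clamps it by the number of available event queues and a driver maximum, then asks for a range of vectors via pci_enable_msix_range(..., 2, nreq) and works with whatever count actually comes back. A userspace model of that sizing and fallback logic (the constants and the enable_range() stub are illustrative, not the PCI API):

#include <stdio.h>

#define MAX_MSIX  64    /* illustrative driver cap              */
#define MIN_VECS  2     /* smallest vector count worth enabling */

static int min_int(int a, int b) { return a < b ? a : b; }

/* Stand-in for pci_enable_msix_range(): grants anything between
 * minvec and maxvec, or a negative error if even minvec is too many. */
static int enable_range(int minvec, int maxvec, int available)
{
    if (available < minvec)
        return -1;
    return min_int(maxvec, available);
}

static int pick_msix_vectors(int num_ports, int online_cpus,
                             int num_eqs, int reserved_eqs, int hw_available)
{
    /* One completion vector per port per CPU, plus one extra vector. */
    int nreq = num_ports * online_cpus + 1;

    /* Never ask for more EQs than the device actually exposes. */
    nreq = min_int(num_eqs - reserved_eqs, nreq);
    if (nreq > MAX_MSIX)
        nreq = MAX_MSIX;

    /* Ask for up to nreq vectors but settle for as few as MIN_VECS. */
    nreq = enable_range(MIN_VECS, nreq, hw_available);
    if (nreq < 0)
        return -1;    /* caller falls back to legacy interrupts */
    return nreq;
}

int main(void)
{
    printf("granted %d MSI-X vectors\n",
           pick_msix_vectors(2, 16, 48, 8, 24));
    return 0;
}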
/linux-4.4.14/drivers/staging/rdma/hfi1/
D | verbs.c |
    490  unsigned nreq = 0;  in post_send() local
    509  nreq++;  in post_send()
    512  if (nreq && !call_send)  in post_send()
    515  if (nreq && call_send)  in post_send()
/linux-4.4.14/arch/ia64/kernel/
D | perfmon.c |
    3388  pfm_mod_write_pmcs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)  in pfm_mod_write_pmcs() argument
    3404  return pfm_write_pmcs(ctx, req, nreq, regs);  in pfm_mod_write_pmcs()
    3409  pfm_mod_read_pmds(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)  in pfm_mod_read_pmds() argument
    3425  return pfm_read_pmds(ctx, req, nreq, regs);  in pfm_mod_read_pmds()
    3889  pfm_mod_write_ibrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)  in pfm_mod_write_ibrs() argument
    3905  return pfm_write_ibrs(ctx, req, nreq, regs);  in pfm_mod_write_ibrs()
    3910  pfm_mod_write_dbrs(struct task_struct *task, void *req, unsigned int nreq, struct pt_regs *regs)  in pfm_mod_write_dbrs() argument
    3926  return pfm_write_dbrs(ctx, req, nreq, regs);  in pfm_mod_write_dbrs()
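These pfm_mod_* entry points (the definitions behind the perfmon.h declarations listed at the top) are thin wrappers: each resolves a perfmon context for the task and forwards req, nreq, and regs unchanged to the matching internal pfm_write_*/pfm_read_* helper. A skeletal model of that validate-then-delegate shape (the context type and the specific checks are simplified placeholders, not perfmon's real logic):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for the perfmon context. */
struct pmu_context { int loaded; };

/* Internal worker: operates on an already-validated context. */
static int write_regs(struct pmu_context *ctx, void *req, unsigned int nreq)
{
    printf("writing %u register requests\n", nreq);
    return 0;
}

/* Exported-style wrapper: sanity-check the arguments, then hand
 * req/nreq through untouched. */
static int mod_write_regs(struct pmu_context *ctx, void *req, unsigned int nreq)
{
    if (ctx == NULL || req == NULL || nreq == 0)
        return -EINVAL;
    if (!ctx->loaded)
        return -EBUSY;

    return write_regs(ctx, req, nreq);
}

int main(void)
{
    struct pmu_context ctx = { .loaded = 1 };
    long dummy[4];

    return mod_write_regs(&ctx, dummy, 4);
}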
/linux-4.4.14/net/ceph/
D | osd_client.c |
    934  struct ceph_osd_request *req, *nreq;  in __kick_osd_requests() local
    982  list_for_each_entry_safe(req, nreq, &osd->o_linger_requests,  in __kick_osd_requests()
    988  list_for_each_entry_safe(req, nreq, &resend_linger, r_req_lru_item)  in __kick_osd_requests()
    1974  struct ceph_osd_request *req, *nreq;  in kick_requests() local
    2025  list_for_each_entry_safe(req, nreq, &osdc->req_linger,  in kick_requests()
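In the Ceph OSD client (as in the isp1760 and ceph mds_client hits below) nreq is not a count at all: it is the second cursor of list_for_each_entry_safe(), holding the next entry so the current one can be moved or freed mid-walk. A userspace sketch of why that saved-next cursor is needed, using plain pointers instead of the kernel's list_head machinery:

#include <stdio.h>
#include <stdlib.h>

/* Toy singly linked request list. */
struct request {
    int id;
    struct request *next;
};

/* Walk the list and unlink every request, using a saved-next cursor
 * (the role nreq plays in list_for_each_entry_safe) so freeing the
 * current entry never invalidates the iterator. */
static void requeue_all(struct request **head)
{
    struct request *req, *nreq;

    for (req = *head; req != NULL; req = nreq) {
        nreq = req->next;    /* remember the successor first */
        printf("resending request %d\n", req->id);
        free(req);           /* safe: we no longer touch req->next */
    }
    *head = NULL;
}

int main(void)
{
    struct request *head = NULL;
    int i;

    for (i = 3; i >= 1; i--) {   /* build 1 -> 2 -> 3 */
        struct request *r = malloc(sizeof(*r));

        r->id = i;
        r->next = head;
        head = r;
    }
    requeue_all(&head);
    return 0;
}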
/linux-4.4.14/drivers/usb/isp1760/
D | isp1760-udc.c |
    775  struct isp1760_request *req, *nreq;  in isp1760_ep_disable() local
    801  list_for_each_entry_safe(req, nreq, &req_list, queue) {  in isp1760_ep_disable()
/linux-4.4.14/drivers/net/ethernet/neterion/vxge/
D | vxge-config.c |
    2336  u32 nreq = 0, i;  in __vxge_hw_blockpool_blocks_add() local
    2340  nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;  in __vxge_hw_blockpool_blocks_add()
    2341  blockpool->req_out += nreq;  in __vxge_hw_blockpool_blocks_add()
    2344  for (i = 0; i < nreq; i++)  in __vxge_hw_blockpool_blocks_add()
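__vxge_hw_blockpool_blocks_add() refills a DMA block pool in fixed increments: nreq is set to the refill quantum (VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE), added to blockpool->req_out before any allocation happens, and then that many blocks are requested in a loop. A small model of that refill accounting (the pool structure and the threshold check are invented for the example):

#include <stdio.h>

#define REFILL_INCREMENT 4   /* illustrative; vxge uses VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE */

struct block_pool {
    unsigned int free_blocks;   /* blocks ready to hand out       */
    unsigned int req_out;       /* blocks requested but not added */
    unsigned int min_threshold;
};

static void request_block_async(struct block_pool *pool)
{
    /* In the driver this kicks off a DMA-able allocation; here the
     * "completion" is immediate. */
    pool->free_blocks++;
    pool->req_out--;
}

static void blockpool_blocks_add(struct block_pool *pool)
{
    unsigned int nreq = 0, i;

    if (pool->free_blocks < pool->min_threshold)
        nreq = REFILL_INCREMENT;

    /* Record the whole batch up front, mirroring
     * blockpool->req_out += nreq in the driver. */
    pool->req_out += nreq;

    for (i = 0; i < nreq; i++)
        request_block_async(pool);
}

int main(void)
{
    struct block_pool pool = { .free_blocks = 1, .req_out = 0,
                               .min_threshold = 3 };

    blockpool_blocks_add(&pool);
    printf("free=%u outstanding=%u\n", pool.free_blocks, pool.req_out);
    return 0;
}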
/linux-4.4.14/fs/ceph/
D | mds_client.c |
    2740  struct ceph_mds_request *req, *nreq;  in replay_unsafe_requests() local
    2747  list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {  in replay_unsafe_requests()