/linux-4.4.14/Documentation/filesystems/caching/

  fscache.txt
    229: nul=N  Number of acq reqs given a NULL parent
    230: noc=N  Number of acq reqs rejected due to no cache available
    231: ok=N   Number of acq reqs succeeded
    232: nbf=N  Number of acq reqs rejected due to error
    233: oom=N  Number of acq reqs failed on ENOMEM
    240: nul=N  Number of upd reqs given a NULL parent
    241: run=N  Number of upd reqs granted CPU time
    243: nul=N  Number of rlq reqs given a NULL parent
    244: wcr=N  Number of rlq reqs waited on completion of creation
    251: ok=N   Number of successful alloc reqs
    [additional matches not shown]

/linux-4.4.14/net/sunrpc/xprtrdma/

  backchannel.c
    116: int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)    in xprt_rdma_bc_setup() (argument)
    133: if (reqs > RPCRDMA_BACKWARD_WRS >> 1)    in xprt_rdma_bc_setup()
    136: for (i = 0; i < (reqs << 1); i++) {    in xprt_rdma_bc_setup()
    156: rc = rpcrdma_bc_setup_reps(r_xprt, reqs);    in xprt_rdma_bc_setup()
    160: rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);    in xprt_rdma_bc_setup()
    164: buffer->rb_bc_srv_max_requests = reqs;    in xprt_rdma_bc_setup()
    170: xprt_rdma_bc_destroy(xprt, reqs);    in xprt_rdma_bc_setup()
    247: void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)    in xprt_rdma_bc_destroy() (argument)

/linux-4.4.14/include/linux/

  pci-ats.h
    8: int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
    14: static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs)    in pci_enable_pri() (argument)

/linux-4.4.14/drivers/pci/

  ats.c
    150: int pci_enable_pri(struct pci_dev *pdev, u32 reqs)    in pci_enable_pri() (argument)
    167: reqs = min(max_requests, reqs);    in pci_enable_pri()
    168: pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);    in pci_enable_pri()

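The ats.c matches above show pci_enable_pri() clamping the caller's request count to the device's advertised maximum before programming the PCI_PRI_ALLOC_REQ register. Below is a minimal userspace C sketch of that clamp-then-program pattern; the config-space write is replaced by a printf, and max_requests stands in for the value the driver reads from the PRI capability.

  #include <stdio.h>

  /*
   * Simplified analogue of the clamp in pci_enable_pri(): the caller asks for
   * `reqs` outstanding page requests, but the value actually programmed is
   * capped at the maximum the device advertises.
   */
  static void enable_pri_sketch(unsigned int reqs, unsigned int max_requests)
  {
      unsigned int alloc = reqs < max_requests ? reqs : max_requests;

      /* Stands in for pci_write_config_dword(..., pos + PCI_PRI_ALLOC_REQ, alloc). */
      printf("PRI allocation requests: asked for %u, programmed %u\n", reqs, alloc);
  }

  int main(void)
  {
      enable_pri_sketch(32, 16);   /* more than the device supports: clamped */
      enable_pri_sketch(1, 16);    /* fewer: taken as-is */
      return 0;
  }

The amd_iommu.c entry at the end of this listing is the in-tree caller visible here: pdev_iommuv2_enable() passes either 32 or 1 as the request budget.
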
/linux-4.4.14/drivers/net/wireless/zd1211rw/

  zd_chip.c
    381: struct zd_ioreq32 reqs[2] = {in_reqs[0], in_reqs[1]};    in zd_write_mac_addr_common() (local)
    384: reqs[0].value = (mac_addr[3] << 24)    in zd_write_mac_addr_common()
    388: reqs[1].value = (mac_addr[5] << 8)    in zd_write_mac_addr_common()
    396: r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));    in zd_write_mac_addr_common()
    406: static const struct zd_ioreq32 reqs[2] = {    in zd_write_mac_addr() (local)
    411: return zd_write_mac_addr_common(chip, mac_addr, reqs, "mac");    in zd_write_mac_addr()
    416: static const struct zd_ioreq32 reqs[2] = {    in zd_write_bssid() (local)
    421: return zd_write_mac_addr_common(chip, bssid, reqs, "bssid");    in zd_write_bssid()
    873: struct zd_ioreq32 reqs[3];    in set_aw_pt_bi() (local)
    883: reqs[0].addr = CR_ATIM_WND_PERIOD;    in set_aw_pt_bi()
    [additional matches not shown]

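zd_write_mac_addr_common() above packs the six MAC address bytes into two 32-bit register writes. The sketch below reproduces that packing in plain C; only the shifts for mac_addr[3] and mac_addr[5] are visible in the listing, so the remaining byte positions are an assumption that simply follows the same little-endian pattern.

  #include <stdio.h>
  #include <stdint.h>

  /* Two 32-bit I/O requests, loosely modelled on struct zd_ioreq32 (the
   * register address field is omitted). */
  struct ioreq32_sketch {
      uint32_t value;
  };

  /*
   * Pack a 6-byte MAC address into two 32-bit register values.  The shifts for
   * mac[3] and mac[5] match the lines shown above; the other byte positions
   * are assumed to follow the same layout.
   */
  static void pack_mac(const uint8_t mac[6], struct ioreq32_sketch reqs[2])
  {
      reqs[0].value = ((uint32_t)mac[3] << 24) | ((uint32_t)mac[2] << 16) |
                      ((uint32_t)mac[1] << 8)  |  (uint32_t)mac[0];
      reqs[1].value = ((uint32_t)mac[5] << 8)  |  (uint32_t)mac[4];
  }

  int main(void)
  {
      const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
      struct ioreq32_sketch reqs[2];

      pack_mac(mac, reqs);
      printf("reg0=0x%08x reg1=0x%08x\n",
             (unsigned int)reqs[0].value, (unsigned int)reqs[1].value);
      return 0;
  }
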
/linux-4.4.14/drivers/lightnvm/

  rrpc.h
    39: struct list_head reqs;    (member)
    177: list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {    in __rrpc_lock_laddr()
    188: list_add_tail(&r->list, &rrpc->inflights.reqs);    in __rrpc_lock_laddr()

  rrpc.c
    1070: INIT_LIST_HEAD(&rrpc->inflights.reqs);    in rrpc_core_init()

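The rrpc entries show an inflights.reqs list that is scanned before a new request is admitted and appended to once its logical address range is free. The sketch below is a simplified, lock-free userspace analogue of that idea; the overlap test, the singly linked list, and the field names are illustrative, since only the list walk and the list_add_tail() call are visible above.

  #include <stdio.h>
  #include <stdbool.h>

  /*
   * Simplified stand-in for rrpc's inflight tracking: before a request is
   * issued, the in-flight list is scanned for an overlapping logical address
   * range; if none is found the request is recorded.  The kernel version uses
   * list_head plus a spinlock; both are omitted, and the new entry is
   * prepended rather than appended.
   */
  struct inflight_req {
      unsigned long laddr;
      unsigned long pages;
      struct inflight_req *next;
  };

  static struct inflight_req *inflights;    /* head of the in-flight list */

  static bool ranges_overlap(unsigned long a, unsigned long a_len,
                             unsigned long b, unsigned long b_len)
  {
      return a < b + b_len && b < a + a_len;
  }

  /* Returns true and records the request if its range is free, false if an
   * in-flight request already covers part of it (the caller would retry). */
  static bool try_lock_laddr(struct inflight_req *r)
  {
      for (struct inflight_req *t = inflights; t; t = t->next)
          if (ranges_overlap(t->laddr, t->pages, r->laddr, r->pages))
              return false;

      r->next = inflights;
      inflights = r;
      return true;
  }

  int main(void)
  {
      struct inflight_req a = { .laddr = 0,  .pages = 8 };
      struct inflight_req b = { .laddr = 4,  .pages = 4 };   /* overlaps a */
      struct inflight_req c = { .laddr = 16, .pages = 4 };

      printf("a: %d  b: %d  c: %d\n",
             try_lock_laddr(&a), try_lock_laddr(&b), try_lock_laddr(&c));
      return 0;
  }
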
/linux-4.4.14/fs/

  aio.c
    89: struct percpu_ref reqs;    (member)
    579: percpu_ref_exit(&ctx->reqs);    in free_ioctx()
    586: struct kioctx *ctx = container_of(ref, struct kioctx, reqs);    in free_ioctx_reqs()
    618: percpu_ref_kill(&ctx->reqs);    in free_ioctx_users()
    619: percpu_ref_put(&ctx->reqs);    in free_ioctx_users()
    736: if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))    in ioctx_alloc()
    764: percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */    in ioctx_alloc()
    787: percpu_ref_exit(&ctx->reqs);    in ioctx_alloc()
    1011: percpu_ref_get(&ctx->reqs);    in aio_get_req()
    1155: percpu_ref_put(&ctx->reqs);    in aio_complete()
    [additional matches not shown]

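The aio.c matches outline a reference-counting protocol around ctx->reqs: the context takes a reference at init, every request takes one in aio_get_req() and drops it in aio_complete(), and teardown kills the ref so a release callback runs once the count drains. The sketch below models that protocol with a plain integer instead of a percpu_ref; it is a simplified analogue, not the kernel API.

  #include <stdio.h>

  /*
   * Very simplified model of the protocol visible in aio.c: a "reqs" count
   * with a release callback, one reference per in-flight request, and a final
   * drop at teardown that lets the callback run once everything has drained.
   */
  struct ctx_sketch {
      int reqs;
      void (*release)(struct ctx_sketch *ctx);
  };

  static void ref_get(struct ctx_sketch *ctx)
  {
      ctx->reqs++;
  }

  static void ref_put(struct ctx_sketch *ctx)
  {
      if (--ctx->reqs == 0)
          ctx->release(ctx);          /* free_ioctx_reqs() in the kernel */
  }

  static void free_reqs(struct ctx_sketch *ctx)
  {
      printf("context %p: all requests drained, safe to free\n", (void *)ctx);
  }

  int main(void)
  {
      struct ctx_sketch ctx = { .reqs = 1, .release = free_reqs };

      ref_get(&ctx);    /* aio_get_req(): one request now in flight */
      ref_put(&ctx);    /* teardown drops the context's initial reference */
      ref_put(&ctx);    /* completing the last request fires the callback */
      return 0;
  }
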
/linux-4.4.14/drivers/staging/rdma/hfi1/

  user_sdma.h
    76: struct user_sdma_request *reqs;    (member)

  user_sdma.c
    379: memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size;    in hfi1_user_sdma_alloc_queues()
    380: pq->reqs = kmalloc(memsize, GFP_KERNEL);    in hfi1_user_sdma_alloc_queues()
    381: if (!pq->reqs)    in hfi1_user_sdma_alloc_queues()
    431: kfree(pq->reqs);    in hfi1_user_sdma_alloc_queues()
    458: if (pq->reqs) {    in hfi1_user_sdma_free_queues()
    461: struct user_sdma_request *req = &pq->reqs[j];    in hfi1_user_sdma_free_queues()
    469: kfree(pq->reqs);    in hfi1_user_sdma_free_queues()
    536: req = pq->reqs + info.comp_idx;    in hfi1_user_sdma_process_request()

  file_ops.c
    442: int ret = 0, done = 0, reqs = 0;    in hfi1_write_iter() (local)
    477: reqs++;    in hfi1_write_iter()
    480: return ret ? ret : reqs;    in hfi1_write_iter()

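The user_sdma.c matches above show a per-queue array of request slots sized by the completion ring and indexed directly by the completion index supplied with each submission. A minimal userspace C sketch of that allocate-then-index pattern follows; the ring size, the bounds check, and the field names are illustrative, not taken from the driver.

  #include <stdio.h>
  #include <stdlib.h>

  #define COMP_RING_SIZE 128   /* stand-in for hfi1_sdma_comp_ring_size */

  /* Simplified slot; the real struct user_sdma_request is much larger. */
  struct sdma_req_sketch {
      unsigned int in_use;
  };

  struct pq_sketch {
      struct sdma_req_sketch *reqs;
  };

  static int pq_alloc(struct pq_sketch *pq)
  {
      /* One slot per completion-ring entry, as in hfi1_user_sdma_alloc_queues(). */
      pq->reqs = calloc(COMP_RING_SIZE, sizeof(*pq->reqs));
      return pq->reqs ? 0 : -1;
  }

  /* Resolve a completion index to its request slot; the bounds check is an
   * assumption added for the sketch. */
  static struct sdma_req_sketch *pq_get_req(struct pq_sketch *pq, unsigned int comp_idx)
  {
      if (comp_idx >= COMP_RING_SIZE)
          return NULL;
      return pq->reqs + comp_idx;
  }

  int main(void)
  {
      struct pq_sketch pq;

      if (pq_alloc(&pq))
          return 1;
      if (pq_get_req(&pq, 5))
          printf("slot 5 resolved\n");
      free(pq.reqs);
      return 0;
  }
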
/linux-4.4.14/drivers/net/ethernet/intel/ixgbevf/

  mbx.c
    180: hw->mbx.stats.reqs++;    in ixgbevf_check_for_msg_vf()
    331: mbx->stats.reqs = 0;    in ixgbevf_init_mbx_params_vf()

  vf.h
    113: u32 reqs;    (member)

/linux-4.4.14/drivers/net/ethernet/intel/igbvf/

  mbx.c
    185: hw->mbx.stats.reqs++;    in e1000_check_for_msg_vf()
    344: mbx->stats.reqs = 0;    in e1000_init_mbx_params_vf()

  vf.h
    220: u32 reqs;    (member)

/linux-4.4.14/drivers/scsi/

  hptiop.c
    199: req = hba->reqs[tag >> 8].req_virt;    in hptiop_request_callback_mv()
    250: req = hba->reqs[(_tag >> 4) & 0xff].req_virt;    in hptiop_request_callback_mvfrey()
    737: scp = hba->reqs[tag].scp;    in hptiop_finish_scsi_req()
    782: free_req(hba, &hba->reqs[tag]);    in hptiop_finish_scsi_req()
    792: req = hba->reqs[tag].req_virt;    in hptiop_host_request_callback_itl()
    797: req = hba->reqs[tag].req_virt;    in hptiop_host_request_callback_itl()
    1462: hba->reqs[i].next = NULL;    in hptiop_probe()
    1463: hba->reqs[i].req_virt = start_virt;    in hptiop_probe()
    1464: hba->reqs[i].req_shifted_phy = start_phy >> 5;    in hptiop_probe()
    1465: hba->reqs[i].index = i;    in hptiop_probe()
    [additional matches not shown]

  virtio_scsi.c
    92: atomic_t reqs;    (member)
    212: atomic_dec(&tgt->reqs);    in virtscsi_complete_cmd()
    576: atomic_inc(&tgt->reqs);    in virtscsi_queuecommand_single()
    597: if (atomic_inc_return(&tgt->reqs) > 1) {    in virtscsi_pick_vq()
    609: if (unlikely(atomic_read(&tgt->reqs) > 1)) {    in virtscsi_pick_vq()
    746: atomic_set(&tgt->reqs, 0);    in virtscsi_target_alloc()

  hptiop.h
    327: struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];    (member)

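In the virtio_scsi.c matches above, tgt->reqs is an atomic count of the target's outstanding commands, and virtscsi_pick_vq() uses atomic_inc_return() to decide whether the target is idle enough to be moved to a different virtqueue. The single-threaded sketch below illustrates that decision with plain integers; the exact switching policy and the locking in the driver are more involved than shown here.

  #include <stdio.h>

  /*
   * Simplified stand-in for the per-target request counter in virtio_scsi: a
   * queue switch is only considered when the new command is the target's only
   * outstanding request; otherwise the current queue is kept so completions
   * stay on one virtqueue.  The kernel uses atomic_t; plain ints are used here.
   */
  struct target_sketch {
      int reqs;        /* outstanding commands for this target */
      int cur_vq;      /* virtqueue currently used by this target */
  };

  static int pick_vq(struct target_sketch *tgt, int preferred_vq)
  {
      if (++tgt->reqs > 1)
          return tgt->cur_vq;      /* other commands in flight: keep the queue */

      tgt->cur_vq = preferred_vq;  /* idle target: move to the preferred queue */
      return tgt->cur_vq;
  }

  static void complete_cmd(struct target_sketch *tgt)
  {
      tgt->reqs--;
  }

  int main(void)
  {
      struct target_sketch tgt = { .reqs = 0, .cur_vq = 0 };

      printf("vq=%d\n", pick_vq(&tgt, 2));   /* idle: switches to 2 */
      printf("vq=%d\n", pick_vq(&tgt, 3));   /* busy: stays on 2 */
      complete_cmd(&tgt);
      complete_cmd(&tgt);
      return 0;
  }
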
/linux-4.4.14/sound/pci/hda/

  patch_ca0132.c
    142: int reqs[EFFECT_VALS_MAX_COUNT]; /*effect module request*/    (member)
    156: .reqs = {0, 1},
    164: .reqs = {7, 8},
    172: .reqs = {2, 3},
    180: .reqs = {4, 5, 6},
    188: .reqs = {24, 23, 25},
    196: .reqs = {9, 10, 11, 12, 13, 14,
    207: .reqs = {0, 1, 2, 3},
    215: .reqs = {6, 7, 8, 9},
    223: .reqs = {44, 45},
    [additional matches not shown]

/linux-4.4.14/net/9p/

  client.c
    251: c->reqs[row] = kcalloc(P9_ROW_MAXTAG,    in p9_tag_alloc()
    254: if (!c->reqs[row]) {    in p9_tag_alloc()
    260: c->reqs[row][col].status = REQ_STATUS_IDLE;    in p9_tag_alloc()
    261: c->reqs[row][col].tc = NULL;    in p9_tag_alloc()
    270: req = &c->reqs[row][col];    in p9_tag_alloc()
    324: return &c->reqs[row][col];    in p9_tag_lookup()
    369: if (c->reqs[row][col].status != REQ_STATUS_IDLE) {    in p9_tag_cleanup()
    387: kfree(c->reqs[row][col].wq);    in p9_tag_cleanup()
    388: kfree(c->reqs[row][col].tc);    in p9_tag_cleanup()
    389: kfree(c->reqs[row][col].rc);    in p9_tag_cleanup()
    [additional matches not shown]

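The client.c matches index a two-level request table as c->reqs[row][col], with rows of P9_ROW_MAXTAG entries allocated lazily in p9_tag_alloc(). The sketch below shows such a lazily allocated row/column table; the division/modulo mapping from tag to (row, col) and the row count are assumptions, since the listing only shows the indexing, not how row and col are derived.

  #include <stdio.h>
  #include <stdlib.h>

  /*
   * Sketch of a two-level tag table: requests live in lazily allocated rows of
   * fixed size and a tag resolves to a (row, col) pair.  ROW_MAXTAG, ROWS, and
   * the division/modulo split are illustrative values.
   */
  #define ROW_MAXTAG 255
  #define ROWS       16

  struct req_sketch {
      int status;
  };

  struct client_sketch {
      struct req_sketch *reqs[ROWS];   /* rows, allocated on first use */
  };

  static struct req_sketch *tag_lookup(struct client_sketch *c, int tag)
  {
      int row = tag / ROW_MAXTAG;
      int col = tag % ROW_MAXTAG;

      if (row < 0 || row >= ROWS)
          return NULL;
      if (!c->reqs[row]) {
          c->reqs[row] = calloc(ROW_MAXTAG, sizeof(struct req_sketch));
          if (!c->reqs[row])
              return NULL;
      }
      return &c->reqs[row][col];
  }

  int main(void)
  {
      struct client_sketch c = { { NULL } };
      struct req_sketch *req = tag_lookup(&c, 300);

      if (req)
          printf("tag 300 -> row %d, col %d\n", 300 / ROW_MAXTAG, 300 % ROW_MAXTAG);
      for (int i = 0; i < ROWS; i++)
          free(c.reqs[i]);
      return 0;
  }
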
/linux-4.4.14/drivers/net/ethernet/intel/igb/

  e1000_mbx.c
    268: hw->mbx.stats.reqs++;    in igb_check_for_msg_pf()
    437: mbx->stats.reqs = 0;    in igb_init_mbx_params_pf()

  e1000_hw.h
    507: u32 reqs;    (member)

/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/

  ixgbe_mbx.c
    262: hw->mbx.stats.reqs++;    in ixgbe_check_for_msg_pf()
    441: mbx->stats.reqs = 0;    in ixgbe_init_mbx_params_pf()

  ixgbe_type.h
    3427: u32 reqs;    (member)

/linux-4.4.14/drivers/mmc/card/

  block.c
    1652: u8 reqs = 0;    in mmc_blk_prep_packed_list() (local)
    1689: if (reqs >= max_packed_rw - 1) {    in mmc_blk_prep_packed_list()
    1727: reqs++;    in mmc_blk_prep_packed_list()
    1736: if (reqs > 0) {    in mmc_blk_prep_packed_list()
    1738: mqrq->packed->nr_entries = ++reqs;    in mmc_blk_prep_packed_list()
    1739: mqrq->packed->retries = reqs;    in mmc_blk_prep_packed_list()
    1740: return reqs;    in mmc_blk_prep_packed_list()
    1944: u8 reqs = 0;    in mmc_blk_issue_rw_rq() (local)
    1950: reqs = mmc_blk_prep_packed_list(mq, rqc);    in mmc_blk_issue_rw_rq()
    1966: if (reqs >= packed_nr)    in mmc_blk_issue_rw_rq()

/linux-4.4.14/fs/nfs/

  direct.c
    654: LIST_HEAD(reqs);    in nfs_direct_write_reschedule()
    660: nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);    in nfs_direct_write_reschedule()
    671: req = nfs_list_entry(reqs.next);    in nfs_direct_write_reschedule()
    674: list_for_each_entry_safe(req, tmp, &reqs, wb_list) {    in nfs_direct_write_reschedule()

/linux-4.4.14/include/net/9p/

  client.h
    164: struct p9_req_t *reqs[P9_ROW_MAXTAG];    (member)

/linux-4.4.14/drivers/net/wireless/brcm80211/brcmfmac/

  usb.c
    403: struct brcmf_usbreq *req, *reqs;    in brcmf_usbdev_qinit() (local)
    405: reqs = kcalloc(qsize, sizeof(struct brcmf_usbreq), GFP_ATOMIC);    in brcmf_usbdev_qinit()
    406: if (reqs == NULL)    in brcmf_usbdev_qinit()
    409: req = reqs;    in brcmf_usbdev_qinit()
    420: return reqs;    in brcmf_usbdev_qinit()

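brcmf_usbdev_qinit() above allocates qsize request structures with a single kcalloc() and walks the block to initialize each entry, returning the base pointer. The sketch below mirrors that allocate-once, initialize-in-a-loop pattern in userspace C; the free-list linking and the id field are illustrative stand-ins for the URB setup done by the real driver.

  #include <stdio.h>
  #include <stdlib.h>

  /* Simplified request: the real struct brcmf_usbreq carries a URB, buffers,
   * and list linkage. */
  struct usbreq_sketch {
      int id;
      struct usbreq_sketch *next;   /* free-list link (list_head in the kernel) */
  };

  /* Allocate qsize requests in one block and put each on a free list; the
   * base pointer is returned so the whole block can be freed at teardown. */
  static struct usbreq_sketch *qinit(int qsize, struct usbreq_sketch **free_list)
  {
      struct usbreq_sketch *reqs = calloc(qsize, sizeof(*reqs));

      if (!reqs)
          return NULL;
      for (int i = 0; i < qsize; i++) {
          reqs[i].id = i;
          reqs[i].next = *free_list;
          *free_list = &reqs[i];
      }
      return reqs;
  }

  int main(void)
  {
      struct usbreq_sketch *free_list = NULL;
      struct usbreq_sketch *reqs = qinit(4, &free_list);

      if (!reqs)
          return 1;
      for (struct usbreq_sketch *r = free_list; r; r = r->next)
          printf("free req %d\n", r->id);
      free(reqs);
      return 0;
  }
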
/linux-4.4.14/drivers/scsi/isci/

  host.c
    260: struct isci_request *ireq = ihost->reqs[index];    in sci_controller_task_completion()
    283: ireq = ihost->reqs[index];    in sci_controller_sdma_completion()
    403: ireq = ihost->reqs[index];    in sci_controller_event_completion()
    411: ireq = ihost->reqs[index];    in sci_controller_event_completion()
    2292: ihost->reqs[i] = ireq;    in sci_controller_dma_alloc()
    2468: struct isci_request *ireq = ihost->reqs[task_index];    in sci_request_by_tag()

  host.h
    208: struct isci_request *reqs[SCI_MAX_IO_REQUESTS];    (member)

  remote_device.c
    130: struct isci_request *ireq = ihost->reqs[i];    in sci_remote_device_terminate_reqs_checkabort()

  request.c
    3405: ireq = ihost->reqs[ISCI_TAG_TCI(tag)];    in isci_request_from_tag()

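The isci entries resolve a request by using a tag-derived index (ISCI_TAG_TCI(tag)) into the fixed-size ihost->reqs[] table. The sketch below shows that style of tag-to-slot lookup; the tag layout (low byte as the index) and the table size are assumptions chosen only to make the example self-contained.

  #include <stdio.h>
  #include <stdint.h>

  #define MAX_IO_REQUESTS 256            /* stand-in for SCI_MAX_IO_REQUESTS */
  #define TAG_TCI(tag)    ((tag) & 0xff) /* hypothetical layout: low byte = slot */

  struct io_request_sketch {
      int active;
  };

  /* Fixed table of request slots, as in struct isci_host::reqs[]. */
  static struct io_request_sketch reqs[MAX_IO_REQUESTS];

  static struct io_request_sketch *request_from_tag(uint16_t tag)
  {
      return &reqs[TAG_TCI(tag)];
  }

  int main(void)
  {
      struct io_request_sketch *r = request_from_tag(0x1234);

      r->active = 1;
      printf("tag 0x1234 -> slot %u, active=%d\n",
             (unsigned int)TAG_TCI(0x1234), r->active);
      return 0;
  }
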
/linux-4.4.14/fs/fuse/

  fuse_i.h
    250: unsigned reqs;    (member)

  file.c
    574: left = --io->reqs;    in fuse_aio_complete()
    624: io->reqs++;    in fuse_async_req_send()
    2818: io->reqs = 1;    in fuse_direct_IO()

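The file.c matches sketch out how fuse tracks an async direct I/O: io->reqs starts at 1 for the submitter itself, each request sent adds one, and each completion subtracts one, with the whole I/O finishing once the count reaches zero. The following sketch models that counting scheme without the locking the kernel uses around it.

  #include <stdio.h>
  #include <stdbool.h>

  /* Counting scheme from fuse's async direct I/O: 1 for the submitter, plus
   * one per outstanding request; done when the count drains to zero. */
  struct fuse_io_sketch {
      unsigned int reqs;
  };

  static void io_start(struct fuse_io_sketch *io)
  {
      io->reqs = 1;                 /* the submitter's own reference */
  }

  static void io_send_req(struct fuse_io_sketch *io)
  {
      io->reqs++;                   /* one more request in flight */
  }

  /* Returns true when this was the last outstanding reference. */
  static bool io_complete_one(struct fuse_io_sketch *io)
  {
      return --io->reqs == 0;
  }

  int main(void)
  {
      struct fuse_io_sketch io;

      io_start(&io);
      io_send_req(&io);
      io_send_req(&io);

      io_complete_one(&io);         /* first request completes */
      io_complete_one(&io);         /* second request completes */
      if (io_complete_one(&io))     /* submitter drops its own reference */
          printf("all requests done, complete the AIO\n");
      return 0;
  }
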
/linux-4.4.14/drivers/iommu/

  amd_iommu.c
    2073: int reqs, ret;    in pdev_iommuv2_enable() (local)
    2076: reqs = 32;    in pdev_iommuv2_enable()
    2078: reqs = 1;    in pdev_iommuv2_enable()
    2092: ret = pci_enable_pri(pdev, reqs);    in pdev_iommuv2_enable()