/linux-4.4.14/include/linux/

pci-ats.h
      8  int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
     14  static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs)

nfs_page.h
     51  struct nfs_page *wb_this_page;   /* list of reqs for this page */

nfs4.h
    225  NFS4ERR_BACK_CHAN_BUSY = 10057,  /* backchan reqs outstanding */

/linux-4.4.14/net/sunrpc/xprtrdma/

backchannel.c
    112   * @reqs: number of concurrent incoming requests to expect
    116  int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
    133      if (reqs > RPCRDMA_BACKWARD_WRS >> 1)
    136      for (i = 0; i < (reqs << 1); i++) {
    156      rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
    160      rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);
    164      buffer->rb_bc_srv_max_requests = reqs;
    170      xprt_rdma_bc_destroy(xprt, reqs);
    245   * @reqs: number of incoming requests to destroy; ignored
    247  void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)

/linux-4.4.14/drivers/net/wireless/zd1211rw/

zd_chip.c
    381      struct zd_ioreq32 reqs[2] = {in_reqs[0], in_reqs[1]};
    384      reqs[0].value = (mac_addr[3] << 24)
    388      reqs[1].value = (mac_addr[5] << 8)
    396      r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
    406      static const struct zd_ioreq32 reqs[2] = {
    411      return zd_write_mac_addr_common(chip, mac_addr, reqs, "mac");
    416      static const struct zd_ioreq32 reqs[2] = {
    421      return zd_write_mac_addr_common(chip, bssid, reqs, "bssid");
    873      struct zd_ioreq32 reqs[3];
    883      reqs[0].addr = CR_ATIM_WND_PERIOD;
    884      reqs[0].value = s->atim_wnd_period;
    885      reqs[1].addr = CR_PRE_TBTT;
    886      reqs[1].value = s->pre_tbtt;
    887      reqs[2].addr = CR_BCN_INTERVAL;
    888      reqs[2].value = (s->beacon_interval & ~0xffff) | b_interval;
    890      return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
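
Every zd_chip.c hit above follows one pattern: fill an array of struct zd_ioreq32 {addr, value} pairs, then push the whole batch through zd_iowrite32a_locked() so the writes go out as one locked transaction. A minimal sketch of that pattern, assuming the caller already holds the chip mutex; CR_EXAMPLE_A and CR_EXAMPLE_B are placeholder register names, not real zd1211 registers:

    static int example_write_pair(struct zd_chip *chip, u32 a, u32 b)
    {
        struct zd_ioreq32 reqs[2];

        reqs[0].addr  = CR_EXAMPLE_A;   /* placeholder register */
        reqs[0].value = a;
        reqs[1].addr  = CR_EXAMPLE_B;   /* placeholder register */
        reqs[1].value = b;

        /* one locked call issues the whole batch, as set_aw_pt_bi() does */
        return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
    }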

/linux-4.4.14/drivers/pci/

ats.c
    150  int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
    167      reqs = min(max_requests, reqs);
    168      pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
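
pci_enable_pri() clamps the caller's requested allocation against the device's advertised maximum (PCI_PRI_MAX_REQ, see pci_regs.h below) before writing PCI_PRI_ALLOC_REQ. A hedged sketch of a typical caller, mirroring pdev_iommuv2_enable() in drivers/iommu/amd_iommu.c further down this listing:

    static int example_enable_pri(struct pci_dev *pdev)
    {
        /* ask for 32 outstanding page requests; the core clamps
         * this to what the device reports in PCI_PRI_MAX_REQ */
        u32 reqs = 32;

        return pci_enable_pri(pdev, reqs);
    }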

/linux-4.4.14/fs/nfsd/

stats.h
     15  unsigned int rcnocache;  /* uncached reqs */

/linux-4.4.14/drivers/lightnvm/

rrpc.h
     39  struct list_head reqs;   /* member of struct rrpc_inflight */
    177      list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
    188      list_add_tail(&r->list, &rrpc->inflights.reqs);

rrpc.c
   1070      INIT_LIST_HEAD(&rrpc->inflights.reqs);
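
rrpc tracks in-flight requests on a bare list_head: rrpc_core_init() initializes inflights.reqs once, and requests are then appended with list_add_tail() and scanned with list_for_each_entry(). A minimal sketch of the add side; the locking shown is illustrative of what the add/scan paths need, not a copy of the rrpc originals:

    static void example_track_inflight(struct rrpc *rrpc,
                                       struct rrpc_inflight_rq *r)
    {
        /* serialize against list_for_each_entry() walkers */
        spin_lock_irq(&rrpc->inflights.lock);
        list_add_tail(&r->list, &rrpc->inflights.reqs);
        spin_unlock_irq(&rrpc->inflights.lock);
    }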

/linux-4.4.14/include/net/

request_sock.h
    134   * max_qlen - max TFO reqs allowed before TFO is disabled.
    141   * to 0 implying no more outstanding TFO reqs. One solution is to keep
    153      int qlen;  /* # of pending (TCP_SYN_RECV) reqs */

/linux-4.4.14/drivers/staging/lustre/lustre/include/

lustre_import.h
     75  __u32 paa_count;        /** the total count of reqs */
     76  time64_t paa_deadline;  /** the earliest deadline of reqs */
     77  __u32 *paa_reqs_count;  /** the count of reqs in each entry */

lustre_net.h
   1971  /** # hp per lp reqs to handle */
   2061  /** # incoming reqs */
   2067  /** incoming reqs */
   2069  /** timeout before re-posting reqs, in tick */
   2093  /** # reqs in either of the NRS heads below */
   2094  /** # reqs being served */
   2116  /** reqs waiting for replies */

lustre_sec.h
    498  struct list_head cc_req_list;  /* waiting reqs linked here */

/linux-4.4.14/drivers/staging/rdma/hfi1/

user_sdma.h
     76  struct user_sdma_request *reqs;  /* member of struct hfi1_user_sdma_pkt_q */

user_sdma.c
    379      memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size;
    380      pq->reqs = kmalloc(memsize, GFP_KERNEL);
    381      if (!pq->reqs)
    431      kfree(pq->reqs);
    458      if (pq->reqs) {
    461          struct user_sdma_request *req = &pq->reqs[j];
    469      kfree(pq->reqs);
    536      req = pq->reqs + info.comp_idx;

file_ops.c
    442      int ret = 0, done = 0, reqs = 0;
    477          reqs++;
    480      return ret ? ret : reqs;

/linux-4.4.14/drivers/staging/rdma/amso1100/

c2_vq.h
     42  wait_queue_head_t wait_object;  /* wait object for vq reqs */

/linux-4.4.14/drivers/scsi/snic/

snic_io.h
     43  SNIC_REQ_TM_CACHE,  /* cache for task mgmt reqs contains

snic_io.c
    344   * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.

snic_main.c
    527      /* Configure Maximum Outstanding IO reqs */

snic_scsi.c
   2398   * snic_scsi_cleanup: Walks through tag map and releases the reqs

/linux-4.4.14/drivers/scsi/

virtio_scsi.c
     82   * Decrements of reqs are never concurrent with writes of req_vq: before the
     83   * decrement reqs will be != 0; after the decrement the virtqueue completion
     85   * Thus they can happen outside the tgt_seq, provided of course we make reqs
     92  atomic_t reqs;  /* member of struct virtio_scsi_target_state */
    212      atomic_dec(&tgt->reqs);
    576      atomic_inc(&tgt->reqs);
    597      if (atomic_inc_return(&tgt->reqs) > 1) {
    609      if (unlikely(atomic_read(&tgt->reqs) > 1)) {
    746      atomic_set(&tgt->reqs, 0);
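
The virtio_scsi.c comment above is an ordering argument: tgt->reqs is an atomic_t so completions can decrement it outside tgt_seq, while only the submit path that takes the count from 0 to 1 may retarget req_vq. A sketch of the two halves of that accounting; the helper names are illustrative:

    static bool example_submit(struct virtio_scsi_target_state *tgt)
    {
        /* true only for the first outstanding request; only then is
         * it safe to pick a new req_vq (done under tgt_seq) */
        return atomic_inc_return(&tgt->reqs) == 1;
    }

    static void example_complete(struct virtio_scsi_target_state *tgt)
    {
        /* completions only decrement, so they never race the
         * req_vq update that is gated on reqs being zero */
        atomic_dec(&tgt->reqs);
    }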

hptiop.c
    199      req = hba->reqs[tag >> 8].req_virt;
    250      req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
    737      scp = hba->reqs[tag].scp;
    782      free_req(hba, &hba->reqs[tag]);
    792      req = hba->reqs[tag].req_virt;
    797      req = hba->reqs[tag].req_virt;
   1462      hba->reqs[i].next = NULL;
   1463      hba->reqs[i].req_virt = start_virt;
   1464      hba->reqs[i].req_shifted_phy = start_phy >> 5;
   1465      hba->reqs[i].index = i;
   1466      free_req(hba, &hba->reqs[i]);
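
hptiop turns a completion tag straight into an index of the preallocated hba->reqs[] array (shifted or masked per interface flavor, e.g. tag >> 8 on MV); the isci driver below does the same with ihost->reqs[index]. A hedged sketch of the basic lookup; the bounds check is illustrative, the driver itself trusts firmware-supplied tags:

    static struct hptiop_request *
    example_req_from_tag(struct hptiop_hba *hba, u32 tag)
    {
        if (tag >= HPTIOP_MAX_REQUESTS)   /* illustrative check */
            return NULL;
        return &hba->reqs[tag];           /* tag is the array index */
    }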

qlogicfas408.h
     65  i.e. how many reqs can occur before an ack is given.

hptiop.h
    327  struct hptiop_request reqs[HPTIOP_MAX_REQUESTS];  /* member of struct hptiop_hba */

stex.c
    200  __le16 req_cnt;  /* count of reqs the buffer can hold */

scsi_transport_fc.c
   2498   * Must unblock to flush queued IO. scsi-ml will fail incoming reqs.

hpsa.c
   8158      /* Stop sending new RAID offload reqs via the IO accelerator */

/linux-4.4.14/fs/

aio.c
     89  struct percpu_ref reqs;  /* member of struct kioctx */
    579      percpu_ref_exit(&ctx->reqs);
    586      struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
    618      percpu_ref_kill(&ctx->reqs);
    619      percpu_ref_put(&ctx->reqs);
    736      if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
    764      percpu_ref_get(&ctx->reqs);  /* free_ioctx_users() will drop this */
    787      percpu_ref_exit(&ctx->reqs);
   1011      percpu_ref_get(&ctx->reqs);
   1155      percpu_ref_put(&ctx->reqs);
   1567      percpu_ref_put(&ctx->reqs);
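
fs/aio.c ties kioctx teardown to a percpu_ref: aio_get_req() takes a reference per request, aio_complete() drops it, and free_ioctx_users() kills the ref so the release callback runs once the last request finishes. A minimal sketch of the same lifecycle; my_ctx and its helpers are illustrative names, and the deferred free mirrors aio's use of deferred work since the release callback may run in atomic context:

    struct my_ctx {
        struct percpu_ref reqs;
        struct work_struct free_work;
    };

    static void my_free_work(struct work_struct *work)
    {
        struct my_ctx *ctx = container_of(work, struct my_ctx, free_work);

        percpu_ref_exit(&ctx->reqs);      /* all requests finished */
        kfree(ctx);
    }

    static void my_release(struct percpu_ref *ref)
    {
        struct my_ctx *ctx = container_of(ref, struct my_ctx, reqs);

        schedule_work(&ctx->free_work);   /* defer the real free */
    }

    static int my_ctx_setup(struct my_ctx *ctx)
    {
        INIT_WORK(&ctx->free_work, my_free_work);
        /* per request: percpu_ref_get() on issue, percpu_ref_put() on
         * completion; percpu_ref_kill() starts teardown */
        return percpu_ref_init(&ctx->reqs, my_release, 0, GFP_KERNEL);
    }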

/linux-4.4.14/drivers/net/ethernet/intel/ixgbevf/

vf.h
    113  u32 reqs;  /* member of struct ixgbe_mbx_stats */

mbx.c
    180      hw->mbx.stats.reqs++;
    331      mbx->stats.reqs = 0;

/linux-4.4.14/net/9p/

client.c
    251      c->reqs[row] = kcalloc(P9_ROW_MAXTAG,
    254      if (!c->reqs[row]) {
    260      c->reqs[row][col].status = REQ_STATUS_IDLE;
    261      c->reqs[row][col].tc = NULL;
    270      req = &c->reqs[row][col];
    324      return &c->reqs[row][col];
    369      if (c->reqs[row][col].status != REQ_STATUS_IDLE) {
    387      kfree(c->reqs[row][col].wq);
    388      kfree(c->reqs[row][col].tc);
    389      kfree(c->reqs[row][col].rc);
    391      kfree(c->reqs[row]);
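
net/9p keeps requests in a lazily grown two-dimensional table: a 16-bit tag selects row = tag / P9_ROW_MAXTAG and col = tag % P9_ROW_MAXTAG, and p9_tag_alloc() populates an entire row of P9_ROW_MAXTAG slots with one kcalloc() when a tag first lands in it. A hedged sketch of the lookup arithmetic; the guard conditions are a simplification of what p9_tag_lookup() validates:

    static struct p9_req_t *example_tag_lookup(struct p9_client *c, u16 tag)
    {
        int row = tag / P9_ROW_MAXTAG;
        int col = tag % P9_ROW_MAXTAG;

        if (row >= P9_ROW_MAXTAG || !c->reqs[row])
            return NULL;              /* row never allocated */
        return &c->reqs[row][col];
    }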

/linux-4.4.14/drivers/net/ethernet/intel/igbvf/

mbx.c
    185      hw->mbx.stats.reqs++;
    344      mbx->stats.reqs = 0;

vf.h
    220  u32 reqs;  /* member of struct e1000_mbx_stats */

/linux-4.4.14/include/net/9p/

client.h
    134   * @reqs - 2D array of requests
    164  struct p9_req_t *reqs[P9_ROW_MAXTAG];  /* member of struct p9_client */

/linux-4.4.14/drivers/net/wireless/brcm80211/brcmfmac/

usb.c
    403      struct brcmf_usbreq *req, *reqs;
    405      reqs = kcalloc(qsize, sizeof(struct brcmf_usbreq), GFP_ATOMIC);
    406      if (reqs == NULL)
    409      req = reqs;
    420      return reqs;

p2p.c
   1883      /* Filter any P2P probe reqs arriving during the GO-NEG Phase */

/linux-4.4.14/sound/pci/hda/

patch_ca0132.c
    142  int reqs[EFFECT_VALS_MAX_COUNT];  /* effect module request; member of struct ct_effect */
    156      .reqs = {0, 1},
    164      .reqs = {7, 8},
    172      .reqs = {2, 3},
    180      .reqs = {4, 5, 6},
    188      .reqs = {24, 23, 25},
    196      .reqs = {9, 10, 11, 12, 13, 14,
    207      .reqs = {0, 1, 2, 3},
    215      .reqs = {6, 7, 8, 9},
    223      .reqs = {44, 45},
    231      .reqs = {4, 5},
    239      .reqs = {10, 11, 12, 13, 14, 15, 16, 17, 18},
    386  int reqs[VOICEFX_MAX_PARAM_COUNT];  /* effect module request; member of struct ct_voicefx */
    398      .reqs = {10, 11, 12, 13, 14, 15, 16, 17, 18}
   3405          ca0132_voicefx.reqs[0], tmp);
   3447          ca0132_effects[idx].reqs[0], on);
   3649          ca0132_voicefx.reqs[i],
   4306          ca0132_effects[idx].reqs[i],
   4537      on = (unsigned int)ca0132_effects[i].reqs[0];

/linux-4.4.14/drivers/staging/lustre/lustre/fld/

fld_cache.c
    114      CDEBUG(D_INFO, "  Total reqs: %llu\n", cache->fci_stat.fst_count);
    115      CDEBUG(D_INFO, "  Cache reqs: %llu\n", cache->fci_stat.fst_cache);

/linux-4.4.14/drivers/net/ethernet/intel/igb/

e1000_mbx.c
    268      hw->mbx.stats.reqs++;
    437      mbx->stats.reqs = 0;

e1000_hw.h
    507  u32 reqs;  /* member of struct e1000_mbx_stats */

/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/

ixgbe_mbx.c
    262      hw->mbx.stats.reqs++;
    441      mbx->stats.reqs = 0;

ixgbe_type.h
   3427  u32 reqs;  /* member of struct ixgbe_mbx_stats */

/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/

lproc_ptlrpc.c
    216          svc_counter_config, "req_qdepth", "reqs");
    218          svc_counter_config, "req_active", "reqs");
    232          units = "reqs";
    742   * Since the service history is LRU (i.e. culled reqs will

events.c
    355      * drop incoming reqs since we set the portal lazy */

service.c
    767      /* remove rqbd's reqs from svc's req history while
    793       * now all reqs including the embedded req have been
    874       * Failing over, don't handle any more reqs, send
   1439   * Handle freshly incoming reqs, add to timed early reply list,
   2093      /* Process all incoming reqs before handling any */

client.c
   1949      /* A timeout expired. See which reqs it applies to... */
   2149       * -ETIMEDOUT => someone timed out. When all reqs have
   2833      * Last chance to free reqs left on the replay list, but we
   2834      * will still leak reqs that haven't committed.

niobuf.c
    326      /* Report service time estimate for future client reqs, but report 0

/linux-4.4.14/fs/nfs/

direct.c
    654      LIST_HEAD(reqs);
    660      nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
    671      req = nfs_list_entry(reqs.next);
    674      list_for_each_entry_safe(req, tmp, &reqs, wb_list) {

write.c
    309   * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
    971   * @cinfo: mds and ds lists of reqs ready to commit
   1305  /* If a nfs_flush_* function fails, it should remove reqs from @head and

pnfs_nfs.c
    137  /* Move reqs from written to committing lists, returning count

/linux-4.4.14/drivers/mmc/card/

block.c
   1652      u8 reqs = 0;
   1689      if (reqs >= max_packed_rw - 1) {
   1727      reqs++;
   1736      if (reqs > 0) {
   1738          mqrq->packed->nr_entries = ++reqs;
   1739          mqrq->packed->retries = reqs;
   1740          return reqs;
   1944      u8 reqs = 0;
   1950      reqs = mmc_blk_prep_packed_list(mq, rqc);
   1966      if (reqs >= packed_nr)

/linux-4.4.14/drivers/scsi/bfa/

bfa_defs_svc.h
     48  u16 num_ioim_reqs;   /* number of IO reqs */
     50  u16 num_fwtio_reqs;  /* number of TM IO reqs in FW */
     68  u16 num_tio_reqs;    /* number of TM IO reqs */
    437  u32 ic_reqs;         /* interrupt coalesce reqs */
    439  u32 set_intr_reqs;   /* set interrupt reqs */

bfi.h
     34  /* Get num dma reqs - that fit in a segment */

/linux-4.4.14/net/sunrpc/

backchannel_rqst.c
    211   * of reqs specified by the caller.

/linux-4.4.14/drivers/scsi/isci/

host.c
    260      struct isci_request *ireq = ihost->reqs[index];
    283      ireq = ihost->reqs[index];
    403      ireq = ihost->reqs[index];
    411      ireq = ihost->reqs[index];
   2292      ihost->reqs[i] = ireq;
   2468      struct isci_request *ireq = ihost->reqs[task_index];

host.h
    208  struct isci_request *reqs[SCI_MAX_IO_REQUESTS];  /* member of struct isci_host */

remote_device.c
    130      struct isci_request *ireq = ihost->reqs[i];

request.c
   3405      ireq = ihost->reqs[ISCI_TAG_TCI(tag)];

/linux-4.4.14/drivers/staging/lustre/lustre/mdc/

mdc_lib.c
    544      /* Empty waiting list? Decrease reqs in-flight number */

/linux-4.4.14/drivers/vhost/

scsi.c
     64      /* Refcount for the inflight reqs */
    177       * Reference counting for inflight reqs, used for flush operation. At
   1172       * when all the reqs are finished.
   1183      /* Wait for all reqs issued before the flush to be finished */

/linux-4.4.14/include/scsi/

libiscsi.h
    287      /* This tracks the reqs queued into the initiator */

/linux-4.4.14/drivers/staging/rdma/ehca/

ehca_reqs.c
    480      /* loop processes list of send reqs */
    527      /* loop processes list of recv reqs */

/linux-4.4.14/include/xen/interface/

xen.h
    131   * enum neg_errnoval HYPERVISOR_mmu_update(const struct mmu_update reqs[],
    134   * @reqs is an array of mmu_update_t structures ((ptr, val) pairs).
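
HYPERVISOR_mmu_update() consumes the whole reqs[] array in one hypercall; each element is a (ptr, val) pair whose low ptr bits select the command. A hedged sketch of a two-entry batch; the machine addresses are caller-supplied placeholders, not real mappings:

    static int example_mmu_batch(u64 mach_pte0, u64 mach_pte1, u64 val)
    {
        struct mmu_update reqs[2];
        int success;

        /* MMU_NORMAL_PT_UPDATE: val is the new PTE contents */
        reqs[0].ptr = mach_pte0 | MMU_NORMAL_PT_UPDATE;
        reqs[0].val = val;
        reqs[1].ptr = mach_pte1 | MMU_NORMAL_PT_UPDATE;
        reqs[1].val = val;

        return HYPERVISOR_mmu_update(reqs, ARRAY_SIZE(reqs),
                                     &success, DOMID_SELF);
    }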

/linux-4.4.14/drivers/scsi/csiostor/

csio_hw.h
    233  struct list_head mgmt_req_freelist;  /* Free pool of reqs */

csio_hw.c
   3716   * module for future use. Allocate and save off mgmt reqs in the

/linux-4.4.14/drivers/iommu/

amd_iommu.c
   2073      int reqs, ret;
   2076          reqs = 32;
   2078          reqs = 1;
   2092      ret = pci_enable_pri(pdev, reqs);

/linux-4.4.14/fs/fuse/

fuse_i.h
    250  unsigned reqs;  /* member of struct fuse_io_priv */

file.c
    574      left = --io->reqs;
    624      io->reqs++;
   2818      io->reqs = 1;
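
In fuse, io->reqs counts the submitter plus every async request still in flight: fuse_direct_IO() starts it at 1, fuse_async_req_send() increments it per request, and fuse_aio_complete() decrements and finishes the parent I/O when it reaches zero. A hedged sketch of the completion side; the helper name and the final action are illustrative:

    static void example_req_done(struct fuse_io_priv *io)
    {
        bool last;

        spin_lock(&io->lock);
        last = (--io->reqs == 0);   /* drop this request's count */
        spin_unlock(&io->lock);

        if (last)
            pr_debug("example: parent I/O complete\n");
    }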

/linux-4.4.14/include/uapi/linux/

pci_regs.h
    853  #define PCI_PRI_MAX_REQ    0x08  /* PRI max reqs supported */
    854  #define PCI_PRI_ALLOC_REQ  0x0c  /* PRI max reqs allowed */

/linux-4.4.14/drivers/dma/

pl330.c
    485      /* Holds list of reqs with due callbacks */
    516      /* Hook to attach to DMAC's list of reqs with due callback */
   2461      * Ideally we should look out for reqs bigger than

/linux-4.4.14/drivers/s390/scsi/

zfcp_fc.c
    194      /* wait 10 milliseconds, other reqs might pop in */

/linux-4.4.14/drivers/scsi/bnx2i/

bnx2i.h
    370   * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs

/linux-4.4.14/drivers/scsi/fnic/

fnic_main.c
    691      /* Configure Maximum Outstanding IO reqs */

/linux-4.4.14/arch/mips/include/asm/sn/sn0/

hubio.h
    596      pnd_req: 1;  /* reqs not issued due to IOQ full */

/linux-4.4.14/net/ipv4/

inet_connection_sock.c
    867      /* Free all the reqs queued in rskq_rst_head. */

/linux-4.4.14/net/rds/

iw_send.c
    647      /* if there's data reference it with a chain of work reqs */

/linux-4.4.14/drivers/usb/gadget/udc/

pxa27x_udc.c
    194      seq_printf(s, "%-12s: IN %lu(%lu reqs), OUT %lu(%lu reqs), irqs=%lu, udccr=0x%08x, udccsr=0x%03x, udcbcr=%d\n",

/linux-4.4.14/drivers/gpu/drm/radeon/

sumo_dpm.c
   1095      u32 min_sclk = pi->sys_info.min_sclk;  /* XXX check against disp reqs */

trinity_dpm.c
   1541      u32 min_sclk = pi->sys_info.min_sclk;  /* XXX check against disp reqs */

/linux-4.4.14/drivers/staging/lustre/lustre/ldlm/

ldlm_request.c
    973      * reqs have no reference to the OBD export and thus access to

/linux-4.4.14/fs/cifs/

cifsglob.h
    485      bool mand_lock:1;  /* send mandatory not posix byte range lock reqs */

/linux-4.4.14/drivers/gpu/drm/i915/

intel_lrc.c
    589          "More than 2 already-submitted reqs queued\n");

i915_reg.h
   2753  #define RSCRP (1<<18)  /* rs requests control on rs1/2 reqs */

/linux-4.4.14/drivers/block/

skd_main.c
   3544      pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",

/linux-4.4.14/fs/ext4/

ext4.h
   1386  atomic_t s_bal_reqs;  /* number of reqs with len > 1 */

mballoc.c
   2738      "mballoc: %u blocks %u reqs (%u success)",