Searched refs:reqs (Results 1 - 87 of 87) sorted by relevance

/linux-4.4.14/include/linux/
pci-ats.h:8 int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
14 static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs) pci_enable_pri() argument
nfs_page.h:51 struct nfs_page *wb_this_page; /* list of reqs for this page */
nfs4.h:225 NFS4ERR_BACK_CHAN_BUSY = 10057, /* backchan reqs outstanding */
/linux-4.4.14/net/sunrpc/xprtrdma/
backchannel.c:112 * @reqs: number of concurrent incoming requests to expect
116 int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs) xprt_rdma_bc_setup() argument
133 if (reqs > RPCRDMA_BACKWARD_WRS >> 1) xprt_rdma_bc_setup()
136 for (i = 0; i < (reqs << 1); i++) { xprt_rdma_bc_setup()
156 rc = rpcrdma_bc_setup_reps(r_xprt, reqs); xprt_rdma_bc_setup()
160 rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs); xprt_rdma_bc_setup()
164 buffer->rb_bc_srv_max_requests = reqs; xprt_rdma_bc_setup()
170 xprt_rdma_bc_destroy(xprt, reqs); xprt_rdma_bc_setup()
245 * @reqs: number of incoming requests to destroy; ignored
247 void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs) xprt_rdma_bc_destroy() argument
/linux-4.4.14/drivers/net/wireless/zd1211rw/
zd_chip.c:381 struct zd_ioreq32 reqs[2] = {in_reqs[0], in_reqs[1]}; zd_write_mac_addr_common() local
384 reqs[0].value = (mac_addr[3] << 24) zd_write_mac_addr_common()
388 reqs[1].value = (mac_addr[5] << 8) zd_write_mac_addr_common()
396 r = zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); zd_write_mac_addr_common()
406 static const struct zd_ioreq32 reqs[2] = { zd_write_mac_addr() local
411 return zd_write_mac_addr_common(chip, mac_addr, reqs, "mac"); zd_write_mac_addr()
416 static const struct zd_ioreq32 reqs[2] = { zd_write_bssid() local
421 return zd_write_mac_addr_common(chip, bssid, reqs, "bssid"); zd_write_bssid()
873 struct zd_ioreq32 reqs[3]; set_aw_pt_bi() local
883 reqs[0].addr = CR_ATIM_WND_PERIOD; set_aw_pt_bi()
884 reqs[0].value = s->atim_wnd_period; set_aw_pt_bi()
885 reqs[1].addr = CR_PRE_TBTT; set_aw_pt_bi()
886 reqs[1].value = s->pre_tbtt; set_aw_pt_bi()
887 reqs[2].addr = CR_BCN_INTERVAL; set_aw_pt_bi()
888 reqs[2].value = (s->beacon_interval & ~0xffff) | b_interval; set_aw_pt_bi()
890 return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs)); set_aw_pt_bi()
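
Every zd_chip.c hit above is the same batching idiom: fill a short stack array of zd_ioreq32 {addr, value} pairs, then issue them together with zd_iowrite32a_locked() while the chip lock is held. A minimal sketch of the idiom, with hypothetical register names (CR_EXAMPLE_*) standing in for the real CR_* constants:

    static int example_write_pair_locked(struct zd_chip *chip, u32 a, u32 b)
    {
            struct zd_ioreq32 reqs[2];    /* one entry per register write */

            reqs[0].addr  = CR_EXAMPLE_A; /* hypothetical register */
            reqs[0].value = a;
            reqs[1].addr  = CR_EXAMPLE_B; /* hypothetical register */
            reqs[1].value = b;

            /* caller holds the chip lock, as in set_aw_pt_bi() above */
            return zd_iowrite32a_locked(chip, reqs, ARRAY_SIZE(reqs));
    }
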
/linux-4.4.14/drivers/pci/
ats.c:150 int pci_enable_pri(struct pci_dev *pdev, u32 reqs) pci_enable_pri() argument
167 reqs = min(max_requests, reqs); pci_enable_pri()
168 pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs); pci_enable_pri()
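
ats.c:167-168 shows the contract of pci_enable_pri(): the caller's reqs is clamped against the device's advertised maximum (PCI_PRI_MAX_REQ, see pci_regs.h below) before PCI_PRI_ALLOC_REQ is written, so callers may over-ask safely. A sketch of typical caller-side use, assuming a device with the PRI capability (compare pdev_iommuv2_enable() in drivers/iommu/amd_iommu.c below):

    static int example_enable_pri(struct pci_dev *pdev)
    {
            /* request 32 outstanding page requests; the PCI core clamps
             * this to the device's PCI_PRI_MAX_REQ before writing
             * PCI_PRI_ALLOC_REQ */
            return pci_enable_pri(pdev, 32);
    }
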
/linux-4.4.14/fs/nfsd/
stats.h:15 unsigned int rcnocache; /* uncached reqs */
/linux-4.4.14/drivers/lightnvm/
rrpc.h:39 struct list_head reqs; member in struct:rrpc_inflight
177 list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) { list_for_each_entry()
188 list_add_tail(&r->list, &rrpc->inflights.reqs);
rrpc.c:1070 INIT_LIST_HEAD(&rrpc->inflights.reqs); rrpc_core_init()
/linux-4.4.14/include/net/
request_sock.h:134 * max_qlen - max TFO reqs allowed before TFO is disabled.
141 * to 0 implying no more outstanding TFO reqs. One solution is to keep
153 int qlen; /* # of pending (TCP_SYN_RECV) reqs */
/linux-4.4.14/drivers/staging/lustre/lustre/include/
lustre_import.h:75 __u32 paa_count; /** the total count of reqs */
76 time64_t paa_deadline; /** the earliest deadline of reqs */
77 __u32 *paa_reqs_count; /** the count of reqs in each entry */
lustre_net.h:1971 /** # hp per lp reqs to handle */
2061 /** # incoming reqs */
2067 /** incoming reqs */
2069 /** timeout before re-posting reqs, in tick */
2093 /** # reqs in either of the NRS heads below */
2094 /** # reqs being served */
2116 /** reqs waiting for replies */
lustre_sec.h:498 struct list_head cc_req_list; /* waiting reqs linked here */
/linux-4.4.14/drivers/staging/rdma/hfi1/
user_sdma.h:76 struct user_sdma_request *reqs; member in struct:hfi1_user_sdma_pkt_q
user_sdma.c:379 memsize = sizeof(*pq->reqs) * hfi1_sdma_comp_ring_size; hfi1_user_sdma_alloc_queues()
380 pq->reqs = kmalloc(memsize, GFP_KERNEL); hfi1_user_sdma_alloc_queues()
381 if (!pq->reqs) hfi1_user_sdma_alloc_queues()
431 kfree(pq->reqs); hfi1_user_sdma_alloc_queues()
458 if (pq->reqs) { hfi1_user_sdma_free_queues()
461 struct user_sdma_request *req = &pq->reqs[j]; hfi1_user_sdma_free_queues()
469 kfree(pq->reqs); hfi1_user_sdma_free_queues()
536 req = pq->reqs + info.comp_idx; hfi1_user_sdma_process_request()
file_ops.c:442 int ret = 0, done = 0, reqs = 0; hfi1_write_iter() local
477 reqs++; hfi1_write_iter()
480 return ret ? ret : reqs; hfi1_write_iter()
/linux-4.4.14/drivers/staging/rdma/amso1100/
c2_vq.h:42 wait_queue_head_t wait_object; /* wait object for vq reqs */
/linux-4.4.14/drivers/scsi/snic/
snic_io.h:43 SNIC_REQ_TM_CACHE, /* cache for task mgmt reqs contains
snic_io.c:344 * snic_free_all_untagged_reqs: Walks through untagged reqs and frees them.
snic_main.c:527 /* Configure Maximum Outstanding IO reqs */ snic_probe()
snic_scsi.c:2398 * snic_scsi_cleanup: Walks through tag map and releases the reqs
/linux-4.4.14/drivers/scsi/
virtio_scsi.c:82 * Decrements of reqs are never concurrent with writes of req_vq: before the
83 * decrement reqs will be != 0; after the decrement the virtqueue completion
85 * Thus they can happen outside the tgt_seq, provided of course we make reqs
92 atomic_t reqs; member in struct:virtio_scsi_target_state
212 atomic_dec(&tgt->reqs); virtscsi_complete_cmd()
576 atomic_inc(&tgt->reqs); virtscsi_queuecommand_single()
597 if (atomic_inc_return(&tgt->reqs) > 1) { virtscsi_pick_vq()
609 if (unlikely(atomic_read(&tgt->reqs) > 1)) { virtscsi_pick_vq()
746 atomic_set(&tgt->reqs, 0); virtscsi_target_alloc()
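
The virtio_scsi.c comment at lines 82-85 is the invariant behind these hits: tgt->reqs can only be zero while no completion is running, so the request virtqueue may be re-chosen exactly on the 0 -> 1 transition. A sketch of that check, reduced to the bare atomic:

    static bool example_target_was_idle(atomic_t *reqs)
    {
            /* 0 -> 1 transition: the target was idle, so no completion
             * can race with switching req_vq (cf. virtio_scsi.c:597) */
            return atomic_inc_return(reqs) == 1;
    }
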
hptiop.c:199 req = hba->reqs[tag >> 8].req_virt; hptiop_request_callback_mv()
250 req = hba->reqs[(_tag >> 4) & 0xff].req_virt; hptiop_request_callback_mvfrey()
737 scp = hba->reqs[tag].scp; hptiop_finish_scsi_req()
782 free_req(hba, &hba->reqs[tag]); hptiop_finish_scsi_req()
792 req = hba->reqs[tag].req_virt; hptiop_host_request_callback_itl()
797 req = hba->reqs[tag].req_virt; hptiop_host_request_callback_itl()
1462 hba->reqs[i].next = NULL; hptiop_probe()
1463 hba->reqs[i].req_virt = start_virt; hptiop_probe()
1464 hba->reqs[i].req_shifted_phy = start_phy >> 5; hptiop_probe()
1465 hba->reqs[i].index = i; hptiop_probe()
1466 free_req(hba, &hba->reqs[i]); hptiop_probe()
qlogicfas408.h:65 i.e. how many reqs can occur before an ack is given.
hptiop.h:327 struct hptiop_request reqs[HPTIOP_MAX_REQUESTS]; member in struct:hptiop_hba
stex.c:200 __le16 req_cnt; /* count of reqs the buffer can hold */
scsi_transport_fc.c:2498 * Must unblock to flush queued IO. scsi-ml will fail incoming reqs. fc_terminate_rport_io()
hpsa.c:8158 /* Stop sending new RAID offload reqs via the IO accelerator */ hpsa_ack_ctlr_events()
/linux-4.4.14/fs/
aio.c:89 struct percpu_ref reqs; member in struct:kioctx
579 percpu_ref_exit(&ctx->reqs); free_ioctx()
586 struct kioctx *ctx = container_of(ref, struct kioctx, reqs); free_ioctx_reqs()
618 percpu_ref_kill(&ctx->reqs); free_ioctx_users()
619 percpu_ref_put(&ctx->reqs); free_ioctx_users()
736 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL)) ioctx_alloc()
764 percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ ioctx_alloc()
787 percpu_ref_exit(&ctx->reqs); ioctx_alloc()
1011 percpu_ref_get(&ctx->reqs); aio_get_req()
1155 percpu_ref_put(&ctx->reqs); aio_complete()
1567 percpu_ref_put(&ctx->reqs); io_submit_one()
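
In fs/aio.c, ctx->reqs is a percpu_ref with one reference per in-flight request: percpu_ref_get() at submission (line 1011), percpu_ref_put() at completion (line 1155), percpu_ref_kill() at teardown (line 618), and the release callback fires once the count drains. A sketch of that lifecycle with hypothetical names (example_ctx, example_reqs_release):

    #include <linux/percpu-refcount.h>

    struct example_ctx {
            struct percpu_ref reqs;   /* one reference per in-flight req */
    };

    /* runs (in atomic context) after kill, once the last req drops its
     * ref; aio defers the heavy teardown to a workqueue from here */
    static void example_reqs_release(struct percpu_ref *ref)
    {
            struct example_ctx *ctx =
                    container_of(ref, struct example_ctx, reqs);
            (void)ctx;   /* schedule real cleanup, then percpu_ref_exit() */
    }

    static int example_ctx_init(struct example_ctx *ctx)
    {
            return percpu_ref_init(&ctx->reqs, example_reqs_release,
                                   0, GFP_KERNEL);
    }
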
/linux-4.4.14/drivers/net/ethernet/intel/ixgbevf/
vf.h:113 u32 reqs; member in struct:ixgbe_mbx_stats
mbx.c:180 hw->mbx.stats.reqs++; ixgbevf_check_for_msg_vf()
331 mbx->stats.reqs = 0; ixgbevf_init_mbx_params_vf()
/linux-4.4.14/net/9p/
client.c:251 c->reqs[row] = kcalloc(P9_ROW_MAXTAG, p9_tag_alloc()
254 if (!c->reqs[row]) { p9_tag_alloc()
260 c->reqs[row][col].status = REQ_STATUS_IDLE; p9_tag_alloc()
261 c->reqs[row][col].tc = NULL; p9_tag_alloc()
270 req = &c->reqs[row][col]; p9_tag_alloc()
324 return &c->reqs[row][col]; p9_tag_lookup()
369 if (c->reqs[row][col].status != REQ_STATUS_IDLE) { p9_tag_cleanup()
387 kfree(c->reqs[row][col].wq); p9_tag_cleanup()
388 kfree(c->reqs[row][col].tc); p9_tag_cleanup()
389 kfree(c->reqs[row][col].rc); p9_tag_cleanup()
391 kfree(c->reqs[row]); p9_tag_cleanup()
/linux-4.4.14/drivers/net/ethernet/intel/igbvf/
mbx.c:185 hw->mbx.stats.reqs++; e1000_check_for_msg_vf()
344 mbx->stats.reqs = 0; e1000_init_mbx_params_vf()
vf.h:220 u32 reqs; member in struct:e1000_mbx_stats
/linux-4.4.14/include/net/9p/
client.h:134 * @reqs - 2D array of requests
164 struct p9_req_t *reqs[P9_ROW_MAXTAG]; member in struct:p9_client
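
Together these two 9p sections describe one data structure: client->reqs is a sparse 2D tag table whose rows of P9_ROW_MAXTAG slots are kcalloc'd only when first needed (client.c:251), and a message tag addresses it as row/column. A sketch of the lookup arithmetic, assuming the tag has already been bounds-checked:

    static struct p9_req_t *example_lookup(struct p9_client *c, u16 tag)
    {
            int row = tag / P9_ROW_MAXTAG;   /* which lazily allocated row */
            int col = tag % P9_ROW_MAXTAG;   /* slot within that row */

            if (!c->reqs[row])               /* row never allocated */
                    return NULL;
            return &c->reqs[row][col];       /* cf. client.c:324 */
    }
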
/linux-4.4.14/drivers/net/wireless/brcm80211/brcmfmac/
usb.c:403 struct brcmf_usbreq *req, *reqs; brcmf_usbdev_qinit() local
405 reqs = kcalloc(qsize, sizeof(struct brcmf_usbreq), GFP_ATOMIC); brcmf_usbdev_qinit()
406 if (reqs == NULL) brcmf_usbdev_qinit()
409 req = reqs; brcmf_usbdev_qinit()
420 return reqs; brcmf_usbdev_qinit()
p2p.c:1883 /* Filter any P2P probe reqs arriving during the GO-NEG Phase */ brcmf_p2p_notify_rx_mgmt_p2p_probereq()
/linux-4.4.14/sound/pci/hda/
patch_ca0132.c:142 int reqs[EFFECT_VALS_MAX_COUNT]; /*effect module request*/ member in struct:ct_effect
156 .reqs = {0, 1},
164 .reqs = {7, 8},
172 .reqs = {2, 3},
180 .reqs = {4, 5, 6},
188 .reqs = {24, 23, 25},
196 .reqs = {9, 10, 11, 12, 13, 14,
207 .reqs = {0, 1, 2, 3},
215 .reqs = {6, 7, 8, 9},
223 .reqs = {44, 45},
231 .reqs = {4, 5},
239 .reqs = {10, 11, 12, 13, 14, 15, 16, 17, 18},
386 int reqs[VOICEFX_MAX_PARAM_COUNT]; /*effect module request*/ member in struct:ct_voicefx
398 .reqs = {10, 11, 12, 13, 14, 15, 16, 17, 18}
3405 ca0132_voicefx.reqs[0], tmp); ca0132_voicefx_set()
3447 ca0132_effects[idx].reqs[0], on); ca0132_effects_set()
3649 ca0132_voicefx.reqs[i], ca0132_voicefx_put()
4306 ca0132_effects[idx].reqs[i], ca0132_setup_defaults()
4537 on = (unsigned int)ca0132_effects[i].reqs[0]; ca0132_init_chip()
/linux-4.4.14/drivers/staging/lustre/lustre/fld/
fld_cache.c:114 CDEBUG(D_INFO, " Total reqs: %llu\n", cache->fci_stat.fst_count); fld_cache_fini()
115 CDEBUG(D_INFO, " Cache reqs: %llu\n", cache->fci_stat.fst_cache); fld_cache_fini()
/linux-4.4.14/drivers/net/ethernet/intel/igb/
e1000_mbx.c:268 hw->mbx.stats.reqs++; igb_check_for_msg_pf()
437 mbx->stats.reqs = 0; igb_init_mbx_params_pf()
e1000_hw.h:507 u32 reqs; member in struct:e1000_mbx_stats
/linux-4.4.14/drivers/net/ethernet/intel/ixgbe/
ixgbe_mbx.c:262 hw->mbx.stats.reqs++; ixgbe_check_for_msg_pf()
441 mbx->stats.reqs = 0; ixgbe_init_mbx_params_pf()
ixgbe_type.h:3427 u32 reqs; member in struct:ixgbe_mbx_stats
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/
lproc_ptlrpc.c:216 svc_counter_config, "req_qdepth", "reqs"); ptlrpc_ldebugfs_register()
218 svc_counter_config, "req_active", "reqs"); ptlrpc_ldebugfs_register()
232 units = "reqs"; ptlrpc_ldebugfs_register()
742 * Since the service history is LRU (i.e. culled reqs will ptlrpc_lprocfs_svc_req_history_seek()
events.c:355 * drop incoming reqs since we set the portal lazy */ request_in_callback()
service.c:767 /* remove rqbd's reqs from svc's req history while ptlrpc_server_drop_request()
793 * now all reqs including the embedded req has been ptlrpc_server_drop_request()
874 * Failing over, don't handle any more reqs, send ptlrpc_check_req()
1439 * Handle freshly incoming reqs, add to timed early reply list,
2093 /* Process all incoming reqs before handling any */ ptlrpc_main()
client.c:1949 /* A timeout expired. See which reqs it applies to... */ ptlrpc_expired_set()
2149 * -ETIMEDOUT => someone timed out. When all reqs have ptlrpc_set_wait()
2833 * Last chance to free reqs left on the replay list, but we ptlrpc_abort_inflight()
2834 * will still leak reqs that haven't committed. ptlrpc_abort_inflight()
niobuf.c:326 /* Report service time estimate for future client reqs, but report 0 ptlrpc_at_set_reply()
/linux-4.4.14/fs/nfs/
direct.c:654 LIST_HEAD(reqs); nfs_direct_write_reschedule()
660 nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo); nfs_direct_write_reschedule()
671 req = nfs_list_entry(reqs.next); nfs_direct_write_reschedule()
674 list_for_each_entry_safe(req, tmp, &reqs, wb_list) { nfs_direct_write_reschedule()
write.c:309 * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
971 * @cinfo: mds and ds lists of reqs ready to commit
1305 /* If a nfs_flush_* function fails, it should remove reqs from @head and
pnfs_nfs.c:137 /* Move reqs from written to committing lists, returning count
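
direct.c:654-674 is the standard kernel list idiom behind these hits: requests are gathered onto a private LIST_HEAD so they can be re-driven, then walked with list_for_each_entry_safe() because each entry is unlinked inside the loop. The bare idiom, with a hypothetical example_req type reusing the wb_list linkage name:

    #include <linux/list.h>

    struct example_req {
            struct list_head wb_list;        /* linkage, as in nfs_page */
    };

    static void example_redrive(struct list_head *src)
    {
            LIST_HEAD(reqs);                 /* private list, cf. direct.c:654 */
            struct example_req *req, *tmp;

            list_splice_init(src, &reqs);    /* steal all entries at once */

            /* _safe variant because req is removed during iteration */
            list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                    list_del_init(&req->wb_list);
                    /* resubmit req here */
            }
    }
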
/linux-4.4.14/drivers/mmc/card/
block.c:1652 u8 reqs = 0; mmc_blk_prep_packed_list() local
1689 if (reqs >= max_packed_rw - 1) { mmc_blk_prep_packed_list()
1727 reqs++; mmc_blk_prep_packed_list()
1736 if (reqs > 0) { mmc_blk_prep_packed_list()
1738 mqrq->packed->nr_entries = ++reqs; mmc_blk_prep_packed_list()
1739 mqrq->packed->retries = reqs; mmc_blk_prep_packed_list()
1740 return reqs; mmc_blk_prep_packed_list()
1944 u8 reqs = 0; mmc_blk_issue_rw_rq() local
1950 reqs = mmc_blk_prep_packed_list(mq, rqc); mmc_blk_issue_rw_rq()
1966 if (reqs >= packed_nr) mmc_blk_issue_rw_rq()
/linux-4.4.14/drivers/scsi/bfa/
bfa_defs_svc.h:48 u16 num_ioim_reqs; /* number of IO reqs */
50 u16 num_fwtio_reqs; /* number of TM IO reqs in FW */
68 u16 num_tio_reqs; /* number of TM IO reqs */
437 u32 ic_reqs; /* interrupt coalesce reqs */
439 u32 set_intr_reqs; /* set interrupt reqs */
bfi.h:34 /* Get num dma reqs - that fit in a segment */
/linux-4.4.14/net/sunrpc/
backchannel_rqst.c:211 * of reqs specified by the caller.
/linux-4.4.14/drivers/scsi/isci/
host.c:260 struct isci_request *ireq = ihost->reqs[index]; sci_controller_task_completion()
283 ireq = ihost->reqs[index]; sci_controller_sdma_completion()
403 ireq = ihost->reqs[index]; sci_controller_event_completion()
411 ireq = ihost->reqs[index]; sci_controller_event_completion()
2292 ihost->reqs[i] = ireq; sci_controller_dma_alloc()
2468 struct isci_request *ireq = ihost->reqs[task_index]; sci_request_by_tag()
host.h:208 struct isci_request *reqs[SCI_MAX_IO_REQUESTS]; member in struct:isci_host
remote_device.c:130 struct isci_request *ireq = ihost->reqs[i]; sci_remote_device_terminate_reqs_checkabort()
request.c:3405 ireq = ihost->reqs[ISCI_TAG_TCI(tag)]; isci_request_from_tag()
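
isci preallocates every possible request and keeps it in ihost->reqs[SCI_MAX_IO_REQUESTS], so resolving a hardware tag is a plain array index once the task-context index is extracted (request.c:3405). A sketch, treating ISCI_TAG_TCI() as given:

    static struct isci_request *example_from_tag(struct isci_host *ihost,
                                                 u16 tag)
    {
            /* the tag's TCI field doubles as the slot in ihost->reqs[] */
            return ihost->reqs[ISCI_TAG_TCI(tag)];
    }
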
/linux-4.4.14/drivers/staging/lustre/lustre/mdc/
mdc_lib.c:544 /* Empty waiting list? Decrease reqs in-flight number */ mdc_exit_request()
/linux-4.4.14/drivers/vhost/
scsi.c:64 /* Refcount for the inflight reqs */
177 * Reference counting for inflight reqs, used for flush operation. At
1172 * when all the reqs are finished. vhost_scsi_flush()
1183 /* Wait for all reqs issued before the flush to be finished */ vhost_scsi_flush()
/linux-4.4.14/include/scsi/
libiscsi.h:287 /* This tracks the reqs queued into the initiator */
/linux-4.4.14/drivers/staging/rdma/ehca/
ehca_reqs.c:480 /* loop processes list of send reqs */ ehca_post_send()
527 /* loop processes list of recv reqs */ internal_post_recv()
/linux-4.4.14/include/xen/interface/
xen.h:131 * enum neg_errnoval HYPERVISOR_mmu_update(const struct mmu_update reqs[],
134 * @reqs is an array of mmu_update_t structures ((ptr, val) pairs).
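
xen.h:131-134 documents the batched form of the MMU hypercall: @reqs is an array of (ptr, val) pairs, with the low bits of ptr selecting the update type. A sketch of a one-entry batch on Linux/x86; the PTE machine address is a placeholder, and the HYPERVISOR_mmu_update() wrapper signature is assumed from the arch/x86 hypercall header:

    #include <xen/interface/xen.h>
    #include <asm/xen/hypercall.h>

    static int example_one_update(u64 pte_maddr, u64 new_val)
    {
            struct mmu_update req;
            int done;

            /* MMU_NORMAL_PT_UPDATE (0) in the low bits of ptr means
             * "ptr is the machine address of the PTE to overwrite" */
            req.ptr = pte_maddr | MMU_NORMAL_PT_UPDATE;
            req.val = new_val;

            return HYPERVISOR_mmu_update(&req, 1, &done, DOMID_SELF);
    }
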
/linux-4.4.14/drivers/scsi/csiostor/
csio_hw.h:233 struct list_head mgmt_req_freelist; /* Free poll of reqs */
csio_hw.c:3716 * module for future use. Allocate and save off mgmt reqs in the
/linux-4.4.14/drivers/iommu/
amd_iommu.c:2073 int reqs, ret; pdev_iommuv2_enable() local
2076 reqs = 32; pdev_iommuv2_enable()
2078 reqs = 1; pdev_iommuv2_enable()
2092 ret = pci_enable_pri(pdev, reqs); pdev_iommuv2_enable()
/linux-4.4.14/fs/fuse/
fuse_i.h:250 unsigned reqs; member in struct:fuse_io_priv
file.c:574 left = --io->reqs; fuse_aio_complete()
624 io->reqs++; fuse_async_req_send()
2818 io->reqs = 1; fuse_direct_IO()
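
The fuse hits trace a plain completion counter: io->reqs starts at 1 for the submitter's own reference (file.c:2818), is incremented per async request sent (file.c:624), and the whole I/O completes when a decrement reaches zero (file.c:574). A sketch of the shape, assuming the counter is guarded by a lock the caller holds (fuse keeps a spinlock in fuse_io_priv for this):

    #include <linux/spinlock.h>

    struct example_io {
            spinlock_t lock;
            unsigned reqs;          /* in-flight fragments + submitter ref */
    };

    /* returns true when the last outstanding fragment finished */
    static bool example_complete_one(struct example_io *io)
    {
            unsigned left;

            spin_lock(&io->lock);
            left = --io->reqs;      /* cf. file.c:574 */
            spin_unlock(&io->lock);

            return left == 0;
    }
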
/linux-4.4.14/include/uapi/linux/
pci_regs.h:853 #define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */
854 #define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */
/linux-4.4.14/drivers/dma/
pl330.c:485 /* Holds list of reqs with due callbacks */
516 /* Hook to attach to DMAC's list of reqs with due callback */
2461 * Ideally we should lookout for reqs bigger than __pl330_prep_dma_memcpy()
/linux-4.4.14/drivers/s390/scsi/
zfcp_fc.c:194 /* wait 10 milliseconds, other reqs might pop in */ zfcp_fc_wka_port_put()
/linux-4.4.14/drivers/scsi/bnx2i/
bnx2i.h:370 * @dummy_buffer: Dummy buffer to be used with zero length scsicmd reqs
/linux-4.4.14/drivers/scsi/fnic/
fnic_main.c:691 /* Configure Maximum Outstanding IO reqs*/ fnic_probe()
/linux-4.4.14/arch/mips/include/asm/sn/sn0/
hubio.h:596 pnd_req: 1; /* reqs not issued due to IOQ full */
/linux-4.4.14/net/ipv4/
inet_connection_sock.c:867 /* Free all the reqs queued in rskq_rst_head. */ inet_csk_listen_stop()
/linux-4.4.14/net/rds/
iw_send.c:647 /* if there's data reference it with a chain of work reqs */ rds_iw_xmit()
/linux-4.4.14/drivers/usb/gadget/udc/
pxa27x_udc.c:194 seq_printf(s, "%-12s: IN %lu(%lu reqs), OUT %lu(%lu reqs), irqs=%lu, udccr=0x%08x, udccsr=0x%03x, udcbcr=%d\n", eps_dbg_show()
/linux-4.4.14/drivers/gpu/drm/radeon/
sumo_dpm.c:1095 u32 min_sclk = pi->sys_info.min_sclk; /* XXX check against disp reqs */ sumo_apply_state_adjust_rules()
trinity_dpm.c:1541 u32 min_sclk = pi->sys_info.min_sclk; /* XXX check against disp reqs */ trinity_apply_state_adjust_rules()
/linux-4.4.14/drivers/staging/lustre/lustre/ldlm/
ldlm_request.c:973 * reqs have no reference to the OBD export and thus access to ldlm_cli_update_pool()
/linux-4.4.14/fs/cifs/
cifsglob.h:485 bool mand_lock:1; /* send mandatory not posix byte range lock reqs */
/linux-4.4.14/drivers/gpu/drm/i915/
intel_lrc.c:589 "More than 2 already-submitted reqs queued\n"); execlists_context_queue()
i915_reg.h:2753 #define RSCRP (1<<18) /* rs requests control on rs1/2 reqs */
/linux-4.4.14/drivers/block/
skd_main.c:3544 pr_debug("%s:%s:%d starting %s queue to error-out reqs\n", skd_start_device()
/linux-4.4.14/fs/ext4/
ext4.h:1386 atomic_t s_bal_reqs; /* number of reqs with len > 1 */
mballoc.c:2738 "mballoc: %u blocks %u reqs (%u success)", ext4_mb_release()

Completed in 3295 milliseconds