/linux-4.1.27/drivers/net/ethernet/brocade/bna/
D | bnad.c
    104  for (i = 0; i < ccb->q_depth; i++) {    in bnad_cq_cleanup()
    117  u32 q_depth, u32 index)    in bnad_tx_buff_unmap() argument
    140  BNA_QE_INDX_INC(index, q_depth);    in bnad_tx_buff_unmap()
    152  BNA_QE_INDX_INC(index, q_depth);    in bnad_tx_buff_unmap()
    169  for (i = 0; i < tcb->q_depth; i++) {    in bnad_txq_cleanup()
    173  bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);    in bnad_txq_cleanup()
    188  u32 wis, unmap_wis, hw_cons, cons, q_depth;    in bnad_txcmpl_process() local
    199  q_depth = tcb->q_depth;    in bnad_txcmpl_process()
    201  wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);    in bnad_txcmpl_process()
    202  BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));    in bnad_txcmpl_process()
    [all …]
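The bnad.c hits are all ring-index bookkeeping on the transmit path: bnad_txcmpl_process() appears to measure how far the hardware consumer index has moved ahead of the driver's, and bnad_tx_buff_unmap() then walks that many slots, advancing the index with BNA_QE_INDX_INC. The macro bodies are not shown in this listing, so the following is only a user-space sketch of that arithmetic, assuming q_depth is a power of two (as the `q_depth - 1` masks in bna.h suggest); q_index_change() and q_index_inc() are placeholder names, not the kernel macros.

```c
/* Illustrative sketch of the wrapping-index arithmetic the bnad.c hits hint
 * at.  Assumes q_depth is a power of two; names are placeholders. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Number of entries between an old and a new index on a wrapping ring. */
static uint32_t q_index_change(uint32_t old_idx, uint32_t new_idx,
			       uint32_t q_depth)
{
	return (new_idx - old_idx) & (q_depth - 1);
}

/* Advance an index by one slot, wrapping at q_depth. */
static void q_index_inc(uint32_t *idx, uint32_t q_depth)
{
	*idx = (*idx + 1) & (q_depth - 1);
}

int main(void)
{
	uint32_t q_depth = 8, cons = 6, hw_cons = 2;	/* hw wrapped past the end */
	uint32_t wis = q_index_change(cons, hw_cons, q_depth);

	assert(wis == 4);	/* 6 -> 7 -> 0 -> 1 -> 2: four completed work items */
	for (uint32_t i = 0; i < wis; i++) {
		printf("reclaim slot %u\n", cons);
		q_index_inc(&cons, q_depth);	/* analogous to BNA_QE_INDX_INC */
	}
	return 0;
}
```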
D | bna_types.h
    440  u32 q_depth;    member
    571  u32 q_depth;    member
    587  int q_depth;    member
    634  u32 q_depth;    member
D | bna.h
    162  ((_q_ptr)->q.q_depth - 1))
    167  & ((_q_ptr)->q.q_depth - 1))
    170  (BNA_QE_FREE_CNT(&((_q_ptr)->q), (_q_ptr)->q.q_depth))
    173  (BNA_QE_IN_USE_CNT(&(_q_ptr)->q, (_q_ptr)->q.q_depth))
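The bna.h fragments only show the tails of the queue macros, but the `q_depth - 1` masks make clear that the depth is treated as a power of two. Below is a minimal sketch of free/in-use accounting in that style, with hypothetical field names rather than the actual BNA structures.

```c
/* Sketch of free/in-use accounting on a power-of-two ring, in the spirit of
 * the BNA_QE_FREE_CNT / BNA_QE_IN_USE_CNT fragments above.  Hypothetical
 * struct and field names. */
#include <assert.h>
#include <stdint.h>

struct ring {
	uint32_t producer_index;
	uint32_t consumer_index;
	uint32_t q_depth;		/* must be a power of two */
};

static uint32_t ring_in_use(const struct ring *q)
{
	return (q->producer_index - q->consumer_index) & (q->q_depth - 1);
}

static uint32_t ring_free(const struct ring *q)
{
	/* one slot is sacrificed so that "full" and "empty" are distinguishable */
	return (q->consumer_index - q->producer_index - 1) & (q->q_depth - 1);
}

int main(void)
{
	struct ring q = { .producer_index = 5, .consumer_index = 2, .q_depth = 16 };

	assert(ring_in_use(&q) == 3);
	assert(ring_free(&q) == 12);	/* 16 - 3 in use - 1 reserved slot */
	return 0;
}
```

Reserving one slot is the usual price of distinguishing a full ring from an empty one when only two indices are kept; with producer == consumer the ring reads as empty, never full.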
D | bfa_msgq.c
    525  msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);    in bfa_msgq_init()
    527  msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);    in bfa_msgq_init()
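Both bfa_msgq.c hits convert a host-order queue depth with htons() while filling the message-queue configuration handed to the adapter, i.e. the firmware-visible field is kept big-endian. A small sketch of that fill pattern, with invented structure names:

```c
/* Minimal sketch of the pattern in the bfa_msgq.c hits: queue depths kept in
 * host byte order are converted with htons() as they are copied into a
 * wire/firmware-visible configuration structure.  Struct names are invented. */
#include <arpa/inet.h>		/* htons(), ntohs() */
#include <assert.h>
#include <stdint.h>

struct fw_queue_cfg {
	uint16_t q_depth;	/* big-endian on the wire */
};

struct host_queue {
	uint16_t depth;		/* host byte order */
};

static void queue_cfg_fill(struct fw_queue_cfg *cfg, const struct host_queue *q)
{
	cfg->q_depth = htons(q->depth);	/* same idea as msgq_cfg->cmdq.q_depth = htons(...) */
}

int main(void)
{
	struct host_queue cmdq = { .depth = 128 };
	struct fw_queue_cfg cfg;

	queue_cfg_fill(&cfg, &cmdq);
	assert(ntohs(cfg.q_depth) == 128);	/* round-trips regardless of host endianness */
	return 0;
}
```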
D | bfi.h
    431  u16 q_depth;	/* Total num of entries in the queue */    member
D | bna_tx_rx.c
    2638  q0->rcb->q_depth = rx_cfg->q0_depth;    in bna_rx_create()
    2639  q0->q_depth = rx_cfg->q0_depth;    in bna_rx_create()
    2664  q1->rcb->q_depth = rx_cfg->q1_depth;    in bna_rx_create()
    2665  q1->q_depth = rx_cfg->q1_depth;    in bna_rx_create()
    2695  rxp->cq.ccb->q_depth = cq_depth;    in bna_rx_create()
    3789  txq->tcb->q_depth = tx_cfg->txq_depth;    in bna_tx_create()
/linux-4.1.27/drivers/block/rsxx/
D | cregs.c
    147  card->creg_ctrl.q_depth--;    in creg_kick_queue()
    199  card->creg_ctrl.q_depth++;    in creg_queue_cmd()
    336  card->creg_ctrl.q_depth--;    in creg_reset()
    413  card->creg_ctrl.q_depth + 20000);    in __issue_creg_rw()
    720  card->creg_ctrl.q_depth++;    in rsxx_eeh_save_issued_creg()
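In cregs.c, q_depth looks like a plain counter of queued creg commands: incremented in creg_queue_cmd() and rsxx_eeh_save_issued_creg(), decremented in creg_kick_queue() and creg_reset(), and at line 413 it feeds a timeout expression whose other terms are truncated in the listing. The sketch below only illustrates that counting-plus-scaled-timeout idea; the constants and the timeout formula are assumptions, not taken from the driver.

```c
/* Sketch of the accounting pattern suggested by the cregs.c hits: q_depth is
 * a count of commands sitting in a software queue, and a completion timeout
 * is stretched as the backlog grows.  Formula and constants are illustrative
 * guesses. */
#include <stdio.h>

struct creg_ctrl {
	unsigned int q_depth;		/* commands waiting in the queue */
};

/* Hypothetical: base timeout plus a per-queued-command allowance, in ms. */
static unsigned int creg_timeout_ms(const struct creg_ctrl *ctrl)
{
	return ctrl->q_depth * 10 + 20000;
}

static void creg_queue_cmd(struct creg_ctrl *ctrl)  { ctrl->q_depth++; }
static void creg_kick_queue(struct creg_ctrl *ctrl) { ctrl->q_depth--; }

int main(void)
{
	struct creg_ctrl ctrl = { .q_depth = 0 };

	creg_queue_cmd(&ctrl);
	creg_queue_cmd(&ctrl);
	printf("backlog=%u timeout=%ums\n", ctrl.q_depth, creg_timeout_ms(&ctrl));
	creg_kick_queue(&ctrl);
	printf("backlog=%u\n", ctrl.q_depth);
	return 0;
}
```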
D | rsxx_priv.h
    142  unsigned int q_depth;    member
D | dma.c
    207  u32 q_depth = 0;    in dma_intr_coal_auto_tune() local
    215  q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);    in dma_intr_coal_auto_tune()
    218  q_depth / 2,    in dma_intr_coal_auto_tune()
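dma_intr_coal_auto_tune() sums the hardware queue depth reported by each channel's stats and passes q_depth / 2 onward; the enclosing call is cut off in the listing, so treating the halved total as an interrupt-coalescing target is an assumption in the sketch below, as are all the type and field names.

```c
/* Sketch of the dma_intr_coal_auto_tune() pattern visible above: sum the
 * hardware queue depth of every DMA channel, then derive a coalescing target
 * from half of that total.  Channel count and structures are illustrative. */
#include <stdatomic.h>
#include <stdio.h>

#define MAX_TARGETS 8	/* illustrative channel count */

struct channel_stats { atomic_uint hw_q_depth; };
struct channel       { struct channel_stats stats; };

struct card {
	unsigned int n_targets;
	struct channel ctrl[MAX_TARGETS];
};

static unsigned int intr_coal_target(struct card *card)
{
	unsigned int q_depth = 0;

	for (unsigned int i = 0; i < card->n_targets; i++)
		q_depth += atomic_load(&card->ctrl[i].stats.hw_q_depth);

	return q_depth / 2;	/* coalesce roughly half of the outstanding work */
}

int main(void)
{
	struct card card = { .n_targets = 2 };

	atomic_store(&card.ctrl[0].stats.hw_q_depth, 10);
	atomic_store(&card.ctrl[1].stats.hw_q_depth, 6);
	printf("coalescing target: %u\n", intr_coal_target(&card));	/* prints 8 */
	return 0;
}
```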
/linux-4.1.27/drivers/scsi/bfa/
D | bfi.h
    545  #define BFI_MSGQ_FULL(_q) (((_q->pi + 1) % _q->q_depth) == _q->ci)
    547  #define BFI_MSGQ_UPDATE_CI(_q) (_q->ci = (_q->ci + 1) % _q->q_depth)
    548  #define BFI_MSGQ_UPDATE_PI(_q) (_q->pi = (_q->pi + 1) % _q->q_depth)
    551  #define BFI_MSGQ_FREE_CNT(_q) ((_q->ci - _q->pi - 1) & (_q->q_depth - 1))
    592  u16 q_depth;	/* Total num of entries in the queue */    member
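Unlike most of the fragments here, the BFI_MSGQ_* macros are quoted in full, so the ring discipline is visible: producer and consumer indices advance modulo q_depth, the queue counts as full when advancing the producer would land on the consumer, and the free count masks with q_depth - 1, which is only exact for power-of-two depths. The sketch below re-creates those macros under slightly different names and exercises them end to end.

```c
/* Self-contained producer/consumer ring built from macros mirroring the
 * BFI_MSGQ_* definitions quoted above (renamed to mark this as a sketch). */
#include <assert.h>
#include <stdint.h>

struct msgq {
	uint16_t pi;		/* producer index */
	uint16_t ci;		/* consumer index */
	uint16_t q_depth;	/* total number of entries */
};

#define MSGQ_FULL(_q)      ((((_q)->pi + 1) % (_q)->q_depth) == (_q)->ci)
#define MSGQ_UPDATE_CI(_q) ((_q)->ci = ((_q)->ci + 1) % (_q)->q_depth)
#define MSGQ_UPDATE_PI(_q) ((_q)->pi = ((_q)->pi + 1) % (_q)->q_depth)
#define MSGQ_FREE_CNT(_q)  (((_q)->ci - (_q)->pi - 1) & ((_q)->q_depth - 1))

int main(void)
{
	struct msgq q = { .pi = 0, .ci = 0, .q_depth = 8 };

	/* Fill the queue: one slot stays unused so full != empty. */
	while (!MSGQ_FULL(&q))
		MSGQ_UPDATE_PI(&q);
	assert(q.pi == 7 && MSGQ_FREE_CNT(&q) == 0);

	/* Consume one entry; exactly one slot becomes free again. */
	MSGQ_UPDATE_CI(&q);
	assert(MSGQ_FREE_CNT(&q) == 1);
	return 0;
}
```

Mixing modulo and mask arithmetic as these macros do is harmless as long as q_depth is a power of two; with any other depth the masked free count would disagree with the modulo-driven indices.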
D | bfa_fcpim.h
    126  u16 q_depth;    member
D | bfa_defs_svc.h
    965  u16 q_depth;	/* SCSI Queue depth */    member
D | bfa_svc.c
    3657  fcport->cfg.q_depth =    in bfa_fcport_isr()
    3658  	cpu_to_be16(fcport->cfg.q_depth);    in bfa_fcport_isr()
    4078  attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);    in bfa_fcport_get_attr()
D | bfa_fcpim.c
    505  return fcpim->q_depth;    in bfa_fcpim_qdepth_get()
/linux-4.1.27/drivers/target/tcm_fc/
D | tfc_conf.c
    209  u32 q_depth;    in ft_add_acl() local
    222  q_depth = 32;	/* XXX bogus default - get from tpg? */    in ft_add_acl()
    224  &acl->se_node_acl, name, q_depth);    in ft_add_acl()
/linux-4.1.27/drivers/block/
D | nvme-core.c
    108  u16 q_depth;    member
    356  if (tag >= nvmeq->q_depth) {    in nvme_finish_cmd()
    380  if (++tail == nvmeq->q_depth)    in __nvme_submit_cmd()
    739  if (++nvmeq->sq_tail == nvmeq->q_depth)    in nvme_submit_discard()
    754  if (++nvmeq->sq_tail == nvmeq->q_depth)    in nvme_submit_flush()
    806  if (++nvmeq->sq_tail == nvmeq->q_depth)    in nvme_submit_iod()
    923  if (++head == nvmeq->q_depth) {    in nvme_process_cq()
    1122  c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);    in adapter_alloc_cq()
    1139  c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);    in adapter_alloc_sq()
    1310  dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),    in nvme_free_queue()
    [all …]
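The nvme-core.c hits show classic NVMe ring handling: the submission-queue tail and completion-queue head wrap to zero on reaching q_depth (the truncated block at line 923 is, per the NVMe completion model, where the head resets and the expected phase tag flips), and the qsize fields of the Create I/O CQ/SQ admin commands are zero-based, hence q_depth - 1. Below is a stand-alone sketch of those three details with made-up structure names, not driver code.

```c
/* Sketch of the ring-index handling the nvme-core.c hits point at: tail/head
 * wrap at q_depth, the CQ phase tag flips on each wrap so new completions can
 * be told apart from stale ones, and queue-size fields are zero-based. */
#include <assert.h>
#include <stdint.h>

struct nvme_queue_sketch {
	uint16_t q_depth;
	uint16_t sq_tail;
	uint16_t cq_head;
	uint8_t  cq_phase;
};

/* Advance the SQ tail after posting one command (cf. __nvme_submit_cmd). */
static void sq_tail_advance(struct nvme_queue_sketch *q)
{
	if (++q->sq_tail == q->q_depth)
		q->sq_tail = 0;
	/* the new tail would then be written to the SQ doorbell register */
}

/* Advance the CQ head after consuming one completion (cf. nvme_process_cq). */
static void cq_head_advance(struct nvme_queue_sketch *q)
{
	if (++q->cq_head == q->q_depth) {
		q->cq_head = 0;
		q->cq_phase = !q->cq_phase;	/* expected phase flips each lap */
	}
}

int main(void)
{
	struct nvme_queue_sketch q = {
		.q_depth = 4, .sq_tail = 3, .cq_head = 3, .cq_phase = 1,
	};

	uint16_t qsize = q.q_depth - 1;	/* zero-based value for create_cq/create_sq */
	assert(qsize == 3);

	sq_tail_advance(&q);
	cq_head_advance(&q);
	assert(q.sq_tail == 0 && q.cq_head == 0 && q.cq_phase == 0);
	return 0;
}
```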
/linux-4.1.27/include/linux/
D | nvme.h
    84  int q_depth;    member