q_depth           133 drivers/block/rsxx/cregs.c 	card->creg_ctrl.q_depth--;
q_depth           185 drivers/block/rsxx/cregs.c 	card->creg_ctrl.q_depth++;
q_depth           322 drivers/block/rsxx/cregs.c 		card->creg_ctrl.q_depth--;
q_depth           399 drivers/block/rsxx/cregs.c 				   card->creg_ctrl.q_depth + 20000);
q_depth           706 drivers/block/rsxx/cregs.c 		card->creg_ctrl.q_depth++;
q_depth           193 drivers/block/rsxx/dma.c 	u32 q_depth = 0;
q_depth           201 drivers/block/rsxx/dma.c 		q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);
q_depth           204 drivers/block/rsxx/dma.c 				      q_depth / 2,
q_depth           128 drivers/block/rsxx/rsxx_priv.h 		unsigned int		q_depth;
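
The rsxx hits above use q_depth two ways: cregs.c keeps a plain counter of in-flight card-register commands (incremented on issue, decremented on completion) and scales the command timeout by the backlog, while dma.c sums the per-channel hardware queue depths to auto-tune interrupt coalescing from half the aggregate. A minimal sketch of the creg accounting pattern, with hypothetical names and locking elided (the driver protects q_depth under its creg_ctrl lock):

/*
 * Illustrative only: creg_ctrl_sketch, creg_issue and creg_complete
 * are stand-ins, not the driver's real interfaces.
 */
struct creg_ctrl_sketch {
	unsigned int q_depth;	/* commands issued but not yet completed */
};

static void creg_issue(struct creg_ctrl_sketch *c)    { c->q_depth++; }
static void creg_complete(struct creg_ctrl_sketch *c) { c->q_depth--; }

/* Backlog-scaled timeout, mirroring the q_depth + 20000 at cregs.c:399
 * (milliseconds, assuming the usual msecs_to_jiffies conversion). */
static unsigned int creg_timeout_ms(const struct creg_ctrl_sketch *c)
{
	return c->q_depth + 20000;
}
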
q_depth           112 drivers/net/ethernet/amazon/ena/ena_com.c 	u16 size = ADMIN_SQ_SIZE(queue->q_depth);
q_depth           134 drivers/net/ethernet/amazon/ena/ena_com.c 	u16 size = ADMIN_CQ_SIZE(queue->q_depth);
q_depth           157 drivers/net/ethernet/amazon/ena/ena_com.c 	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
q_depth           167 drivers/net/ethernet/amazon/ena/ena_com.c 	aenq->head = aenq->q_depth;
q_depth           177 drivers/net/ethernet/amazon/ena/ena_com.c 	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
q_depth           208 drivers/net/ethernet/amazon/ena/ena_com.c 	if (unlikely(command_id >= queue->q_depth)) {
q_depth           210 drivers/net/ethernet/amazon/ena/ena_com.c 		       command_id, queue->q_depth);
q_depth           238 drivers/net/ethernet/amazon/ena/ena_com.c 	queue_size_mask = admin_queue->q_depth - 1;
q_depth           244 drivers/net/ethernet/amazon/ena/ena_com.c 	if (cnt >= admin_queue->q_depth) {
q_depth           287 drivers/net/ethernet/amazon/ena/ena_com.c 	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
q_depth           297 drivers/net/ethernet/amazon/ena/ena_com.c 	for (i = 0; i < queue->q_depth; i++) {
q_depth           346 drivers/net/ethernet/amazon/ena/ena_com.c 	size = io_sq->desc_entry_size * io_sq->q_depth;
q_depth           432 drivers/net/ethernet/amazon/ena/ena_com.c 	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
q_depth           491 drivers/net/ethernet/amazon/ena/ena_com.c 	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
q_depth           507 drivers/net/ethernet/amazon/ena/ena_com.c 		if (unlikely(head_masked == admin_queue->q_depth)) {
q_depth           918 drivers/net/ethernet/amazon/ena/ena_com.c 		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
q_depth           928 drivers/net/ethernet/amazon/ena/ena_com.c 		size = io_sq->desc_entry_size * io_sq->q_depth;
q_depth          1230 drivers/net/ethernet/amazon/ena/ena_com.c 	create_cmd.sq_depth = io_sq->q_depth;
q_depth          1266 drivers/net/ethernet/amazon/ena/ena_com.c 	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
q_depth          1377 drivers/net/ethernet/amazon/ena/ena_com.c 	create_cmd.cq_depth = io_cq->q_depth;
q_depth          1412 drivers/net/ethernet/amazon/ena/ena_com.c 	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
q_depth          1442 drivers/net/ethernet/amazon/ena/ena_com.c 	for (i = 0; i < admin_queue->q_depth; i++) {
q_depth          1509 drivers/net/ethernet/amazon/ena/ena_com.c 	u16 depth = ena_dev->aenq.q_depth;
q_depth          1642 drivers/net/ethernet/amazon/ena/ena_com.c 	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
q_depth          1648 drivers/net/ethernet/amazon/ena/ena_com.c 	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
q_depth          1654 drivers/net/ethernet/amazon/ena/ena_com.c 	size = ADMIN_AENQ_SIZE(aenq->q_depth);
q_depth          1754 drivers/net/ethernet/amazon/ena/ena_com.c 	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
q_depth          1792 drivers/net/ethernet/amazon/ena/ena_com.c 	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
q_depth          1798 drivers/net/ethernet/amazon/ena/ena_com.c 	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
q_depth          1838 drivers/net/ethernet/amazon/ena/ena_com.c 	io_cq->q_depth = ctx->queue_size;
q_depth          1844 drivers/net/ethernet/amazon/ena/ena_com.c 	io_sq->q_depth = ctx->queue_size;
q_depth          2021 drivers/net/ethernet/amazon/ena/ena_com.c 	masked_head = aenq->head & (aenq->q_depth - 1);
q_depth          2049 drivers/net/ethernet/amazon/ena/ena_com.c 		if (unlikely(masked_head == aenq->q_depth)) {
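
The ena_com.c hits show the indexing convention of the ENA admin and AENQ rings: q_depth is a power of two, head is a free-running counter masked with (q_depth - 1), and a phase bit flips whenever the masked index wraps so stale descriptors from the previous lap are ignored (the masked_head == q_depth checks above). A sketch of that walk, assuming a simplified descriptor with a device-written phase byte (the real ENA layouts differ):

#include <stdbool.h>
#include <stdint.h>

struct comp_desc {
	uint8_t phase;			/* device toggles this each lap */
};

struct comp_ring {
	struct comp_desc *descs;
	uint16_t q_depth;		/* power of two */
	uint16_t head;			/* free-running counter */
	bool phase;			/* value marking fresh entries */
};

static void ring_process_completions(struct comp_ring *cq)
{
	uint16_t head_masked = cq->head & (cq->q_depth - 1);
	bool phase = cq->phase;

	while (cq->descs[head_masked].phase == phase) {
		/* ... consume cq->descs[head_masked] ... */
		cq->head++;
		head_masked++;
		if (head_masked == cq->q_depth) {	/* wrapped a lap */
			head_masked = 0;
			phase = !phase;
		}
	}
	cq->phase = phase;
}
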
q_depth           154 drivers/net/ethernet/amazon/ena/ena_com.h 	u16 q_depth;
q_depth           196 drivers/net/ethernet/amazon/ena/ena_com.h 	u16 q_depth;
q_depth           244 drivers/net/ethernet/amazon/ena/ena_com.h 	u16 q_depth;
q_depth           274 drivers/net/ethernet/amazon/ena/ena_com.h 	u16 q_depth;
q_depth            42 drivers/net/ethernet/amazon/ena/ena_eth_com.c 	head_masked = io_cq->head & (io_cq->q_depth - 1);
q_depth            67 drivers/net/ethernet/amazon/ena/ena_eth_com.c 	tail_masked = io_sq->tail & (io_sq->q_depth - 1);
q_depth            82 drivers/net/ethernet/amazon/ena/ena_eth_com.c 	dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
q_depth           108 drivers/net/ethernet/amazon/ena/ena_eth_com.c 	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
q_depth           236 drivers/net/ethernet/amazon/ena/ena_eth_com.c 	if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
q_depth           245 drivers/net/ethernet/amazon/ena/ena_eth_com.c 	idx &= (io_cq->q_depth - 1);
q_depth           273 drivers/net/ethernet/amazon/ena/ena_eth_com.c 		head_masked = io_cq->head & (io_cq->q_depth - 1);
q_depth           106 drivers/net/ethernet/amazon/ena/ena_eth_com.h 	return io_sq->q_depth - 1 - cnt;
q_depth           201 drivers/net/ethernet/amazon/ena/ena_eth_com.h 		need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
q_depth           238 drivers/net/ethernet/amazon/ena/ena_eth_com.h 	if (unlikely((io_cq->head & (io_cq->q_depth - 1)) == 0))
q_depth           249 drivers/net/ethernet/amazon/ena/ena_eth_com.h 	masked_head = io_cq->head & (io_cq->q_depth - 1);
q_depth           267 drivers/net/ethernet/amazon/ena/ena_eth_com.h 	if (unlikely(*req_id >= io_cq->q_depth)) {
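
The ena_eth_com hits add two I/O-path calculations worth spelling out: free submission-queue space is q_depth - 1 - cnt (one slot stays unused so a full ring is distinguishable from an empty one), and completion-queue head doorbells are batched until unreported completions exceed q_depth / ENA_COMP_HEAD_THRESH. A sketch under simplified bookkeeping (the driver tracks next_to_comp rather than head on the SQ side):

#include <stdbool.h>
#include <stdint.h>

#define ENA_COMP_HEAD_THRESH 4		/* value used by the driver */

struct io_ring {
	uint16_t q_depth;		/* power of two */
	uint16_t head;
	uint16_t tail;
	uint16_t last_head_update;
};

/* One entry is sacrificed so "full" never aliases "empty". */
static uint16_t io_sq_free_descs(const struct io_ring *sq)
{
	uint16_t cnt = sq->tail - sq->head;	/* in flight, mod 2^16 */

	return sq->q_depth - 1 - cnt;
}

/* Ring the CQ head doorbell only after a q_depth/THRESH-sized batch. */
static bool io_cq_needs_head_update(const struct io_ring *cq)
{
	uint16_t unreported = cq->head - cq->last_head_update;

	return unreported > (cq->q_depth / ENA_COMP_HEAD_THRESH);
}
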
q_depth           518 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
q_depth           520 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	msgq_cfg->rspq.q_depth = htons(msgq->rspq.depth);
q_depth           413 drivers/net/ethernet/brocade/bna/bfi.h 	u16 q_depth;     /* Total num of entries in the queue */
q_depth          2385 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		q0->rcb->q_depth = rx_cfg->q0_depth;
q_depth          2386 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		q0->q_depth = rx_cfg->q0_depth;
q_depth          2412 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 			q1->rcb->q_depth = rx_cfg->q1_depth;
q_depth          2413 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 			q1->q_depth = rx_cfg->q1_depth;
q_depth          2444 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		rxp->cq.ccb->q_depth = cq_depth;
q_depth          3466 drivers/net/ethernet/brocade/bna/bna_tx_rx.c 		txq->tcb->q_depth = tx_cfg->txq_depth;
q_depth           422 drivers/net/ethernet/brocade/bna/bna_types.h 	u32		q_depth;
q_depth           550 drivers/net/ethernet/brocade/bna/bna_types.h 	u32		q_depth;
q_depth           566 drivers/net/ethernet/brocade/bna/bna_types.h 	int			q_depth;
q_depth           614 drivers/net/ethernet/brocade/bna/bna_types.h 	u32		q_depth;
q_depth            78 drivers/net/ethernet/brocade/bna/bnad.c 	for (i = 0; i < ccb->q_depth; i++) {
q_depth            91 drivers/net/ethernet/brocade/bna/bnad.c 			      u32 q_depth, u32 index)
q_depth           114 drivers/net/ethernet/brocade/bna/bnad.c 			BNA_QE_INDX_INC(index, q_depth);
q_depth           126 drivers/net/ethernet/brocade/bna/bnad.c 	BNA_QE_INDX_INC(index, q_depth);
q_depth           143 drivers/net/ethernet/brocade/bna/bnad.c 	for (i = 0; i < tcb->q_depth; i++) {
q_depth           147 drivers/net/ethernet/brocade/bna/bnad.c 		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
q_depth           162 drivers/net/ethernet/brocade/bna/bnad.c 	u32 wis, unmap_wis, hw_cons, cons, q_depth;
q_depth           174 drivers/net/ethernet/brocade/bna/bnad.c 	q_depth = tcb->q_depth;
q_depth           176 drivers/net/ethernet/brocade/bna/bnad.c 	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
q_depth           177 drivers/net/ethernet/brocade/bna/bnad.c 	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
q_depth           190 drivers/net/ethernet/brocade/bna/bnad.c 		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
q_depth           216 drivers/net/ethernet/brocade/bna/bnad.c 		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
q_depth           327 drivers/net/ethernet/brocade/bna/bnad.c 	for (i = 0; i < rcb->q_depth; i++) {
q_depth           341 drivers/net/ethernet/brocade/bna/bnad.c 	u32 alloced, prod, q_depth;
q_depth           350 drivers/net/ethernet/brocade/bna/bnad.c 	q_depth = rcb->q_depth;
q_depth           397 drivers/net/ethernet/brocade/bna/bnad.c 		BNA_QE_INDX_INC(prod, q_depth);
q_depth           415 drivers/net/ethernet/brocade/bna/bnad.c 	u32 alloced, prod, q_depth, buff_sz;
q_depth           424 drivers/net/ethernet/brocade/bna/bnad.c 	q_depth = rcb->q_depth;
q_depth           453 drivers/net/ethernet/brocade/bna/bnad.c 		BNA_QE_INDX_INC(prod, q_depth);
q_depth           474 drivers/net/ethernet/brocade/bna/bnad.c 	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
q_depth           509 drivers/net/ethernet/brocade/bna/bnad.c 		BNA_QE_INDX_INC(ci, rcb->q_depth);
q_depth           545 drivers/net/ethernet/brocade/bna/bnad.c 		BNA_QE_INDX_INC(ci, rcb->q_depth);
q_depth           561 drivers/net/ethernet/brocade/bna/bnad.c 		BNA_QE_INDX_INC(pi, ccb->q_depth);
q_depth           651 drivers/net/ethernet/brocade/bna/bnad.c 				BNA_QE_INDX_INC(pi, ccb->q_depth);
q_depth           720 drivers/net/ethernet/brocade/bna/bnad.c 		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
q_depth           724 drivers/net/ethernet/brocade/bna/bnad.c 			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
q_depth          2925 drivers/net/ethernet/brocade/bna/bnad.c 	u32		prod, q_depth, vect_id;
q_depth          2963 drivers/net/ethernet/brocade/bna/bnad.c 	q_depth = tcb->q_depth;
q_depth          2977 drivers/net/ethernet/brocade/bna/bnad.c 	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
q_depth          2997 drivers/net/ethernet/brocade/bna/bnad.c 		if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
q_depth          3040 drivers/net/ethernet/brocade/bna/bnad.c 			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
q_depth          3052 drivers/net/ethernet/brocade/bna/bnad.c 			BNA_QE_INDX_INC(prod, q_depth);
q_depth          3062 drivers/net/ethernet/brocade/bna/bnad.c 			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
q_depth          3079 drivers/net/ethernet/brocade/bna/bnad.c 		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
q_depth          3085 drivers/net/ethernet/brocade/bna/bnad.c 	BNA_QE_INDX_INC(prod, q_depth);
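
The bnad.c hits lean on a family of BNA_QE_* helpers whose definitions live in bna.h, outside this listing. Plausible reconstructions, consistent with every use above; all assume a power-of-two q_depth and keep indices stored pre-masked, unlike ENA's free-running counters:

/* Advance a stored index by _qe_num slots, wrapping via mask. */
#define BNA_QE_INDX_ADD(_qe_idx, _qe_num, _q_depth)			\
	((_qe_idx) = ((_qe_idx) + (_qe_num)) & ((_q_depth) - 1))

#define BNA_QE_INDX_INC(_idx, _q_depth) BNA_QE_INDX_ADD(_idx, 1, _q_depth)

/* Entries consumed by hardware since the index was last sampled. */
#define BNA_Q_INDEX_CHANGE(_old_idx, _updated_idx, _q_depth)		\
	(((_updated_idx) - (_old_idx)) & ((_q_depth) - 1))

/* Free slots between consumer and producer, one entry reserved. */
#define BNA_QE_FREE_CNT(_q_ptr, _q_depth)				\
	(((_q_ptr)->consumer_index - (_q_ptr)->producer_index - 1) &	\
	 ((_q_depth) - 1))

#define BNA_QE_IN_USE_CNT(_q_ptr, _q_depth)				\
	(((_q_ptr)->producer_index - (_q_ptr)->consumer_index) &	\
	 ((_q_depth) - 1))
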
q_depth           368 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	if (next_prod_idx >= wq->q_depth) {
q_depth           370 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		next_prod_idx -= wq->q_depth;
q_depth           446 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	if (next_prod_idx >= wq->q_depth) {
q_depth           448 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		next_prod_idx -= wq->q_depth;
q_depth           750 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
q_depth           755 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 					   wq->q_depth));
q_depth           219 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);
q_depth           247 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
q_depth           319 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	cqe_size = wq->q_depth * sizeof(*rq->cqe);
q_depth           324 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);
q_depth           329 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	for (i = 0; i < wq->q_depth; i++) {
q_depth           362 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	for (i = 0; i < wq->q_depth; i++)
q_depth            34 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c #define WQ_SIZE(wq)                     ((wq)->q_depth * (wq)->wqebb_size)
q_depth           506 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 		      u16 wqebb_size, u16 wq_page_size, u16 q_depth,
q_depth           525 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	if (q_depth & (q_depth - 1)) {
q_depth           549 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	wq->q_depth = q_depth;
q_depth           566 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	atomic_set(&wq->delta, q_depth);
q_depth           567 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	wq->mask = q_depth - 1;
q_depth           604 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 			 u16 q_depth, u16 max_wqe_size)
q_depth           622 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	if (q_depth & (q_depth - 1)) {
q_depth           652 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 		wq[i].q_depth = q_depth;
q_depth           670 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 		atomic_set(&wq[i].delta, q_depth);
q_depth           671 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 		wq[i].mask = q_depth - 1;
q_depth           828 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c 	if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
q_depth            30 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h 	u16             q_depth;
q_depth            80 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h 			 u16 q_depth, u16 max_wqe_size);
q_depth            91 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h 		      u16 wqebb_size, u16 wq_page_size, u16 q_depth,
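
The HiNIC hits show a third convention: both allocation paths above reject any q_depth that is not a power of two (the q_depth & (q_depth - 1) checks), free space is an atomic delta initialized to q_depth, and the command-queue producer wraps by subtraction rather than masking because one WQE can span several WQEBBs. A sketch of those three pieces with simplified types:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct hw_wq {
	uint16_t q_depth;		/* must be a power of two */
	uint16_t mask;			/* q_depth - 1 */
	uint16_t prod_idx;
	atomic_int delta;		/* free WQEBBs, starts at q_depth */
};

static bool wq_init(struct hw_wq *wq, uint16_t q_depth)
{
	if (q_depth & (q_depth - 1))	/* reject non-power-of-two depths */
		return false;

	wq->q_depth = q_depth;
	wq->mask = q_depth - 1;
	wq->prod_idx = 0;
	atomic_store(&wq->delta, q_depth);
	return true;
}

/* Advance the producer by whole WQEBBs, wrapping by subtraction and
 * reporting the wrap so the caller can toggle its owner bit (the
 * hinic_hw_cmdq.c pattern above). */
static uint16_t wq_advance_prod(struct hw_wq *wq, uint16_t num_wqebbs,
				bool *wrapped)
{
	uint16_t prod_idx = wq->prod_idx;
	uint16_t next_prod_idx = prod_idx + num_wqebbs;

	*wrapped = false;
	if (next_prod_idx >= wq->q_depth) {
		*wrapped = true;
		next_prod_idx -= wq->q_depth;
	}
	wq->prod_idx = next_prod_idx;
	return prod_idx;
}

/* The hinic_hw_wq.c:828 check: in-use entries (q_depth - delta) must
 * cover the read, i.e. delta + num_wqebbs must not exceed q_depth. */
static bool wq_can_read(struct hw_wq *wq, uint16_t num_wqebbs)
{
	return atomic_load(&wq->delta) + num_wqebbs <= wq->q_depth;
}
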
q_depth            31 drivers/nvme/host/pci.c #define SQ_SIZE(q)	((q)->q_depth << (q)->sqes)
q_depth            32 drivers/nvme/host/pci.c #define CQ_SIZE(q)	((q)->q_depth * sizeof(struct nvme_completion))
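
The two pci.c macros above size the rings in bytes: q_depth shifted by sqes (the log2 of the submission-entry size, 6 for the standard 64-byte SQE) and q_depth times the 16-byte NVMe completion entry. A trivial check of that arithmetic, with a stand-in completion struct:

#include <assert.h>
#include <stdint.h>

struct nvme_completion_sketch { uint32_t w[4]; };	/* 16 bytes */

int main(void)
{
	uint32_t q_depth = 1024;
	uint8_t sqes = 6;	/* 64-byte submission entries */

	assert((q_depth << sqes) == q_depth * 64);	/* SQ_SIZE */
	assert(q_depth * sizeof(struct nvme_completion_sketch)
	       == q_depth * 16);			/* CQ_SIZE */
	return 0;
}
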
q_depth           102 drivers/nvme/host/pci.c 	int q_depth;
q_depth           173 drivers/nvme/host/pci.c 	u16 q_depth;
q_depth           458 drivers/nvme/host/pci.c 		if (next_tail == nvmeq->q_depth)
q_depth           482 drivers/nvme/host/pci.c 	if (++nvmeq->sq_tail == nvmeq->q_depth)
q_depth           952 drivers/nvme/host/pci.c 	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
q_depth           981 drivers/nvme/host/pci.c 		if (++start == nvmeq->q_depth)
q_depth           988 drivers/nvme/host/pci.c 	if (nvmeq->cq_head == nvmeq->q_depth - 1) {
q_depth          1133 drivers/nvme/host/pci.c 	c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
q_depth          1163 drivers/nvme/host/pci.c 	c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
q_depth          1430 drivers/nvme/host/pci.c 	int q_depth = dev->q_depth;
q_depth          1431 drivers/nvme/host/pci.c 	unsigned q_size_aligned = roundup(q_depth * entry_size,
q_depth          1437 drivers/nvme/host/pci.c 		q_depth = div_u64(mem_per_q, entry_size);
q_depth          1444 drivers/nvme/host/pci.c 		if (q_depth < 64)
q_depth          1448 drivers/nvme/host/pci.c 	return q_depth;
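
The pci.c hits at 1430-1449 are the CMB sizing path: if all I/O queues at the current depth will not fit in the controller memory buffer, the per-queue depth is shrunk to what an even, page-aligned split allows, and depths below 64 are rejected outright. The same arithmetic as a self-contained sketch (round_up/round_down mirror the kernel helpers; the kernel returns -ENOMEM where this returns -1):

#include <stdint.h>

static uint64_t round_up_u64(uint64_t x, uint64_t a)
{
	return ((x + a - 1) / a) * a;
}

static uint64_t round_down_u64(uint64_t x, uint64_t a)
{
	return (x / a) * a;
}

static int cmb_qdepth(int q_depth, int nr_io_queues, uint64_t cmb_size,
		      uint64_t entry_size, uint64_t page_size)
{
	uint64_t q_size_aligned = round_up_u64(q_depth * entry_size, page_size);

	if (q_size_aligned * nr_io_queues > cmb_size) {
		uint64_t mem_per_q = cmb_size / nr_io_queues;

		mem_per_q = round_down_u64(mem_per_q, page_size);
		q_depth = (int)(mem_per_q / entry_size);
		if (q_depth < 64)	/* too shallow to be worth the CMB */
			return -1;
	}
	return q_depth;
}
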
q_depth          1485 drivers/nvme/host/pci.c 	nvmeq->q_depth = depth;
q_depth          1706 drivers/nvme/host/pci.c 	aqa = nvmeq->q_depth - 1;
q_depth          1735 drivers/nvme/host/pci.c 		if (nvme_alloc_queue(dev, i, dev->q_depth)) {
q_depth          2137 drivers/nvme/host/pci.c 			dev->q_depth = result;
q_depth          2281 drivers/nvme/host/pci.c 				min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
q_depth          2340 drivers/nvme/host/pci.c 	dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1,
q_depth          2342 drivers/nvme/host/pci.c 	dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
q_depth          2361 drivers/nvme/host/pci.c 		dev->q_depth = 2;
q_depth          2364 drivers/nvme/host/pci.c 			dev->q_depth);
q_depth          2368 drivers/nvme/host/pci.c 		dev->q_depth = 64;
q_depth          2370 drivers/nvme/host/pci.c 			"set queue depth=%u\n", dev->q_depth);
q_depth          2378 drivers/nvme/host/pci.c 	    (dev->q_depth < (NVME_AQ_DEPTH + 2))) {
q_depth          2379 drivers/nvme/host/pci.c 		dev->q_depth = NVME_AQ_DEPTH + 2;
q_depth          2381 drivers/nvme/host/pci.c 			 dev->q_depth);
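
The remaining pci.c hits document NVMe's "0's based" size convention: CAP.MQES holds the maximum entries per queue minus one, so the driver computes q_depth = MQES + 1 (capped by the module's io_queue_depth), reports sqsize = q_depth - 1 back in 0's based form, and programs AQA and the create-SQ/CQ commands with q_depth - 1 again; broken controllers get fixed depths (2 and 64 in the quirk branches above). A sketch of the conversion, with a simplified struct (NVME_CAP_MQES matches the field's position in CAP):

#include <stdint.h>

#define NVME_CAP_MQES(cap)	((cap) & 0xffff)	/* 0's based max */

struct nvme_dev_sketch {
	uint64_t cap;		/* controller CAP register */
	int q_depth;		/* 1's based, used internally */
	uint16_t sqsize;	/* 0's based, as the spec speaks */
};

static void nvme_pick_depth(struct nvme_dev_sketch *dev, int io_queue_depth)
{
	int mqes_depth = (int)NVME_CAP_MQES(dev->cap) + 1;

	dev->q_depth = mqes_depth < io_queue_depth ? mqes_depth
						   : io_queue_depth;
	dev->sqsize = (uint16_t)(dev->q_depth - 1);
}

/* AQA and the Create SQ/CQ qsize fields take the 0's based value,
 * which is why pci.c:1133, 1163 and 1706 above all subtract one. */
static uint16_t nvme_zero_based_qsize(int q_depth)
{
	return (uint16_t)(q_depth - 1);
}
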
q_depth           958 drivers/scsi/bfa/bfa_defs_svc.h 	u16	 q_depth;	/*  SCSI Queue depth		*/
q_depth           497 drivers/scsi/bfa/bfa_fcpim.c 	return fcpim->q_depth;
q_depth           119 drivers/scsi/bfa/bfa_fcpim.h 	u16			q_depth;
q_depth          3567 drivers/scsi/bfa/bfa_svc.c 				fcport->cfg.q_depth =
q_depth          3568 drivers/scsi/bfa/bfa_svc.c 					cpu_to_be16(fcport->cfg.q_depth);
q_depth          3988 drivers/scsi/bfa/bfa_svc.c 	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
q_depth           538 drivers/scsi/bfa/bfi.h #define BFI_MSGQ_FULL(_q)	(((_q->pi + 1) % _q->q_depth) == _q->ci)
q_depth           540 drivers/scsi/bfa/bfi.h #define BFI_MSGQ_UPDATE_CI(_q)	(_q->ci = (_q->ci + 1) % _q->q_depth)
q_depth           541 drivers/scsi/bfa/bfi.h #define BFI_MSGQ_UPDATE_PI(_q)	(_q->pi = (_q->pi + 1) % _q->q_depth)
q_depth           544 drivers/scsi/bfa/bfi.h #define BFI_MSGQ_FREE_CNT(_q)	((_q->ci - _q->pi - 1) & (_q->q_depth - 1))
q_depth           585 drivers/scsi/bfa/bfi.h 	u16 q_depth;     /* Total num of entries in the queue */
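
The bfi.h macros quoted above mix two wrap conventions: BFI_MSGQ_FULL and the index updates use modulo, which works for any q_depth, while BFI_MSGQ_FREE_CNT masks with (q_depth - 1) and is only correct for power-of-two depths. A small self-contained demo of the same macros (parenthesized here; the queue struct is a stand-in for the real bfi_msgq):

#include <stdio.h>
#include <stdint.h>

struct msgq {
	uint16_t pi;		/* producer index */
	uint16_t ci;		/* consumer index */
	uint16_t q_depth;	/* total entries, power of two here */
};

#define BFI_MSGQ_FULL(_q)      ((((_q)->pi + 1) % (_q)->q_depth) == (_q)->ci)
#define BFI_MSGQ_UPDATE_CI(_q) ((_q)->ci = ((_q)->ci + 1) % (_q)->q_depth)
#define BFI_MSGQ_UPDATE_PI(_q) ((_q)->pi = ((_q)->pi + 1) % (_q)->q_depth)
#define BFI_MSGQ_FREE_CNT(_q) \
	(((_q)->ci - (_q)->pi - 1) & ((_q)->q_depth - 1))

int main(void)
{
	struct msgq q = { .pi = 0, .ci = 0, .q_depth = 8 };

	while (!BFI_MSGQ_FULL(&q))	/* fill up; one slot stays unused */
		BFI_MSGQ_UPDATE_PI(&q);
	printf("pi=%u ci=%u free=%u\n", (unsigned)q.pi, (unsigned)q.ci,
	       (unsigned)BFI_MSGQ_FREE_CNT(&q));	/* free=0 when full */

	BFI_MSGQ_UPDATE_CI(&q);		/* consume one entry */
	printf("free after one consume: %u\n",
	       (unsigned)BFI_MSGQ_FREE_CNT(&q));	/* now 1 */
	return 0;
}
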