cmdq              558 drivers/atm/fore200e.c     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
cmdq              559 drivers/atm/fore200e.c     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
cmdq              564 drivers/atm/fore200e.c     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
cmdq             1227 drivers/atm/fore200e.c     struct host_cmdq*        cmdq  = &fore200e->host_cmdq;
cmdq             1228 drivers/atm/fore200e.c     struct host_cmdq_entry*  entry = &cmdq->host_entry[ cmdq->head ];
cmdq             1235 drivers/atm/fore200e.c     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
cmdq             1671 drivers/atm/fore200e.c     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
cmdq             1672 drivers/atm/fore200e.c     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
cmdq             1688 drivers/atm/fore200e.c     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
cmdq             1742 drivers/atm/fore200e.c     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
cmdq             1743 drivers/atm/fore200e.c     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
cmdq             1750 drivers/atm/fore200e.c     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
cmdq             1782 drivers/atm/fore200e.c     struct host_cmdq*       cmdq  = &fore200e->host_cmdq;
cmdq             1783 drivers/atm/fore200e.c     struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
cmdq             1789 drivers/atm/fore200e.c     FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
cmdq             2259 drivers/atm/fore200e.c     struct host_cmdq*     cmdq =  &fore200e->host_cmdq;
cmdq             2267 drivers/atm/fore200e.c 				       &cmdq->status,
cmdq             2280 drivers/atm/fore200e.c 	cmdq->host_entry[ i ].status   = 
cmdq             2281 drivers/atm/fore200e.c                               FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
cmdq             2282 drivers/atm/fore200e.c 	cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
cmdq             2284 drivers/atm/fore200e.c 	*cmdq->host_entry[ i ].status = STATUS_FREE;
cmdq             2286 drivers/atm/fore200e.c 	fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i), 
cmdq             2291 drivers/atm/fore200e.c     cmdq->head = 0;
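
The fore200e entries above all share one shape: take the command entry at cmdq->head, fill it in, then advance the head modulo the queue size with FORE200E_NEXT_ENTRY, after seeding every per-entry status word with STATUS_FREE. A minimal userspace sketch of that ring bookkeeping; QUEUE_SIZE_CMD, STATUS_FREE and the macro body here are illustrative stand-ins, not the driver's real definitions.

    #include <stdio.h>

    #define QUEUE_SIZE_CMD 8                        /* illustrative depth */
    #define STATUS_FREE    0
    #define NEXT_ENTRY(idx, size) ((idx) = ((idx) + 1) % (size))

    struct host_entry { int status; };

    int main(void)
    {
        struct host_entry ring[QUEUE_SIZE_CMD];
        int head = 0;

        for (int i = 0; i < QUEUE_SIZE_CMD; i++)
            ring[i].status = STATUS_FREE;           /* cf. *status = STATUS_FREE */

        for (int i = 0; i < 10; i++) {
            printf("command issued via entry %d\n", head);
            NEXT_ENTRY(head, QUEUE_SIZE_CMD);       /* wrap like FORE200E_NEXT_ENTRY */
        }
        return 0;
    }
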
cmdq              107 drivers/crypto/cavium/nitrox/nitrox_dev.h 		struct nitrox_cmdq *cmdq;
cmdq              124 drivers/crypto/cavium/nitrox/nitrox_hal.c 		struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
cmdq              137 drivers/crypto/cavium/nitrox/nitrox_hal.c 		nitrox_write_csr(ndev, offset, cmdq->dma);
cmdq              356 drivers/crypto/cavium/nitrox/nitrox_hal.c 		struct nitrox_cmdq *cmdq = ndev->aqmq[ring];
cmdq              379 drivers/crypto/cavium/nitrox/nitrox_hal.c 		nitrox_write_csr(ndev, offset, cmdq->dma);
cmdq               31 drivers/crypto/cavium/nitrox/nitrox_isr.c 	struct nitrox_cmdq *cmdq = qvec->cmdq;
cmdq               33 drivers/crypto/cavium/nitrox/nitrox_isr.c 	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
cmdq              332 drivers/crypto/cavium/nitrox/nitrox_isr.c 		qvec->cmdq = &ndev->pkt_inq[qvec->ring];
cmdq               25 drivers/crypto/cavium/nitrox/nitrox_lib.c static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
cmdq               27 drivers/crypto/cavium/nitrox/nitrox_lib.c 	struct nitrox_device *ndev = cmdq->ndev;
cmdq               29 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
cmdq               30 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
cmdq               31 drivers/crypto/cavium/nitrox/nitrox_lib.c 						&cmdq->unalign_dma,
cmdq               33 drivers/crypto/cavium/nitrox/nitrox_lib.c 	if (!cmdq->unalign_base)
cmdq               36 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
cmdq               37 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
cmdq               38 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->write_idx = 0;
cmdq               40 drivers/crypto/cavium/nitrox/nitrox_lib.c 	spin_lock_init(&cmdq->cmd_qlock);
cmdq               41 drivers/crypto/cavium/nitrox/nitrox_lib.c 	spin_lock_init(&cmdq->resp_qlock);
cmdq               42 drivers/crypto/cavium/nitrox/nitrox_lib.c 	spin_lock_init(&cmdq->backlog_qlock);
cmdq               44 drivers/crypto/cavium/nitrox/nitrox_lib.c 	INIT_LIST_HEAD(&cmdq->response_head);
cmdq               45 drivers/crypto/cavium/nitrox/nitrox_lib.c 	INIT_LIST_HEAD(&cmdq->backlog_head);
cmdq               46 drivers/crypto/cavium/nitrox/nitrox_lib.c 	INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);
cmdq               48 drivers/crypto/cavium/nitrox/nitrox_lib.c 	atomic_set(&cmdq->pending_count, 0);
cmdq               49 drivers/crypto/cavium/nitrox/nitrox_lib.c 	atomic_set(&cmdq->backlog_count, 0);
cmdq               53 drivers/crypto/cavium/nitrox/nitrox_lib.c static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq)
cmdq               55 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->write_idx = 0;
cmdq               56 drivers/crypto/cavium/nitrox/nitrox_lib.c 	atomic_set(&cmdq->pending_count, 0);
cmdq               57 drivers/crypto/cavium/nitrox/nitrox_lib.c 	atomic_set(&cmdq->backlog_count, 0);
cmdq               60 drivers/crypto/cavium/nitrox/nitrox_lib.c static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq)
cmdq               64 drivers/crypto/cavium/nitrox/nitrox_lib.c 	if (!cmdq)
cmdq               67 drivers/crypto/cavium/nitrox/nitrox_lib.c 	if (!cmdq->unalign_base)
cmdq               70 drivers/crypto/cavium/nitrox/nitrox_lib.c 	ndev = cmdq->ndev;
cmdq               71 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cancel_work_sync(&cmdq->backlog_qflush);
cmdq               73 drivers/crypto/cavium/nitrox/nitrox_lib.c 	dma_free_coherent(DEV(ndev), cmdq->qsize,
cmdq               74 drivers/crypto/cavium/nitrox/nitrox_lib.c 			  cmdq->unalign_base, cmdq->unalign_dma);
cmdq               75 drivers/crypto/cavium/nitrox/nitrox_lib.c 	nitrox_cmdq_reset(cmdq);
cmdq               77 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->dbell_csr_addr = NULL;
cmdq               78 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->compl_cnt_csr_addr = NULL;
cmdq               79 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->unalign_base = NULL;
cmdq               80 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->base = NULL;
cmdq               81 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->unalign_dma = 0;
cmdq               82 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->dma = 0;
cmdq               83 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->qsize = 0;
cmdq               84 drivers/crypto/cavium/nitrox/nitrox_lib.c 	cmdq->instr_size = 0;
cmdq              103 drivers/crypto/cavium/nitrox/nitrox_lib.c 		struct nitrox_cmdq *cmdq;
cmdq              106 drivers/crypto/cavium/nitrox/nitrox_lib.c 		cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node);
cmdq              107 drivers/crypto/cavium/nitrox/nitrox_lib.c 		if (!cmdq) {
cmdq              112 drivers/crypto/cavium/nitrox/nitrox_lib.c 		cmdq->ndev = ndev;
cmdq              113 drivers/crypto/cavium/nitrox/nitrox_lib.c 		cmdq->qno = i;
cmdq              114 drivers/crypto/cavium/nitrox/nitrox_lib.c 		cmdq->instr_size = sizeof(struct aqmq_command_s);
cmdq              118 drivers/crypto/cavium/nitrox/nitrox_lib.c 		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
cmdq              121 drivers/crypto/cavium/nitrox/nitrox_lib.c 		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
cmdq              123 drivers/crypto/cavium/nitrox/nitrox_lib.c 		err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES);
cmdq              125 drivers/crypto/cavium/nitrox/nitrox_lib.c 			kzfree(cmdq);
cmdq              128 drivers/crypto/cavium/nitrox/nitrox_lib.c 		ndev->aqmq[i] = cmdq;
cmdq              143 drivers/crypto/cavium/nitrox/nitrox_lib.c 		struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
cmdq              145 drivers/crypto/cavium/nitrox/nitrox_lib.c 		nitrox_cmdq_cleanup(cmdq);
cmdq              162 drivers/crypto/cavium/nitrox/nitrox_lib.c 		struct nitrox_cmdq *cmdq;
cmdq              165 drivers/crypto/cavium/nitrox/nitrox_lib.c 		cmdq = &ndev->pkt_inq[i];
cmdq              166 drivers/crypto/cavium/nitrox/nitrox_lib.c 		cmdq->ndev = ndev;
cmdq              167 drivers/crypto/cavium/nitrox/nitrox_lib.c 		cmdq->qno = i;
cmdq              168 drivers/crypto/cavium/nitrox/nitrox_lib.c 		cmdq->instr_size = sizeof(struct nps_pkt_instr);
cmdq              172 drivers/crypto/cavium/nitrox/nitrox_lib.c 		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);
cmdq              175 drivers/crypto/cavium/nitrox/nitrox_lib.c 		cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset);
cmdq              177 drivers/crypto/cavium/nitrox/nitrox_lib.c 		err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES);
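
nitrox_cmdq_init() above over-allocates the queue by align_bytes, aligns the DMA handle with PTR_ALIGN, and shifts the CPU-side base by the same delta so both views of the queue stay in step. A userspace sketch of that trick, with malloc standing in for dma_alloc_coherent and the pointer value standing in for the returned DMA handle.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

    int main(void)
    {
        size_t qlen = 16, instr_size = 64, align = 4096;
        size_t qsize = qlen * instr_size + align;   /* pad for the worst case */

        unsigned char *unalign_base = malloc(qsize);
        if (!unalign_base)
            return 1;

        /* the pointer value stands in for the dma_addr_t handle */
        uintptr_t unalign_dma = (uintptr_t)unalign_base;
        uintptr_t dma = ALIGN_UP(unalign_dma, align);     /* cf. PTR_ALIGN() */
        unsigned char *base = unalign_base + (dma - unalign_dma);

        printf("aligned base sits %zu bytes into the allocation\n",
               (size_t)(base - unalign_base));
        free(unalign_base);
        return 0;
    }
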
cmdq              513 drivers/crypto/cavium/nitrox/nitrox_req.h 	struct nitrox_cmdq *cmdq;
cmdq              228 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 				    struct nitrox_cmdq *cmdq)
cmdq              232 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	spin_lock_bh(&cmdq->backlog_qlock);
cmdq              233 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	list_add_tail(&sr->backlog, &cmdq->backlog_head);
cmdq              234 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	atomic_inc(&cmdq->backlog_count);
cmdq              236 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	spin_unlock_bh(&cmdq->backlog_qlock);
cmdq              240 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 				     struct nitrox_cmdq *cmdq)
cmdq              244 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	spin_lock_bh(&cmdq->resp_qlock);
cmdq              245 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	list_add_tail(&sr->response, &cmdq->response_head);
cmdq              246 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	spin_unlock_bh(&cmdq->resp_qlock);
cmdq              250 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 				     struct nitrox_cmdq *cmdq)
cmdq              252 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	spin_lock_bh(&cmdq->resp_qlock);
cmdq              254 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	spin_unlock_bh(&cmdq->resp_qlock);
cmdq              258 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c get_first_response_entry(struct nitrox_cmdq *cmdq)
cmdq              260 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	return list_first_entry_or_null(&cmdq->response_head,
cmdq              264 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
cmdq              266 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
cmdq              267 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 		atomic_dec(&cmdq->pending_count);
cmdq              285 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 			  struct nitrox_cmdq *cmdq)
cmdq              291 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	spin_lock_bh(&cmdq->cmd_qlock);
cmdq              293 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	idx = cmdq->write_idx;
cmdq              295 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	ent = cmdq->base + (idx * cmdq->instr_size);
cmdq              296 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	memcpy(ent, &sr->instr, cmdq->instr_size);
cmdq              299 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	response_list_add(sr, cmdq);
cmdq              305 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	writeq(1, cmdq->dbell_csr_addr);
cmdq              307 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
cmdq              309 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	spin_unlock_bh(&cmdq->cmd_qlock);
cmdq              315 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
cmdq              317 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	struct nitrox_device *ndev = cmdq->ndev;
cmdq              321 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	if (!atomic_read(&cmdq->backlog_count))
cmdq              324 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	spin_lock_bh(&cmdq->backlog_qlock);
cmdq              326 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
cmdq              328 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
cmdq              334 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 		atomic_dec(&cmdq->backlog_count);
cmdq              339 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 		post_se_instr(sr, cmdq);
cmdq              341 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	spin_unlock_bh(&cmdq->backlog_qlock);
cmdq              348 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	struct nitrox_cmdq *cmdq = sr->cmdq;
cmdq              352 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	post_backlog_cmds(cmdq);
cmdq              354 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
cmdq              361 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 		backlog_list_add(sr, cmdq);
cmdq              364 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	post_se_instr(sr, cmdq);
cmdq              422 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	sr->cmdq = &ndev->pkt_inq[qno];
cmdq              502 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	struct nitrox_cmdq *cmdq;
cmdq              504 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
cmdq              505 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	post_backlog_cmds(cmdq);
cmdq              533 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c static void process_response_list(struct nitrox_cmdq *cmdq)
cmdq              535 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	struct nitrox_device *ndev = cmdq->ndev;
cmdq              542 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	budget = atomic_read(&cmdq->pending_count);
cmdq              545 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 		sr = get_first_response_entry(cmdq);
cmdq              561 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 		atomic_dec(&cmdq->pending_count);
cmdq              566 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 		response_list_del(sr, cmdq);
cmdq              585 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	struct nitrox_cmdq *cmdq = qvec->cmdq;
cmdq              589 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
cmdq              593 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	process_response_list(cmdq);
cmdq              599 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
cmdq              601 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 	if (atomic_read(&cmdq->backlog_count))
cmdq              602 drivers/crypto/cavium/nitrox/nitrox_reqmgr.c 		schedule_work(&cmdq->backlog_qflush);
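
The cmdq_full() check above reserves a slot optimistically: atomic_inc_return() bumps pending_count, and if the new value exceeds qlen the reservation is rolled back and the request goes onto the backlog, which the ISR later drains through backlog_qflush. A hedged C11 sketch of that admission test.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int pending_count;   /* cf. cmdq->pending_count */

    static bool cmdq_full(int qlen)
    {
        /* atomic_fetch_add returns the old value; +1 mirrors atomic_inc_return */
        if (atomic_fetch_add(&pending_count, 1) + 1 > qlen) {
            atomic_fetch_sub(&pending_count, 1);   /* roll the reservation back */
            return true;                           /* caller backlogs the request */
        }
        return false;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            printf("request %d: %s\n", i, cmdq_full(3) ? "backlogged" : "posted");
        return 0;
    }
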
cmdq               88 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
cmdq              119 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	spin_lock_irqsave(&cmdq->lock, flags);
cmdq              120 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
cmdq              122 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		spin_unlock_irqrestore(&cmdq->lock, flags);
cmdq              136 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		spin_unlock_irqrestore(&cmdq->lock, flags);
cmdq              158 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
cmdq              162 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
cmdq              175 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		cmdq->prod++;
cmdq              181 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	cmdq_prod = cmdq->prod;
cmdq              199 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	spin_unlock_irqrestore(&cmdq->lock, flags);
cmdq              295 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
cmdq              331 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		spin_lock_irqsave_nested(&cmdq->lock, flags,
cmdq              354 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		cmdq->cons += crsqe->req_size;
cmdq              359 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		spin_unlock_irqrestore(&cmdq->lock, flags);
cmdq              563 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
cmdq              591 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	rcfw->cmdq.max_elements = rcfw->cmdq_depth;
cmdq              593 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 			(rcfw->pdev, &rcfw->cmdq, NULL,
cmdq              594 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 			 &rcfw->cmdq.max_elements,
cmdq              603 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
cmdq              763 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 	init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
cmdq              767 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c 		((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
cmdq              261 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h 	struct bnxt_qplib_hwq	cmdq;
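
The qplib command queue above is a classic producer/consumer ring guarded by cmdq->lock: reject the request when it needs more free slots than HWQ_FREE_SLOTS() reports, write at the HWQ_CMP()-masked producer slot, then bump prod (with cons advanced by req_size on completion). A sketch under the assumption of a power-of-two depth; the names are simplified stand-ins for the bnxt_qplib_hwq fields.

    #include <stdio.h>

    #define DEPTH 8   /* power of two so the HWQ_CMP()-style mask works */

    struct hwq { unsigned prod, cons; };

    static unsigned free_slots(const struct hwq *q) { return DEPTH - (q->prod - q->cons); }
    static unsigned slot(unsigned idx)              { return idx & (DEPTH - 1); }

    int main(void)
    {
        struct hwq q = { 0, 0 };

        for (int i = 0; i < 12; i++) {
            if (1 >= free_slots(&q)) {   /* cf. req->cmd_size >= HWQ_FREE_SLOTS() */
                q.cons += 2;             /* pretend firmware consumed two entries */
                continue;
            }
            printf("write CMDQE at slot %u\n", slot(q.prod));
            q.prod++;                    /* cf. cmdq->prod++ */
        }
        return 0;
    }
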
cmdq              604 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_cmdq		cmdq;
cmdq              918 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_queue *q = &smmu->cmdq.q;
cmdq              947 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_queue *q = &smmu->cmdq.q;
cmdq             1006 drivers/iommu/arm-smmu-v3.c static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq)
cmdq             1016 drivers/iommu/arm-smmu-v3.c 	if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0)
cmdq             1020 drivers/iommu/arm-smmu-v3.c 		val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0);
cmdq             1021 drivers/iommu/arm-smmu-v3.c 	} while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val);
cmdq             1024 drivers/iommu/arm-smmu-v3.c static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq)
cmdq             1026 drivers/iommu/arm-smmu-v3.c 	(void)atomic_dec_return_release(&cmdq->lock);
cmdq             1029 drivers/iommu/arm-smmu-v3.c static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq)
cmdq             1031 drivers/iommu/arm-smmu-v3.c 	if (atomic_read(&cmdq->lock) == 1)
cmdq             1034 drivers/iommu/arm-smmu-v3.c 	arm_smmu_cmdq_shared_unlock(cmdq);
cmdq             1038 drivers/iommu/arm-smmu-v3.c #define arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)		\
cmdq             1042 drivers/iommu/arm-smmu-v3.c 	__ret = !atomic_cmpxchg_relaxed(&cmdq->lock, 0, INT_MIN);	\
cmdq             1048 drivers/iommu/arm-smmu-v3.c #define arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags)		\
cmdq             1050 drivers/iommu/arm-smmu-v3.c 	atomic_set_release(&cmdq->lock, 0);				\
cmdq             1094 drivers/iommu/arm-smmu-v3.c static void __arm_smmu_cmdq_poll_set_valid_map(struct arm_smmu_cmdq *cmdq,
cmdq             1099 drivers/iommu/arm-smmu-v3.c 		.max_n_shift	= cmdq->q.llq.max_n_shift,
cmdq             1114 drivers/iommu/arm-smmu-v3.c 		ptr = &cmdq->valid_map[swidx];
cmdq             1141 drivers/iommu/arm-smmu-v3.c static void arm_smmu_cmdq_set_valid_map(struct arm_smmu_cmdq *cmdq,
cmdq             1144 drivers/iommu/arm-smmu-v3.c 	__arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, true);
cmdq             1148 drivers/iommu/arm-smmu-v3.c static void arm_smmu_cmdq_poll_valid_map(struct arm_smmu_cmdq *cmdq,
cmdq             1151 drivers/iommu/arm-smmu-v3.c 	__arm_smmu_cmdq_poll_set_valid_map(cmdq, sprod, eprod, false);
cmdq             1160 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
cmdq             1167 drivers/iommu/arm-smmu-v3.c 	if (arm_smmu_cmdq_exclusive_trylock_irqsave(cmdq, flags)) {
cmdq             1168 drivers/iommu/arm-smmu-v3.c 		WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg));
cmdq             1169 drivers/iommu/arm-smmu-v3.c 		arm_smmu_cmdq_exclusive_unlock_irqrestore(cmdq, flags);
cmdq             1170 drivers/iommu/arm-smmu-v3.c 		llq->val = READ_ONCE(cmdq->q.llq.val);
cmdq             1176 drivers/iommu/arm-smmu-v3.c 		llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
cmdq             1195 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
cmdq             1196 drivers/iommu/arm-smmu-v3.c 	u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod));
cmdq             1218 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
cmdq             1223 drivers/iommu/arm-smmu-v3.c 	llq->val = READ_ONCE(smmu->cmdq.q.llq.val);
cmdq             1258 drivers/iommu/arm-smmu-v3.c 		llq->cons = readl(cmdq->q.cons_reg);
cmdq             1274 drivers/iommu/arm-smmu-v3.c static void arm_smmu_cmdq_write_entries(struct arm_smmu_cmdq *cmdq, u64 *cmds,
cmdq             1279 drivers/iommu/arm-smmu-v3.c 		.max_n_shift	= cmdq->q.llq.max_n_shift,
cmdq             1287 drivers/iommu/arm-smmu-v3.c 		queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS);
cmdq             1314 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
cmdq             1316 drivers/iommu/arm-smmu-v3.c 		.max_n_shift = cmdq->q.llq.max_n_shift,
cmdq             1322 drivers/iommu/arm-smmu-v3.c 	llq.val = READ_ONCE(cmdq->q.llq.val);
cmdq             1337 drivers/iommu/arm-smmu-v3.c 		old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val);
cmdq             1351 drivers/iommu/arm-smmu-v3.c 	arm_smmu_cmdq_write_entries(cmdq, cmds, llq.prod, n);
cmdq             1355 drivers/iommu/arm-smmu-v3.c 		queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS);
cmdq             1363 drivers/iommu/arm-smmu-v3.c 		arm_smmu_cmdq_shared_lock(cmdq);
cmdq             1368 drivers/iommu/arm-smmu-v3.c 	arm_smmu_cmdq_set_valid_map(cmdq, llq.prod, head.prod);
cmdq             1373 drivers/iommu/arm-smmu-v3.c 		atomic_cond_read_relaxed(&cmdq->owner_prod, VAL == llq.prod);
cmdq             1377 drivers/iommu/arm-smmu-v3.c 						   &cmdq->q.llq.atomic.prod);
cmdq             1385 drivers/iommu/arm-smmu-v3.c 		arm_smmu_cmdq_poll_valid_map(cmdq, llq.prod, prod);
cmdq             1391 drivers/iommu/arm-smmu-v3.c 		writel_relaxed(prod, cmdq->q.prod_reg);
cmdq             1398 drivers/iommu/arm-smmu-v3.c 		atomic_set_release(&cmdq->owner_prod, prod);
cmdq             1409 drivers/iommu/arm-smmu-v3.c 					    readl_relaxed(cmdq->q.prod_reg),
cmdq             1410 drivers/iommu/arm-smmu-v3.c 					    readl_relaxed(cmdq->q.cons_reg));
cmdq             1417 drivers/iommu/arm-smmu-v3.c 		if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) {
cmdq             1418 drivers/iommu/arm-smmu-v3.c 			WRITE_ONCE(cmdq->q.llq.cons, llq.cons);
cmdq             1419 drivers/iommu/arm-smmu-v3.c 			arm_smmu_cmdq_shared_unlock(cmdq);
cmdq             2797 drivers/iommu/arm-smmu-v3.c 	struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
cmdq             2798 drivers/iommu/arm-smmu-v3.c 	unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
cmdq             2801 drivers/iommu/arm-smmu-v3.c 	atomic_set(&cmdq->owner_prod, 0);
cmdq             2802 drivers/iommu/arm-smmu-v3.c 	atomic_set(&cmdq->lock, 0);
cmdq             2809 drivers/iommu/arm-smmu-v3.c 		cmdq->valid_map = bitmap;
cmdq             2821 drivers/iommu/arm-smmu-v3.c 	ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
cmdq             3207 drivers/iommu/arm-smmu-v3.c 	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
cmdq             3208 drivers/iommu/arm-smmu-v3.c 	writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
cmdq             3209 drivers/iommu/arm-smmu-v3.c 	writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
cmdq             3409 drivers/iommu/arm-smmu-v3.c 	smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT,
cmdq             3411 drivers/iommu/arm-smmu-v3.c 	if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) {
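
The arm-smmu-v3 lines show an unusual lock: cmdq->lock is an atomic counter where non-negative values count shared holders and INT_MIN marks an exclusive owner. Readers fast-path with a fetch-inc, and a stray increment taken while the owner holds the lock is harmless because the owner stores 0 on unlock. A userspace C11 analogue of that protocol; the function names are mine, not the driver's.

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_int lock;   /* >= 0: reader count; INT_MIN: exclusively owned */

    static void shared_lock(void)
    {
        int val;

        /* fast path mirrors atomic_fetch_inc_relaxed(): a non-negative
         * result means no exclusive owner, and the increment stands */
        if (atomic_fetch_add_explicit(&lock, 1, memory_order_relaxed) >= 0)
            return;

        /* otherwise spin until the owner releases; its store of 0 also
         * erases the stray increment taken above */
        do {
            do {
                val = atomic_load_explicit(&lock, memory_order_relaxed);
            } while (val < 0);
        } while (!atomic_compare_exchange_weak_explicit(&lock, &val, val + 1,
                                                        memory_order_relaxed,
                                                        memory_order_relaxed));
    }

    static void shared_unlock(void)
    {
        atomic_fetch_sub_explicit(&lock, 1, memory_order_release);
    }

    static bool exclusive_trylock(void)
    {
        int expected = 0;   /* only claimable when no readers hold it */
        return atomic_compare_exchange_strong(&lock, &expected, INT_MIN);
    }

    static void exclusive_unlock(void)
    {
        atomic_store_explicit(&lock, 0, memory_order_release);
    }

    int main(void)
    {
        shared_lock();
        printf("exclusive while shared held: %d\n", exclusive_trylock()); /* 0 */
        shared_unlock();
        printf("exclusive when free:         %d\n", exclusive_trylock()); /* 1 */
        exclusive_unlock();
        return 0;
    }
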
cmdq               63 drivers/mailbox/mtk-cmdq-mailbox.c 	struct cmdq		*cmdq;
cmdq               81 drivers/mailbox/mtk-cmdq-mailbox.c static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
cmdq               93 drivers/mailbox/mtk-cmdq-mailbox.c 		dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
cmdq               94 drivers/mailbox/mtk-cmdq-mailbox.c 			(u32)(thread->base - cmdq->base));
cmdq              106 drivers/mailbox/mtk-cmdq-mailbox.c static void cmdq_init(struct cmdq *cmdq)
cmdq              110 drivers/mailbox/mtk-cmdq-mailbox.c 	WARN_ON(clk_enable(cmdq->clock) < 0);
cmdq              111 drivers/mailbox/mtk-cmdq-mailbox.c 	writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
cmdq              113 drivers/mailbox/mtk-cmdq-mailbox.c 		writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
cmdq              114 drivers/mailbox/mtk-cmdq-mailbox.c 	clk_disable(cmdq->clock);
cmdq              117 drivers/mailbox/mtk-cmdq-mailbox.c static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
cmdq              125 drivers/mailbox/mtk-cmdq-mailbox.c 		dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
cmdq              126 drivers/mailbox/mtk-cmdq-mailbox.c 			(u32)(thread->base - cmdq->base));
cmdq              133 drivers/mailbox/mtk-cmdq-mailbox.c static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
cmdq              135 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq_thread_reset(cmdq, thread);
cmdq              148 drivers/mailbox/mtk-cmdq-mailbox.c 	struct device *dev = task->cmdq->mbox.dev;
cmdq              177 drivers/mailbox/mtk-cmdq-mailbox.c 	struct device *dev = task->cmdq->mbox.dev;
cmdq              225 drivers/mailbox/mtk-cmdq-mailbox.c 	dev_err(task->cmdq->mbox.dev, "task 0x%p error\n", task);
cmdq              226 drivers/mailbox/mtk-cmdq-mailbox.c 	WARN_ON(cmdq_thread_suspend(task->cmdq, thread) < 0);
cmdq              234 drivers/mailbox/mtk-cmdq-mailbox.c static void cmdq_thread_irq_handler(struct cmdq *cmdq,
cmdq              282 drivers/mailbox/mtk-cmdq-mailbox.c 		cmdq_thread_disable(cmdq, thread);
cmdq              283 drivers/mailbox/mtk-cmdq-mailbox.c 		clk_disable(cmdq->clock);
cmdq              289 drivers/mailbox/mtk-cmdq-mailbox.c 	struct cmdq *cmdq = dev;
cmdq              293 drivers/mailbox/mtk-cmdq-mailbox.c 	irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
cmdq              294 drivers/mailbox/mtk-cmdq-mailbox.c 	if (!(irq_status ^ cmdq->irq_mask))
cmdq              297 drivers/mailbox/mtk-cmdq-mailbox.c 	for_each_clear_bit(bit, &irq_status, cmdq->thread_nr) {
cmdq              298 drivers/mailbox/mtk-cmdq-mailbox.c 		struct cmdq_thread *thread = &cmdq->thread[bit];
cmdq              301 drivers/mailbox/mtk-cmdq-mailbox.c 		cmdq_thread_irq_handler(cmdq, thread);
cmdq              310 drivers/mailbox/mtk-cmdq-mailbox.c 	struct cmdq *cmdq = dev_get_drvdata(dev);
cmdq              315 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->suspended = true;
cmdq              317 drivers/mailbox/mtk-cmdq-mailbox.c 	for (i = 0; i < cmdq->thread_nr; i++) {
cmdq              318 drivers/mailbox/mtk-cmdq-mailbox.c 		thread = &cmdq->thread[i];
cmdq              328 drivers/mailbox/mtk-cmdq-mailbox.c 	clk_unprepare(cmdq->clock);
cmdq              335 drivers/mailbox/mtk-cmdq-mailbox.c 	struct cmdq *cmdq = dev_get_drvdata(dev);
cmdq              337 drivers/mailbox/mtk-cmdq-mailbox.c 	WARN_ON(clk_prepare(cmdq->clock) < 0);
cmdq              338 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->suspended = false;
cmdq              344 drivers/mailbox/mtk-cmdq-mailbox.c 	struct cmdq *cmdq = platform_get_drvdata(pdev);
cmdq              346 drivers/mailbox/mtk-cmdq-mailbox.c 	clk_unprepare(cmdq->clock);
cmdq              355 drivers/mailbox/mtk-cmdq-mailbox.c 	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
cmdq              360 drivers/mailbox/mtk-cmdq-mailbox.c 	WARN_ON(cmdq->suspended);
cmdq              366 drivers/mailbox/mtk-cmdq-mailbox.c 	task->cmdq = cmdq;
cmdq              373 drivers/mailbox/mtk-cmdq-mailbox.c 		WARN_ON(clk_enable(cmdq->clock) < 0);
cmdq              374 drivers/mailbox/mtk-cmdq-mailbox.c 		WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
cmdq              383 drivers/mailbox/mtk-cmdq-mailbox.c 		WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
cmdq              396 drivers/mailbox/mtk-cmdq-mailbox.c 				WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
cmdq              462 drivers/mailbox/mtk-cmdq-mailbox.c 	struct cmdq *cmdq;
cmdq              465 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
cmdq              466 drivers/mailbox/mtk-cmdq-mailbox.c 	if (!cmdq)
cmdq              470 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->base = devm_ioremap_resource(dev, res);
cmdq              471 drivers/mailbox/mtk-cmdq-mailbox.c 	if (IS_ERR(cmdq->base)) {
cmdq              473 drivers/mailbox/mtk-cmdq-mailbox.c 		return PTR_ERR(cmdq->base);
cmdq              476 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->irq = platform_get_irq(pdev, 0);
cmdq              477 drivers/mailbox/mtk-cmdq-mailbox.c 	if (!cmdq->irq) {
cmdq              482 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev);
cmdq              483 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
cmdq              484 drivers/mailbox/mtk-cmdq-mailbox.c 	err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
cmdq              485 drivers/mailbox/mtk-cmdq-mailbox.c 			       "mtk_cmdq", cmdq);
cmdq              492 drivers/mailbox/mtk-cmdq-mailbox.c 		dev, cmdq->base, cmdq->irq);
cmdq              494 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->clock = devm_clk_get(dev, "gce");
cmdq              495 drivers/mailbox/mtk-cmdq-mailbox.c 	if (IS_ERR(cmdq->clock)) {
cmdq              497 drivers/mailbox/mtk-cmdq-mailbox.c 		return PTR_ERR(cmdq->clock);
cmdq              500 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->mbox.dev = dev;
cmdq              501 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
cmdq              502 drivers/mailbox/mtk-cmdq-mailbox.c 					sizeof(*cmdq->mbox.chans), GFP_KERNEL);
cmdq              503 drivers/mailbox/mtk-cmdq-mailbox.c 	if (!cmdq->mbox.chans)
cmdq              506 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->mbox.num_chans = cmdq->thread_nr;
cmdq              507 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->mbox.ops = &cmdq_mbox_chan_ops;
cmdq              508 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->mbox.of_xlate = cmdq_xlate;
cmdq              511 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->mbox.txdone_irq = false;
cmdq              512 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->mbox.txdone_poll = false;
cmdq              514 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
cmdq              515 drivers/mailbox/mtk-cmdq-mailbox.c 					sizeof(*cmdq->thread), GFP_KERNEL);
cmdq              516 drivers/mailbox/mtk-cmdq-mailbox.c 	if (!cmdq->thread)
cmdq              519 drivers/mailbox/mtk-cmdq-mailbox.c 	for (i = 0; i < cmdq->thread_nr; i++) {
cmdq              520 drivers/mailbox/mtk-cmdq-mailbox.c 		cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
cmdq              522 drivers/mailbox/mtk-cmdq-mailbox.c 		INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
cmdq              523 drivers/mailbox/mtk-cmdq-mailbox.c 		cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
cmdq              526 drivers/mailbox/mtk-cmdq-mailbox.c 	err = devm_mbox_controller_register(dev, &cmdq->mbox);
cmdq              532 drivers/mailbox/mtk-cmdq-mailbox.c 	platform_set_drvdata(pdev, cmdq);
cmdq              533 drivers/mailbox/mtk-cmdq-mailbox.c 	WARN_ON(clk_prepare(cmdq->clock) < 0);
cmdq              535 drivers/mailbox/mtk-cmdq-mailbox.c 	cmdq_init(cmdq);
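
cmdq_irq_handler() above treats the GCE status register as active-low per thread: it masks the readout with irq_mask (a GENMASK over thread_nr bits), bails out if no bit is deasserted, and services the clear bits with for_each_clear_bit(). A small sketch of that dispatch with an invented sample readout; the constants are illustrative, not the GCE's real layout.

    #include <stdio.h>

    #define THREAD_NR 8

    int main(void)
    {
        unsigned long irq_mask   = (1UL << THREAD_NR) - 1;   /* GENMASK(nr-1, 0) */
        unsigned long irq_status = 0xf5 & irq_mask;          /* sample readout */

        if (!(irq_status ^ irq_mask)) {                      /* all bits set */
            puts("no thread asserted an interrupt");
            return 0;
        }
        for (unsigned bit = 0; bit < THREAD_NR; bit++)
            if (!(irq_status & (1UL << bit)))                /* clear bit = pending */
                printf("service GCE thread %u\n", bit);
        return 0;
    }
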
cmdq               31 drivers/net/ethernet/brocade/bna/bfa_msgq.c static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
cmdq               32 drivers/net/ethernet/brocade/bna/bfa_msgq.c static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
cmdq               43 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
cmdq               44 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
cmdq               45 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
cmdq               46 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
cmdq               50 drivers/net/ethernet/brocade/bna/bfa_msgq.c cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
cmdq               54 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->producer_index = 0;
cmdq               55 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->consumer_index = 0;
cmdq               56 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->flags = 0;
cmdq               57 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->token = 0;
cmdq               58 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->offset = 0;
cmdq               59 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->bytes_to_copy = 0;
cmdq               60 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	while (!list_empty(&cmdq->pending_q)) {
cmdq               61 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		cmdq_ent = list_first_entry(&cmdq->pending_q,
cmdq               69 drivers/net/ethernet/brocade/bna/bfa_msgq.c cmdq_sm_stopped(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
cmdq               73 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_fsm_set_state(cmdq, cmdq_sm_init_wait);
cmdq               82 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
cmdq               91 drivers/net/ethernet/brocade/bna/bfa_msgq.c cmdq_sm_init_wait_entry(struct bfa_msgq_cmdq *cmdq)
cmdq               93 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	bfa_wc_down(&cmdq->msgq->init_wc);
cmdq               97 drivers/net/ethernet/brocade/bna/bfa_msgq.c cmdq_sm_init_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
cmdq              102 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
cmdq              106 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
cmdq              110 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
cmdq              111 drivers/net/ethernet/brocade/bna/bfa_msgq.c 			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
cmdq              112 drivers/net/ethernet/brocade/bna/bfa_msgq.c 			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
cmdq              114 drivers/net/ethernet/brocade/bna/bfa_msgq.c 			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
cmdq              123 drivers/net/ethernet/brocade/bna/bfa_msgq.c cmdq_sm_ready_entry(struct bfa_msgq_cmdq *cmdq)
cmdq              128 drivers/net/ethernet/brocade/bna/bfa_msgq.c cmdq_sm_ready(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
cmdq              133 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
cmdq              137 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
cmdq              146 drivers/net/ethernet/brocade/bna/bfa_msgq.c cmdq_sm_dbell_wait_entry(struct bfa_msgq_cmdq *cmdq)
cmdq              148 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	bfa_msgq_cmdq_dbell(cmdq);
cmdq              152 drivers/net/ethernet/brocade/bna/bfa_msgq.c cmdq_sm_dbell_wait(struct bfa_msgq_cmdq *cmdq, enum cmdq_event event)
cmdq              157 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
cmdq              161 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		cmdq->flags |= BFA_MSGQ_CMDQ_F_DB_UPDATE;
cmdq              165 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		if (cmdq->flags & BFA_MSGQ_CMDQ_F_DB_UPDATE) {
cmdq              166 drivers/net/ethernet/brocade/bna/bfa_msgq.c 			cmdq->flags &= ~BFA_MSGQ_CMDQ_F_DB_UPDATE;
cmdq              167 drivers/net/ethernet/brocade/bna/bfa_msgq.c 			bfa_fsm_set_state(cmdq, cmdq_sm_dbell_wait);
cmdq              169 drivers/net/ethernet/brocade/bna/bfa_msgq.c 			bfa_fsm_set_state(cmdq, cmdq_sm_ready);
cmdq              180 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
cmdq              181 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	bfa_fsm_send_event(cmdq, CMDQ_E_DB_READY);
cmdq              185 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq)
cmdq              188 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		(struct bfi_msgq_h2i_db *)(&cmdq->dbell_mb.msg[0]);
cmdq              193 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	dbell->idx.cmdq_pi = htons(cmdq->producer_index);
cmdq              195 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->dbell_mb,
cmdq              196 drivers/net/ethernet/brocade/bna/bfa_msgq.c 				bfa_msgq_cmdq_dbell_ready, cmdq)) {
cmdq              197 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_msgq_cmdq_dbell_ready(cmdq);
cmdq              202 drivers/net/ethernet/brocade/bna/bfa_msgq.c __cmd_copy(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq_cmd_entry *cmd)
cmdq              210 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	dst = (u8 *)cmdq->addr.kva;
cmdq              211 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
cmdq              219 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		BFA_MSGQ_INDX_ADD(cmdq->producer_index, 1, cmdq->depth);
cmdq              220 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		dst = (u8 *)cmdq->addr.kva;
cmdq              221 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		dst += (cmdq->producer_index * BFI_MSGQ_CMD_ENTRY_SIZE);
cmdq              228 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_msgq_cmdq_ci_update(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
cmdq              234 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->consumer_index = ntohs(dbell->idx.cmdq_ci);
cmdq              237 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	while (!list_empty(&cmdq->pending_q)) {
cmdq              238 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		cmd = list_first_entry(&cmdq->pending_q,
cmdq              241 drivers/net/ethernet/brocade/bna/bfa_msgq.c 			BFA_MSGQ_FREE_CNT(cmdq)) {
cmdq              243 drivers/net/ethernet/brocade/bna/bfa_msgq.c 			__cmd_copy(cmdq, cmd);
cmdq              252 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_fsm_send_event(cmdq, CMDQ_E_POST);
cmdq              258 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	struct bfa_msgq_cmdq *cmdq = (struct bfa_msgq_cmdq *)arg;
cmdq              260 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	if (cmdq->bytes_to_copy)
cmdq              261 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_msgq_cmdq_copy_rsp(cmdq);
cmdq              265 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_msgq_cmdq_copy_req(struct bfa_msgq_cmdq *cmdq, struct bfi_mbmsg *mb)
cmdq              270 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->token = 0;
cmdq              271 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->offset = ntohs(req->offset);
cmdq              272 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->bytes_to_copy = ntohs(req->len);
cmdq              273 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	bfa_msgq_cmdq_copy_rsp(cmdq);
cmdq              277 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq)
cmdq              280 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		(struct bfi_msgq_h2i_cmdq_copy_rsp *)&cmdq->copy_mb.msg[0];
cmdq              282 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	u8 *addr = (u8 *)cmdq->addr.kva;
cmdq              286 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	rsp->mh.mtag.i2htok = htons(cmdq->token);
cmdq              287 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	copied = (cmdq->bytes_to_copy >= BFI_CMD_COPY_SZ) ? BFI_CMD_COPY_SZ :
cmdq              288 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		cmdq->bytes_to_copy;
cmdq              289 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	addr += cmdq->offset;
cmdq              292 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->token++;
cmdq              293 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->offset += copied;
cmdq              294 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->bytes_to_copy -= copied;
cmdq              296 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	if (!bfa_nw_ioc_mbox_queue(cmdq->msgq->ioc, &cmdq->copy_mb,
cmdq              297 drivers/net/ethernet/brocade/bna/bfa_msgq.c 				bfa_msgq_cmdq_copy_next, cmdq)) {
cmdq              298 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_msgq_cmdq_copy_next(cmdq);
cmdq              303 drivers/net/ethernet/brocade/bna/bfa_msgq.c bfa_msgq_cmdq_attach(struct bfa_msgq_cmdq *cmdq, struct bfa_msgq *msgq)
cmdq              305 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->depth = BFA_MSGQ_CMDQ_NUM_ENTRY;
cmdq              306 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	INIT_LIST_HEAD(&cmdq->pending_q);
cmdq              307 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	cmdq->msgq = msgq;
cmdq              308 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	bfa_fsm_set_state(cmdq, cmdq_sm_stopped);
cmdq              502 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_INIT_RESP);
cmdq              517 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	bfa_dma_be_addr_set(msgq_cfg->cmdq.addr, msgq->cmdq.addr.pa);
cmdq              518 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	msgq_cfg->cmdq.q_depth = htons(msgq->cmdq.depth);
cmdq              540 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_msgq_cmdq_ci_update(&msgq->cmdq, msg);
cmdq              544 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_msgq_cmdq_copy_req(&msgq->cmdq, msg);
cmdq              561 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_START);
cmdq              568 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_STOP);
cmdq              573 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_FAIL);
cmdq              592 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	msgq->cmdq.addr.kva = kva;
cmdq              593 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	msgq->cmdq.addr.pa  = pa;
cmdq              607 drivers/net/ethernet/brocade/bna/bfa_msgq.c 	bfa_msgq_cmdq_attach(&msgq->cmdq, msgq);
cmdq              627 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		BFA_MSGQ_FREE_CNT(&msgq->cmdq)) {
cmdq              628 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		__cmd_copy(&msgq->cmdq, cmd);
cmdq              630 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		bfa_fsm_send_event(&msgq->cmdq, CMDQ_E_POST);
cmdq              632 drivers/net/ethernet/brocade/bna/bfa_msgq.c 		list_add_tail(&cmd->qe, &msgq->cmdq.pending_q);
cmdq              104 drivers/net/ethernet/brocade/bna/bfa_msgq.h 	struct bfa_msgq_cmdq cmdq;
cmdq              420 drivers/net/ethernet/brocade/bna/bfi.h 	struct bfi_msgq cmdq;
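
__cmd_copy() above streams a command into the circular cmdq one BFI_MSGQ_CMD_ENTRY_SIZE chunk at a time, wrapping producer_index with BFA_MSGQ_INDX_ADD, while the surrounding FSM states gate when the doorbell mailbox message may be sent. A sketch of just the copy-and-wrap part, with illustrative sizes in place of the driver's constants.

    #include <stdio.h>
    #include <string.h>

    #define ENTRY_SIZE 16   /* stand-in for BFI_MSGQ_CMD_ENTRY_SIZE */
    #define DEPTH      4    /* stand-in for BFA_MSGQ_CMDQ_NUM_ENTRY */

    static unsigned char ring[DEPTH * ENTRY_SIZE];
    static unsigned producer_index;

    static void cmd_copy(const unsigned char *src, size_t len)
    {
        while (len) {
            size_t chunk = len < ENTRY_SIZE ? len : ENTRY_SIZE;

            memcpy(ring + producer_index * ENTRY_SIZE, src, chunk);
            src += chunk;
            len -= chunk;
            producer_index = (producer_index + 1) % DEPTH;  /* BFA_MSGQ_INDX_ADD */
        }
    }

    int main(void)
    {
        unsigned char cmd[40] = "a fairly long command payload";

        cmd_copy(cmd, sizeof(cmd));
        printf("producer now at %u\n", producer_index);
        return 0;
    }
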
cmdq             1435 drivers/net/ethernet/chelsio/cxgb/sge.c 	struct cmdQ *cmdq = &sge->cmdQ[0];
cmdq             1437 drivers/net/ethernet/chelsio/cxgb/sge.c 	cmdq->processed += pr0;
cmdq             1443 drivers/net/ethernet/chelsio/cxgb/sge.c 		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
cmdq             1445 drivers/net/ethernet/chelsio/cxgb/sge.c 		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
cmdq             1446 drivers/net/ethernet/chelsio/cxgb/sge.c 		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
cmdq             1447 drivers/net/ethernet/chelsio/cxgb/sge.c 			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
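
The sge lines coordinate the reclaim path and the transmit path through atomic status bits: clear_bit() on CMDQ_STAT_RUNNING and a test_and_set_bit() on CMDQ_STAT_LAST_PKT_DB decide which side rings the final doorbell. A C11 emulation of those bit helpers; the kernel's own are per-architecture primitives, so this is only an analogue.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define CMDQ_STAT_RUNNING     0
    #define CMDQ_STAT_LAST_PKT_DB 1

    static atomic_ulong status;

    static bool test_and_set_bit(int nr, atomic_ulong *p)
    {
        return atomic_fetch_or(p, 1UL << nr) & (1UL << nr);
    }

    static void clear_bit(int nr, atomic_ulong *p)
    {
        atomic_fetch_and(p, ~(1UL << nr));
    }

    int main(void)
    {
        clear_bit(CMDQ_STAT_RUNNING, &status);
        if (!test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &status))
            puts("ring doorbell for the last packet");   /* first caller wins */
        if (test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &status))
            puts("doorbell already pending");
        return 0;
    }
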
cmdq               78 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c #define cmdq_to_cmdqs(cmdq)     container_of((cmdq) - (cmdq)->cmdq_type, \
cmdq               79 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 					     struct hinic_cmdqs, cmdq[0])
cmdq              325 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c static void cmdq_set_db(struct hinic_cmdq *cmdq,
cmdq              337 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
cmdq              340 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
cmdq              348 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	struct hinic_wq *wq = cmdq->wq;
cmdq              353 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	spin_lock_bh(&cmdq->cmdq_lock);
cmdq              358 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		spin_unlock_bh(&cmdq->cmdq_lock);
cmdq              364 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	wrapped = cmdq->wrapped;
cmdq              369 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		cmdq->wrapped = !cmdq->wrapped;
cmdq              373 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq->errcode[curr_prod_idx] = &errcode;
cmdq              376 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq->done[curr_prod_idx] = &done;
cmdq              388 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
cmdq              390 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	spin_unlock_bh(&cmdq->cmdq_lock);
cmdq              394 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		spin_lock_bh(&cmdq->cmdq_lock);
cmdq              396 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		if (cmdq->errcode[curr_prod_idx] == &errcode)
cmdq              397 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 			cmdq->errcode[curr_prod_idx] = NULL;
cmdq              399 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		if (cmdq->done[curr_prod_idx] == &done)
cmdq              400 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 			cmdq->done[curr_prod_idx] = NULL;
cmdq              402 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		spin_unlock_bh(&cmdq->cmdq_lock);
cmdq              421 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in,
cmdq              426 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	struct hinic_wq *wq = cmdq->wq;
cmdq              431 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	spin_lock(&cmdq->cmdq_lock);
cmdq              436 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		spin_unlock(&cmdq->cmdq_lock);
cmdq              442 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	wrapped = cmdq->wrapped;
cmdq              447 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		cmdq->wrapped = !cmdq->wrapped;
cmdq              461 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
cmdq              463 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	spin_unlock(&cmdq->cmdq_lock);
cmdq              499 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
cmdq              514 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
cmdq              523 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit));
cmdq              532 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
cmdq              566 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
cmdq              582 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	clear_wqe_complete_bit(cmdq, wqe);
cmdq              584 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE);
cmdq              588 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
cmdq              591 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	if (cmdq->errcode[prod_idx])
cmdq              592 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		*cmdq->errcode[prod_idx] = errcode;
cmdq              601 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx,
cmdq              606 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	spin_lock(&cmdq->cmdq_lock);
cmdq              607 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq_update_errcode(cmdq, prod_idx, errcode);
cmdq              611 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	if (cmdq->done[prod_idx])
cmdq              612 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		complete(cmdq->done[prod_idx]);
cmdq              613 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	spin_unlock(&cmdq->cmdq_lock);
cmdq              616 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
cmdq              631 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq_sync_cmd_handler(cmdq, ci, errcode);
cmdq              633 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	clear_wqe_complete_bit(cmdq, cmdq_wqe);
cmdq              634 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE);
cmdq              647 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
cmdq              655 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
cmdq              666 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 			if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe))
cmdq              671 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 			hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
cmdq              675 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 			if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe))
cmdq              697 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 				 struct hinic_cmdq *cmdq,
cmdq              702 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
cmdq              703 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	struct hinic_wq *wq = cmdq->wq;
cmdq              715 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);
cmdq              727 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq_ctxt->cmdq_type  = cmdq->cmdq_type;
cmdq              739 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
cmdq              744 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq->wq = wq;
cmdq              745 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq->cmdq_type = q_type;
cmdq              746 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq->wrapped = 1;
cmdq              748 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	spin_lock_init(&cmdq->cmdq_lock);
cmdq              750 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
cmdq              751 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	if (!cmdq->done)
cmdq              754 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq->errcode = vzalloc(array_size(sizeof(*cmdq->errcode),
cmdq              756 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	if (!cmdq->errcode) {
cmdq              761 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	cmdq->db_base = db_area + CMDQ_DB_OFF;
cmdq              765 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	vfree(cmdq->done);
cmdq              773 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c static void free_cmdq(struct hinic_cmdq *cmdq)
cmdq              775 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	vfree(cmdq->errcode);
cmdq              776 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 	vfree(cmdq->done);
cmdq              812 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		err = init_cmdq(&cmdqs->cmdq[cmdq_type],
cmdq              821 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 				     &cmdqs->cmdq[cmdq_type],
cmdq              848 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		free_cmdq(&cmdqs->cmdq[type]);
cmdq              933 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c 		free_cmdq(&cmdqs->cmdq[cmdq_type]);
cmdq              157 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h 	struct hinic_cmdq       cmdq[HINIC_MAX_CMDQ_TYPES];
cmdq              502 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c 	enum hinic_cmdq_type cmdq, type;
cmdq              533 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c 	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
cmdq              541 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c 		func_to_io->cmdq_db_area[cmdq] = db_area;
cmdq              555 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c 	for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
cmdq              574 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c 	enum hinic_cmdq_type cmdq;
cmdq              578 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c 	for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
cmdq              579 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c 		return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]);
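
cmdq_sync_cmd_direct_resp() above posts under cmdq_lock, parks &done and &errcode pointers at the producer slot for the completion handler to fill, and toggles cmdq->wrapped whenever the producer index passes the end of the WQ so hardware can distinguish fresh entries from stale ones. A sketch of just the wrap bookkeeping, with an arbitrary depth.

    #include <stdio.h>

    #define Q_DEPTH 4   /* stand-in for wq->q_depth */

    int main(void)
    {
        unsigned prod = 0;
        int wrapped = 1;                 /* cf. cmdq->wrapped = 1 in init_cmdq() */

        for (int i = 0; i < 10; i++) {
            unsigned curr = prod;
            unsigned next = curr + 1;

            if (next >= Q_DEPTH) {       /* cf. next_prod_idx >= wq->q_depth */
                wrapped = !wrapped;
                next -= Q_DEPTH;
            }
            printf("post at %u (wrapped=%d)\n", curr, wrapped);
            prod = next;
        }
        return 0;
    }
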
cmdq             3229 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
cmdq             3245 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
cmdq             3247 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 			cmdq->n_window * (sizeof(*txcmd) +
cmdq             3297 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
cmdq             3302 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		spin_lock_bh(&cmdq->lock);
cmdq             3303 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		ptr = cmdq->write_ptr;
cmdq             3304 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		for (i = 0; i < cmdq->n_window; i++) {
cmdq             3305 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 			u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
cmdq             3315 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 							   (u8 *)cmdq->tfds +
cmdq             3323 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 				memcpy(txcmd->data, cmdq->entries[idx].cmd,
cmdq             3330 drivers/net/wireless/intel/iwlwifi/pcie/trans.c 		spin_unlock_bh(&cmdq->lock);
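
The dump path above walks all n_window slots of the command queue starting at write_ptr, reducing each ring pointer to a window slot with iwl_pcie_get_cmd_index() while holding cmdq->lock. A sketch of that masked walk, assuming a power-of-two window; the helper here is my stand-in, not the driver's.

    #include <stdio.h>

    #define N_WINDOW 8   /* power of two, illustrative */

    static unsigned get_cmd_index(unsigned ptr) { return ptr & (N_WINDOW - 1); }

    int main(void)
    {
        unsigned write_ptr = 13;   /* arbitrary ring position */
        unsigned ptr = write_ptr;

        for (int i = 0; i < N_WINDOW; i++) {
            printf("dump command at slot %u\n", get_cmd_index(ptr));
            ptr = (ptr + 1) & (N_WINDOW - 1);   /* wrap-increment analogue */
        }
        return 0;
    }
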
cmdq              996 drivers/scsi/aacraid/aacraid.h 	struct list_head	cmdq;		/* A queue of FIBs which need to be processed by the FS thread. This is */
cmdq              264 drivers/scsi/aacraid/comminit.c 	INIT_LIST_HEAD(&q->cmdq);
cmdq             2178 drivers/scsi/aacraid/commsup.c 	while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
cmdq             2187 drivers/scsi/aacraid/commsup.c 		entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
cmdq              199 drivers/scsi/aacraid/dpcsup.c 		        list_add_tail(&fib->fiblink, &q->cmdq);
cmdq              310 drivers/scsi/aacraid/dpcsup.c 		list_add_tail(&fib->fiblink, &q->cmdq);
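
The aacraid hits are plain intrusive list usage: interrupt-side code appends FIBs to q->cmdq with list_add_tail(), and the FS thread drains from the head while the list is non-empty. A self-contained sketch with a minimal list_head analogue in place of the kernel's.

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }
    static int  list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
        n->next = n->prev = n;
    }

    struct fib { int id; struct list_head fiblink; };

    int main(void)
    {
        struct list_head cmdq;               /* cf. q->cmdq */
        struct fib fibs[3] = { { 1 }, { 2 }, { 3 } };

        list_init(&cmdq);
        for (int i = 0; i < 3; i++)
            list_add_tail(&fibs[i].fiblink, &cmdq);

        while (!list_empty(&cmdq)) {         /* drain like the FS thread */
            struct list_head *entry = cmdq.next;
            struct fib *f = (struct fib *)((char *)entry -
                            offsetof(struct fib, fiblink));
            printf("process FIB %d\n", f->id);
            list_del(entry);
        }
        return 0;
    }
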
cmdq              592 drivers/scsi/bfa/bfi.h 	struct bfi_msgq_s cmdq;
cmdq             1269 drivers/staging/wlan-ng/hfa384x.h 	wait_queue_head_t cmdq;	/* wait queue itself */
cmdq              537 drivers/staging/wlan-ng/hfa384x_usb.c 	init_waitqueue_head(&hw->cmdq);
cmdq              319 drivers/staging/wlan-ng/prism2mgmt.c 	wait_event_interruptible_timeout(hw->cmdq, hw->scanflag, timeout);
cmdq             1107 drivers/staging/wlan-ng/prism2sta.c 	wake_up_interruptible(&hw->cmdq);
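
Here cmdq is not a ring at all but a wait queue: prism2mgmt sleeps on hw->cmdq until the interrupt path sets scanflag and calls wake_up_interruptible(). A pthread analogue of that handshake, with a condition variable in place of the kernel wait queue and the timeout omitted for brevity.

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cmdq = PTHREAD_COND_INITIALIZER;   /* cf. hw->cmdq */
    static int scanflag;

    static void *irq_side(void *arg)
    {
        (void)arg;
        usleep(1000);                     /* pretend the scan takes a moment */
        pthread_mutex_lock(&lock);
        scanflag = 1;
        pthread_cond_signal(&cmdq);       /* cf. wake_up_interruptible(&hw->cmdq) */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, irq_side, NULL);

        pthread_mutex_lock(&lock);
        while (!scanflag)                 /* cf. wait_event_interruptible_timeout */
            pthread_cond_wait(&cmdq, &lock);
        pthread_mutex_unlock(&lock);

        puts("scan complete");
        pthread_join(t, NULL);
        return 0;
    }
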