cq_head 452 drivers/crypto/hisilicon/qm.c if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
cq_head 454 drivers/crypto/hisilicon/qm.c qp->qp_status.cq_head = 0;
cq_head 456 drivers/crypto/hisilicon/qm.c qp->qp_status.cq_head++;
cq_head 462 drivers/crypto/hisilicon/qm.c struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
cq_head 469 drivers/crypto/hisilicon/qm.c cqe = qp->cqe + qp->qp_status.cq_head;
cq_head 471 drivers/crypto/hisilicon/qm.c qp->qp_status.cq_head, 0);
cq_head 477 drivers/crypto/hisilicon/qm.c qp->qp_status.cq_head, 1);
cq_head 648 drivers/crypto/hisilicon/qm.c qp_status->cq_head = 0;
cq_head 168 drivers/crypto/hisilicon/qm.h u16 cq_head;
cq_head 1094 drivers/infiniband/hw/i40iw/i40iw_uk.c u32 cq_head;
cq_head 1097 drivers/infiniband/hw/i40iw/i40iw_uk.c cq_head = cq->cq_ring.head;
cq_head 1101 drivers/infiniband/hw/i40iw/i40iw_uk.c cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
cq_head 1103 drivers/infiniband/hw/i40iw/i40iw_uk.c cqe = (u64 *)&cq->cq_base[cq_head];
cq_head 1114 drivers/infiniband/hw/i40iw/i40iw_uk.c cq_head = (cq_head + 1) % cq->cq_ring.size;
cq_head 1115 drivers/infiniband/hw/i40iw/i40iw_uk.c if (!cq_head)
cq_head 108 drivers/infiniband/hw/i40iw/i40iw_verbs.h u16 cq_head;
cq_head 961 drivers/net/ethernet/cavium/thunder/nicvf_main.c u64 cq_head;
cq_head 974 drivers/net/ethernet/cavium/thunder/nicvf_main.c cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
cq_head 978 drivers/net/ethernet/cavium/thunder/nicvf_main.c cq->cq_idx, cq_head);
cq_head 58 drivers/net/ethernet/cisco/enic/vnic_cq.c unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
cq_head 70 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(cq_head, &cq->ctrl->cq_head);
cq_head 87 drivers/net/ethernet/cisco/enic/vnic_cq.c iowrite32(0, &cq->ctrl->cq_head);
cq_head 35 drivers/net/ethernet/cisco/enic/vnic_cq.h u32 cq_head; /* 0x20 */
cq_head 117 drivers/net/ethernet/cisco/enic/vnic_cq.h unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
cq_head 177 drivers/nvme/host/pci.c u16 cq_head;
cq_head 927 drivers/nvme/host/pci.c return (le16_to_cpu(nvmeq->cqes[nvmeq->cq_head].status) & 1) ==
cq_head 933 drivers/nvme/host/pci.c u16 head = nvmeq->cq_head;
cq_head 988 drivers/nvme/host/pci.c if (nvmeq->cq_head == nvmeq->q_depth - 1) {
cq_head 989 drivers/nvme/host/pci.c nvmeq->cq_head = 0;
cq_head 992 drivers/nvme/host/pci.c nvmeq->cq_head++;
cq_head 1001 drivers/nvme/host/pci.c *start = nvmeq->cq_head;
cq_head 1003 drivers/nvme/host/pci.c if (tag == -1U || nvmeq->cqes[nvmeq->cq_head].command_id == tag)
cq_head 1007 drivers/nvme/host/pci.c *end = nvmeq->cq_head;
cq_head 1025 drivers/nvme/host/pci.c if (nvmeq->cq_head != nvmeq->last_cq_head)
cq_head 1028 drivers/nvme/host/pci.c nvmeq->last_cq_head = nvmeq->cq_head;
cq_head 1497 drivers/nvme/host/pci.c nvmeq->cq_head = 0;
cq_head 1532 drivers/nvme/host/pci.c nvmeq->cq_head = 0;
cq_head 53 drivers/scsi/fnic/vnic_cq.c unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
cq_head 65 drivers/scsi/fnic/vnic_cq.c iowrite32(cq_head, &cq->ctrl->cq_head);
cq_head 80 drivers/scsi/fnic/vnic_cq.c iowrite32(0, &cq->ctrl->cq_head);
cq_head 43 drivers/scsi/fnic/vnic_cq.h u32 cq_head; /* 0x20 */
cq_head 115 drivers/scsi/fnic/vnic_cq.h unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
cq_head 54 drivers/scsi/snic/vnic_cq.c unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
cq_head 66 drivers/scsi/snic/vnic_cq.c iowrite32(cq_head, &cq->ctrl->cq_head);
cq_head 81 drivers/scsi/snic/vnic_cq.c iowrite32(0, &cq->ctrl->cq_head);
cq_head 33 drivers/scsi/snic/vnic_cq.h u32 cq_head; /* 0x20 */
cq_head 105 drivers/scsi/snic/vnic_cq.h unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
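A pattern recurs across these hits: a consumer-side head index that walks a completion ring, wraps at the end of the ring (hisilicon/qm.c:452-456, nvme/host/pci.c:988-992, or via modulo at i40iw_uk.c:1114), and is written back to a doorbell register when entries have been consumed (the iowrite32() calls in the enic/fnic/snic vnic_cq files). Below is a minimal, self-contained C sketch of that pattern, combined with the phase-bit validity check visible at nvme/host/pci.c:927. Every name in it (struct demo_cq, struct demo_cqe, demo_cqe_pending, demo_update_cq_head, demo_cq_poll, DEMO_Q_DEPTH) is hypothetical and exists only for illustration; this is not any of the drivers' actual code.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define DEMO_Q_DEPTH 4

struct demo_cqe {
	uint16_t status;	/* bit 0 carries the phase tag */
	uint16_t command_id;
};

struct demo_cq {
	struct demo_cqe cqes[DEMO_Q_DEPTH];
	uint16_t cq_head;	/* next entry to consume */
	uint8_t  cq_phase;	/* phase value expected for new entries */
};

/* Phase-bit check, as in the nvme pci.c:927 hit: an entry is new when
 * its phase bit matches what the consumer expects on this pass. */
static bool demo_cqe_pending(const struct demo_cq *cq)
{
	return (cq->cqes[cq->cq_head].status & 1) == cq->cq_phase;
}

/* Head advance with wraparound, as in the qm.c:452-456 and
 * pci.c:988-992 hits: reset to 0 at the last slot and flip the
 * expected phase, otherwise just increment. */
static void demo_update_cq_head(struct demo_cq *cq)
{
	if (cq->cq_head == DEMO_Q_DEPTH - 1) {
		cq->cq_head = 0;
		cq->cq_phase ^= 1;
	} else {
		cq->cq_head++;
	}
}

static void demo_cq_poll(struct demo_cq *cq)
{
	while (demo_cqe_pending(cq)) {
		printf("completed command %u\n",
		       cq->cqes[cq->cq_head].command_id);
		demo_update_cq_head(cq);
	}
	/* A real driver would now write cq_head back to the hardware
	 * doorbell, as the enic/fnic/snic hits do with iowrite32(). */
}

int main(void)
{
	struct demo_cq cq = { .cq_phase = 1 };

	/* Stand-in for the device: post two completions whose phase
	 * bit matches the consumer's current expected phase. */
	cq.cqes[0] = (struct demo_cqe){ .status = 1, .command_id = 7 };
	cq.cqes[1] = (struct demo_cqe){ .status = 1, .command_id = 8 };

	demo_cq_poll(&cq);	/* consumes both; head stops at slot 2 */
	return 0;
}

The phase flip on wraparound is what lets the consumer distinguish stale entries from new ones without zeroing the ring; the i40iw hits instead reach the same wrap effect with a modulo (cq_head + 1) % size, and the vnic-style hardware tracks validity with a color bit (the color_enable parameter in the vnic_cq init signatures).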