sq_tail  647 drivers/crypto/hisilicon/qm.c      qp_status->sq_tail = 0;
sq_tail 1089 drivers/crypto/hisilicon/qm.c      u16 sq_tail = qp_status->sq_tail;
sq_tail 1094 drivers/crypto/hisilicon/qm.c      return qp->sqe + sq_tail * qp->qm->sqe_size;
sq_tail 1360 drivers/crypto/hisilicon/qm.c      u16 sq_tail = qp_status->sq_tail;
sq_tail 1361 drivers/crypto/hisilicon/qm.c      u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
sq_tail 1376 drivers/crypto/hisilicon/qm.c      qp_status->sq_tail = sq_tail_next;
sq_tail  167 drivers/crypto/hisilicon/qm.h      u16 sq_tail;
sq_tail  175 drivers/nvme/host/pci.c            u16 sq_tail;
sq_tail  456 drivers/nvme/host/pci.c            u16 next_tail = nvmeq->sq_tail + 1;
sq_tail  464 drivers/nvme/host/pci.c            if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail,
sq_tail  466 drivers/nvme/host/pci.c            writel(nvmeq->sq_tail, nvmeq->q_db);
sq_tail  467 drivers/nvme/host/pci.c            nvmeq->last_sq_tail = nvmeq->sq_tail;
sq_tail  480 drivers/nvme/host/pci.c            memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
sq_tail  482 drivers/nvme/host/pci.c            if (++nvmeq->sq_tail == nvmeq->q_depth)
sq_tail  483 drivers/nvme/host/pci.c            nvmeq->sq_tail = 0;
sq_tail  493 drivers/nvme/host/pci.c            if (nvmeq->sq_tail != nvmeq->last_sq_tail)
sq_tail  973 drivers/nvme/host/pci.c            trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
sq_tail 1530 drivers/nvme/host/pci.c            nvmeq->sq_tail = 0;
sq_tail  145 drivers/nvme/host/trace.h          TP_PROTO(struct request *req, __le16 sq_head, int sq_tail),
sq_tail  146 drivers/nvme/host/trace.h          TP_ARGS(req, sq_head, sq_tail),
sq_tail  152 drivers/nvme/host/trace.h          __field(u16, sq_tail)
sq_tail  159 drivers/nvme/host/trace.h          __entry->sq_tail = sq_tail;
sq_tail  163 drivers/nvme/host/trace.h          __entry->qid, __entry->sq_head, __entry->sq_tail
sq_tail  375 drivers/scsi/cxlflash/main.c       writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);
sq_tail  383 drivers/scsi/cxlflash/main.c       readq_be(&hwq->host_map->sq_tail));
sq_tail  295 drivers/scsi/cxlflash/sislite.h    __be64 sq_tail; /* Submission Queue TAIL (R/W): next IOARCB */
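
Taken together, these hits show the same ring-buffer pattern: sq_tail indexes the next free submission-queue slot, is advanced with wrap-around at the queue depth (modulo arithmetic in drivers/crypto/hisilicon/qm.c, increment-and-reset in drivers/nvme/host/pci.c), and is made visible to the device through a doorbell write (writel() / writeq_be()). The sketch below is a minimal, hypothetical illustration of that pattern only; the names (struct sq_ring, sq_ring_submit, SQ_DEPTH, SQE_SIZE) are invented for the example and are not taken from any of the drivers listed above.

/*
 * Hypothetical submission-queue ring: sq_tail is the index of the next
 * free slot; the doorbell register tells the device how far the host
 * has filled the ring.
 */
#include <stdint.h>
#include <string.h>

#define SQ_DEPTH 64          /* entries in the ring */
#define SQE_SIZE 64          /* bytes per submission queue entry */

struct sq_ring {
	uint8_t *sqes;                /* SQ_DEPTH * SQE_SIZE bytes of queue memory */
	volatile uint32_t *doorbell;  /* device-visible tail register */
	uint16_t sq_tail;             /* next free slot */
};

static void sq_ring_submit(struct sq_ring *sq, const void *sqe)
{
	/* Copy the command into the slot sq_tail points at ... */
	memcpy(sq->sqes + (size_t)sq->sq_tail * SQE_SIZE, sqe, SQE_SIZE);

	/* ... advance the tail, wrapping at the queue depth ... */
	if (++sq->sq_tail == SQ_DEPTH)
		sq->sq_tail = 0;

	/* ... and ring the doorbell so the device picks up the new entry. */
	*sq->doorbell = sq->sq_tail;
}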