sqe 467 drivers/crypto/hisilicon/qm.c qp->req_cb(qp, qp->sqe + qm->sqe_size * cqe->sq_head);
sqe 1094 drivers/crypto/hisilicon/qm.c return qp->sqe + sq_tail * qp->qm->sqe_size;
sqe 1298 drivers/crypto/hisilicon/qm.c QP_INIT_BUF(qp, sqe, qm->sqe_size * QM_Q_DEPTH);
sqe 1304 drivers/crypto/hisilicon/qm.c ver, qp->sqe, (unsigned long)qp->sqe_dma,
sqe 1362 drivers/crypto/hisilicon/qm.c void *sqe = qm_get_avail_sqe(qp);
sqe 1369 drivers/crypto/hisilicon/qm.c if (!sqe)
sqe 1372 drivers/crypto/hisilicon/qm.c memcpy(sqe, msg, qp->qm->sqe_size);
sqe 174 drivers/crypto/hisilicon/qm.h int (*fill_sqe)(void *sqe, void *q_parm, void *d_parm);
sqe 183 drivers/crypto/hisilicon/qm.h void *sqe;
sqe 79 drivers/crypto/hisilicon/zip/zip_crypto.c static void hisi_zip_config_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
sqe 83 drivers/crypto/hisilicon/zip/zip_crypto.c val = (sqe->dw9) & ~HZIP_BUF_TYPE_M;
sqe 85 drivers/crypto/hisilicon/zip/zip_crypto.c sqe->dw9 = val;
sqe 88 drivers/crypto/hisilicon/zip/zip_crypto.c static void hisi_zip_config_tag(struct hisi_zip_sqe *sqe, u32 tag)
sqe 90 drivers/crypto/hisilicon/zip/zip_crypto.c sqe->tag = tag;
sqe 93 drivers/crypto/hisilicon/zip/zip_crypto.c static void hisi_zip_fill_sqe(struct hisi_zip_sqe *sqe, u8 req_type,
sqe 97 drivers/crypto/hisilicon/zip/zip_crypto.c memset(sqe, 0, sizeof(struct hisi_zip_sqe));
sqe 99 drivers/crypto/hisilicon/zip/zip_crypto.c sqe->input_data_length = slen - sskip;
sqe 100 drivers/crypto/hisilicon/zip/zip_crypto.c sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, sskip);
sqe 101 drivers/crypto/hisilicon/zip/zip_crypto.c sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, dskip);
sqe 102 drivers/crypto/hisilicon/zip/zip_crypto.c sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
sqe 103 drivers/crypto/hisilicon/zip/zip_crypto.c sqe->dest_avail_out = dlen - dskip;
sqe 104 drivers/crypto/hisilicon/zip/zip_crypto.c sqe->source_addr_l = lower_32_bits(s_addr);
sqe 105 drivers/crypto/hisilicon/zip/zip_crypto.c sqe->source_addr_h = upper_32_bits(s_addr);
sqe 106 drivers/crypto/hisilicon/zip/zip_crypto.c sqe->dest_addr_l = lower_32_bits(d_addr);
sqe 107 drivers/crypto/hisilicon/zip/zip_crypto.c sqe->dest_addr_h = upper_32_bits(d_addr);
sqe 312 drivers/crypto/hisilicon/zip/zip_crypto.c struct hisi_zip_sqe *sqe = data;
sqe 315 drivers/crypto/hisilicon/zip/zip_crypto.c struct hisi_zip_req *req = req_q->q + sqe->tag;
sqe 321 drivers/crypto/hisilicon/zip/zip_crypto.c status = sqe->dw3 & HZIP_BD_STATUS_M;
sqe 326 drivers/crypto/hisilicon/zip/zip_crypto.c sqe->produced);
sqe 329 drivers/crypto/hisilicon/zip/zip_crypto.c dlen = sqe->produced;
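The zip_crypto.c entries above show the complete descriptor-fill pattern for the HiSilicon ZIP engine: clear the sqe, pack bitfields with FIELD_PREP(), and split DMA addresses into 32-bit halves. Condensed into one sketch (struct hisi_zip_sqe and the HZIP_* masks come from the driver; the sskip/dskip offsets are dropped for brevity):

    static void zip_fill_sqe_sketch(struct hisi_zip_sqe *sqe, u8 req_type,
                                    dma_addr_t s_addr, dma_addr_t d_addr,
                                    u32 slen, u32 dlen)
    {
            memset(sqe, 0, sizeof(struct hisi_zip_sqe));
            sqe->input_data_length = slen;
            sqe->dw9 = FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
            sqe->dest_avail_out = dlen;
            sqe->source_addr_l = lower_32_bits(s_addr);
            sqe->source_addr_h = upper_32_bits(s_addr);
            sqe->dest_addr_l = lower_32_bits(d_addr);
            sqe->dest_addr_h = upper_32_bits(d_addr);
    }

qm.c then copies the finished descriptor into the hardware ring at the tail slot returned by qm_get_avail_sqe(), as the memcpy(sqe, msg, qp->qm->sqe_size) entry above shows.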
sqe 1601 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct sq_send_raweth_qp1 *sqe =
sqe 1604 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->wqe_type = wqe->type;
sqe 1605 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->flags = wqe->flags;
sqe 1606 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->wqe_size = wqe_size16 +
sqe 1607 drivers/infiniband/hw/bnxt_re/qplib_fp.c ((offsetof(typeof(*sqe), data) + 15) >> 4);
sqe 1608 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
sqe 1609 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
sqe 1610 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->length = cpu_to_le32(data_len);
sqe 1611 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
sqe 1621 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct sq_send *sqe = (struct sq_send *)hw_sq_send_hdr;
sqe 1623 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->wqe_type = wqe->type;
sqe 1624 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->flags = wqe->flags;
sqe 1625 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->wqe_size = wqe_size16 +
sqe 1626 drivers/infiniband/hw/bnxt_re/qplib_fp.c ((offsetof(typeof(*sqe), data) + 15) >> 4);
sqe 1627 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->inv_key_or_imm_data = cpu_to_le32(
sqe 1631 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->q_key = cpu_to_le32(wqe->send.q_key);
sqe 1632 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->dst_qp = cpu_to_le32(
sqe 1634 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->length = cpu_to_le32(data_len);
sqe 1635 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->avid = cpu_to_le32(wqe->send.avid &
sqe 1639 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->length = cpu_to_le32(data_len);
sqe 1640 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->dst_qp = 0;
sqe 1641 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->avid = 0;
sqe 1654 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct sq_rdma *sqe = (struct sq_rdma *)hw_sq_send_hdr;
sqe 1656 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->wqe_type = wqe->type;
sqe 1657 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->flags = wqe->flags;
sqe 1658 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->wqe_size = wqe_size16 +
sqe 1659 drivers/infiniband/hw/bnxt_re/qplib_fp.c ((offsetof(typeof(*sqe), data) + 15) >> 4);
sqe 1660 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
sqe 1661 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->length = cpu_to_le32((u32)data_len);
sqe 1662 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
sqe 1663 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
sqe 1674 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct sq_atomic *sqe = (struct sq_atomic *)hw_sq_send_hdr;
sqe 1676 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->wqe_type = wqe->type;
sqe 1677 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->flags = wqe->flags;
sqe 1678 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
sqe 1679 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
sqe 1680 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
sqe 1681 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
sqe 1691 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct sq_localinvalidate *sqe =
sqe 1694 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->wqe_type = wqe->type;
sqe 1695 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->flags = wqe->flags;
sqe 1696 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
sqe 1702 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct sq_fr_pmr *sqe = (struct sq_fr_pmr *)hw_sq_send_hdr;
sqe 1704 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->wqe_type = wqe->type;
sqe 1705 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->flags = wqe->flags;
sqe 1706 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->access_cntl = wqe->frmr.access_cntl |
sqe 1708 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->zero_based_page_size_log =
sqe 1712 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
sqe 1714 drivers/infiniband/hw/bnxt_re/qplib_fp.c memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
sqe 1715 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->numlevels_pbl_page_size_log =
sqe 1726 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
sqe 1727 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->va = cpu_to_le64(wqe->frmr.va);
sqe 1733 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct sq_bind *sqe = (struct sq_bind *)hw_sq_send_hdr;
sqe 1735 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->wqe_type = wqe->type;
sqe 1736 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->flags = wqe->flags;
sqe 1737 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->access_cntl = wqe->bind.access_cntl;
sqe 1738 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->mw_type_zero_based = wqe->bind.mw_type |
sqe 1740 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
sqe 1741 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->l_key = cpu_to_le32(wqe->bind.r_key);
sqe 1742 drivers/infiniband/hw/bnxt_re/qplib_fp.c sqe->va = cpu_to_le64(wqe->bind.va);
sqe 1744 drivers/infiniband/hw/bnxt_re/qplib_fp.c memcpy(&sqe->length, &temp32, sizeof(wqe->bind.length));
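Every bnxt_re fill routine above computes sqe->wqe_size the same way: the SGE area (wqe_size16, already counted in 16-byte units) plus the descriptor header rounded up to the next 16-byte slot. Written out as a sketch (sq_send stands in for whichever sq_* layout is being filled):

    /* Header bytes before the inline data area of this WQE layout. */
    size_t hdr = offsetof(struct sq_send, data);

    /* (hdr + 15) >> 4 is DIV_ROUND_UP(hdr, 16): header size in 16B slots. */
    sqe->wqe_size = wqe_size16 + ((hdr + 15) >> 4);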
sqe 96 drivers/infiniband/hw/cxgb4/restrack.c struct t4_swsqe *sqe)
sqe 100 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))
sqe 102 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))
sqe 104 drivers/infiniband/hw/cxgb4/restrack.c if (sqe->complete &&
sqe 105 drivers/infiniband/hw/cxgb4/restrack.c rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
sqe 107 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))
sqe 109 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
sqe 204 drivers/infiniband/sw/siw/siw.h struct siw_sqe sqe;
sqe 496 drivers/infiniband/sw/siw/siw.h #define tx_type(wqe) ((wqe)->sqe.opcode)
sqe 498 drivers/infiniband/sw/siw/siw.h #define tx_flags(wqe) ((wqe)->sqe.flags)
sqe 538 drivers/infiniband/sw/siw/siw.h void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe);
sqe 539 drivers/infiniband/sw/siw/siw.h int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
sqe 641 drivers/infiniband/sw/siw/siw.h struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
sqe 643 drivers/infiniband/sw/siw/siw.h return READ_ONCE(sqe->flags) == 0;
sqe 648 drivers/infiniband/sw/siw/siw.h struct siw_sqe *sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
sqe 650 drivers/infiniband/sw/siw/siw.h if (READ_ONCE(sqe->flags) & SIW_WQE_VALID)
sqe 651 drivers/infiniband/sw/siw/siw.h return sqe;
sqe 271 drivers/infiniband/sw/siw/siw_mem.c if (!(wqe->sqe.flags & SIW_WQE_INLINE))
sqe 272 drivers/infiniband/sw/siw/siw_mem.c siw_unref_mem_sgl(wqe->mem, wqe->sqe.num_sge);
sqe 275 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.flags = 0;
sqe 276 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.num_sge = 1;
sqe 277 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.sge[0].length = 0;
sqe 278 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.sge[0].laddr = 0;
sqe 279 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.sge[0].lkey = 0;
sqe 284 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.rkey = 1;
sqe 285 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.raddr = 0;
sqe 289 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.opcode = SIW_OP_WRITE;
sqe 293 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.opcode = SIW_OP_READ;
sqe 299 drivers/infiniband/sw/siw/siw_qp.c siw_read_to_orq(rreq, &wqe->sqe);
sqe 479 drivers/infiniband/sw/siw/siw_qp.c rreq->ddp_msn = htonl(wqe->sqe.sge[0].length);
sqe 482 drivers/infiniband/sw/siw/siw_qp.c rreq->sink_stag = htonl(wqe->sqe.rkey);
sqe 483 drivers/infiniband/sw/siw/siw_qp.c rreq->sink_to = cpu_to_be64(wqe->sqe.raddr);
sqe 484 drivers/infiniband/sw/siw/siw_qp.c rreq->read_size = htonl(wqe->sqe.sge[0].length);
sqe 485 drivers/infiniband/sw/siw/siw_qp.c rreq->source_stag = htonl(wqe->sqe.sge[0].lkey);
sqe 487 drivers/infiniband/sw/siw/siw_qp.c cpu_to_be64(wqe->sqe.sge[0].laddr);
sqe 868 drivers/infiniband/sw/siw/siw_qp.c void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
sqe 870 drivers/infiniband/sw/siw/siw_qp.c rreq->id = sqe->id;
sqe 871 drivers/infiniband/sw/siw/siw_qp.c rreq->opcode = sqe->opcode;
sqe 872 drivers/infiniband/sw/siw/siw_qp.c rreq->sge[0].laddr = sqe->sge[0].laddr;
sqe 873 drivers/infiniband/sw/siw/siw_qp.c rreq->sge[0].length = sqe->sge[0].length;
sqe 874 drivers/infiniband/sw/siw/siw_qp.c rreq->sge[0].lkey = sqe->sge[0].lkey;
sqe 875 drivers/infiniband/sw/siw/siw_qp.c rreq->sge[1].lkey = sqe->sge[1].lkey;
sqe 876 drivers/infiniband/sw/siw/siw_qp.c rreq->flags = sqe->flags | SIW_WQE_VALID;
sqe 888 drivers/infiniband/sw/siw/siw_qp.c struct siw_sqe *irqe, *sqe;
sqe 895 drivers/infiniband/sw/siw/siw_qp.c sqe = sq_get_next(qp);
sqe 901 drivers/infiniband/sw/siw/siw_qp.c if (sqe && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
sqe 909 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
sqe 910 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.flags = 0;
sqe 912 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.num_sge = 1;
sqe 913 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.sge[0].length = irqe->sge[0].length;
sqe 914 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
sqe 915 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
sqe 917 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.num_sge = 0;
sqe 923 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.sge[1].length = irqe->sge[1].length;
sqe 925 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.rkey = irqe->rkey;
sqe 926 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.raddr = irqe->raddr;
sqe 936 drivers/infiniband/sw/siw/siw_qp.c sqe = sq_get_next(qp);
sqe 937 drivers/infiniband/sw/siw/siw_qp.c if (sqe) {
sqe 943 drivers/infiniband/sw/siw/siw_qp.c memcpy(&wqe->sqe, sqe, sizeof(*sqe));
sqe 945 drivers/infiniband/sw/siw/siw_qp.c if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
sqe 949 drivers/infiniband/sw/siw/siw_qp.c if (wqe->sqe.flags & SIW_WQE_INLINE) {
sqe 950 drivers/infiniband/sw/siw/siw_qp.c if (wqe->sqe.opcode != SIW_OP_SEND &&
sqe 951 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.opcode != SIW_OP_WRITE) {
sqe 955 drivers/infiniband/sw/siw/siw_qp.c if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
sqe 959 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
sqe 960 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.sge[0].lkey = 0;
sqe 961 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.num_sge = 1;
sqe 963 drivers/infiniband/sw/siw/siw_qp.c if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
sqe 965 drivers/infiniband/sw/siw/siw_qp.c if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
sqe 966 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.opcode ==
sqe 980 drivers/infiniband/sw/siw/siw_qp.c } else if (wqe->sqe.opcode == SIW_OP_READ ||
sqe 981 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
sqe 984 drivers/infiniband/sw/siw/siw_qp.c wqe->sqe.num_sge = 1;
sqe 994 drivers/infiniband/sw/siw/siw_qp.c siw_read_to_orq(rreq, &wqe->sqe);
sqe 1004 drivers/infiniband/sw/siw/siw_qp.c smp_store_mb(sqe->flags, 0);
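siw's send queue is a flags-based producer/consumer ring: siw_verbs.c publishes a work request by setting SIW_WQE_VALID as its final store, and the siw_qp.c lines above consume it through sq_get_next() and release the slot with a full-barrier store once the contents have been copied into the active wqe. A condensed sketch of the consumer side, using the helpers visible in siw.h above:

    struct siw_sqe *sqe = sq_get_next(qp);     /* NULL unless SIW_WQE_VALID */

    if (sqe) {
            memcpy(&wqe->sqe, sqe, sizeof(*sqe)); /* detach from the ring */
            smp_store_mb(sqe->flags, 0);          /* release slot to producer */
    }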
sqe 1047 drivers/infiniband/sw/siw/siw_qp.c int siw_sqe_complete(struct siw_qp *qp, struct siw_sqe *sqe, u32 bytes,
sqe 1054 drivers/infiniband/sw/siw/siw_qp.c u32 sqe_flags = sqe->flags;
sqe 1067 drivers/infiniband/sw/siw/siw_qp.c cqe->id = sqe->id;
sqe 1068 drivers/infiniband/sw/siw/siw_qp.c cqe->opcode = sqe->opcode;
sqe 1081 drivers/infiniband/sw/siw/siw_qp.c smp_store_mb(sqe->flags, 0);
sqe 1100 drivers/infiniband/sw/siw/siw_qp.c smp_store_mb(sqe->flags, 0);
sqe 1177 drivers/infiniband/sw/siw/siw_qp.c struct siw_sqe *sqe;
sqe 1185 drivers/infiniband/sw/siw/siw_qp.c sqe = &qp->orq[qp->orq_get % qp->attrs.orq_size];
sqe 1186 drivers/infiniband/sw/siw/siw_qp.c if (!READ_ONCE(sqe->flags))
sqe 1189 drivers/infiniband/sw/siw/siw_qp.c if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
sqe 1192 drivers/infiniband/sw/siw/siw_qp.c WRITE_ONCE(sqe->flags, 0);
sqe 1212 drivers/infiniband/sw/siw/siw_qp.c siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
sqe 1221 drivers/infiniband/sw/siw/siw_qp.c sqe = &qp->sendq[qp->sq_get % qp->attrs.sq_size];
sqe 1222 drivers/infiniband/sw/siw/siw_qp.c if (!READ_ONCE(sqe->flags))
sqe 1226 drivers/infiniband/sw/siw/siw_qp.c if (siw_sqe_complete(qp, sqe, 0, SIW_WC_WR_FLUSH_ERR) != 0)
sqe 1233 drivers/infiniband/sw/siw/siw_qp.c WRITE_ONCE(sqe->flags, 0);
sqe 1270 drivers/infiniband/sw/siw/siw_qp.c siw_sqe_complete(qp, &wqe->sqe, 0, SIW_WC_WR_FLUSH_ERR);
sqe 175 drivers/infiniband/sw/siw/siw_qp_rx.c srx->ddp_stag = wqe->sqe.sge[0].lkey;
sqe 176 drivers/infiniband/sw/siw/siw_qp_rx.c srx->ddp_to = wqe->sqe.sge[0].laddr;
sqe 690 drivers/infiniband/sw/siw/siw_qp_rx.c resp = &tx_work->sqe;
sqe 749 drivers/infiniband/sw/siw/siw_qp_rx.c wqe->sqe.id = orqe->id;
sqe 750 drivers/infiniband/sw/siw/siw_qp_rx.c wqe->sqe.opcode = orqe->opcode;
sqe 751 drivers/infiniband/sw/siw/siw_qp_rx.c wqe->sqe.sge[0].laddr = orqe->sge[0].laddr;
sqe 752 drivers/infiniband/sw/siw/siw_qp_rx.c wqe->sqe.sge[0].lkey = orqe->sge[0].lkey;
sqe 753 drivers/infiniband/sw/siw/siw_qp_rx.c wqe->sqe.sge[0].length = orqe->sge[0].length;
sqe 754 drivers/infiniband/sw/siw/siw_qp_rx.c wqe->sqe.flags = orqe->flags;
sqe 755 drivers/infiniband/sw/siw/siw_qp_rx.c wqe->sqe.num_sge = 1;
sqe 789 drivers/infiniband/sw/siw/siw_qp_rx.c qp_id(qp), wqe->wr_status, wqe->sqe.opcode);
sqe 818 drivers/infiniband/sw/siw/siw_qp_rx.c sge = wqe->sqe.sge; /* there is only one */
sqe 1160 drivers/infiniband/sw/siw/siw_qp_rx.c if (tx_waiting->sqe.opcode == SIW_OP_READ ||
sqe 1161 drivers/infiniband/sw/siw/siw_qp_rx.c tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
sqe 1168 drivers/infiniband/sw/siw/siw_qp_rx.c siw_read_to_orq(rreq, &tx_waiting->sqe);
sqe 1272 drivers/infiniband/sw/siw/siw_qp_rx.c rv = siw_invalidate_stag(qp->pd, wqe->sqe.sge[0].lkey);
sqe 1287 drivers/infiniband/sw/siw/siw_qp_rx.c if ((wqe->sqe.flags & SIW_WQE_SIGNALLED) || error != 0)
sqe 1288 drivers/infiniband/sw/siw/siw_qp_rx.c rv = siw_sqe_complete(qp, &wqe->sqe, wqe->processed,
sqe 43 drivers/infiniband/sw/siw/siw_qp_tx.c struct siw_sge *sge = &wqe->sqe.sge[0];
sqe 46 drivers/infiniband/sw/siw/siw_qp_tx.c if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
sqe 53 drivers/infiniband/sw/siw/siw_qp_tx.c memcpy(paddr, &wqe->sqe.sge[1], bytes);
sqe 137 drivers/infiniband/sw/siw/siw_qp_tx.c c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);
sqe 139 drivers/infiniband/sw/siw/siw_qp_tx.c cpu_to_be64(wqe->sqe.sge[0].laddr);
sqe 140 drivers/infiniband/sw/siw/siw_qp_tx.c c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);
sqe 141 drivers/infiniband/sw/siw/siw_qp_tx.c c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr);
sqe 142 drivers/infiniband/sw/siw/siw_qp_tx.c c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length);
sqe 185 drivers/infiniband/sw/siw/siw_qp_tx.c c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);
sqe 197 drivers/infiniband/sw/siw/siw_qp_tx.c c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey);
sqe 198 drivers/infiniband/sw/siw/siw_qp_tx.c c_tx->pkt.rwrite.sink_to = cpu_to_be64(wqe->sqe.raddr);
sqe 211 drivers/infiniband/sw/siw/siw_qp_tx.c c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->sqe.rkey);
sqe 212 drivers/infiniband/sw/siw/siw_qp_tx.c c_tx->pkt.rresp.sink_to = cpu_to_be64(wqe->sqe.raddr);
sqe 246 drivers/infiniband/sw/siw/siw_qp_tx.c cpu_to_be64(wqe->sqe.raddr);
sqe 427 drivers/infiniband/sw/siw/siw_qp_tx.c struct siw_sge *sge = &wqe->sqe.sge[c_tx->sge_idx];
sqe 582 drivers/infiniband/sw/siw/siw_qp_tx.c rv = siw_0copy_tx(s, page_array, &wqe->sqe.sge[c_tx->sge_idx],
sqe 640 drivers/infiniband/sw/siw/siw_qp_tx.c sge = &wqe->sqe.sge[c_tx->sge_idx];
sqe 710 drivers/infiniband/sw/siw/siw_qp_tx.c cpu_to_be64(wqe->sqe.raddr + wqe->processed);
sqe 754 drivers/infiniband/sw/siw/siw_qp_tx.c struct siw_sge *sge = &wqe->sqe.sge[0];
sqe 755 drivers/infiniband/sw/siw/siw_qp_tx.c int i, len, num_sge = wqe->sqe.num_sge;
sqe 795 drivers/infiniband/sw/siw/siw_qp_tx.c if (!(wqe->sqe.flags & SIW_WQE_INLINE)) {
sqe 797 drivers/infiniband/sw/siw/siw_qp_tx.c wqe->sqe.num_sge = 1;
sqe 819 drivers/infiniband/sw/siw/siw_qp_tx.c wqe->bytes = wqe->sqe.sge[0].length;
sqe 825 drivers/infiniband/sw/siw/siw_qp_tx.c wqe->sqe.sge[0].laddr =
sqe 826 drivers/infiniband/sw/siw/siw_qp_tx.c (u64)(uintptr_t)&wqe->sqe.sge[1];
sqe 848 drivers/infiniband/sw/siw/siw_qp_tx.c wqe->sqe.id);
sqe 919 drivers/infiniband/sw/siw/siw_qp_tx.c static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
sqe 921 drivers/infiniband/sw/siw/siw_qp_tx.c struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
sqe 926 drivers/infiniband/sw/siw/siw_qp_tx.c siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);
sqe 929 drivers/infiniband/sw/siw/siw_qp_tx.c pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
sqe 933 drivers/infiniband/sw/siw/siw_qp_tx.c if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) {
sqe 934 drivers/infiniband/sw/siw/siw_qp_tx.c pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);
sqe 938 drivers/infiniband/sw/siw/siw_qp_tx.c mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
sqe 940 drivers/infiniband/sw/siw/siw_qp_tx.c pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
sqe 950 drivers/infiniband/sw/siw/siw_qp_tx.c pr_warn("siw: fastreg: STag 0x%08x already valid\n", sqe->rkey);
sqe 955 drivers/infiniband/sw/siw/siw_qp_tx.c mem->stag = sqe->rkey;
sqe 956 drivers/infiniband/sw/siw/siw_qp_tx.c mem->perms = sqe->access;
sqe 958 drivers/infiniband/sw/siw/siw_qp_tx.c siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
sqe 972 drivers/infiniband/sw/siw/siw_qp_tx.c rv = siw_fastreg_mr(qp->pd, &wqe->sqe);
sqe 976 drivers/infiniband/sw/siw/siw_qp_tx.c rv = siw_invalidate_stag(qp->pd, wqe->sqe.rkey);
sqe 1050 drivers/infiniband/sw/siw/siw_qp_tx.c siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
sqe 1135 drivers/infiniband/sw/siw/siw_qp_tx.c siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
sqe 657 drivers/infiniband/sw/siw/siw_verbs.c struct siw_sqe *sqe)
sqe 660 drivers/infiniband/sw/siw/siw_verbs.c void *kbuf = &sqe->sge[1];
sqe 663 drivers/infiniband/sw/siw/siw_verbs.c sqe->sge[0].laddr = (uintptr_t)kbuf;
sqe 664 drivers/infiniband/sw/siw/siw_verbs.c sqe->sge[0].lkey = 0;
sqe 682 drivers/infiniband/sw/siw/siw_verbs.c sqe->sge[0].length = bytes > 0 ? bytes : 0;
sqe 683 drivers/infiniband/sw/siw/siw_verbs.c sqe->num_sge = bytes > 0 ? 1 : 0;
sqe 692 drivers/infiniband/sw/siw/siw_verbs.c struct siw_sqe sqe = {};
sqe 696 drivers/infiniband/sw/siw/siw_verbs.c sqe.id = wr->wr_id;
sqe 697 drivers/infiniband/sw/siw/siw_verbs.c sqe.opcode = wr->opcode;
sqe 698 drivers/infiniband/sw/siw/siw_verbs.c rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
sqe 800 drivers/infiniband/sw/siw/siw_verbs.c struct siw_sqe *sqe = &qp->sendq[idx];
sqe 802 drivers/infiniband/sw/siw/siw_verbs.c if (sqe->flags) {
sqe 812 drivers/infiniband/sw/siw/siw_verbs.c sqe->id = wr->wr_id;
sqe 816 drivers/infiniband/sw/siw/siw_verbs.c sqe->flags |= SIW_WQE_SIGNALLED;
sqe 819 drivers/infiniband/sw/siw/siw_verbs.c sqe->flags |= SIW_WQE_READ_FENCE;
sqe 825 drivers/infiniband/sw/siw/siw_verbs.c sqe->flags |= SIW_WQE_SOLICITED;
sqe 828 drivers/infiniband/sw/siw/siw_verbs.c siw_copy_sgl(wr->sg_list, sqe->sge,
sqe 830 drivers/infiniband/sw/siw/siw_verbs.c sqe->num_sge = wr->num_sge;
sqe 832 drivers/infiniband/sw/siw/siw_verbs.c rv = siw_copy_inline_sgl(wr, sqe);
sqe 837 drivers/infiniband/sw/siw/siw_verbs.c sqe->flags |= SIW_WQE_INLINE;
sqe 838 drivers/infiniband/sw/siw/siw_verbs.c sqe->num_sge = 1;
sqe 841 drivers/infiniband/sw/siw/siw_verbs.c sqe->opcode = SIW_OP_SEND;
sqe 843 drivers/infiniband/sw/siw/siw_verbs.c sqe->opcode = SIW_OP_SEND_REMOTE_INV;
sqe 844 drivers/infiniband/sw/siw/siw_verbs.c sqe->rkey = wr->ex.invalidate_rkey;
sqe 861 drivers/infiniband/sw/siw/siw_verbs.c siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1);
sqe 865 drivers/infiniband/sw/siw/siw_verbs.c sqe->raddr = rdma_wr(wr)->remote_addr;
sqe 866 drivers/infiniband/sw/siw/siw_verbs.c sqe->rkey = rdma_wr(wr)->rkey;
sqe 867 drivers/infiniband/sw/siw/siw_verbs.c sqe->num_sge = 1;
sqe 870 drivers/infiniband/sw/siw/siw_verbs.c sqe->opcode = SIW_OP_READ;
sqe 872 drivers/infiniband/sw/siw/siw_verbs.c sqe->opcode = SIW_OP_READ_LOCAL_INV;
sqe 877 drivers/infiniband/sw/siw/siw_verbs.c siw_copy_sgl(wr->sg_list, &sqe->sge[0],
sqe 879 drivers/infiniband/sw/siw/siw_verbs.c sqe->num_sge = wr->num_sge;
sqe 881 drivers/infiniband/sw/siw/siw_verbs.c rv = siw_copy_inline_sgl(wr, sqe);
sqe 886 drivers/infiniband/sw/siw/siw_verbs.c sqe->flags |= SIW_WQE_INLINE;
sqe 887 drivers/infiniband/sw/siw/siw_verbs.c sqe->num_sge = 1;
sqe 889 drivers/infiniband/sw/siw/siw_verbs.c sqe->raddr = rdma_wr(wr)->remote_addr;
sqe 890 drivers/infiniband/sw/siw/siw_verbs.c sqe->rkey = rdma_wr(wr)->rkey;
sqe 891 drivers/infiniband/sw/siw/siw_verbs.c sqe->opcode = SIW_OP_WRITE;
sqe 895 drivers/infiniband/sw/siw/siw_verbs.c sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
sqe 896 drivers/infiniband/sw/siw/siw_verbs.c sqe->rkey = reg_wr(wr)->key;
sqe 897 drivers/infiniband/sw/siw/siw_verbs.c sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
sqe 898 drivers/infiniband/sw/siw/siw_verbs.c sqe->opcode = SIW_OP_REG_MR;
sqe 902 drivers/infiniband/sw/siw/siw_verbs.c sqe->rkey = wr->ex.invalidate_rkey;
sqe 903 drivers/infiniband/sw/siw/siw_verbs.c sqe->opcode = SIW_OP_INVAL_STAG;
sqe 913 drivers/infiniband/sw/siw/siw_verbs.c sqe->opcode, sqe->flags,
sqe 914 drivers/infiniband/sw/siw/siw_verbs.c (void *)(uintptr_t)sqe->id);
sqe 921 drivers/infiniband/sw/siw/siw_verbs.c sqe->flags |= SIW_WQE_VALID;
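siw_verbs.c translates each ib_send_wr opcode into one of these sqe layouts; the RDMA WRITE case is the most direct mapping. Condensed from the entries above (error handling and the inline-data variant omitted):

    /* IB_WR_RDMA_WRITE in siw_post_send(), reduced to its stores. */
    siw_copy_sgl(wr->sg_list, &sqe->sge[0], wr->num_sge);
    sqe->num_sge = wr->num_sge;
    sqe->raddr   = rdma_wr(wr)->remote_addr;
    sqe->rkey    = rdma_wr(wr)->rkey;
    sqe->opcode  = SIW_OP_WRITE;
    sqe->flags  |= SIW_WQE_VALID;   /* publish: must be the final flag store */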
sqe 3105 drivers/net/ethernet/broadcom/cnic_defs.h struct fcoe_sqe sqe;
sqe 1562 drivers/nvme/host/fc.c struct nvme_command *sqe = &op->cmd_iu.sqe;
sqe 1666 drivers/nvme/host/fc.c sqe->common.command_id != cqe->command_id)) {
sqe 1676 drivers/nvme/host/fc.c sqe->common.command_id,
sqe 1785 drivers/nvme/host/fc.c struct nvme_command *sqe;
sqe 1797 drivers/nvme/host/fc.c sqe = &cmdiu->sqe;
sqe 1809 drivers/nvme/host/fc.c memset(sqe, 0, sizeof(*sqe));
sqe 1810 drivers/nvme/host/fc.c sqe->common.opcode = nvme_admin_async_event;
sqe 1812 drivers/nvme/host/fc.c sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
sqe 2212 drivers/nvme/host/fc.c struct nvme_command *sqe = &cmdiu->sqe;
sqe 2250 drivers/nvme/host/fc.c WARN_ON_ONCE(sqe->common.metadata);
sqe 2251 drivers/nvme/host/fc.c sqe->common.flags |= NVME_CMD_SGL_METABUF;
sqe 2260 drivers/nvme/host/fc.c sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
sqe 2262 drivers/nvme/host/fc.c sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
sqe 2263 drivers/nvme/host/fc.c sqe->rw.dptr.sgl.addr = 0;
sqe 2330 drivers/nvme/host/fc.c struct nvme_command *sqe = &cmdiu->sqe;
sqe 2340 drivers/nvme/host/fc.c ret = nvme_setup_cmd(ns, rq, sqe);
sqe 55 drivers/nvme/host/rdma.c struct nvme_rdma_qe sqe;
sqe 284 drivers/nvme/host/rdma.c kfree(req->sqe.data);
sqe 297 drivers/nvme/host/rdma.c req->sqe.data = kzalloc(sizeof(struct nvme_command), GFP_KERNEL);
sqe 298 drivers/nvme/host/rdma.c if (!req->sqe.data)
sqe 1330 drivers/nvme/host/rdma.c container_of(qe, struct nvme_rdma_request, sqe);
sqe 1419 drivers/nvme/host/rdma.c struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe;
sqe 1420 drivers/nvme/host/rdma.c struct nvme_command *cmd = sqe->data;
sqe 1424 drivers/nvme/host/rdma.c ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(*cmd), DMA_TO_DEVICE);
sqe 1432 drivers/nvme/host/rdma.c sqe->cqe.done = nvme_rdma_async_done;
sqe 1434 drivers/nvme/host/rdma.c ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
sqe 1437 drivers/nvme/host/rdma.c ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
sqe 1739 drivers/nvme/host/rdma.c struct nvme_rdma_qe *sqe = &req->sqe;
sqe 1740 drivers/nvme/host/rdma.c struct nvme_command *c = sqe->data;
sqe 1753 drivers/nvme/host/rdma.c req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
sqe 1756 drivers/nvme/host/rdma.c err = ib_dma_mapping_error(dev, req->sqe.dma);
sqe 1760 drivers/nvme/host/rdma.c ib_dma_sync_single_for_cpu(dev, sqe->dma,
sqe 1777 drivers/nvme/host/rdma.c sqe->cqe.done = nvme_rdma_send_done;
sqe 1779 drivers/nvme/host/rdma.c ib_dma_sync_single_for_device(dev, sqe->dma,
sqe 1782 drivers/nvme/host/rdma.c err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
sqe 1797 drivers/nvme/host/rdma.c ib_dma_unmap_single(dev, req->sqe.dma, sizeof(struct nvme_command),
sqe 1816 drivers/nvme/host/rdma.c ib_dma_unmap_single(ibdev, req->sqe.dma, sizeof(struct nvme_command),
sqe 1777 drivers/nvme/target/fc.c struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
sqe 1809 drivers/nvme/target/fc.c nvme_is_fabrics((struct nvme_command *) sqe) ||
sqe 1812 drivers/nvme/target/fc.c (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
sqe 2083 drivers/nvme/target/fc.c struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
sqe 2107 drivers/nvme/target/fc.c cqe->command_id = sqe->command_id;
sqe 2174 drivers/nvme/target/fc.c if (!nvme_is_write(&cmdiu->sqe))
sqe 2178 drivers/nvme/target/fc.c if (nvme_is_write(&cmdiu->sqe))
sqe 2186 drivers/nvme/target/fc.c fod->req.cmd = &fod->cmdiubuf.sqe;
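In nvme-rdma the sqe is an nvme_command in host memory that is DMA-mapped once per request and explicitly synced around CPU writes before posting. The ordering visible in the rdma.c entries above, reduced to a sketch (the final first_wr argument is passed as NULL here for simplicity; the request path chains a registration WR when one exists):

    req->sqe.dma = ib_dma_map_single(dev, req->sqe.data,
                                     sizeof(struct nvme_command),
                                     DMA_TO_DEVICE);
    ib_dma_sync_single_for_cpu(dev, sqe->dma, sizeof(struct nvme_command),
                               DMA_TO_DEVICE);
    /* ... CPU fills the command, e.g. via nvme_setup_cmd() ... */
    sqe->cqe.done = nvme_rdma_send_done;
    ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(struct nvme_command),
                                  DMA_TO_DEVICE);
    err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge, NULL);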
sqe 1377 drivers/scsi/bnx2fc/bnx2fc_hwi.c struct fcoe_sqe *sqe;
sqe 1379 drivers/scsi/bnx2fc/bnx2fc_hwi.c sqe = &tgt->sq[tgt->sq_prod_idx];
sqe 1382 drivers/scsi/bnx2fc/bnx2fc_hwi.c sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
sqe 1383 drivers/scsi/bnx2fc/bnx2fc_hwi.c sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
sqe 634 drivers/scsi/bnx2i/bnx2i.h struct sqe *sq_virt;
sqe 638 drivers/scsi/bnx2i/bnx2i.h struct sqe *sq_prod_qe;
sqe 639 drivers/scsi/bnx2i/bnx2i.h struct sqe *sq_cons_qe;
sqe 640 drivers/scsi/bnx2i/bnx2i.h struct sqe *sq_first_qe;
sqe 641 drivers/scsi/bnx2i/bnx2i.h struct sqe *sq_last_qe;
sqe 1038 drivers/scsi/lpfc/lpfc_nvme.c cid = cp->sqe.common.command_id;
sqe 1511 drivers/scsi/lpfc/lpfc_nvme.c struct nvme_common_command *sqe;
sqe 1601 drivers/scsi/lpfc/lpfc_nvme.c sqe = &((struct nvme_fc_cmd_iu *)
sqe 1602 drivers/scsi/lpfc/lpfc_nvme.c pnvme_fcreq->cmdaddr)->sqe.common;
sqe 1603 drivers/scsi/lpfc/lpfc_nvme.c if (sqe->opcode == nvme_admin_keep_alive)
sqe 13 drivers/scsi/qedf/drv_fcoe_fw_funcs.c memset(task_params->sqe, 0, sizeof(*(task_params->sqe)));
sqe 14 drivers/scsi/qedf/drv_fcoe_fw_funcs.c SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE,
sqe 16 drivers/scsi/qedf/drv_fcoe_fw_funcs.c task_params->sqe->task_id = task_params->itid;
sqe 167 drivers/scsi/qedf/drv_fcoe_fw_funcs.c task_params->sqe->additional_info_union.burst_length =
sqe 169 drivers/scsi/qedf/drv_fcoe_fw_funcs.c SET_FIELD(task_params->sqe->flags,
sqe 171 drivers/scsi/qedf/drv_fcoe_fw_funcs.c SET_FIELD(task_params->sqe->flags, FCOE_WQE_SGL_MODE,
sqe 193 drivers/scsi/qedf/drv_fcoe_fw_funcs.c task_params->sqe->additional_info_union.seq_rec_updated_offset =
sqe 16 drivers/scsi/qedf/drv_fcoe_fw_funcs.h struct fcoe_wqe *sqe;
sqe 503 drivers/scsi/qedf/qedf.h struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
sqe 23 drivers/scsi/qedf/qedf_els.c struct fcoe_wqe *sqe;
sqe 120 drivers/scsi/qedf/qedf_els.c sqe = &fcport->sq[sqe_idx];
sqe 121 drivers/scsi/qedf/qedf_els.c memset(sqe, 0, sizeof(struct fcoe_wqe));
sqe 125 drivers/scsi/qedf/qedf_els.c qedf_init_mp_task(els_req, task, sqe);
sqe 687 drivers/scsi/qedf/qedf_els.c struct fcoe_wqe *sqe;
sqe 717 drivers/scsi/qedf/qedf_els.c sqe = &fcport->sq[sqe_idx];
sqe 718 drivers/scsi/qedf/qedf_els.c memset(sqe, 0, sizeof(struct fcoe_wqe));
sqe 719 drivers/scsi/qedf/qedf_els.c orig_io_req->task_params->sqe = sqe;
sqe 596 drivers/scsi/qedf/qedf_io.c struct fcoe_wqe *sqe)
sqe 632 drivers/scsi/qedf/qedf_io.c io_req->task_params->sqe = sqe;
sqe 685 drivers/scsi/qedf/qedf_io.c struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
sqe 711 drivers/scsi/qedf/qedf_io.c io_req->task_params->sqe = sqe;
sqe 864 drivers/scsi/qedf/qedf_io.c struct fcoe_wqe *sqe;
sqe 913 drivers/scsi/qedf/qedf_io.c sqe = &fcport->sq[sqe_idx];
sqe 914 drivers/scsi/qedf/qedf_io.c memset(sqe, 0, sizeof(struct fcoe_wqe));
sqe 927 drivers/scsi/qedf/qedf_io.c qedf_init_task(fcport, lport, io_req, task_ctx, sqe);
sqe 1841 drivers/scsi/qedf/qedf_io.c struct fcoe_wqe *sqe;
sqe 1920 drivers/scsi/qedf/qedf_io.c sqe = &fcport->sq[sqe_idx];
sqe 1921 drivers/scsi/qedf/qedf_io.c memset(sqe, 0, sizeof(struct fcoe_wqe));
sqe 1922 drivers/scsi/qedf/qedf_io.c io_req->task_params->sqe = sqe;
sqe 2139 drivers/scsi/qedf/qedf_io.c struct fcoe_wqe *sqe;
sqe 2206 drivers/scsi/qedf/qedf_io.c sqe = &fcport->sq[sqe_idx];
sqe 2207 drivers/scsi/qedf/qedf_io.c memset(sqe, 0, sizeof(struct fcoe_wqe));
sqe 2208 drivers/scsi/qedf/qedf_io.c io_req->task_params->sqe = sqe;
sqe 2280 drivers/scsi/qedf/qedf_io.c struct fcoe_wqe *sqe;
sqe 2337 drivers/scsi/qedf/qedf_io.c sqe = &fcport->sq[sqe_idx];
sqe 2338 drivers/scsi/qedf/qedf_io.c memset(sqe, 0, sizeof(struct fcoe_wqe));
sqe 2340 drivers/scsi/qedf/qedf_io.c qedf_init_task(fcport, lport, io_req, task, sqe);
sqe 1089 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sqe 1091 drivers/scsi/qedi/qedi_fw.c memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
sqe 1163 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sqe 1164 drivers/scsi/qedi/qedi_fw.c memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
sqe 1534 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sqe 1536 drivers/scsi/qedi/qedi_fw.c memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
sqe 1674 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sqe 1676 drivers/scsi/qedi/qedi_fw.c memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
sqe 1791 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sqe 1793 drivers/scsi/qedi/qedi_fw.c memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
sqe 2153 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sqe 2164 drivers/scsi/qedi/qedi_fw.c memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
sqe 2208 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sqe 2209 drivers/scsi/qedi/qedi_fw.c memset(task_params.sqe, 0, sizeof(struct iscsi_wqe));
sqe 98 drivers/scsi/qedi/qedi_fw_api.c if (!task_params->sqe)
sqe 101 drivers/scsi/qedi/qedi_fw_api.c memset(task_params->sqe, 0, sizeof(*task_params->sqe));
sqe 102 drivers/scsi/qedi/qedi_fw_api.c task_params->sqe->task_id = cpu_to_le16(task_params->itid);
sqe 104 drivers/scsi/qedi/qedi_fw_api.c SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
sqe 115 drivers/scsi/qedi/qedi_fw_api.c init_dif_context_flags(&task_params->sqe->prot_flags,
sqe 118 drivers/scsi/qedi/qedi_fw_api.c SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
sqe 134 drivers/scsi/qedi/qedi_fw_api.c SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
sqe 136 drivers/scsi/qedi/qedi_fw_api.c SET_FIELD(task_params->sqe->contlen_cdbsize, ISCSI_WQE_CONT_LEN,
sqe 141 drivers/scsi/qedi/qedi_fw_api.c SET_FIELD(task_params->sqe->contlen_cdbsize,
sqe 147 drivers/scsi/qedi/qedi_fw_api.c SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
sqe 152 drivers/scsi/qedi/qedi_fw_api.c SET_FIELD(task_params->sqe->contlen_cdbsize,
sqe 162 drivers/scsi/qedi/qedi_fw_api.c SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
sqe 165 drivers/scsi/qedi/qedi_fw_api.c SET_FIELD(task_params->sqe->flags, ISCSI_WQE_WQE_TYPE,
sqe 178 drivers/scsi/qedi/qedi_fw_api.c SET_FIELD(task_params->sqe->flags, ISCSI_WQE_RESPONSE,
sqe 182 drivers/scsi/qedi/qedi_fw_api.c SET_FIELD(task_params->sqe->contlen_cdbsize,
sqe 187 drivers/scsi/qedi/qedi_fw_api.c SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
sqe 190 drivers/scsi/qedi/qedi_fw_api.c SET_FIELD(task_params->sqe->flags, ISCSI_WQE_NUM_SGES,
sqe 14 drivers/scsi/qedi/qedi_fw_iscsi.h struct iscsi_wqe *sqe;
sqe 403 drivers/scsi/qla2xxx/qla_nvme.c if (cmd->sqe.common.opcode == nvme_admin_async_event) {
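qedf and qedi share one submission idiom: pick the current producer slot in the per-connection SQ, zero it, and hand its address to the firmware task-init helpers through task_params, which then fill task_id and the WQE flag fields with SET_FIELD(). Condensed from the entries above (qedf shown; qedi is analogous with struct iscsi_wqe and ISCSI_WQE_* fields; the request_type value passed to SET_FIELD() is elided in the index, so it is named generically here):

    sqe = &fcport->sq[sqe_idx];
    memset(sqe, 0, sizeof(struct fcoe_wqe));
    io_req->task_params->sqe = sqe;

    /* inside the init helper (drv_fcoe_fw_funcs.c): */
    task_params->sqe->task_id = task_params->itid;
    SET_FIELD(task_params->sqe->flags, FCOE_WQE_REQ_TYPE, request_type);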
sqe 273 fs/io_uring.c const struct io_uring_sqe *sqe;
sqe 497 fs/io_uring.c if (req->submit.sqe) {
sqe 498 fs/io_uring.c switch (req->submit.sqe->opcode) {
sqe 1078 fs/io_uring.c const struct io_uring_sqe *sqe = s->sqe;
sqe 1102 fs/io_uring.c kiocb->ki_pos = READ_ONCE(sqe->off);
sqe 1106 fs/io_uring.c ioprio = READ_ONCE(sqe->ioprio);
sqe 1116 fs/io_uring.c ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
sqe 1166 fs/io_uring.c const struct io_uring_sqe *sqe,
sqe 1169 fs/io_uring.c size_t len = READ_ONCE(sqe->len);
sqe 1179 fs/io_uring.c buf_index = READ_ONCE(sqe->buf_index);
sqe 1185 fs/io_uring.c buf_addr = READ_ONCE(sqe->addr);
sqe 1243 fs/io_uring.c const struct io_uring_sqe *sqe = s->sqe;
sqe 1244 fs/io_uring.c void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
sqe 1245 fs/io_uring.c size_t sqe_len = READ_ONCE(sqe->len);
sqe 1256 fs/io_uring.c opcode = READ_ONCE(sqe->opcode);
sqe 1259 fs/io_uring.c ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
sqe 1556 fs/io_uring.c static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sqe 1565 fs/io_uring.c if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
sqe 1571 fs/io_uring.c static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
sqe 1574 fs/io_uring.c loff_t sqe_off = READ_ONCE(sqe->off);
sqe 1575 fs/io_uring.c loff_t sqe_len = READ_ONCE(sqe->len);
sqe 1580 fs/io_uring.c fsync_flags = READ_ONCE(sqe->fsync_flags);
sqe 1584 fs/io_uring.c ret = io_prep_fsync(req, sqe);
sqe 1598 fs/io_uring.c io_cqring_add_event(req->ctx, sqe->user_data, ret);
sqe 1603 fs/io_uring.c static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sqe 1613 fs/io_uring.c if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
sqe 1620 fs/io_uring.c const struct io_uring_sqe *sqe,
sqe 1628 fs/io_uring.c ret = io_prep_sfr(req, sqe);
sqe 1636 fs/io_uring.c sqe_off = READ_ONCE(sqe->off);
sqe 1637 fs/io_uring.c sqe_len = READ_ONCE(sqe->len);
sqe 1638 fs/io_uring.c flags = READ_ONCE(sqe->sync_range_flags);
sqe 1644 fs/io_uring.c io_cqring_add_event(req->ctx, sqe->user_data, ret);
sqe 1650 fs/io_uring.c static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
sqe 1666 fs/io_uring.c flags = READ_ONCE(sqe->msg_flags);
sqe 1678 fs/io_uring.c READ_ONCE(sqe->addr);
sqe 1697 fs/io_uring.c io_cqring_add_event(req->ctx, sqe->user_data, ret);
sqe 1703 fs/io_uring.c static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
sqe 1707 fs/io_uring.c return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock);
sqe 1713 fs/io_uring.c static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
sqe 1717 fs/io_uring.c return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock);
sqe 1754 fs/io_uring.c static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sqe 1762 fs/io_uring.c if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
sqe 1763 fs/io_uring.c sqe->poll_events)
sqe 1768 fs/io_uring.c if (READ_ONCE(sqe->addr) == poll_req->user_data) {
sqe 1776 fs/io_uring.c io_cqring_add_event(req->ctx, sqe->user_data, ret);
sqe 1877 fs/io_uring.c static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sqe 1888 fs/io_uring.c if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
sqe 1893 fs/io_uring.c req->submit.sqe = NULL;
sqe 1895 fs/io_uring.c events = READ_ONCE(sqe->poll_events);
sqe 1977 fs/io_uring.c static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sqe 1987 fs/io_uring.c if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
sqe 1988 fs/io_uring.c sqe->len != 1)
sqe 1991 fs/io_uring.c if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
sqe 2001 fs/io_uring.c count = READ_ONCE(sqe->off);
sqe 2083 fs/io_uring.c memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
sqe 2084 fs/io_uring.c req->submit.sqe = sqe_copy;
sqe 2097 fs/io_uring.c req->user_data = READ_ONCE(s->sqe->user_data);
sqe 2102 fs/io_uring.c opcode = READ_ONCE(s->sqe->opcode);
sqe 2108 fs/io_uring.c if (unlikely(s->sqe->buf_index))
sqe 2113 fs/io_uring.c if (unlikely(s->sqe->buf_index))
sqe 2124 fs/io_uring.c ret = io_fsync(req, s->sqe, force_nonblock);
sqe 2127 fs/io_uring.c ret = io_poll_add(req, s->sqe);
sqe 2130 fs/io_uring.c ret = io_poll_remove(req, s->sqe);
sqe 2133 fs/io_uring.c ret = io_sync_file_range(req, s->sqe, force_nonblock);
sqe 2136 fs/io_uring.c ret = io_sendmsg(req, s->sqe, force_nonblock);
sqe 2139 fs/io_uring.c ret = io_recvmsg(req, s->sqe, force_nonblock);
sqe 2142 fs/io_uring.c ret = io_timeout(req, s->sqe);
sqe 2168 fs/io_uring.c const struct io_uring_sqe *sqe)
sqe 2170 fs/io_uring.c switch (sqe->opcode) {
sqe 2182 fs/io_uring.c static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
sqe 2184 fs/io_uring.c u8 opcode = READ_ONCE(sqe->opcode);
sqe 2203 fs/io_uring.c async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
sqe 2207 fs/io_uring.c const struct io_uring_sqe *sqe = s->sqe;
sqe 2223 fs/io_uring.c if (io_sqe_needs_user(sqe) && !cur_mm) {
sqe 2255 fs/io_uring.c io_cqring_add_event(ctx, sqe->user_data, ret);
sqe 2260 fs/io_uring.c kfree(sqe);
sqe 2358 fs/io_uring.c static bool io_op_needs_file(const struct io_uring_sqe *sqe)
sqe 2360 fs/io_uring.c int op = READ_ONCE(sqe->opcode);
sqe 2378 fs/io_uring.c flags = READ_ONCE(s->sqe->flags);
sqe 2379 fs/io_uring.c fd = READ_ONCE(s->sqe->fd);
sqe 2390 fs/io_uring.c if (!io_op_needs_file(s->sqe))
sqe 2425 fs/io_uring.c sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
sqe 2429 fs/io_uring.c s->sqe = sqe_copy;
sqe 2431 fs/io_uring.c list = io_async_list_from_sqe(ctx, s->sqe);
sqe 2470 fs/io_uring.c io_cqring_add_event(ctx, s->sqe->user_data, ret);
sqe 2498 fs/io_uring.c io_cqring_add_event(ctx, s->sqe->user_data, ret);
sqe 2530 fs/io_uring.c if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
sqe 2546 fs/io_uring.c io_cqring_add_event(ctx, s->sqe->user_data, ret);
sqe 2550 fs/io_uring.c req->user_data = s->sqe->user_data;
sqe 2553 fs/io_uring.c switch (READ_ONCE(s->sqe->opcode)) {
sqe 2579 fs/io_uring.c sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
sqe 2585 fs/io_uring.c s->sqe = sqe_copy;
sqe 2588 fs/io_uring.c } else if (s->sqe->flags & IOSQE_IO_LINK) {
sqe 2667 fs/io_uring.c s->sqe = &ctx->sq_sqes[head];
sqe 2709 fs/io_uring.c prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
sqe 2711 fs/io_uring.c if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
sqe 2724 fs/io_uring.c io_cqring_add_event(ctx, s.sqe->user_data,
sqe 2903 fs/io_uring.c prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
sqe 2905 fs/io_uring.c if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
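Throughout fs/io_uring.c the sqe lives in memory shared with userspace, so the kernel reads each field exactly once with READ_ONCE() and validates it before use; any sqe that must outlive inline submission (deferred, linked, or punted to the async context) is first detached from the ring with kmemdup(). The two halves of that discipline, condensed from the entries above (error paths simplified):

    /* Submission: snapshot fields, never re-read the shared ring. */
    req->user_data = READ_ONCE(s->sqe->user_data);
    opcode = READ_ONCE(s->sqe->opcode);
    if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS))
            return -EINVAL;         /* reject unknown flag bits */

    /* Deferral: copy the sqe out of the ring before queueing it. */
    sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
    if (!sqe_copy)
            return -EAGAIN;
    s->sqe = sqe_copy;              /* kfree()d when the request completes */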
sqe 32 include/linux/nvme-fc.h struct nvme_command sqe;
sqe 145 tools/io_uring/io_uring-bench.c struct io_uring_sqe *sqe = &s->sqes[index];
sqe 151 tools/io_uring/io_uring-bench.c sqe->opcode = IORING_OP_NOP;
sqe 172 tools/io_uring/io_uring-bench.c sqe->flags = IOSQE_FIXED_FILE;
sqe 173 tools/io_uring/io_uring-bench.c sqe->fd = f->fixed_fd;
sqe 175 tools/io_uring/io_uring-bench.c sqe->flags = 0;
sqe 176 tools/io_uring/io_uring-bench.c sqe->fd = f->real_fd;
sqe 179 tools/io_uring/io_uring-bench.c sqe->opcode = IORING_OP_READ_FIXED;
sqe 180 tools/io_uring/io_uring-bench.c sqe->addr = (unsigned long) s->iovecs[index].iov_base;
sqe 181 tools/io_uring/io_uring-bench.c sqe->len = BS;
sqe 182 tools/io_uring/io_uring-bench.c sqe->buf_index = index;
sqe 184 tools/io_uring/io_uring-bench.c sqe->opcode = IORING_OP_READV;
sqe 185 tools/io_uring/io_uring-bench.c sqe->addr = (unsigned long) &s->iovecs[index];
sqe 186 tools/io_uring/io_uring-bench.c sqe->len = 1;
sqe 187 tools/io_uring/io_uring-bench.c sqe->buf_index = 0;
sqe 189 tools/io_uring/io_uring-bench.c sqe->ioprio = 0;
sqe 190 tools/io_uring/io_uring-bench.c sqe->off = offset;
sqe 191 tools/io_uring/io_uring-bench.c sqe->user_data = (unsigned long) f;
sqe 71 tools/io_uring/io_uring-cp.c struct io_uring_sqe *sqe;
sqe 73 tools/io_uring/io_uring-cp.c sqe = io_uring_get_sqe(ring);
sqe 74 tools/io_uring/io_uring-cp.c assert(sqe);
sqe 77 tools/io_uring/io_uring-cp.c io_uring_prep_readv(sqe, infd, &data->iov, 1, data->offset);
sqe 79 tools/io_uring/io_uring-cp.c io_uring_prep_writev(sqe, outfd, &data->iov, 1, data->offset);
sqe 81 tools/io_uring/io_uring-cp.c io_uring_sqe_set_data(sqe, data);
sqe 86 tools/io_uring/io_uring-cp.c struct io_uring_sqe *sqe;
sqe 93 tools/io_uring/io_uring-cp.c sqe = io_uring_get_sqe(ring);
sqe 94 tools/io_uring/io_uring-cp.c if (!sqe) {
sqe 106 tools/io_uring/io_uring-cp.c io_uring_prep_readv(sqe, infd, &data->iov, 1, offset);
sqe 107 tools/io_uring/io_uring-cp.c io_uring_sqe_set_data(sqe, data);
sqe 97 tools/io_uring/liburing.h static inline void io_uring_sqe_set_data(struct io_uring_sqe *sqe, void *data)
sqe 99 tools/io_uring/liburing.h sqe->user_data = (unsigned long) data;
sqe 107 tools/io_uring/liburing.h static inline void io_uring_prep_rw(int op, struct io_uring_sqe *sqe, int fd,
sqe 111 tools/io_uring/liburing.h memset(sqe, 0, sizeof(*sqe));
sqe 112 tools/io_uring/liburing.h sqe->opcode = op;
sqe 113 tools/io_uring/liburing.h sqe->fd = fd;
sqe 114 tools/io_uring/liburing.h sqe->off = offset;
sqe 115 tools/io_uring/liburing.h sqe->addr = (unsigned long) addr;
sqe 116 tools/io_uring/liburing.h sqe->len = len;
sqe 119 tools/io_uring/liburing.h static inline void io_uring_prep_readv(struct io_uring_sqe *sqe, int fd,
sqe 123 tools/io_uring/liburing.h io_uring_prep_rw(IORING_OP_READV, sqe, fd, iovecs, nr_vecs, offset);
sqe 126 tools/io_uring/liburing.h static inline void io_uring_prep_read_fixed(struct io_uring_sqe *sqe, int fd,
sqe 130 tools/io_uring/liburing.h io_uring_prep_rw(IORING_OP_READ_FIXED, sqe, fd, buf, nbytes, offset);
sqe 133 tools/io_uring/liburing.h static inline void io_uring_prep_writev(struct io_uring_sqe *sqe, int fd,
sqe 137 tools/io_uring/liburing.h io_uring_prep_rw(IORING_OP_WRITEV, sqe, fd, iovecs, nr_vecs, offset);
sqe 140 tools/io_uring/liburing.h static inline void io_uring_prep_write_fixed(struct io_uring_sqe *sqe, int fd,
sqe 144 tools/io_uring/liburing.h io_uring_prep_rw(IORING_OP_WRITE_FIXED, sqe, fd, buf, nbytes, offset);
sqe 147 tools/io_uring/liburing.h static inline void io_uring_prep_poll_add(struct io_uring_sqe *sqe, int fd,
sqe 150 tools/io_uring/liburing.h memset(sqe, 0, sizeof(*sqe));
sqe 151 tools/io_uring/liburing.h sqe->opcode = IORING_OP_POLL_ADD;
sqe 152 tools/io_uring/liburing.h sqe->fd = fd;
sqe 153 tools/io_uring/liburing.h sqe->poll_events = poll_mask;
sqe 156 tools/io_uring/liburing.h static inline void io_uring_prep_poll_remove(struct io_uring_sqe *sqe,
sqe 159 tools/io_uring/liburing.h memset(sqe, 0, sizeof(*sqe));
sqe 160 tools/io_uring/liburing.h sqe->opcode = IORING_OP_POLL_REMOVE;
sqe 161 tools/io_uring/liburing.h sqe->addr = (unsigned long) user_data;
sqe 164 tools/io_uring/liburing.h static inline void io_uring_prep_fsync(struct io_uring_sqe *sqe, int fd,
sqe 167 tools/io_uring/liburing.h memset(sqe, 0, sizeof(*sqe));
sqe 168 tools/io_uring/liburing.h sqe->opcode = IORING_OP_FSYNC;
sqe 169 tools/io_uring/liburing.h sqe->fd = fd;
sqe 170 tools/io_uring/liburing.h sqe->fsync_flags = fsync_flags;
sqe 173 tools/io_uring/liburing.h static inline void io_uring_prep_nop(struct io_uring_sqe *sqe)
sqe 175 tools/io_uring/liburing.h memset(sqe, 0, sizeof(*sqe));
sqe 176 tools/io_uring/liburing.h sqe->opcode = IORING_OP_NOP;
sqe 145 tools/io_uring/queue.c struct io_uring_sqe *sqe;
sqe 153 tools/io_uring/queue.c sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];
sqe 155 tools/io_uring/queue.c return sqe;
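Taken together, the tools/io_uring pieces show the whole userspace submission flow as used by io_uring-cp.c: queue.c hands out the sqe slot at the ring tail, the liburing.h prep helpers fill it, and user context rides along in sqe->user_data. A minimal sketch (io_uring_submit() is the ring-submission call from the same liburing.h, not itself listed in this index):

    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

    if (!sqe)
            return -EBUSY;                  /* SQ ring full; reap CQEs first */
    io_uring_prep_readv(sqe, infd, &data->iov, 1, data->offset);
    io_uring_sqe_set_data(sqe, data);       /* comes back in cqe->user_data */
    io_uring_submit(ring);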