cqe               586 drivers/atm/firestream.c 	struct FS_QENTRY *cqe;
cqe               598 drivers/atm/firestream.c 	cqe = bus_to_virt (wp);
cqe               599 drivers/atm/firestream.c 	if (qe != cqe) {
cqe               600 drivers/atm/firestream.c 		fs_dprintk (FS_DEBUG_TXQ, "q mismatch! %p %p\n", qe, cqe);
cqe                62 drivers/crypto/hisilicon/qm.c #define QM_CQE_PHASE(cqe)		((cqe)->w7 & 0x1)
cqe               462 drivers/crypto/hisilicon/qm.c 	struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
cqe               465 drivers/crypto/hisilicon/qm.c 		while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
cqe               467 drivers/crypto/hisilicon/qm.c 			qp->req_cb(qp, qp->sqe + qm->sqe_size * cqe->sq_head);
cqe               469 drivers/crypto/hisilicon/qm.c 			cqe = qp->cqe + qp->qp_status.cq_head;
cqe              1299 drivers/crypto/hisilicon/qm.c 	QP_INIT_BUF(qp, cqe, sizeof(struct qm_cqe) * QM_Q_DEPTH);
cqe              1305 drivers/crypto/hisilicon/qm.c 		     qp->cqe, (unsigned long)qp->cqe_dma);
cqe               184 drivers/crypto/hisilicon/qm.h 	struct qm_cqe *cqe;
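
The hisilicon qm.c hits above show a phase-bit completion-queue poll: entries are valid while their phase bit matches the expected phase, and the expected phase flips when the ring wraps. A minimal sketch of that pattern, with simplified stand-in types (only the QM_CQE_PHASE macro is taken verbatim from qm.c line 62; QM_Q_DEPTH's value here is illustrative):

#include <stdint.h>

#define QM_Q_DEPTH		1024
#define QM_CQE_PHASE(cqe)	((cqe)->w7 & 0x1)	/* verbatim from qm.c:62 */

struct qm_cqe { uint16_t sq_head; uint16_t w7; };

struct qm_qp {
	struct qm_cqe *cqe;	/* CQ ring base */
	uint16_t cq_head;
	int cqc_phase;		/* expected phase bit for this lap */
};

/* Consume completions whose phase bit matches the current pass;
 * flipping cqc_phase on wrap makes the previous lap's entries test
 * invalid without ever having to zero the ring. */
static void qm_qp_poll(struct qm_qp *qp,
		       void (*req_cb)(struct qm_qp *, uint16_t))
{
	struct qm_cqe *cqe = qp->cqe + qp->cq_head;

	while (QM_CQE_PHASE(cqe) == qp->cqc_phase) {
		req_cb(qp, cqe->sq_head);	/* completed SQE index */
		if (++qp->cq_head == QM_Q_DEPTH) {
			qp->cq_head = 0;
			qp->cqc_phase = !qp->cqc_phase;
		}
		cqe = qp->cqe + qp->cq_head;
	}
}
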
cqe               189 drivers/infiniband/core/cq.c 		.cqe		= nr_cqe,
cqe               760 drivers/infiniband/core/mad.c static void build_smp_wc(struct ib_qp *qp, struct ib_cqe *cqe, u16 slid,
cqe               764 drivers/infiniband/core/mad.c 	wc->wr_cqe = cqe;
cqe              1108 drivers/infiniband/core/mad.c 	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
cqe              1110 drivers/infiniband/core/mad.c 	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
cqe              1222 drivers/infiniband/core/mad.c 	mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
cqe              1223 drivers/infiniband/core/mad.c 	mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;
cqe              2260 drivers/infiniband/core/mad.c 		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
cqe              2511 drivers/infiniband/core/mad.c 		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
cqe              2599 drivers/infiniband/core/mad.c 		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
cqe              2968 drivers/infiniband/core/mad.c 		mad_priv->header.mad_list.cqe.done = ib_mad_recv_done;
cqe              2969 drivers/infiniband/core/mad.c 		recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;
cqe                67 drivers/infiniband/core/mad_priv.h 	struct ib_cqe cqe;
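
The mad.c and mad_priv.h hits above illustrate the core wr_cqe dispatch idiom: a struct ib_cqe is embedded in the request structure, its .done callback is set before posting, and the completion handler recovers the owning structure with container_of(). A self-contained sketch with stand-in types (the kernel's ib_cqe callback also receives the struct ib_cq *, omitted here):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_wc;
struct ib_cqe { void (*done)(struct ib_wc *wc); };	/* simplified */
struct ib_wc  { struct ib_cqe *wr_cqe; };

struct ib_mad_list_head {
	struct ib_cqe cqe;	/* embedded, as in mad_priv.h:67 */
	/* ... list linkage, MAD buffer, etc. ... */
};

static void ib_mad_send_done(struct ib_wc *wc)
{
	struct ib_mad_list_head *mad_list =
		container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);

	/* per-request completion handling uses mad_list from here */
	(void)mad_list;
}

/* Before posting, as in mad.c:1108-1110 above:
 *	mad_list.cqe.done = ib_mad_send_done;
 *	send_wr.wr.wr_cqe = &mad_list.cqe;                      */
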
cqe               546 drivers/infiniband/core/nldev.c 	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_CQE, cq->cqe))
cqe               500 drivers/infiniband/core/rw.c 		u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
cqe               536 drivers/infiniband/core/rw.c 		last_wr->wr_cqe = cqe;
cqe               559 drivers/infiniband/core/rw.c 		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
cqe               563 drivers/infiniband/core/rw.c 	first_wr = rdma_rw_ctx_wrs(ctx, qp, port_num, cqe, chain_wr);
cqe              1017 drivers/infiniband/core/uverbs_cmd.c 	attr.cqe = cmd->cqe;
cqe              1040 drivers/infiniband/core/uverbs_cmd.c 	resp.base.cqe       = cq->cqe;
cqe              1083 drivers/infiniband/core/uverbs_cmd.c 	cmd_ex.cqe = cmd.cqe;
cqe              1126 drivers/infiniband/core/uverbs_cmd.c 	ret = cq->device->ops.resize_cq(cq, cmd.cqe, &attrs->driver_udata);
cqe              1130 drivers/infiniband/core/uverbs_cmd.c 	resp.cqe = cq->cqe;
cqe                81 drivers/infiniband/core/uverbs_std_types_cq.c 		ret = uverbs_copy_from(&attr.cqe, attrs,
cqe               136 drivers/infiniband/core/uverbs_std_types_cq.c 	ret = uverbs_copy_to(attrs, UVERBS_ATTR_CREATE_CQ_RESP_CQE, &cq->cqe,
cqe               137 drivers/infiniband/core/uverbs_std_types_cq.c 			     sizeof(cq->cqe));
cqe              1972 drivers/infiniband/core/verbs.c int ib_resize_cq(struct ib_cq *cq, int cqe)
cqe              1975 drivers/infiniband/core/verbs.c 		cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
cqe              2615 drivers/infiniband/core/verbs.c 	struct ib_cqe cqe;
cqe              2621 drivers/infiniband/core/verbs.c 	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
cqe              2622 drivers/infiniband/core/verbs.c 						cqe);
cqe              2624 drivers/infiniband/core/verbs.c 	complete(&cqe->done);
cqe              2638 drivers/infiniband/core/verbs.c 			{ .wr_cqe	= &sdrain.cqe, },
cqe              2650 drivers/infiniband/core/verbs.c 	sdrain.cqe.done = ib_drain_qp_done;
cqe              2683 drivers/infiniband/core/verbs.c 	rwr.wr_cqe = &rdrain.cqe;
cqe              2684 drivers/infiniband/core/verbs.c 	rdrain.cqe.done = ib_drain_qp_done;
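
The verbs.c drain hits reuse the same idiom to turn a posted work request into a waitable event: ib_drain_qp posts a WR whose cqe.done signals a completion object the caller sleeps on. A sketch of that handler (completion/complete here are trivial stand-ins, not the kernel primitives):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_wc;
struct ib_cqe { void (*done)(struct ib_wc *wc); };	/* simplified */
struct ib_wc  { struct ib_cqe *wr_cqe; };

struct completion { volatile int done; };		/* stand-in */
static void complete(struct completion *c) { c->done = 1; }

struct ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe =
		container_of(wc->wr_cqe, struct ib_drain_cqe, cqe);

	complete(&cqe->done);	/* wake the waiter in the drain path */
}
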
cqe               103 drivers/infiniband/hw/bnxt_re/bnxt_re.h 	struct bnxt_qplib_cqe cqe;
cqe              2540 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	int cqe = attr->cqe;
cqe              2545 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
cqe              2553 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	entries = roundup_pow_of_two(cqe + 1);
cqe              2604 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	cq->ib_cq.cqe = entries;
cqe              2718 drivers/infiniband/hw/bnxt_re/ib_verbs.c static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
cqe              2720 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	switch (cqe->type) {
cqe              2759 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	wc->status = __req_to_ib_wc_status(cqe->status);
cqe              2852 drivers/infiniband/hw/bnxt_re/ib_verbs.c 					 struct bnxt_qplib_cqe *cqe)
cqe              2877 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	tbl_idx = cqe->wr_id;
cqe              2890 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
cqe              2895 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
cqe              2896 drivers/infiniband/hw/bnxt_re/ib_verbs.c 					     cqe->raweth_qp1_flags2);
cqe              2969 drivers/infiniband/hw/bnxt_re/ib_verbs.c 					  struct bnxt_qplib_cqe *cqe)
cqe              2972 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	wc->status = __rawqp1_to_ib_wc_status(cqe->status);
cqe              3003 drivers/infiniband/hw/bnxt_re/ib_verbs.c 				      struct bnxt_qplib_cqe *cqe)
cqe              3006 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	wc->status = __rc_to_ib_wc_status(cqe->status);
cqe              3008 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
cqe              3010 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	if (cqe->flags & CQ_RES_RC_FLAGS_INV)
cqe              3012 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
cqe              3019 drivers/infiniband/hw/bnxt_re/ib_verbs.c 					     struct bnxt_qplib_cqe *cqe)
cqe              3030 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	tbl_idx = cqe->wr_id;
cqe              3034 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	orig_cqe = &sqp_entry->cqe;
cqe              3065 drivers/infiniband/hw/bnxt_re/ib_verbs.c 				      struct bnxt_qplib_cqe *cqe)
cqe              3070 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	wc->status = __rc_to_ib_wc_status(cqe->status);
cqe              3072 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
cqe              3077 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		memcpy(wc->smac, cqe->smac, ETH_ALEN);
cqe              3079 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
cqe              3080 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			wc->vlan_id = (cqe->cfa_meta & 0xFFF);
cqe              3084 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
cqe              3118 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	struct bnxt_qplib_cqe *cqe;
cqe              3133 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	cqe = &cq->cql[0];
cqe              3136 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
cqe              3151 drivers/infiniband/hw/bnxt_re/ib_verbs.c 							      cqe + ncqe,
cqe              3157 drivers/infiniband/hw/bnxt_re/ib_verbs.c 		for (i = 0; i < ncqe; i++, cqe++) {
cqe              3161 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			wc->wr_id = cqe->wr_id;
cqe              3162 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			wc->byte_len = cqe->length;
cqe              3165 drivers/infiniband/hw/bnxt_re/ib_verbs.c 				 (unsigned long)(cqe->qp_handle),
cqe              3173 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			wc->ex.imm_data = cqe->immdata;
cqe              3174 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			wc->src_qp = cqe->src_qp;
cqe              3175 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			memcpy(wc->smac, cqe->smac, ETH_ALEN);
cqe              3177 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			wc->vendor_err = cqe->status;
cqe              3179 drivers/infiniband/hw/bnxt_re/ib_verbs.c 			switch (cqe->opcode) {
cqe              3189 drivers/infiniband/hw/bnxt_re/ib_verbs.c 				bnxt_re_process_req_wc(wc, cqe);
cqe              3192 drivers/infiniband/hw/bnxt_re/ib_verbs.c 				if (!cqe->status) {
cqe              3196 drivers/infiniband/hw/bnxt_re/ib_verbs.c 								(qp, cqe);
cqe              3201 drivers/infiniband/hw/bnxt_re/ib_verbs.c 					cqe->status = -1;
cqe              3207 drivers/infiniband/hw/bnxt_re/ib_verbs.c 				tbl_idx = cqe->wr_id;
cqe              3210 drivers/infiniband/hw/bnxt_re/ib_verbs.c 				bnxt_re_process_res_rawqp1_wc(wc, cqe);
cqe              3213 drivers/infiniband/hw/bnxt_re/ib_verbs.c 				bnxt_re_process_res_rc_wc(wc, cqe);
cqe              3221 drivers/infiniband/hw/bnxt_re/ib_verbs.c 					if (cqe->status) {
cqe              3225 drivers/infiniband/hw/bnxt_re/ib_verbs.c 								(qp, wc, cqe);
cqe              3229 drivers/infiniband/hw/bnxt_re/ib_verbs.c 				bnxt_re_process_res_ud_wc(qp, wc, cqe);
cqe              3234 drivers/infiniband/hw/bnxt_re/ib_verbs.c 					cqe->opcode);
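
The ib_verbs.c poll hits (lines 3118-3234) form a budget-driven loop: fetch up to `budget` raw qplib CQEs into a scratch array, then translate each into an ib_wc by opcode. A sketch of that shape only; both the stub and the translate_cqe() helper are hypothetical stand-ins for bnxt_qplib_poll_cq() and the bnxt_re_process_*_wc() family:

#include <stdint.h>

struct bnxt_qplib_cqe {
	uint64_t wr_id;
	uint32_t length;
	uint8_t opcode, status;
};

struct ib_wc {
	uint64_t wr_id;
	uint32_t byte_len;
	int status;
};

/* Stand-in for bnxt_qplib_poll_cq(); here it reports no completions. */
static int bnxt_qplib_poll_cq_stub(struct bnxt_qplib_cqe *cqe, int budget)
{
	(void)cqe; (void)budget;
	return 0;
}

/* Hypothetical translation step, standing in for the per-opcode
 * bnxt_re_process_{req,res_rc,res_ud,...}_wc() dispatch above. */
static void translate_cqe(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
{
	wc->wr_id = cqe->wr_id;
	wc->byte_len = cqe->length;
	wc->status = cqe->status;
}

static int poll_cq(struct ib_wc *wc, int num_entries,
		   struct bnxt_qplib_cqe *cql)
{
	struct bnxt_qplib_cqe *cqe;
	int budget = num_entries, ncqe, i;

	while (budget) {
		cqe = cql;	/* refill the scratch array each round */
		ncqe = bnxt_qplib_poll_cq_stub(cqe, budget);
		if (!ncqe)
			break;
		for (i = 0; i < ncqe; i++, cqe++, wc++)
			translate_cqe(wc, cqe);
		budget -= ncqe;
	}
	return num_entries - budget;
}
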
cqe              1391 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			struct cq_req *cqe = (struct cq_req *)hw_cqe;
cqe              1393 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			if (qp == le64_to_cpu(cqe->qp_handle))
cqe              1394 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				cqe->qp_handle = 0;
cqe              1401 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
cqe              1403 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			if (qp == le64_to_cpu(cqe->qp_handle))
cqe              1404 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				cqe->qp_handle = 0;
cqe              2019 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_cqe *cqe;
cqe              2024 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe = *pcqe;
cqe              2035 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		memset(cqe, 0, sizeof(*cqe));
cqe              2036 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
cqe              2037 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
cqe              2038 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->qp_handle = (u64)(unsigned long)qp;
cqe              2039 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->wr_id = sq->swq[sw_cons].wr_id;
cqe              2040 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->src_qp = qp->id;
cqe              2041 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->type = sq->swq[sw_cons].type;
cqe              2042 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe++;
cqe              2047 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	*pcqe = cqe;
cqe              2058 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_cqe *cqe;
cqe              2078 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe = *pcqe;
cqe              2083 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		memset(cqe, 0, sizeof(*cqe));
cqe              2084 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->status =
cqe              2086 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->opcode = opcode;
cqe              2087 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->qp_handle = (unsigned long)qp;
cqe              2088 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->wr_id = rq->swq[sw_cons].wr_id;
cqe              2089 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe++;
cqe              2093 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	*pcqe = cqe;
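
The __flush_sq/__flush_rq hits above synthesize software completions so every posted WR still "completes" after a QP error: each pending SWQ entry becomes a CQE with a flush status, written through the caller's *pcqe cursor and bounded by a budget. A simplified sketch of the SQ side (queue bookkeeping is reduced to cons/prod counters, and the two constants' numeric values are illustrative, not the hardware encodings):

#include <stdint.h>
#include <string.h>

#define CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR	7	/* illustrative */
#define CQ_BASE_CQE_TYPE_REQ			0	/* illustrative */

struct bnxt_qplib_swq { uint64_t wr_id; uint8_t type; };

struct bnxt_qplib_cqe {
	uint64_t wr_id;
	uint64_t qp_handle;
	uint32_t src_qp;
	uint8_t status, opcode, type;
};

struct bnxt_qplib_q {
	struct bnxt_qplib_swq *swq;
	uint32_t cons, prod, depth;
};

struct bnxt_qplib_qp { uint32_t id; struct bnxt_qplib_q sq; };

/* Emit one FLUSHED_ERR completion per still-pending SQ entry, bounded
 * by *budget, advancing the caller's CQE cursor through *pcqe. */
static void flush_sq(struct bnxt_qplib_qp *qp,
		     struct bnxt_qplib_cqe **pcqe, int *budget)
{
	struct bnxt_qplib_q *sq = &qp->sq;
	struct bnxt_qplib_cqe *cqe = *pcqe;

	while (sq->cons != sq->prod && *budget > 0) {
		uint32_t sw_cons = sq->cons % sq->depth;

		memset(cqe, 0, sizeof(*cqe));
		cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
		cqe->qp_handle = (uint64_t)(uintptr_t)qp;
		cqe->wr_id = sq->swq[sw_cons].wr_id;
		cqe->src_qp = qp->id;
		cqe->type = sq->swq[sw_cons].type;
		cqe++;
		sq->cons++;
		(*budget)--;
	}
	*pcqe = cqe;
}
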
cqe              2222 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_cqe *cqe;
cqe              2253 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe = *pcqe;
cqe              2261 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		memset(cqe, 0, sizeof(*cqe));
cqe              2262 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
cqe              2263 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->qp_handle = (u64)(unsigned long)qp;
cqe              2264 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->src_qp = qp->id;
cqe              2265 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->wr_id = swq->wr_id;
cqe              2266 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
cqe              2268 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->type = swq->type;
cqe              2276 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			cqe->status = hwcqe->status;
cqe              2279 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				sw_sq_cons, cqe->wr_id, cqe->status);
cqe              2280 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			cqe++;
cqe              2293 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				cqe->status = CQ_REQ_STATUS_OK;
cqe              2294 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				cqe++;
cqe              2304 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	*pcqe = cqe;
cqe              2337 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_cqe *cqe;
cqe              2353 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe = *pcqe;
cqe              2354 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
cqe              2355 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->length = le32_to_cpu(hwcqe->length);
cqe              2356 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
cqe              2357 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
cqe              2358 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->flags = le16_to_cpu(hwcqe->flags);
cqe              2359 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->status = hwcqe->status;
cqe              2360 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->qp_handle = (u64)(unsigned long)qp;
cqe              2364 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
cqe              2374 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
cqe              2376 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe++;
cqe              2378 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		*pcqe = cqe;
cqe              2387 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
cqe              2388 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe++;
cqe              2391 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		*pcqe = cqe;
cqe              2412 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_cqe *cqe;
cqe              2427 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe = *pcqe;
cqe              2428 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
cqe              2429 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->length = (u32)le16_to_cpu(hwcqe->length);
cqe              2430 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata);
cqe              2431 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
cqe              2432 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->flags = le16_to_cpu(hwcqe->flags);
cqe              2433 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->status = hwcqe->status;
cqe              2434 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->qp_handle = (u64)(unsigned long)qp;
cqe              2436 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	memcpy(cqe->smac, hwcqe->src_mac, ETH_ALEN);
cqe              2439 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
cqe              2444 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
cqe              2455 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
cqe              2457 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe++;
cqe              2459 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		*pcqe = cqe;
cqe              2469 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
cqe              2470 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe++;
cqe              2473 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		*pcqe = cqe;
cqe              2509 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_cqe *cqe;
cqe              2524 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe = *pcqe;
cqe              2525 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
cqe              2526 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->flags = le16_to_cpu(hwcqe->flags);
cqe              2527 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->qp_handle = (u64)(unsigned long)qp;
cqe              2532 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->src_qp = qp->id;
cqe              2533 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (qp->id == 1 && !cqe->length) {
cqe              2535 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->length = 296;
cqe              2537 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->length = le16_to_cpu(hwcqe->length);
cqe              2539 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->pkey_index = qp->pkey_index;
cqe              2540 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	memcpy(cqe->smac, qp->smac, 6);
cqe              2542 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
cqe              2543 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
cqe              2544 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
cqe              2546 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
cqe              2559 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->wr_id = srq->swq[wr_id_idx].wr_id;
cqe              2561 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe++;
cqe              2563 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		*pcqe = cqe;
cqe              2572 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe->wr_id = rq->swq[wr_id_idx].wr_id;
cqe              2573 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		cqe++;
cqe              2576 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		*pcqe = cqe;
cqe              2596 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	struct bnxt_qplib_cqe *cqe;
cqe              2641 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	cqe = *pcqe;
cqe              2647 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			memset(cqe, 0, sizeof(*cqe));
cqe              2648 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			cqe->status = CQ_REQ_STATUS_OK;
cqe              2649 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
cqe              2650 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			cqe->qp_handle = (u64)(unsigned long)qp;
cqe              2651 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			cqe->src_qp = qp->id;
cqe              2652 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			cqe->wr_id = sq->swq[sw_cons].wr_id;
cqe              2653 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			cqe->type = sq->swq[sw_cons].type;
cqe              2654 drivers/infiniband/hw/bnxt_re/qplib_fp.c 			cqe++;
cqe              2659 drivers/infiniband/hw/bnxt_re/qplib_fp.c 	*pcqe = cqe;
cqe              2714 drivers/infiniband/hw/bnxt_re/qplib_fp.c 				  struct bnxt_qplib_cqe *cqe,
cqe              2724 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		__flush_sq(&qp->sq, qp, &cqe, &budget);
cqe              2729 drivers/infiniband/hw/bnxt_re/qplib_fp.c 		__flush_rq(&qp->rq, qp, &cqe, &budget);
cqe              2736 drivers/infiniband/hw/bnxt_re/qplib_fp.c int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
cqe              2765 drivers/infiniband/hw/bnxt_re/qplib_fp.c 						       &cqe, &budget,
cqe              2771 drivers/infiniband/hw/bnxt_re/qplib_fp.c 							  hw_cqe, &cqe,
cqe              2776 drivers/infiniband/hw/bnxt_re/qplib_fp.c 					(cq, (struct cq_res_ud *)hw_cqe, &cqe,
cqe              2782 drivers/infiniband/hw/bnxt_re/qplib_fp.c 					 hw_cqe, &cqe, &budget);
cqe              2787 drivers/infiniband/hw/bnxt_re/qplib_fp.c 					 &cqe, &budget);
cqe               548 drivers/infiniband/hw/bnxt_re/qplib_fp.h int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
cqe               560 drivers/infiniband/hw/bnxt_re/qplib_fp.h 				  struct bnxt_qplib_cqe *cqe,
cqe                75 drivers/infiniband/hw/cxgb3/cxio_hal.c 	struct t3_cqe *cqe;
cqe               109 drivers/infiniband/hw/cxgb3/cxio_hal.c 		cqe = cq->queue + Q_PTR2IDX(rptr, cq->size_log2);
cqe               110 drivers/infiniband/hw/cxgb3/cxio_hal.c 		while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
cqe               332 drivers/infiniband/hw/cxgb3/cxio_hal.c 	struct t3_cqe cqe;
cqe               336 drivers/infiniband/hw/cxgb3/cxio_hal.c 	memset(&cqe, 0, sizeof(cqe));
cqe               337 drivers/infiniband/hw/cxgb3/cxio_hal.c 	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
cqe               344 drivers/infiniband/hw/cxgb3/cxio_hal.c 	*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
cqe               369 drivers/infiniband/hw/cxgb3/cxio_hal.c 	struct t3_cqe cqe;
cqe               373 drivers/infiniband/hw/cxgb3/cxio_hal.c 	memset(&cqe, 0, sizeof(cqe));
cqe               374 drivers/infiniband/hw/cxgb3/cxio_hal.c 	cqe.header = cpu_to_be32(V_CQE_STATUS(TPT_ERR_SWFLUSH) |
cqe               381 drivers/infiniband/hw/cxgb3/cxio_hal.c 	cqe.u.scqe.wrid_hi = sqp->sq_wptr;
cqe               383 drivers/infiniband/hw/cxgb3/cxio_hal.c 	*(cq->sw_queue + Q_PTR2IDX(cq->sw_wptr, cq->size_log2)) = cqe;
cqe               408 drivers/infiniband/hw/cxgb3/cxio_hal.c 	struct t3_cqe *cqe, *swcqe;
cqe               411 drivers/infiniband/hw/cxgb3/cxio_hal.c 	cqe = cxio_next_hw_cqe(cq);
cqe               412 drivers/infiniband/hw/cxgb3/cxio_hal.c 	while (cqe) {
cqe               416 drivers/infiniband/hw/cxgb3/cxio_hal.c 		*swcqe = *cqe;
cqe               420 drivers/infiniband/hw/cxgb3/cxio_hal.c 		cqe = cxio_next_hw_cqe(cq);
cqe               424 drivers/infiniband/hw/cxgb3/cxio_hal.c static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
cqe               426 drivers/infiniband/hw/cxgb3/cxio_hal.c 	if (CQE_OPCODE(*cqe) == T3_TERMINATE)
cqe               429 drivers/infiniband/hw/cxgb3/cxio_hal.c 	if ((CQE_OPCODE(*cqe) == T3_RDMA_WRITE) && RQ_TYPE(*cqe))
cqe               432 drivers/infiniband/hw/cxgb3/cxio_hal.c 	if ((CQE_OPCODE(*cqe) == T3_READ_RESP) && SQ_TYPE(*cqe))
cqe               435 drivers/infiniband/hw/cxgb3/cxio_hal.c 	if (CQE_SEND_OPCODE(*cqe) && RQ_TYPE(*cqe) &&
cqe               444 drivers/infiniband/hw/cxgb3/cxio_hal.c 	struct t3_cqe *cqe;
cqe               450 drivers/infiniband/hw/cxgb3/cxio_hal.c 		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
cqe               451 drivers/infiniband/hw/cxgb3/cxio_hal.c 		if ((SQ_TYPE(*cqe) ||
cqe               452 drivers/infiniband/hw/cxgb3/cxio_hal.c 		     ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) &&
cqe               453 drivers/infiniband/hw/cxgb3/cxio_hal.c 		    (CQE_QPID(*cqe) == wq->qpid))
cqe               462 drivers/infiniband/hw/cxgb3/cxio_hal.c 	struct t3_cqe *cqe;
cqe               469 drivers/infiniband/hw/cxgb3/cxio_hal.c 		cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
cqe               470 drivers/infiniband/hw/cxgb3/cxio_hal.c 		if (RQ_TYPE(*cqe) && (CQE_OPCODE(*cqe) != T3_READ_RESP) &&
cqe               471 drivers/infiniband/hw/cxgb3/cxio_hal.c 		    (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
cqe               864 drivers/infiniband/hw/cxgb3/cxio_hal.c 		 CQE_QPID(rsp_msg->cqe), CQE_GENBIT(rsp_msg->cqe),
cqe               865 drivers/infiniband/hw/cxgb3/cxio_hal.c 		 CQE_TYPE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
cqe               866 drivers/infiniband/hw/cxgb3/cxio_hal.c 		 CQE_OPCODE(rsp_msg->cqe), CQE_LEN(rsp_msg->cqe),
cqe               867 drivers/infiniband/hw/cxgb3/cxio_hal.c 		 CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
cqe               874 drivers/infiniband/hw/cxgb3/cxio_hal.c 	if (CQE_QPID(rsp_msg->cqe) == T3_CTRL_QP_ID) {
cqe               875 drivers/infiniband/hw/cxgb3/cxio_hal.c 		rdev_p->ctrl_qp.rptr = CQE_WRID_LOW(rsp_msg->cqe) + 1;
cqe               878 drivers/infiniband/hw/cxgb3/cxio_hal.c 	} else if (CQE_QPID(rsp_msg->cqe) == 0xfff8)
cqe              1061 drivers/infiniband/hw/cxgb3/cxio_hal.c 			sqp->cqe.header |= htonl(V_CQE_SWCQE(1));
cqe              1063 drivers/infiniband/hw/cxgb3/cxio_hal.c 				= sqp->cqe;
cqe              1116 drivers/infiniband/hw/cxgb3/cxio_hal.c int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
cqe              1255 drivers/infiniband/hw/cxgb3/cxio_hal.c 		sqp->cqe = *hw_cqe;
cqe              1262 drivers/infiniband/hw/cxgb3/cxio_hal.c 	*cqe = *hw_cqe;
cqe               146 drivers/infiniband/hw/cxgb3/cxio_hal.h 	struct t3_cqe cqe;	/* flits 2-3 */
cqe               194 drivers/infiniband/hw/cxgb3/cxio_hal.h int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
cqe               675 drivers/infiniband/hw/cxgb3/cxio_wr.h 	struct t3_cqe		cqe;
cqe               728 drivers/infiniband/hw/cxgb3/cxio_wr.h #define CQ_VLD_ENTRY(ptr,size_log2,cqe) (Q_GENBIT(ptr,size_log2) == \
cqe               729 drivers/infiniband/hw/cxgb3/cxio_wr.h 					 CQE_GENBIT(*cqe))
cqe               769 drivers/infiniband/hw/cxgb3/cxio_wr.h 	struct t3_cqe *cqe;
cqe               771 drivers/infiniband/hw/cxgb3/cxio_wr.h 	cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
cqe               772 drivers/infiniband/hw/cxgb3/cxio_wr.h 	if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
cqe               773 drivers/infiniband/hw/cxgb3/cxio_wr.h 		return cqe;
cqe               779 drivers/infiniband/hw/cxgb3/cxio_wr.h 	struct t3_cqe *cqe;
cqe               782 drivers/infiniband/hw/cxgb3/cxio_wr.h 		cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
cqe               783 drivers/infiniband/hw/cxgb3/cxio_wr.h 		return cqe;
cqe               790 drivers/infiniband/hw/cxgb3/cxio_wr.h 	struct t3_cqe *cqe;
cqe               793 drivers/infiniband/hw/cxgb3/cxio_wr.h 		cqe = cq->sw_queue + (Q_PTR2IDX(cq->sw_rptr, cq->size_log2));
cqe               794 drivers/infiniband/hw/cxgb3/cxio_wr.h 		return cqe;
cqe               796 drivers/infiniband/hw/cxgb3/cxio_wr.h 	cqe = cq->queue + (Q_PTR2IDX(cq->rptr, cq->size_log2));
cqe               797 drivers/infiniband/hw/cxgb3/cxio_wr.h 	if (CQ_VLD_ENTRY(cq->rptr, cq->size_log2, cqe))
cqe               798 drivers/infiniband/hw/cxgb3/cxio_wr.h 		return cqe;
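
The cxio_wr.h hits above key CQE validity off a generation bit: Q_GENBIT() derives the expected bit from the read pointer's lap around the ring, and CQ_VLD_ENTRY holds only when the entry's own genbit matches it. A sketch with simplified macros (the real Q_GENBIT and CQE_GENBIT differ in detail; here the genbit is the header's MSB):

#include <stdint.h>

#define Q_PTR2IDX(ptr, size_log2)	((ptr) & ((1u << (size_log2)) - 1))
/* Simplified: the expected genbit flips once per lap around the ring. */
#define Q_GENBIT(ptr, size_log2)	(!((ptr) & (1u << (size_log2))))

struct t3_cqe { uint32_t header; };	/* stand-in */

/* Stand-in for CQE_GENBIT(): take the genbit from the header's MSB. */
static inline unsigned int cqe_genbit(const struct t3_cqe *cqe)
{
	return cqe->header >> 31;
}

struct t3_cq {
	struct t3_cqe *queue;
	uint32_t rptr;
	uint32_t size_log2;
};

/* Mirror of cxio_next_hw_cqe() above: a slot is live only when the
 * entry's genbit matches the genbit implied by the read pointer's lap. */
static inline struct t3_cqe *next_hw_cqe(struct t3_cq *cq)
{
	struct t3_cqe *cqe = cq->queue + Q_PTR2IDX(cq->rptr, cq->size_log2);

	if (cqe_genbit(cqe) == Q_GENBIT(cq->rptr, cq->size_log2))
		return cqe;
	return NULL;
}
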
cqe                39 drivers/infiniband/hw/cxgb3/iwch_cq.c 	struct t3_cqe cqe;
cqe                45 drivers/infiniband/hw/cxgb3/iwch_cq.c 	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
cqe                61 drivers/infiniband/hw/cxgb3/iwch_cq.c 	wc->vendor_err = CQE_STATUS(cqe);
cqe                66 drivers/infiniband/hw/cxgb3/iwch_cq.c 		 CQE_QPID(cqe), CQE_TYPE(cqe),
cqe                67 drivers/infiniband/hw/cxgb3/iwch_cq.c 		 CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
cqe                68 drivers/infiniband/hw/cxgb3/iwch_cq.c 		 CQE_WRID_LOW(cqe), (unsigned long long)cookie);
cqe                70 drivers/infiniband/hw/cxgb3/iwch_cq.c 	if (CQE_TYPE(cqe) == 0) {
cqe                71 drivers/infiniband/hw/cxgb3/iwch_cq.c 		if (!CQE_STATUS(cqe))
cqe                72 drivers/infiniband/hw/cxgb3/iwch_cq.c 			wc->byte_len = CQE_LEN(cqe);
cqe                76 drivers/infiniband/hw/cxgb3/iwch_cq.c 		if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
cqe                77 drivers/infiniband/hw/cxgb3/iwch_cq.c 		    CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
cqe                78 drivers/infiniband/hw/cxgb3/iwch_cq.c 			wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
cqe                82 drivers/infiniband/hw/cxgb3/iwch_cq.c 		switch (CQE_OPCODE(cqe)) {
cqe                88 drivers/infiniband/hw/cxgb3/iwch_cq.c 			wc->byte_len = CQE_LEN(cqe);
cqe               104 drivers/infiniband/hw/cxgb3/iwch_cq.c 			       CQE_OPCODE(cqe), CQE_QPID(cqe));
cqe               114 drivers/infiniband/hw/cxgb3/iwch_cq.c 		switch (CQE_STATUS(cqe)) {
cqe               158 drivers/infiniband/hw/cxgb3/iwch_cq.c 			       CQE_STATUS(cqe), CQE_QPID(cqe));
cqe                52 drivers/infiniband/hw/cxgb3/iwch_ev.c 	qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
cqe                56 drivers/infiniband/hw/cxgb3/iwch_ev.c 		       __func__, CQE_STATUS(rsp_msg->cqe),
cqe                57 drivers/infiniband/hw/cxgb3/iwch_ev.c 		       CQE_QPID(rsp_msg->cqe));
cqe                67 drivers/infiniband/hw/cxgb3/iwch_ev.c 			 CQE_STATUS(rsp_msg->cqe));
cqe                74 drivers/infiniband/hw/cxgb3/iwch_ev.c 	       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
cqe                75 drivers/infiniband/hw/cxgb3/iwch_ev.c 	       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
cqe                76 drivers/infiniband/hw/cxgb3/iwch_ev.c 	       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));
cqe               119 drivers/infiniband/hw/cxgb3/iwch_ev.c 	qhp = xa_load(&rnicp->qps, CQE_QPID(rsp_msg->cqe));
cqe               122 drivers/infiniband/hw/cxgb3/iwch_ev.c 		       cqid, CQE_QPID(rsp_msg->cqe),
cqe               123 drivers/infiniband/hw/cxgb3/iwch_ev.c 		       CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
cqe               124 drivers/infiniband/hw/cxgb3/iwch_ev.c 		       CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
cqe               125 drivers/infiniband/hw/cxgb3/iwch_ev.c 		       CQE_WRID_LOW(rsp_msg->cqe));
cqe               137 drivers/infiniband/hw/cxgb3/iwch_ev.c 	if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
cqe               138 drivers/infiniband/hw/cxgb3/iwch_ev.c 	    (CQE_STATUS(rsp_msg->cqe) == 0)) {
cqe               139 drivers/infiniband/hw/cxgb3/iwch_ev.c 		if (SQ_TYPE(rsp_msg->cqe)) {
cqe               154 drivers/infiniband/hw/cxgb3/iwch_ev.c 	if (SQ_TYPE(rsp_msg->cqe) &&
cqe               155 drivers/infiniband/hw/cxgb3/iwch_ev.c 	    (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
cqe               161 drivers/infiniband/hw/cxgb3/iwch_ev.c 	if (RQ_TYPE(rsp_msg->cqe) &&
cqe               162 drivers/infiniband/hw/cxgb3/iwch_ev.c 	    (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
cqe               167 drivers/infiniband/hw/cxgb3/iwch_ev.c 	switch (CQE_STATUS(rsp_msg->cqe)) {
cqe               175 drivers/infiniband/hw/cxgb3/iwch_ev.c 		if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
cqe               222 drivers/infiniband/hw/cxgb3/iwch_ev.c 		       CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
cqe               110 drivers/infiniband/hw/cxgb3/iwch_provider.c 	int entries = attr->cqe;
cqe               151 drivers/infiniband/hw/cxgb3/iwch_provider.c 	chp->ibcq.cqe = 1 << chp->cq.size_log2;
cqe               539 drivers/infiniband/hw/cxgb3/iwch_qp.c 		status = CQE_STATUS(rsp_msg->cqe);
cqe               540 drivers/infiniband/hw/cxgb3/iwch_qp.c 		opcode = CQE_OPCODE(rsp_msg->cqe);
cqe               541 drivers/infiniband/hw/cxgb3/iwch_qp.c 		rqtype = RQ_TYPE(rsp_msg->cqe);
cqe               186 drivers/infiniband/hw/cxgb4/cq.c 	struct t4_cqe cqe;
cqe               190 drivers/infiniband/hw/cxgb4/cq.c 	memset(&cqe, 0, sizeof(cqe));
cqe               191 drivers/infiniband/hw/cxgb4/cq.c 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
cqe               196 drivers/infiniband/hw/cxgb4/cq.c 	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cqe               198 drivers/infiniband/hw/cxgb4/cq.c 		cqe.u.srcqe.abs_rqe_idx = cpu_to_be32(srqidx);
cqe               199 drivers/infiniband/hw/cxgb4/cq.c 	cq->sw_queue[cq->sw_pidx] = cqe;
cqe               220 drivers/infiniband/hw/cxgb4/cq.c 	struct t4_cqe cqe;
cqe               224 drivers/infiniband/hw/cxgb4/cq.c 	memset(&cqe, 0, sizeof(cqe));
cqe               225 drivers/infiniband/hw/cxgb4/cq.c 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
cqe               230 drivers/infiniband/hw/cxgb4/cq.c 	CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
cqe               231 drivers/infiniband/hw/cxgb4/cq.c 	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cqe               232 drivers/infiniband/hw/cxgb4/cq.c 	cq->sw_queue[cq->sw_pidx] = cqe;
cqe               288 drivers/infiniband/hw/cxgb4/cq.c 			swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
cqe               289 drivers/infiniband/hw/cxgb4/cq.c 			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
cqe               405 drivers/infiniband/hw/cxgb4/cq.c 			swsqe->cqe = *hw_cqe;
cqe               422 drivers/infiniband/hw/cxgb4/cq.c static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
cqe               424 drivers/infiniband/hw/cxgb4/cq.c 	if (DRAIN_CQE(cqe)) {
cqe               429 drivers/infiniband/hw/cxgb4/cq.c 	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
cqe               432 drivers/infiniband/hw/cxgb4/cq.c 	if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
cqe               435 drivers/infiniband/hw/cxgb4/cq.c 	if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
cqe               438 drivers/infiniband/hw/cxgb4/cq.c 	if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
cqe               445 drivers/infiniband/hw/cxgb4/cq.c 	struct t4_cqe *cqe;
cqe               452 drivers/infiniband/hw/cxgb4/cq.c 		cqe = &cq->sw_queue[ptr];
cqe               453 drivers/infiniband/hw/cxgb4/cq.c 		if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
cqe               454 drivers/infiniband/hw/cxgb4/cq.c 		    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
cqe               544 drivers/infiniband/hw/cxgb4/cq.c static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
cqe               592 drivers/infiniband/hw/cxgb4/cq.c 		*cqe = *hw_cqe;
cqe               686 drivers/infiniband/hw/cxgb4/cq.c 		swsqe->cqe = *hw_cqe;
cqe               693 drivers/infiniband/hw/cxgb4/cq.c 	*cqe = *hw_cqe;
cqe               757 drivers/infiniband/hw/cxgb4/cq.c 	struct t4_cqe uninitialized_var(cqe);
cqe               764 drivers/infiniband/hw/cxgb4/cq.c 	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit,
cqe               771 drivers/infiniband/hw/cxgb4/cq.c 	wc->vendor_err = CQE_STATUS(&cqe);
cqe               782 drivers/infiniband/hw/cxgb4/cq.c 		 CQE_QPID(&cqe),
cqe               783 drivers/infiniband/hw/cxgb4/cq.c 		 CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
cqe               784 drivers/infiniband/hw/cxgb4/cq.c 		 CQE_STATUS(&cqe), CQE_LEN(&cqe),
cqe               785 drivers/infiniband/hw/cxgb4/cq.c 		 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
cqe               788 drivers/infiniband/hw/cxgb4/cq.c 	if (CQE_TYPE(&cqe) == 0) {
cqe               789 drivers/infiniband/hw/cxgb4/cq.c 		if (!CQE_STATUS(&cqe))
cqe               790 drivers/infiniband/hw/cxgb4/cq.c 			wc->byte_len = CQE_LEN(&cqe);
cqe               794 drivers/infiniband/hw/cxgb4/cq.c 		switch (CQE_OPCODE(&cqe)) {
cqe               801 drivers/infiniband/hw/cxgb4/cq.c 			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
cqe               807 drivers/infiniband/hw/cxgb4/cq.c 			wc->ex.imm_data = CQE_IMM_DATA(&cqe);
cqe               812 drivers/infiniband/hw/cxgb4/cq.c 			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
cqe               817 drivers/infiniband/hw/cxgb4/cq.c 		switch (CQE_OPCODE(&cqe)) {
cqe               824 drivers/infiniband/hw/cxgb4/cq.c 			wc->byte_len = CQE_LEN(&cqe);
cqe               843 drivers/infiniband/hw/cxgb4/cq.c 			if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS)
cqe               845 drivers/infiniband/hw/cxgb4/cq.c 						   CQE_WRID_FR_STAG(&cqe));
cqe               849 drivers/infiniband/hw/cxgb4/cq.c 			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
cqe               859 drivers/infiniband/hw/cxgb4/cq.c 		switch (CQE_STATUS(&cqe)) {
cqe               904 drivers/infiniband/hw/cxgb4/cq.c 			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
cqe               994 drivers/infiniband/hw/cxgb4/cq.c 	int entries = attr->cqe;
cqe              1076 drivers/infiniband/hw/cxgb4/cq.c 	chp->ibcq.cqe = entries - 2;
cqe                92 drivers/infiniband/hw/cxgb4/device.c void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
cqe               105 drivers/infiniband/hw/cxgb4/device.c 	le.cqe_sge_ts = CQE_TS(cqe);
cqe               106 drivers/infiniband/hw/cxgb4/device.c 	if (SQ_TYPE(cqe)) {
cqe               108 drivers/infiniband/hw/cxgb4/device.c 		le.opcode = CQE_OPCODE(cqe);
cqe               111 drivers/infiniband/hw/cxgb4/device.c 		le.wr_id = CQE_WRID_SQ_IDX(cqe);
cqe               117 drivers/infiniband/hw/cxgb4/device.c 		le.wr_id = CQE_WRID_MSN(cqe);
cqe              1043 drivers/infiniband/hw/cxgb4/iw_cxgb4.h extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
cqe               981 drivers/infiniband/hw/cxgb4/qp.c 	struct t4_cqe cqe = {};
cqe               994 drivers/infiniband/hw/cxgb4/qp.c 	cqe.u.drain_cookie = wr->wr_id;
cqe               995 drivers/infiniband/hw/cxgb4/qp.c 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
cqe              1003 drivers/infiniband/hw/cxgb4/qp.c 	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cqe              1004 drivers/infiniband/hw/cxgb4/qp.c 	cq->sw_queue[cq->sw_pidx] = cqe;
cqe              1037 drivers/infiniband/hw/cxgb4/qp.c 	struct t4_cqe cqe = {};
cqe              1045 drivers/infiniband/hw/cxgb4/qp.c 	cqe.u.drain_cookie = wr->wr_id;
cqe              1046 drivers/infiniband/hw/cxgb4/qp.c 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
cqe              1054 drivers/infiniband/hw/cxgb4/qp.c 	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
cqe              1055 drivers/infiniband/hw/cxgb4/qp.c 	cq->sw_queue[cq->sw_pidx] = cqe;
cqe               105 drivers/infiniband/hw/cxgb4/restrack.c 	    rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
cqe               310 drivers/infiniband/hw/cxgb4/restrack.c static int fill_cqe(struct sk_buff *msg, struct t4_cqe *cqe, u16 idx,
cqe               316 drivers/infiniband/hw/cxgb4/restrack.c 					 be32_to_cpu(cqe->header)))
cqe               318 drivers/infiniband/hw/cxgb4/restrack.c 	if (rdma_nl_put_driver_u32(msg, "len", be32_to_cpu(cqe->len)))
cqe               321 drivers/infiniband/hw/cxgb4/restrack.c 					 be32_to_cpu(cqe->u.gen.wrid_hi)))
cqe               324 drivers/infiniband/hw/cxgb4/restrack.c 					 be32_to_cpu(cqe->u.gen.wrid_low)))
cqe               327 drivers/infiniband/hw/cxgb4/restrack.c 					 be64_to_cpu(cqe->bits_type_ts)))
cqe               308 drivers/infiniband/hw/cxgb4/t4.h 	struct t4_cqe		cqe;
cqe               785 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
cqe               787 drivers/infiniband/hw/cxgb4/t4.h 	return (CQE_GENBIT(cqe) == cq->gen);
cqe               795 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
cqe               813 drivers/infiniband/hw/cxgb4/t4.h 		*cqe = &cq->queue[cq->cidx];
cqe               833 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
cqe               840 drivers/infiniband/hw/cxgb4/t4.h 		*cqe = &cq->sw_queue[cq->sw_cidx];
cqe               842 drivers/infiniband/hw/cxgb4/t4.h 		ret = t4_next_hw_cqe(cq, cqe);
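
The t4.h hits above layer a software CQ in front of the hardware ring: t4_next_cqe() drains driver-generated (flush/drain) entries from sw_queue before it will look at new hardware CQEs, which are gated by the same generation-bit test as cxgb3. A sketch with stand-in types (the genbit is modeled as the top bit of bits_type_ts, roughly matching CQE_GENBIT):

#include <stdint.h>

struct t4_cqe { uint64_t bits_type_ts; };	/* stand-in */

struct t4_cq {
	struct t4_cqe *queue, *sw_queue;
	uint16_t cidx, sw_cidx, sw_in_use;
	int gen;
};

/* Stand-in for t4_valid_cqe(): entry genbit must match the CQ's
 * current generation. */
static int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
{
	return (int)(cqe->bits_type_ts >> 63) == cq->gen;
}

static int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	if (!t4_valid_cqe(cq, &cq->queue[cq->cidx]))
		return -1;	/* -EAGAIN in the driver: nothing new yet */
	*cqe = &cq->queue[cq->cidx];
	return 0;
}

static int t4_next_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
{
	/* driver-generated flush/drain completions are drained first */
	if (cq->sw_in_use) {
		*cqe = &cq->sw_queue[cq->sw_cidx];
		return 0;
	}
	return t4_next_hw_cqe(cq, cqe);
}
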
cqe               425 drivers/infiniband/hw/efa/efa_com.c 						   struct efa_admin_acq_entry *cqe)
cqe               430 drivers/infiniband/hw/efa/efa_com.c 	cmd_id = cqe->acq_common_descriptor.command &
cqe               442 drivers/infiniband/hw/efa/efa_com.c 	comp_ctx->comp_status = cqe->acq_common_descriptor.status;
cqe               444 drivers/infiniband/hw/efa/efa_com.c 		memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);
cqe               452 drivers/infiniband/hw/efa/efa_com.c 	struct efa_admin_acq_entry *cqe;
cqe               463 drivers/infiniband/hw/efa/efa_com.c 	cqe = &aq->cq.entries[ci];
cqe               466 drivers/infiniband/hw/efa/efa_com.c 	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
cqe               473 drivers/infiniband/hw/efa/efa_com.c 		efa_com_handle_single_admin_completion(aq, cqe);
cqe               482 drivers/infiniband/hw/efa/efa_com.c 		cqe = &aq->cq.entries[ci];
cqe               928 drivers/infiniband/hw/efa/efa_verbs.c 	int entries = attr->cqe;
cqe              1006 drivers/infiniband/hw/efa/efa_verbs.c 	cq->ibcq.cqe = result.actual_depth;
cqe               210 drivers/infiniband/hw/hns/hns_roce_cq.c 				   struct ib_umem **umem, u64 buf_addr, int cqe)
cqe               216 drivers/infiniband/hw/hns/hns_roce_cq.c 	*umem = ib_umem_get(udata, buf_addr, cqe * hr_dev->caps.cq_entry_sz,
cqe               293 drivers/infiniband/hw/hns/hns_roce_cq.c 				    struct hns_roce_cq_buf *buf, int cqe)
cqe               295 drivers/infiniband/hw/hns/hns_roce_cq.c 	hns_roce_buf_free(hr_dev, (cqe + 1) * hr_dev->caps.cq_entry_sz,
cqe               403 drivers/infiniband/hw/hns/hns_roce_cq.c 	hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, hr_cq->ib_cq.cqe);
cqe               418 drivers/infiniband/hw/hns/hns_roce_cq.c 	int cq_entries = attr->cqe;
cqe               431 drivers/infiniband/hw/hns/hns_roce_cq.c 	hr_cq->ib_cq.cqe = cq_entries - 1;
cqe               515 drivers/infiniband/hw/hns/hns_roce_cq.c 		hns_roce_ib_free_cq_buf(hr_dev, &hr_cq->hr_buf, ib_cq->cqe);
cqe               724 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	cq_init_attr.cqe		= HNS_ROCE_MIN_WQE_NUM * 2;
cqe              1985 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
cqe              1989 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		!!(n & (hr_cq->ib_cq.cqe + 1))) ? hr_cqe : NULL;
cqe              2017 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	struct hns_roce_cqe *cqe, *dest;
cqe              2024 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
cqe              2033 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
cqe              2034 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
cqe              2041 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 				       hr_cq->ib_cq.cqe);
cqe              2044 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 			memcpy(dest, cqe, sizeof(*cqe));
cqe              2193 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	struct hns_roce_cqe *cqe;
cqe              2201 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	cqe = next_cqe_sw(hr_cq);
cqe              2202 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	if (!cqe)
cqe              2209 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	is_send  = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
cqe              2212 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
cqe              2214 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
cqe              2216 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		      roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
cqe              2220 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
cqe              2238 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	status = roce_get_field(cqe->cqe_byte_4,
cqe              2293 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		sq_wqe = get_send_wqe(*cur_qp, roce_get_field(cqe->cqe_byte_4,
cqe              2303 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
cqe              2328 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 			wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
cqe              2338 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
cqe              2339 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		opcode = roce_get_field(cqe->cqe_byte_4,
cqe              2348 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 				cpu_to_be32(le32_to_cpu(cqe->immediate_data));
cqe              2351 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 			if (roce_get_bit(cqe->cqe_byte_4,
cqe              2356 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 					le32_to_cpu(cqe->immediate_data));
cqe              2371 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
cqe              2373 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
cqe              2376 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
cqe              2379 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
cqe              3689 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 		cq_buf_size = (ibcq->cqe + 1) * hr_dev->caps.cq_entry_sz;
cqe              2459 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);
cqe              2462 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	return (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_OWNER_S) ^
cqe              2463 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		!!(n & (hr_cq->ib_cq.cqe + 1))) ? cqe : NULL;
cqe              2495 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	struct hns_roce_v2_cqe *cqe, *dest;
cqe              2503 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		if (prod_index > hr_cq->cons_index + hr_cq->ib_cq.cqe)
cqe              2512 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		cqe = get_cqe_v2(hr_cq, prod_index & hr_cq->ib_cq.cqe);
cqe              2513 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		if ((roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
cqe              2517 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 			    roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S)) {
cqe              2518 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 				wqe_index = roce_get_field(cqe->byte_4,
cqe              2526 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 					  hr_cq->ib_cq.cqe);
cqe              2529 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 			memcpy(dest, cqe, sizeof(*cqe));
cqe              2660 drivers/infiniband/hw/hns/hns_roce_hw_v2.c static int hns_roce_handle_recv_inl_wqe(struct hns_roce_v2_cqe *cqe,
cqe              2669 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	wr_num = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_WQE_INDX_M,
cqe              2699 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	struct hns_roce_v2_cqe *cqe;
cqe              2712 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	cqe = next_cqe_sw_v2(hr_cq);
cqe              2713 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	if (!cqe)
cqe              2721 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	is_send = !roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_S_R_S);
cqe              2723 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	qpn = roce_get_field(cqe->byte_16, V2_CQE_BYTE_16_LCL_QPN_M,
cqe              2748 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 			wqe_ctr = (u16)roce_get_field(cqe->byte_4,
cqe              2759 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		wqe_ctr = (u16)roce_get_field(cqe->byte_4,
cqe              2771 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	status = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_STATUS_M,
cqe              2837 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		switch (roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
cqe              2851 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 			wc->byte_len = le32_to_cpu(cqe->byte_cnt);
cqe              2892 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
cqe              2894 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		opcode = roce_get_field(cqe->byte_4, V2_CQE_BYTE_4_OPCODE_M,
cqe              2901 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 				cpu_to_be32(le32_to_cpu(cqe->immtdata));
cqe              2911 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 				cpu_to_be32(le32_to_cpu(cqe->immtdata));
cqe              2916 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 			wc->ex.invalidate_rkey = le32_to_cpu(cqe->rkey);
cqe              2928 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		    (roce_get_bit(cqe->byte_4, V2_CQE_BYTE_4_RQ_INLINE_S))) {
cqe              2929 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 			ret = hns_roce_handle_recv_inl_wqe(cqe, cur_qp, wc);
cqe              2934 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		wc->sl = (u8)roce_get_field(cqe->byte_32, V2_CQE_BYTE_32_SL_M,
cqe              2936 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		wc->src_qp = (u8)roce_get_field(cqe->byte_32,
cqe              2940 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		wc->wc_flags |= (roce_get_bit(cqe->byte_32,
cqe              2943 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		wc->port_num = roce_get_field(cqe->byte_32,
cqe              2946 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		memcpy(wc->smac, cqe->smac, 4);
cqe              2947 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		wc->smac[4] = roce_get_field(cqe->byte_28,
cqe              2950 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		wc->smac[5] = roce_get_field(cqe->byte_28,
cqe              2954 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		if (roce_get_bit(cqe->byte_28, V2_CQE_BYTE_28_VID_VLD_S)) {
cqe              2955 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 			wc->vlan_id = (u16)roce_get_field(cqe->byte_28,
cqe              2963 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		wc->network_hdr_type = roce_get_field(cqe->byte_28,
cqe               724 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	u64 *cqe;
cqe               731 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 		cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(&ccq->cq_uk);
cqe               733 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 		cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&ccq->cq_uk);
cqe               735 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	get_64bit_val(cqe, 24, &temp);
cqe               740 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	get_64bit_val(cqe, 8, &qp_ctx);
cqe               751 drivers/infiniband/hw/i40iw/i40iw_ctrl.c 	get_64bit_val(cqe, 16, &temp1);
cqe               231 drivers/infiniband/hw/i40iw/i40iw_puda.c 	u64 *cqe;
cqe               237 drivers/infiniband/hw/i40iw/i40iw_puda.c 	cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(&cq->cq_uk);
cqe               238 drivers/infiniband/hw/i40iw/i40iw_puda.c 	get_64bit_val(cqe, 24, &qword3);
cqe               244 drivers/infiniband/hw/i40iw/i40iw_puda.c 	i40iw_debug_buf(cq->dev, I40IW_DEBUG_PUDA, "PUDA CQE", cqe, 32);
cqe               254 drivers/infiniband/hw/i40iw/i40iw_puda.c 	get_64bit_val(cqe, 0, &qword0);
cqe               255 drivers/infiniband/hw/i40iw/i40iw_puda.c 	get_64bit_val(cqe, 16, &qword2);
cqe               260 drivers/infiniband/hw/i40iw/i40iw_puda.c 	get_64bit_val(cqe, 8, &comp_ctx);
cqe               753 drivers/infiniband/hw/i40iw/i40iw_uk.c 	u64 *cqe, *sw_wqe;
cqe               763 drivers/infiniband/hw/i40iw/i40iw_uk.c 		cqe = (u64 *)I40IW_GET_CURRENT_EXTENDED_CQ_ELEMENT(cq);
cqe               765 drivers/infiniband/hw/i40iw/i40iw_uk.c 		cqe = (u64 *)I40IW_GET_CURRENT_CQ_ELEMENT(cq);
cqe               767 drivers/infiniband/hw/i40iw/i40iw_uk.c 	get_64bit_val(cqe, 24, &qword3);
cqe               784 drivers/infiniband/hw/i40iw/i40iw_uk.c 	get_64bit_val(cqe, 0, &qword0);
cqe               785 drivers/infiniband/hw/i40iw/i40iw_uk.c 	get_64bit_val(cqe, 16, &qword2);
cqe               791 drivers/infiniband/hw/i40iw/i40iw_uk.c 	get_64bit_val(cqe, 8, &comp_ctx);
cqe               890 drivers/infiniband/hw/i40iw/i40iw_uk.c 		set_64bit_val(cqe, 24, qword3);
cqe              1092 drivers/infiniband/hw/i40iw/i40iw_uk.c 	u64 *cqe;
cqe              1101 drivers/infiniband/hw/i40iw/i40iw_uk.c 			cqe = (u64 *)&(((struct i40iw_extended_cqe *)cq->cq_base)[cq_head]);
cqe              1103 drivers/infiniband/hw/i40iw/i40iw_uk.c 			cqe = (u64 *)&cq->cq_base[cq_head];
cqe              1104 drivers/infiniband/hw/i40iw/i40iw_uk.c 		get_64bit_val(cqe, 24, &qword3);
cqe              1110 drivers/infiniband/hw/i40iw/i40iw_uk.c 		get_64bit_val(cqe, 8, &comp_ctx);
cqe              1112 drivers/infiniband/hw/i40iw/i40iw_uk.c 			set_64bit_val(cqe, 8, 0);
cqe              1096 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	int entries = attr->cqe;
cqe              1117 drivers/infiniband/hw/i40iw/i40iw_verbs.c 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
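
The i40iw hits above read CQEs as four 64-bit words at fixed byte offsets (0, 8, 16, 24): qword3 carries the validity/status bits, and the word at offset 8 is the opaque back-pointer to the QP. A sketch of that access pattern; get_64bit_val here is a plain host-order load standing in for the driver's little-endian helper of the same name:

#include <stdint.h>
#include <string.h>

/* Stand-in for the driver's little-endian helper: a plain load at a
 * byte offset into the CQE. */
static void get_64bit_val(uint64_t *cqe, uint32_t byte_off, uint64_t *val)
{
	memcpy(val, (const uint8_t *)cqe + byte_off, sizeof(*val));
}

/* The offsets below match the reads in i40iw_uk.c above. */
static void parse_cqe(uint64_t *cqe)
{
	uint64_t qword0, qword2, qword3, comp_ctx;

	get_64bit_val(cqe, 24, &qword3);	/* valid/status bits */
	get_64bit_val(cqe, 0, &qword0);
	get_64bit_val(cqe, 16, &qword2);
	get_64bit_val(cqe, 8, &comp_ctx);	/* QP back-pointer */
	(void)qword0; (void)qword2; (void)qword3; (void)comp_ctx;
}
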
cqe                81 drivers/infiniband/hw/mlx4/cq.c 	struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe);
cqe                82 drivers/infiniband/hw/mlx4/cq.c 	struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe);
cqe                85 drivers/infiniband/hw/mlx4/cq.c 		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
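
The mlx4 next_cqe_sw() hit just above (cq.c lines 81-85) is an owner-bit poll: hardware toggles the CQE's owner bit once per lap, so an entry belongs to software when its owner bit disagrees with the consumer index's lap parity. A sketch ignoring the 64-byte-CQE stride handling (the tcqe offset on line 82):

#include <stdint.h>

#define MLX4_CQE_OWNER_MASK	0x80	/* MSB of owner_sr_opcode */

struct mlx4_cqe { uint8_t owner_sr_opcode; };

struct mlx4_ib_cq {
	struct { int cqe; } ibcq;	/* ibcq.cqe == ring size - 1 */
	struct mlx4_cqe *buf;
};

/* Entry n is software-owned when its owner bit differs from the lap
 * parity !!(n & ring_size); otherwise hardware still owns the slot. */
static struct mlx4_cqe *get_sw_cqe(struct mlx4_ib_cq *cq, int n)
{
	struct mlx4_cqe *cqe = &cq->buf[n & cq->ibcq.cqe];

	return (!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
		!!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe;
}
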
cqe               133 drivers/infiniband/hw/mlx4/cq.c static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe)
cqe               135 drivers/infiniband/hw/mlx4/cq.c 	mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf);
cqe               140 drivers/infiniband/hw/mlx4/cq.c 			       struct ib_umem **umem, u64 buf_addr, int cqe)
cqe               147 drivers/infiniband/hw/mlx4/cq.c 	*umem = ib_umem_get(udata, buf_addr, cqe * cqe_size,
cqe               179 drivers/infiniband/hw/mlx4/cq.c 	int entries = attr->cqe;
cqe               196 drivers/infiniband/hw/mlx4/cq.c 	cq->ibcq.cqe = entries - 1;
cqe               282 drivers/infiniband/hw/mlx4/cq.c 		mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
cqe               310 drivers/infiniband/hw/mlx4/cq.c 	cq->resize_buf->cqe = entries - 1;
cqe               339 drivers/infiniband/hw/mlx4/cq.c 	cq->resize_buf->cqe = entries - 1;
cqe               357 drivers/infiniband/hw/mlx4/cq.c 	struct mlx4_cqe *cqe, *new_cqe;
cqe               363 drivers/infiniband/hw/mlx4/cq.c 	cqe = get_cqe(cq, i & cq->ibcq.cqe);
cqe               364 drivers/infiniband/hw/mlx4/cq.c 	cqe += cqe_inc;
cqe               366 drivers/infiniband/hw/mlx4/cq.c 	while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) {
cqe               368 drivers/infiniband/hw/mlx4/cq.c 					   (i + 1) & cq->resize_buf->cqe);
cqe               369 drivers/infiniband/hw/mlx4/cq.c 		memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);
cqe               372 drivers/infiniband/hw/mlx4/cq.c 		new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) |
cqe               373 drivers/infiniband/hw/mlx4/cq.c 			(((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0);
cqe               374 drivers/infiniband/hw/mlx4/cq.c 		cqe = get_cqe(cq, ++i & cq->ibcq.cqe);
cqe               375 drivers/infiniband/hw/mlx4/cq.c 		cqe += cqe_inc;
cqe               395 drivers/infiniband/hw/mlx4/cq.c 	if (entries == ibcq->cqe + 1) {
cqe               431 drivers/infiniband/hw/mlx4/cq.c 		cq->ibcq.cqe = cq->resize_buf->cqe;
cqe               446 drivers/infiniband/hw/mlx4/cq.c 			tmp_cqe = cq->ibcq.cqe;
cqe               448 drivers/infiniband/hw/mlx4/cq.c 			cq->ibcq.cqe = cq->resize_buf->cqe;
cqe               465 drivers/infiniband/hw/mlx4/cq.c 				    cq->resize_buf->cqe);
cqe               494 drivers/infiniband/hw/mlx4/cq.c 		mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe);
cqe               500 drivers/infiniband/hw/mlx4/cq.c static void dump_cqe(void *cqe)
cqe               502 drivers/infiniband/hw/mlx4/cq.c 	__be32 *buf = cqe;
cqe               510 drivers/infiniband/hw/mlx4/cq.c static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe,
cqe               513 drivers/infiniband/hw/mlx4/cq.c 	if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) {
cqe               517 drivers/infiniband/hw/mlx4/cq.c 		       be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index),
cqe               518 drivers/infiniband/hw/mlx4/cq.c 		       cqe->vendor_err_syndrome,
cqe               519 drivers/infiniband/hw/mlx4/cq.c 		       cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK);
cqe               520 drivers/infiniband/hw/mlx4/cq.c 		dump_cqe(cqe);
cqe               523 drivers/infiniband/hw/mlx4/cq.c 	switch (cqe->syndrome) {
cqe               568 drivers/infiniband/hw/mlx4/cq.c 	wc->vendor_err = cqe->vendor_err_syndrome;
cqe               586 drivers/infiniband/hw/mlx4/cq.c 			    unsigned tail, struct mlx4_cqe *cqe, int is_eth)
cqe               665 drivers/infiniband/hw/mlx4/cq.c 	struct mlx4_cqe *cqe;
cqe               678 drivers/infiniband/hw/mlx4/cq.c 	cqe = next_cqe_sw(cq);
cqe               679 drivers/infiniband/hw/mlx4/cq.c 	if (!cqe)
cqe               683 drivers/infiniband/hw/mlx4/cq.c 		cqe++;
cqe               693 drivers/infiniband/hw/mlx4/cq.c 	is_send  = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK;
cqe               694 drivers/infiniband/hw/mlx4/cq.c 	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
cqe               698 drivers/infiniband/hw/mlx4/cq.c 	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) {
cqe               702 drivers/infiniband/hw/mlx4/cq.c 			mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
cqe               704 drivers/infiniband/hw/mlx4/cq.c 			cq->ibcq.cqe = cq->resize_buf->cqe;
cqe               714 drivers/infiniband/hw/mlx4/cq.c 	    (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
cqe               721 drivers/infiniband/hw/mlx4/cq.c 				       be32_to_cpu(cqe->vlan_my_qpn));
cqe               729 drivers/infiniband/hw/mlx4/cq.c 		g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
cqe               739 drivers/infiniband/hw/mlx4/cq.c 			wqe_ctr = be16_to_cpu(cqe->wqe_index);
cqe               746 drivers/infiniband/hw/mlx4/cq.c 		wqe_ctr = be16_to_cpu(cqe->wqe_index);
cqe               751 drivers/infiniband/hw/mlx4/cq.c 		wqe_ctr = be16_to_cpu(cqe->wqe_index);
cqe               762 drivers/infiniband/hw/mlx4/cq.c 		mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc);
cqe               770 drivers/infiniband/hw/mlx4/cq.c 		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
cqe               786 drivers/infiniband/hw/mlx4/cq.c 			wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
cqe               815 drivers/infiniband/hw/mlx4/cq.c 		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
cqe               817 drivers/infiniband/hw/mlx4/cq.c 		switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) {
cqe               821 drivers/infiniband/hw/mlx4/cq.c 			wc->ex.imm_data = cqe->immed_rss_invalid;
cqe               826 drivers/infiniband/hw/mlx4/cq.c 			wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid);
cqe               835 drivers/infiniband/hw/mlx4/cq.c 			wc->ex.imm_data = cqe->immed_rss_invalid;
cqe               846 drivers/infiniband/hw/mlx4/cq.c 				use_tunnel_data(*cur_qp, cq, wc, tail, cqe,
cqe               852 drivers/infiniband/hw/mlx4/cq.c 		g_mlpath_rqpn	   = be32_to_cpu(cqe->g_mlpath_rqpn);
cqe               856 drivers/infiniband/hw/mlx4/cq.c 		wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
cqe               857 drivers/infiniband/hw/mlx4/cq.c 		wc->wc_flags	  |= mlx4_ib_ipoib_csum_ok(cqe->status,
cqe               858 drivers/infiniband/hw/mlx4/cq.c 					cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
cqe               861 drivers/infiniband/hw/mlx4/cq.c 			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
cqe               862 drivers/infiniband/hw/mlx4/cq.c 			if (be32_to_cpu(cqe->vlan_my_qpn) &
cqe               864 drivers/infiniband/hw/mlx4/cq.c 				wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
cqe               869 drivers/infiniband/hw/mlx4/cq.c 			memcpy(wc->smac, cqe->smac, ETH_ALEN);
cqe               872 drivers/infiniband/hw/mlx4/cq.c 			wc->slid = be16_to_cpu(cqe->rlid);
cqe               873 drivers/infiniband/hw/mlx4/cq.c 			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
cqe               923 drivers/infiniband/hw/mlx4/cq.c 	struct mlx4_cqe *cqe, *dest;
cqe               935 drivers/infiniband/hw/mlx4/cq.c 		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
cqe               943 drivers/infiniband/hw/mlx4/cq.c 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
cqe               944 drivers/infiniband/hw/mlx4/cq.c 		cqe += cqe_inc;
cqe               946 drivers/infiniband/hw/mlx4/cq.c 		if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) {
cqe               947 drivers/infiniband/hw/mlx4/cq.c 			if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK))
cqe               948 drivers/infiniband/hw/mlx4/cq.c 				mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index));
cqe               951 drivers/infiniband/hw/mlx4/cq.c 			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
cqe               955 drivers/infiniband/hw/mlx4/cq.c 			memcpy(dest, cqe, sizeof *cqe);
cqe              2011 drivers/infiniband/hw/mlx4/mad.c 	cq_attr.cqe = cq_size;
cqe              1240 drivers/infiniband/hw/mlx4/main.c 	cq_attr.cqe = 1;
cqe               112 drivers/infiniband/hw/mlx4/mlx4_ib.h 	int			cqe;
cqe              4414 drivers/infiniband/hw/mlx4/qp.c 	struct ib_cqe cqe;
cqe              4420 drivers/infiniband/hw/mlx4/qp.c 	struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
cqe              4422 drivers/infiniband/hw/mlx4/qp.c 						     cqe);
cqe              4424 drivers/infiniband/hw/mlx4/qp.c 	complete(&cqe->done);
cqe              4486 drivers/infiniband/hw/mlx4/qp.c 			{ .wr_cqe	= &sdrain.cqe, },
cqe              4500 drivers/infiniband/hw/mlx4/qp.c 	sdrain.cqe.done = mlx4_ib_drain_qp_done;
cqe              4529 drivers/infiniband/hw/mlx4/qp.c 	rwr.wr_cqe = &rdrain.cqe;
cqe              4530 drivers/infiniband/hw/mlx4/qp.c 	rdrain.cqe.done = mlx4_ib_drain_qp_done;
cqe                80 drivers/infiniband/hw/mlx5/cq.c 	void *cqe = get_cqe(cq, n & cq->ibcq.cqe);
cqe                83 drivers/infiniband/hw/mlx5/cq.c 	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
cqe                86 drivers/infiniband/hw/mlx5/cq.c 	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
cqe                87 drivers/infiniband/hw/mlx5/cq.c 		return cqe;
cqe               116 drivers/infiniband/hw/mlx5/cq.c static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
cqe               120 drivers/infiniband/hw/mlx5/cq.c 	switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) {
cqe               136 drivers/infiniband/hw/mlx5/cq.c 		wc->byte_len  = be32_to_cpu(cqe->byte_cnt);
cqe               165 drivers/infiniband/hw/mlx5/cq.c static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
cqe               181 drivers/infiniband/hw/mlx5/cq.c 			msrq = mlx5_cmd_get_srq(dev, be32_to_cpu(cqe->srqn));
cqe               187 drivers/infiniband/hw/mlx5/cq.c 			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
cqe               198 drivers/infiniband/hw/mlx5/cq.c 	wc->byte_len = be32_to_cpu(cqe->byte_cnt);
cqe               200 drivers/infiniband/hw/mlx5/cq.c 	switch (get_cqe_opcode(cqe)) {
cqe               204 drivers/infiniband/hw/mlx5/cq.c 		wc->ex.imm_data = cqe->imm_inval_pkey;
cqe               209 drivers/infiniband/hw/mlx5/cq.c 		if (unlikely(!((cqe->hds_ip_ext & CQE_L3_OK) &&
cqe               210 drivers/infiniband/hw/mlx5/cq.c 			       (cqe->hds_ip_ext & CQE_L4_OK))))
cqe               216 drivers/infiniband/hw/mlx5/cq.c 		wc->ex.imm_data = cqe->imm_inval_pkey;
cqe               221 drivers/infiniband/hw/mlx5/cq.c 		wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
cqe               224 drivers/infiniband/hw/mlx5/cq.c 	wc->src_qp	   = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
cqe               225 drivers/infiniband/hw/mlx5/cq.c 	wc->dlid_path_bits = cqe->ml_path;
cqe               226 drivers/infiniband/hw/mlx5/cq.c 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
cqe               229 drivers/infiniband/hw/mlx5/cq.c 		u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
cqe               238 drivers/infiniband/hw/mlx5/cq.c 		wc->slid = be16_to_cpu(cqe->slid);
cqe               239 drivers/infiniband/hw/mlx5/cq.c 		wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf;
cqe               244 drivers/infiniband/hw/mlx5/cq.c 	vlan_present = cqe->l4_l3_hdr_type & 0x1;
cqe               245 drivers/infiniband/hw/mlx5/cq.c 	roce_packet_type   = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0x3;
cqe               247 drivers/infiniband/hw/mlx5/cq.c 		wc->vlan_id = (be16_to_cpu(cqe->vlan_info)) & 0xfff;
cqe               248 drivers/infiniband/hw/mlx5/cq.c 		wc->sl = (be16_to_cpu(cqe->vlan_info) >> 13) & 0x7;
cqe               268 drivers/infiniband/hw/mlx5/cq.c static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe)
cqe               271 drivers/infiniband/hw/mlx5/cq.c 	mlx5_dump_err_cqe(dev->mdev, cqe);
cqe               275 drivers/infiniband/hw/mlx5/cq.c 				  struct mlx5_err_cqe *cqe,
cqe               280 drivers/infiniband/hw/mlx5/cq.c 	switch (cqe->syndrome) {
cqe               328 drivers/infiniband/hw/mlx5/cq.c 	wc->vendor_err = cqe->vendor_err_synd;
cqe               330 drivers/infiniband/hw/mlx5/cq.c 		dump_cqe(dev, cqe);
cqe               354 drivers/infiniband/hw/mlx5/cq.c static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
cqe               357 drivers/infiniband/hw/mlx5/cq.c 	u16 syndrome = be16_to_cpu(cqe->syndrome);
cqe               365 drivers/infiniband/hw/mlx5/cq.c 		item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16;
cqe               366 drivers/infiniband/hw/mlx5/cq.c 		item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16;
cqe               370 drivers/infiniband/hw/mlx5/cq.c 		item->expected = be32_to_cpu(cqe->expected_reftag);
cqe               371 drivers/infiniband/hw/mlx5/cq.c 		item->actual = be32_to_cpu(cqe->actual_reftag);
cqe               375 drivers/infiniband/hw/mlx5/cq.c 		item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff;
cqe               376 drivers/infiniband/hw/mlx5/cq.c 		item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff;
cqe               382 drivers/infiniband/hw/mlx5/cq.c 	item->sig_err_offset = be64_to_cpu(cqe->err_offset);
cqe               383 drivers/infiniband/hw/mlx5/cq.c 	item->key = be32_to_cpu(cqe->mkey);
cqe               454 drivers/infiniband/hw/mlx5/cq.c 	void *cqe;
cqe               458 drivers/infiniband/hw/mlx5/cq.c 	cqe = next_cqe_sw(cq);
cqe               459 drivers/infiniband/hw/mlx5/cq.c 	if (!cqe)
cqe               462 drivers/infiniband/hw/mlx5/cq.c 	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
cqe               835 drivers/infiniband/hw/mlx5/cq.c 	void *cqe;
cqe               839 drivers/infiniband/hw/mlx5/cq.c 		cqe = get_cqe(cq, i);
cqe               840 drivers/infiniband/hw/mlx5/cq.c 		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
cqe               914 drivers/infiniband/hw/mlx5/cq.c 	int entries = attr->cqe;
cqe               939 drivers/infiniband/hw/mlx5/cq.c 	cq->ibcq.cqe = entries - 1;
cqe              1037 drivers/infiniband/hw/mlx5/cq.c 	void *cqe, *dest;
cqe              1052 drivers/infiniband/hw/mlx5/cq.c 		if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe)
cqe              1059 drivers/infiniband/hw/mlx5/cq.c 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
cqe              1060 drivers/infiniband/hw/mlx5/cq.c 		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
cqe              1066 drivers/infiniband/hw/mlx5/cq.c 			dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe);
cqe              1069 drivers/infiniband/hw/mlx5/cq.c 			memcpy(dest, cqe, cq->mcq.cqe_sz);
cqe              1260 drivers/infiniband/hw/mlx5/cq.c 	if (entries == ibcq->cqe + 1)
cqe              1321 drivers/infiniband/hw/mlx5/cq.c 		cq->ibcq.cqe = entries - 1;
cqe              1340 drivers/infiniband/hw/mlx5/cq.c 		cq->ibcq.cqe = entries - 1;
cqe                36 drivers/infiniband/hw/mlx5/gsi.c 	struct ib_cqe cqe;
cqe               100 drivers/infiniband/hw/mlx5/gsi.c 		container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);
cqe               438 drivers/infiniband/hw/mlx5/gsi.c 	gsi_wr->cqe.done = &handle_single_completion;
cqe               439 drivers/infiniband/hw/mlx5/gsi.c 	wr->wr.wr_cqe = &gsi_wr->cqe;
cqe              4932 drivers/infiniband/hw/mlx5/main.c 	struct ib_cq_init_attr cq_attr = {.cqe = 1};
cqe               646 drivers/infiniband/hw/mlx5/mlx5_ib.h 	struct ib_cqe		cqe;
cqe               809 drivers/infiniband/hw/mlx5/mr.c 		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);
cqe               817 drivers/infiniband/hw/mlx5/mr.c 	context->cqe.done = mlx5_ib_umr_done;
cqe               831 drivers/infiniband/hw/mlx5/mr.c 	umrwr->wr.wr_cqe = &umr_context.cqe;
cqe              6376 drivers/infiniband/hw/mlx5/qp.c 	struct ib_cqe cqe;
cqe              6382 drivers/infiniband/hw/mlx5/qp.c 	struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,
cqe              6384 drivers/infiniband/hw/mlx5/qp.c 						     cqe);
cqe              6386 drivers/infiniband/hw/mlx5/qp.c 	complete(&cqe->done);
cqe              6448 drivers/infiniband/hw/mlx5/qp.c 			{ .wr_cqe	= &sdrain.cqe, },
cqe              6462 drivers/infiniband/hw/mlx5/qp.c 	sdrain.cqe.done = mlx5_ib_drain_qp_done;
cqe              6491 drivers/infiniband/hw/mlx5/qp.c 	rwr.wr_cqe = &rdrain.cqe;
cqe              6492 drivers/infiniband/hw/mlx5/qp.c 	rdrain.cqe.done = mlx5_ib_drain_qp_done;
cqe               174 drivers/infiniband/hw/mthca/mthca_cq.c static inline struct mthca_cqe *cqe_sw(struct mthca_cqe *cqe)
cqe               176 drivers/infiniband/hw/mthca/mthca_cq.c 	return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe;
cqe               181 drivers/infiniband/hw/mthca/mthca_cq.c 	return cqe_sw(get_cqe(cq, cq->cons_index & cq->ibcq.cqe));
cqe               184 drivers/infiniband/hw/mthca/mthca_cq.c static inline void set_cqe_hw(struct mthca_cqe *cqe)
cqe               186 drivers/infiniband/hw/mthca/mthca_cq.c 	cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW;
cqe               191 drivers/infiniband/hw/mthca/mthca_cq.c 	__be32 *cqe = cqe_ptr;
cqe               193 drivers/infiniband/hw/mthca/mthca_cq.c 	(void) cqe;	/* avoid warning if mthca_dbg compiled away... */
cqe               195 drivers/infiniband/hw/mthca/mthca_cq.c 		  be32_to_cpu(cqe[0]), be32_to_cpu(cqe[1]), be32_to_cpu(cqe[2]),
cqe               196 drivers/infiniband/hw/mthca/mthca_cq.c 		  be32_to_cpu(cqe[3]), be32_to_cpu(cqe[4]), be32_to_cpu(cqe[5]),
cqe               197 drivers/infiniband/hw/mthca/mthca_cq.c 		  be32_to_cpu(cqe[6]), be32_to_cpu(cqe[7]));
cqe               264 drivers/infiniband/hw/mthca/mthca_cq.c static inline int is_recv_cqe(struct mthca_cqe *cqe)
cqe               266 drivers/infiniband/hw/mthca/mthca_cq.c 	if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
cqe               268 drivers/infiniband/hw/mthca/mthca_cq.c 		return !(cqe->opcode & 0x01);
cqe               270 drivers/infiniband/hw/mthca/mthca_cq.c 		return !(cqe->is_send & 0x80);
cqe               276 drivers/infiniband/hw/mthca/mthca_cq.c 	struct mthca_cqe *cqe;
cqe               290 drivers/infiniband/hw/mthca/mthca_cq.c 	     cqe_sw(get_cqe(cq, prod_index & cq->ibcq.cqe));
cqe               292 drivers/infiniband/hw/mthca/mthca_cq.c 		if (prod_index == cq->cons_index + cq->ibcq.cqe)
cqe               304 drivers/infiniband/hw/mthca/mthca_cq.c 		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
cqe               305 drivers/infiniband/hw/mthca/mthca_cq.c 		if (cqe->my_qpn == cpu_to_be32(qpn)) {
cqe               306 drivers/infiniband/hw/mthca/mthca_cq.c 			if (srq && is_recv_cqe(cqe))
cqe               307 drivers/infiniband/hw/mthca/mthca_cq.c 				mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
cqe               310 drivers/infiniband/hw/mthca/mthca_cq.c 			memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
cqe               311 drivers/infiniband/hw/mthca/mthca_cq.c 			       cqe, MTHCA_CQ_ENTRY_SIZE);
cqe               316 drivers/infiniband/hw/mthca/mthca_cq.c 			set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
cqe               336 drivers/infiniband/hw/mthca/mthca_cq.c 	    cq->ibcq.cqe < cq->resize_buf->cqe) {
cqe               337 drivers/infiniband/hw/mthca/mthca_cq.c 		cq->cons_index &= cq->ibcq.cqe;
cqe               338 drivers/infiniband/hw/mthca/mthca_cq.c 		if (cqe_sw(get_cqe(cq, cq->ibcq.cqe)))
cqe               339 drivers/infiniband/hw/mthca/mthca_cq.c 			cq->cons_index -= cq->ibcq.cqe + 1;
cqe               342 drivers/infiniband/hw/mthca/mthca_cq.c 	for (i = cq->cons_index; cqe_sw(get_cqe(cq, i & cq->ibcq.cqe)); ++i)
cqe               344 drivers/infiniband/hw/mthca/mthca_cq.c 					i & cq->resize_buf->cqe),
cqe               345 drivers/infiniband/hw/mthca/mthca_cq.c 		       get_cqe(cq, i & cq->ibcq.cqe), MTHCA_CQ_ENTRY_SIZE);
cqe               366 drivers/infiniband/hw/mthca/mthca_cq.c void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe)
cqe               368 drivers/infiniband/hw/mthca/mthca_cq.c 	mthca_buf_free(dev, (cqe + 1) * MTHCA_CQ_ENTRY_SIZE, &buf->queue,
cqe               374 drivers/infiniband/hw/mthca/mthca_cq.c 			     struct mthca_err_cqe *cqe,
cqe               380 drivers/infiniband/hw/mthca/mthca_cq.c 	if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) {
cqe               383 drivers/infiniband/hw/mthca/mthca_cq.c 			  be32_to_cpu(cqe->my_qpn), be32_to_cpu(cqe->wqe),
cqe               385 drivers/infiniband/hw/mthca/mthca_cq.c 		dump_cqe(dev, cqe);
cqe               392 drivers/infiniband/hw/mthca/mthca_cq.c 	switch (cqe->syndrome) {
cqe               452 drivers/infiniband/hw/mthca/mthca_cq.c 	entry->vendor_err = cqe->vendor_err;
cqe               468 drivers/infiniband/hw/mthca/mthca_cq.c 	if (!(new_wqe & cpu_to_be32(0x3f)) || (!cqe->db_cnt && dbd))
cqe               471 drivers/infiniband/hw/mthca/mthca_cq.c 	be16_add_cpu(&cqe->db_cnt, -dbd);
cqe               472 drivers/infiniband/hw/mthca/mthca_cq.c 	cqe->wqe      = new_wqe;
cqe               473 drivers/infiniband/hw/mthca/mthca_cq.c 	cqe->syndrome = SYNDROME_WR_FLUSH_ERR;
cqe               485 drivers/infiniband/hw/mthca/mthca_cq.c 	struct mthca_cqe *cqe;
cqe               493 drivers/infiniband/hw/mthca/mthca_cq.c 	cqe = next_cqe_sw(cq);
cqe               494 drivers/infiniband/hw/mthca/mthca_cq.c 	if (!cqe)
cqe               505 drivers/infiniband/hw/mthca/mthca_cq.c 			  cq->cqn, cq->cons_index, be32_to_cpu(cqe->my_qpn),
cqe               506 drivers/infiniband/hw/mthca/mthca_cq.c 			  be32_to_cpu(cqe->wqe));
cqe               507 drivers/infiniband/hw/mthca/mthca_cq.c 		dump_cqe(dev, cqe);
cqe               510 drivers/infiniband/hw/mthca/mthca_cq.c 	is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) ==
cqe               512 drivers/infiniband/hw/mthca/mthca_cq.c 	is_send  = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80;
cqe               514 drivers/infiniband/hw/mthca/mthca_cq.c 	if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) {
cqe               521 drivers/infiniband/hw/mthca/mthca_cq.c 					  be32_to_cpu(cqe->my_qpn) &
cqe               525 drivers/infiniband/hw/mthca/mthca_cq.c 				   be32_to_cpu(cqe->my_qpn) & 0xffffff);
cqe               535 drivers/infiniband/hw/mthca/mthca_cq.c 		wqe_index = ((be32_to_cpu(cqe->wqe) - (*cur_qp)->send_wqe_offset)
cqe               541 drivers/infiniband/hw/mthca/mthca_cq.c 		u32 wqe = be32_to_cpu(cqe->wqe);
cqe               549 drivers/infiniband/hw/mthca/mthca_cq.c 		wqe = be32_to_cpu(cqe->wqe);
cqe               572 drivers/infiniband/hw/mthca/mthca_cq.c 				 (struct mthca_err_cqe *) cqe,
cqe               579 drivers/infiniband/hw/mthca/mthca_cq.c 		switch (cqe->opcode) {
cqe               596 drivers/infiniband/hw/mthca/mthca_cq.c 			entry->byte_len  = be32_to_cpu(cqe->byte_cnt);
cqe               611 drivers/infiniband/hw/mthca/mthca_cq.c 		entry->byte_len = be32_to_cpu(cqe->byte_cnt);
cqe               612 drivers/infiniband/hw/mthca/mthca_cq.c 		switch (cqe->opcode & 0x1f) {
cqe               616 drivers/infiniband/hw/mthca/mthca_cq.c 			entry->ex.imm_data = cqe->imm_etype_pkey_eec;
cqe               622 drivers/infiniband/hw/mthca/mthca_cq.c 			entry->ex.imm_data = cqe->imm_etype_pkey_eec;
cqe               630 drivers/infiniband/hw/mthca/mthca_cq.c 		entry->slid 	   = be16_to_cpu(cqe->rlid);
cqe               631 drivers/infiniband/hw/mthca/mthca_cq.c 		entry->sl   	   = cqe->sl_ipok >> 4;
cqe               632 drivers/infiniband/hw/mthca/mthca_cq.c 		entry->src_qp 	   = be32_to_cpu(cqe->rqpn) & 0xffffff;
cqe               633 drivers/infiniband/hw/mthca/mthca_cq.c 		entry->dlid_path_bits = cqe->g_mlpath & 0x7f;
cqe               634 drivers/infiniband/hw/mthca/mthca_cq.c 		entry->pkey_index  = be32_to_cpu(cqe->imm_etype_pkey_eec) >> 16;
cqe               635 drivers/infiniband/hw/mthca/mthca_cq.c 		entry->wc_flags   |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0;
cqe               636 drivers/infiniband/hw/mthca/mthca_cq.c 		checksum = (be32_to_cpu(cqe->rqpn) >> 24) |
cqe               637 drivers/infiniband/hw/mthca/mthca_cq.c 				((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00);
cqe               638 drivers/infiniband/hw/mthca/mthca_cq.c 		entry->wc_flags	  |=  (cqe->sl_ipok & 1 && checksum == 0xffff) ?
cqe               646 drivers/infiniband/hw/mthca/mthca_cq.c 		set_cqe_hw(cqe);
cqe               698 drivers/infiniband/hw/mthca/mthca_cq.c 			cq->cons_index &= cq->ibcq.cqe;
cqe               701 drivers/infiniband/hw/mthca/mthca_cq.c 					    cq->cons_index & cq->resize_buf->cqe))) {
cqe               706 drivers/infiniband/hw/mthca/mthca_cq.c 			tcqe         = cq->ibcq.cqe;
cqe               708 drivers/infiniband/hw/mthca/mthca_cq.c 			cq->ibcq.cqe = cq->resize_buf->cqe;
cqe               711 drivers/infiniband/hw/mthca/mthca_cq.c 			cq->resize_buf->cqe   = tcqe;
cqe               776 drivers/infiniband/hw/mthca/mthca_cq.c 	cq->ibcq.cqe  = nent - 1;
cqe               865 drivers/infiniband/hw/mthca/mthca_cq.c 		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
cqe               939 drivers/infiniband/hw/mthca/mthca_cq.c 		mthca_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe);
cqe               510 drivers/infiniband/hw/mthca/mthca_dev.h void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int cqe);
cqe               609 drivers/infiniband/hw/mthca/mthca_provider.c 	int entries = attr->cqe;
cqe               716 drivers/infiniband/hw/mthca/mthca_provider.c 	cq->resize_buf->cqe = entries - 1;
cqe               739 drivers/infiniband/hw/mthca/mthca_provider.c 	if (entries == ibcq->cqe + 1) {
cqe               762 drivers/infiniband/hw/mthca/mthca_provider.c 					  cq->resize_buf->cqe);
cqe               779 drivers/infiniband/hw/mthca/mthca_provider.c 			tcqe         = cq->ibcq.cqe;
cqe               781 drivers/infiniband/hw/mthca/mthca_provider.c 			cq->ibcq.cqe = cq->resize_buf->cqe;
cqe               784 drivers/infiniband/hw/mthca/mthca_provider.c 			tcqe = cq->resize_buf->cqe;
cqe               793 drivers/infiniband/hw/mthca/mthca_provider.c 		ibcq->cqe = entries - 1;
cqe               193 drivers/infiniband/hw/mthca/mthca_provider.h 	int			cqe;
cqe               498 drivers/infiniband/hw/ocrdma/ocrdma.h static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
cqe               501 drivers/infiniband/hw/ocrdma/ocrdma.h 	cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
cqe               505 drivers/infiniband/hw/ocrdma/ocrdma.h static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
cqe               507 drivers/infiniband/hw/ocrdma/ocrdma.h 	return (le32_to_cpu(cqe->flags_status_srcqpn) &
cqe               511 drivers/infiniband/hw/ocrdma/ocrdma.h static inline int is_cqe_invalidated(struct ocrdma_cqe *cqe)
cqe               513 drivers/infiniband/hw/ocrdma/ocrdma.h 	return (le32_to_cpu(cqe->flags_status_srcqpn) &
cqe               517 drivers/infiniband/hw/ocrdma/ocrdma.h static inline int is_cqe_imm(struct ocrdma_cqe *cqe)
cqe               519 drivers/infiniband/hw/ocrdma/ocrdma.h 	return (le32_to_cpu(cqe->flags_status_srcqpn) &
cqe               523 drivers/infiniband/hw/ocrdma/ocrdma.h static inline int is_cqe_wr_imm(struct ocrdma_cqe *cqe)
cqe               525 drivers/infiniband/hw/ocrdma/ocrdma.h 	return (le32_to_cpu(cqe->flags_status_srcqpn) &
cqe               123 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
cqe               126 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
cqe               128 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	return cqe;
cqe               676 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 				    struct ocrdma_ae_mcqe *cqe)
cqe               685 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
cqe               687 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	u16 qpid = cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK;
cqe               688 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	u16 cqid = cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK;
cqe               695 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID) {
cqe               705 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID) {
cqe               802 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 					struct ocrdma_ae_mcqe *cqe)
cqe               805 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
cqe               810 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		evt = (struct ocrdma_ae_pvid_mcqe *)cqe;
cqe               828 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 				      struct ocrdma_ae_mcqe *cqe)
cqe               833 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	evt = (struct ocrdma_ae_lnkst_mcqe *)cqe;
cqe               846 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	struct ocrdma_ae_mcqe *cqe = ae_cqe;
cqe               847 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
cqe               851 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		ocrdma_process_link_state(dev, cqe);
cqe               854 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		ocrdma_dispatch_ibevent(dev, cqe);
cqe               857 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		ocrdma_process_grp5_aync(dev, cqe);
cqe               865 drivers/infiniband/hw/ocrdma/ocrdma_hw.c static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
cqe               867 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
cqe               868 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		dev->mqe_ctx.cqe_status = (cqe->status &
cqe               871 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		    (cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
cqe               877 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		       __func__, cqe->tag_lo, dev->mqe_ctx.tag);
cqe               883 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 	struct ocrdma_mcqe *cqe;
cqe               886 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		cqe = ocrdma_get_mcqe(dev);
cqe               887 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		if (cqe == NULL)
cqe               889 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
cqe               891 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
cqe               892 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 			ocrdma_process_acqe(dev, cqe);
cqe               893 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
cqe               894 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 			ocrdma_process_mcqe(dev, cqe);
cqe               895 drivers/infiniband/hw/ocrdma/ocrdma_hw.c 		memset(cqe, 0, sizeof(struct ocrdma_mcqe));
cqe               983 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	int entries = attr->cqe;
cqe              1037 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	ibcq->cqe = new_cnt;
cqe              1048 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	struct ocrdma_cqe *cqe = NULL;
cqe              1050 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	cqe = cq->va;
cqe              1058 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		if (is_cqe_valid(cq, cqe))
cqe              1060 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		cqe++;
cqe              1607 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	struct ocrdma_cqe *cqe;
cqe              1626 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		cqe = cq->va + cur_getp;
cqe              1631 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
cqe              1637 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		if (is_cqe_for_sq(cqe)) {
cqe              1641 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 				wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
cqe              1658 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		cqe->cmn.qpn = 0;
cqe              2454 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 						struct ocrdma_cqe *cqe)
cqe              2456 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	if (is_cqe_for_sq(cqe)) {
cqe              2457 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
cqe              2458 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 				cqe->flags_status_srcqpn) &
cqe              2460 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
cqe              2461 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 				cqe->flags_status_srcqpn) |
cqe              2466 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
cqe              2467 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 					cqe->flags_status_srcqpn) &
cqe              2469 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
cqe              2470 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 					cqe->flags_status_srcqpn) |
cqe              2474 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
cqe              2475 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 					cqe->flags_status_srcqpn) &
cqe              2477 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 			cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
cqe              2478 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 					cqe->flags_status_srcqpn) |
cqe              2485 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
cqe              2502 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ocrdma_set_cqe_status_flushed(qp, cqe);
cqe              2507 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
cqe              2514 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
cqe              2517 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
cqe              2523 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
cqe              2528 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 				 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
cqe              2533 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
cqe              2548 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 			expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
cqe              2566 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
cqe              2572 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 				     struct ocrdma_cqe *cqe,
cqe              2588 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
cqe              2597 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
cqe              2603 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
cqe              2607 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
cqe              2609 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
cqe              2614 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 				 struct ocrdma_cqe *cqe)
cqe              2619 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	status = (le32_to_cpu(cqe->flags_status_srcqpn) &
cqe              2621 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
cqe              2625 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
cqe              2630 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
cqe              2641 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 				       struct ocrdma_cqe *cqe,
cqe              2649 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
cqe              2660 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
cqe              2677 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 			expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
cqe              2690 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
cqe              2696 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 				     struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
cqe              2706 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ocrdma_update_ud_rcqe(dev, ibwc, cqe);
cqe              2708 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
cqe              2710 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	if (is_cqe_imm(cqe)) {
cqe              2711 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
cqe              2713 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	} else if (is_cqe_wr_imm(cqe)) {
cqe              2715 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
cqe              2717 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	} else if (is_cqe_invalidated(cqe)) {
cqe              2718 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
cqe              2722 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
cqe              2729 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
cqe              2737 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
cqe              2741 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		status = (le32_to_cpu(cqe->flags_status_srcqpn) &
cqe              2747 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ocrdma_poll_success_rcqe(qp, cqe, ibwc);
cqe              2749 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
cqe              2755 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
cqe              2763 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		cqe->flags_status_srcqpn = 0;
cqe              2776 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	struct ocrdma_cqe *cqe;
cqe              2781 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		cqe = cq->va + cur_getp;
cqe              2783 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		if (!is_cqe_valid(cq, cqe))
cqe              2785 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
cqe              2792 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		if (is_cqe_for_sq(cqe)) {
cqe              2793 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
cqe              2796 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
cqe              2804 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		cqe->cmn.qpn = 0;
cqe              2808 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 		ocrdma_change_cq_phase(cq, cqe, cur_getp);
cqe                76 drivers/infiniband/hw/ocrdma/ocrdma_verbs.h int ocrdma_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
cqe               822 drivers/infiniband/hw/qedr/verbs.c 	int entries = attr->cqe;
cqe               868 drivers/infiniband/hw/qedr/verbs.c 		cq->ibcq.cqe = chain_entries;
cqe               884 drivers/infiniband/hw/qedr/verbs.c 		cq->ibcq.cqe = cq->pbl.capacity;
cqe              3652 drivers/infiniband/hw/qedr/verbs.c static int is_valid_cqe(struct qedr_cq *cq, union rdma_cqe *cqe)
cqe              3654 drivers/infiniband/hw/qedr/verbs.c 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
cqe              3660 drivers/infiniband/hw/qedr/verbs.c static struct qedr_qp *cqe_get_qp(union rdma_cqe *cqe)
cqe              3662 drivers/infiniband/hw/qedr/verbs.c 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
cqe              3671 drivers/infiniband/hw/qedr/verbs.c static enum rdma_cqe_type cqe_get_type(union rdma_cqe *cqe)
cqe              3673 drivers/infiniband/hw/qedr/verbs.c 	struct rdma_cqe_requester *resp_cqe = &cqe->req;
cqe              4058 drivers/infiniband/hw/qedr/verbs.c 	union rdma_cqe *cqe;
cqe              4075 drivers/infiniband/hw/qedr/verbs.c 	cqe = cq->latest_cqe;
cqe              4077 drivers/infiniband/hw/qedr/verbs.c 	while (num_entries && is_valid_cqe(cq, cqe)) {
cqe              4084 drivers/infiniband/hw/qedr/verbs.c 		qp = cqe_get_qp(cqe);
cqe              4086 drivers/infiniband/hw/qedr/verbs.c 			WARN(1, "Error: CQE QP pointer is NULL. CQE=%p\n", cqe);
cqe              4092 drivers/infiniband/hw/qedr/verbs.c 		switch (cqe_get_type(cqe)) {
cqe              4095 drivers/infiniband/hw/qedr/verbs.c 					       &cqe->req);
cqe              4096 drivers/infiniband/hw/qedr/verbs.c 			try_consume_req_cqe(cq, qp, &cqe->req, &update);
cqe              4100 drivers/infiniband/hw/qedr/verbs.c 						&cqe->resp, &update);
cqe              4104 drivers/infiniband/hw/qedr/verbs.c 						    wc, &cqe->resp);
cqe              4110 drivers/infiniband/hw/qedr/verbs.c 			       cqe_get_type(cqe));
cqe              4116 drivers/infiniband/hw/qedr/verbs.c 		cqe = get_cqe(cq);
cqe                55 drivers/infiniband/hw/qedr/verbs.h int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
cqe                83 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 						    cq->ibcq.cqe, &head);
cqe               105 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	int entries = attr->cqe;
cqe               129 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	cq->ibcq.cqe = entries;
cqe               183 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	cmd->cqe = entries;
cqe               192 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	cq->ibcq.cqe = resp->cqe;
cqe               284 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 					    cq->ibcq.cqe, &head);
cqe               289 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 				      cq->ibcq.cqe);
cqe               290 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 		struct pvrdma_cqe *cqe;
cqe               294 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 			(cq->ibcq.cqe - head + tail);
cqe               298 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 				curr = cq->ibcq.cqe - 1;
cqe               300 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 				tail = cq->ibcq.cqe - 1;
cqe               304 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 					cqe = get_cqe(cq, tail);
cqe               305 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 					*cqe = *curr_cqe;
cqe               311 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 					cq->ibcq.cqe);
cqe               325 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	struct pvrdma_cqe *cqe;
cqe               329 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 					    cq->ibcq.cqe, &head);
cqe               343 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	cqe = get_cqe(cq, head);
cqe               347 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	if (dev->qp_tbl[cqe->qp & 0xffff])
cqe               348 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 		*cur_qp = (struct pvrdma_qp *)dev->qp_tbl[cqe->qp & 0xffff];
cqe               352 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->opcode = pvrdma_wc_opcode_to_ib(cqe->opcode);
cqe               353 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->status = pvrdma_wc_status_to_ib(cqe->status);
cqe               354 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->wr_id = cqe->wr_id;
cqe               356 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->byte_len = cqe->byte_len;
cqe               357 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->ex.imm_data = cqe->imm_data;
cqe               358 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->src_qp = cqe->src_qp;
cqe               359 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->wc_flags = pvrdma_wc_flags_to_ib(cqe->wc_flags);
cqe               360 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->pkey_index = cqe->pkey_index;
cqe               361 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->slid = cqe->slid;
cqe               362 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->sl = cqe->sl;
cqe               363 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->dlid_path_bits = cqe->dlid_path_bits;
cqe               364 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->port_num = cqe->port_num;
cqe               365 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->vendor_err = cqe->vendor_err;
cqe               366 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	wc->network_hdr_type = cqe->network_hdr_type;
cqe               369 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c 	pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe);
cqe               482 drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h 	u32 cqe;
cqe               490 drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h 	u32 cqe;
cqe               496 drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h 	u32 cqe;
cqe               501 drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h 	u32 cqe;
cqe                96 drivers/infiniband/sw/rdmavt/cq.c 	if (head >= (unsigned)cq->ibcq.cqe) {
cqe                97 drivers/infiniband/sw/rdmavt/cq.c 		head = cq->ibcq.cqe;
cqe               209 drivers/infiniband/sw/rdmavt/cq.c 	unsigned int entries = attr->cqe;
cqe               291 drivers/infiniband/sw/rdmavt/cq.c 	cq->ibcq.cqe = entries;
cqe               379 drivers/infiniband/sw/rdmavt/cq.c int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
cqe               391 drivers/infiniband/sw/rdmavt/cq.c 	if (cqe < 1 || cqe > rdi->dparms.props.max_cqe)
cqe               398 drivers/infiniband/sw/rdmavt/cq.c 		sz = sizeof(struct ib_uverbs_wc) * (cqe + 1);
cqe               404 drivers/infiniband/sw/rdmavt/cq.c 		sz = sizeof(struct ib_wc) * (cqe + 1);
cqe               434 drivers/infiniband/sw/rdmavt/cq.c 	if (head > (u32)cq->ibcq.cqe)
cqe               435 drivers/infiniband/sw/rdmavt/cq.c 		head = (u32)cq->ibcq.cqe;
cqe               436 drivers/infiniband/sw/rdmavt/cq.c 	if (tail > (u32)cq->ibcq.cqe)
cqe               437 drivers/infiniband/sw/rdmavt/cq.c 		tail = (u32)cq->ibcq.cqe;
cqe               439 drivers/infiniband/sw/rdmavt/cq.c 		n = cq->ibcq.cqe + 1 + head - tail;
cqe               442 drivers/infiniband/sw/rdmavt/cq.c 	if (unlikely((u32)cqe < n)) {
cqe               451 drivers/infiniband/sw/rdmavt/cq.c 		if (tail == (u32)cq->ibcq.cqe)
cqe               456 drivers/infiniband/sw/rdmavt/cq.c 	cq->ibcq.cqe = cqe;
cqe               533 drivers/infiniband/sw/rdmavt/cq.c 	if (tail > (u32)cq->ibcq.cqe)
cqe               534 drivers/infiniband/sw/rdmavt/cq.c 		tail = (u32)cq->ibcq.cqe;
cqe               541 drivers/infiniband/sw/rdmavt/cq.c 		if (tail >= cq->ibcq.cqe)
cqe                58 drivers/infiniband/sw/rdmavt/cq.h int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
cqe                83 drivers/infiniband/sw/rdmavt/trace_cq.h 				     __field(unsigned int, cqe)
cqe                90 drivers/infiniband/sw/rdmavt/trace_cq.h 				   __entry->cqe = attr->cqe;
cqe                97 drivers/infiniband/sw/rdmavt/trace_cq.h 			      __entry->ip ? "true" : "false", __entry->cqe,
cqe               403 drivers/infiniband/sw/rxe/rxe_comp.c 			  struct rxe_cqe *cqe)
cqe               405 drivers/infiniband/sw/rxe/rxe_comp.c 	memset(cqe, 0, sizeof(*cqe));
cqe               408 drivers/infiniband/sw/rxe/rxe_comp.c 		struct ib_wc		*wc	= &cqe->ibwc;
cqe               419 drivers/infiniband/sw/rxe/rxe_comp.c 		struct ib_uverbs_wc	*uwc	= &cqe->uibwc;
cqe               443 drivers/infiniband/sw/rxe/rxe_comp.c 	struct rxe_cqe cqe;
cqe               448 drivers/infiniband/sw/rxe/rxe_comp.c 		make_send_cqe(qp, wqe, &cqe);
cqe               450 drivers/infiniband/sw/rxe/rxe_comp.c 		rxe_cq_post(qp->scq, &cqe, 0);
cqe                39 drivers/infiniband/sw/rxe/rxe_cq.c 		    int cqe, int comp_vector)
cqe                43 drivers/infiniband/sw/rxe/rxe_cq.c 	if (cqe <= 0) {
cqe                44 drivers/infiniband/sw/rxe/rxe_cq.c 		pr_warn("cqe(%d) <= 0\n", cqe);
cqe                48 drivers/infiniband/sw/rxe/rxe_cq.c 	if (cqe > rxe->attr.max_cqe) {
cqe                50 drivers/infiniband/sw/rxe/rxe_cq.c 			cqe, rxe->attr.max_cqe);
cqe                56 drivers/infiniband/sw/rxe/rxe_cq.c 		if (cqe < count) {
cqe                58 drivers/infiniband/sw/rxe/rxe_cq.c 				cqe, count);
cqe                84 drivers/infiniband/sw/rxe/rxe_cq.c int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
cqe                90 drivers/infiniband/sw/rxe/rxe_cq.c 	cq->queue = rxe_queue_init(rxe, &cqe,
cqe               113 drivers/infiniband/sw/rxe/rxe_cq.c 	cq->ibcq.cqe = cqe;
cqe               117 drivers/infiniband/sw/rxe/rxe_cq.c int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
cqe               123 drivers/infiniband/sw/rxe/rxe_cq.c 	err = rxe_queue_resize(cq->queue, (unsigned int *)&cqe,
cqe               127 drivers/infiniband/sw/rxe/rxe_cq.c 		cq->ibcq.cqe = cqe;
cqe               132 drivers/infiniband/sw/rxe/rxe_cq.c int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
cqe               151 drivers/infiniband/sw/rxe/rxe_cq.c 	memcpy(producer_addr(cq->queue), cqe, sizeof(*cqe));
cqe                53 drivers/infiniband/sw/rxe/rxe_loc.h 		    int cqe, int comp_vector);
cqe                55 drivers/infiniband/sw/rxe/rxe_loc.h int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
cqe                63 drivers/infiniband/sw/rxe/rxe_loc.h int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
cqe               840 drivers/infiniband/sw/rxe/rxe_resp.c 	struct rxe_cqe cqe;
cqe               841 drivers/infiniband/sw/rxe/rxe_resp.c 	struct ib_wc *wc = &cqe.ibwc;
cqe               842 drivers/infiniband/sw/rxe/rxe_resp.c 	struct ib_uverbs_wc *uwc = &cqe.uibwc;
cqe               849 drivers/infiniband/sw/rxe/rxe_resp.c 	memset(&cqe, 0, sizeof(cqe));
cqe               944 drivers/infiniband/sw/rxe/rxe_resp.c 	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
cqe               799 drivers/infiniband/sw/rxe/rxe_verbs.c 	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
cqe               803 drivers/infiniband/sw/rxe/rxe_verbs.c 	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
cqe               820 drivers/infiniband/sw/rxe/rxe_verbs.c static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
cqe               833 drivers/infiniband/sw/rxe/rxe_verbs.c 	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
cqe               837 drivers/infiniband/sw/rxe/rxe_verbs.c 	err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
cqe               851 drivers/infiniband/sw/rxe/rxe_verbs.c 	struct rxe_cqe *cqe;
cqe               856 drivers/infiniband/sw/rxe/rxe_verbs.c 		cqe = queue_head(cq->queue);
cqe               857 drivers/infiniband/sw/rxe/rxe_verbs.c 		if (!cqe)
cqe               860 drivers/infiniband/sw/rxe/rxe_verbs.c 		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
cqe                50 drivers/infiniband/sw/siw/siw_cq.c 	struct siw_cqe *cqe;
cqe                55 drivers/infiniband/sw/siw/siw_cq.c 	cqe = &cq->queue[cq->cq_get % cq->num_cqe];
cqe                56 drivers/infiniband/sw/siw/siw_cq.c 	if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {
cqe                58 drivers/infiniband/sw/siw/siw_cq.c 		wc->wr_id = cqe->id;
cqe                59 drivers/infiniband/sw/siw/siw_cq.c 		wc->status = map_cqe_status[cqe->status].ib;
cqe                60 drivers/infiniband/sw/siw/siw_cq.c 		wc->opcode = map_wc_opcode[cqe->opcode];
cqe                61 drivers/infiniband/sw/siw/siw_cq.c 		wc->byte_len = cqe->bytes;
cqe                69 drivers/infiniband/sw/siw/siw_cq.c 			if (cqe->flags & SIW_WQE_REM_INVAL) {
cqe                70 drivers/infiniband/sw/siw/siw_cq.c 				wc->ex.invalidate_rkey = cqe->inval_stag;
cqe                73 drivers/infiniband/sw/siw/siw_cq.c 			wc->qp = cqe->base_qp;
cqe                76 drivers/infiniband/sw/siw/siw_cq.c 				   cq->cq_get % cq->num_cqe, cqe->opcode,
cqe                77 drivers/infiniband/sw/siw/siw_cq.c 				   cqe->flags, (void *)(uintptr_t)cqe->id);
cqe                79 drivers/infiniband/sw/siw/siw_cq.c 		WRITE_ONCE(cqe->flags, 0);
cqe              1055 drivers/infiniband/sw/siw/siw_qp.c 		struct siw_cqe *cqe;
cqe              1062 drivers/infiniband/sw/siw/siw_qp.c 		cqe = &cq->queue[idx];
cqe              1064 drivers/infiniband/sw/siw/siw_qp.c 		if (!READ_ONCE(cqe->flags)) {
cqe              1067 drivers/infiniband/sw/siw/siw_qp.c 			cqe->id = sqe->id;
cqe              1068 drivers/infiniband/sw/siw/siw_qp.c 			cqe->opcode = sqe->opcode;
cqe              1069 drivers/infiniband/sw/siw/siw_qp.c 			cqe->status = status;
cqe              1070 drivers/infiniband/sw/siw/siw_qp.c 			cqe->imm_data = 0;
cqe              1071 drivers/infiniband/sw/siw/siw_qp.c 			cqe->bytes = bytes;
cqe              1074 drivers/infiniband/sw/siw/siw_qp.c 				cqe->base_qp = qp->ib_qp;
cqe              1076 drivers/infiniband/sw/siw/siw_qp.c 				cqe->qp_id = qp_id(qp);
cqe              1079 drivers/infiniband/sw/siw/siw_qp.c 			WRITE_ONCE(cqe->flags, SIW_WQE_VALID);
cqe              1112 drivers/infiniband/sw/siw/siw_qp.c 		struct siw_cqe *cqe;
cqe              1119 drivers/infiniband/sw/siw/siw_qp.c 		cqe = &cq->queue[idx];
cqe              1121 drivers/infiniband/sw/siw/siw_qp.c 		if (!READ_ONCE(cqe->flags)) {
cqe              1125 drivers/infiniband/sw/siw/siw_qp.c 			cqe->id = rqe->id;
cqe              1126 drivers/infiniband/sw/siw/siw_qp.c 			cqe->opcode = SIW_OP_RECEIVE;
cqe              1127 drivers/infiniband/sw/siw/siw_qp.c 			cqe->status = status;
cqe              1128 drivers/infiniband/sw/siw/siw_qp.c 			cqe->imm_data = 0;
cqe              1129 drivers/infiniband/sw/siw/siw_qp.c 			cqe->bytes = bytes;
cqe              1132 drivers/infiniband/sw/siw/siw_qp.c 				cqe->base_qp = qp->ib_qp;
cqe              1135 drivers/infiniband/sw/siw/siw_qp.c 					cqe->inval_stag = inval_stag;
cqe              1138 drivers/infiniband/sw/siw/siw_qp.c 				cqe->qp_id = qp_id(qp);
cqe              1141 drivers/infiniband/sw/siw/siw_qp.c 			WRITE_ONCE(cqe->flags, cqe_flags);
cqe              1116 drivers/infiniband/sw/siw/siw_verbs.c 	int rv, size = attr->cqe;
cqe              1129 drivers/infiniband/sw/siw/siw_verbs.c 	cq->base_cq.cqe = size;
cqe               176 drivers/infiniband/ulp/ipoib/ipoib_verbs.c 	cq_attr.cqe = size;
cqe               185 drivers/infiniband/ulp/ipoib/ipoib_verbs.c 	cq_attr.cqe = ipoib_sendq_size;
cqe               252 drivers/infiniband/ulp/iser/iscsi_iser.h 	struct ib_cqe		     cqe;
cqe               278 drivers/infiniband/ulp/iser/iscsi_iser.h 	struct ib_cqe		     cqe;
cqe               298 drivers/infiniband/ulp/iser/iscsi_iser.h 	struct ib_cqe		     cqe;
cqe               656 drivers/infiniband/ulp/iser/iscsi_iser.h iser_rx(struct ib_cqe *cqe)
cqe               658 drivers/infiniband/ulp/iser/iscsi_iser.h 	return container_of(cqe, struct iser_rx_desc, cqe);
cqe               662 drivers/infiniband/ulp/iser/iscsi_iser.h iser_tx(struct ib_cqe *cqe)
cqe               664 drivers/infiniband/ulp/iser/iscsi_iser.h 	return container_of(cqe, struct iser_tx_desc, cqe);
cqe               668 drivers/infiniband/ulp/iser/iscsi_iser.h iser_login(struct ib_cqe *cqe)
cqe               670 drivers/infiniband/ulp/iser/iscsi_iser.h 	return container_of(cqe, struct iser_login_desc, cqe);
cqe               276 drivers/infiniband/ulp/iser/iser_initiator.c 		rx_desc->cqe.done = iser_task_rsp;
cqe               379 drivers/infiniband/ulp/iser/iser_initiator.c 	tx_desc->cqe.done = iser_cmd_comp;
cqe               459 drivers/infiniband/ulp/iser/iser_initiator.c 	tx_desc->cqe.done = iser_dataout_comp;
cqe               508 drivers/infiniband/ulp/iser/iser_initiator.c 	mdesc->cqe.done = iser_ctrl_comp;
cqe               368 drivers/infiniband/ulp/iser/iser_memory.c 	      struct ib_cqe *cqe,
cqe               372 drivers/infiniband/ulp/iser/iser_memory.c 	inv_wr->wr_cqe = cqe;
cqe               387 drivers/infiniband/ulp/iser/iser_memory.c 	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
cqe               401 drivers/infiniband/ulp/iser/iser_memory.c 		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
cqe               416 drivers/infiniband/ulp/iser/iser_memory.c 	wr->wr.wr_cqe = cqe;
cqe               444 drivers/infiniband/ulp/iser/iser_memory.c 	struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
cqe               450 drivers/infiniband/ulp/iser/iser_memory.c 		iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
cqe               463 drivers/infiniband/ulp/iser/iser_memory.c 	wr->wr.wr_cqe = cqe;
cqe               983 drivers/infiniband/ulp/iser/iser_verbs.c 	desc->cqe.done = iser_login_rsp;
cqe               984 drivers/infiniband/ulp/iser/iser_verbs.c 	wr.wr_cqe = &desc->cqe;
cqe              1009 drivers/infiniband/ulp/iser/iser_verbs.c 		rx_desc->cqe.done = iser_task_rsp;
cqe              1010 drivers/infiniband/ulp/iser/iser_verbs.c 		wr->wr_cqe = &rx_desc->cqe;
cqe              1049 drivers/infiniband/ulp/iser/iser_verbs.c 	wr->wr_cqe = &tx_desc->cqe;
cqe              2119 drivers/infiniband/ulp/isert/ib_isert.c 		struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
cqe              2167 drivers/infiniband/ulp/isert/ib_isert.c 	ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
cqe              2179 drivers/infiniband/ulp/isert/ib_isert.c 	struct ib_cqe *cqe = NULL;
cqe              2188 drivers/infiniband/ulp/isert/ib_isert.c 		cqe = &isert_cmd->tx_desc.tx_cqe;
cqe              2210 drivers/infiniband/ulp/isert/ib_isert.c 	rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
cqe                93 drivers/infiniband/ulp/isert/ib_isert.h static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
cqe                95 drivers/infiniband/ulp/isert/ib_isert.h 	return container_of(cqe, struct iser_rx_desc, rx_cqe);
cqe               109 drivers/infiniband/ulp/isert/ib_isert.h static inline struct iser_tx_desc *cqe_to_tx_desc(struct ib_cqe *cqe)
cqe               111 drivers/infiniband/ulp/isert/ib_isert.h 	return container_of(cqe, struct iser_tx_desc, tx_cqe);
cqe              2052 drivers/infiniband/ulp/srp/ib_srp.c 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
cqe              2083 drivers/infiniband/ulp/srp/ib_srp.c 	iu->cqe.done = srp_send_done;
cqe              2086 drivers/infiniband/ulp/srp/ib_srp.c 	wr.wr_cqe     = &iu->cqe;
cqe              2105 drivers/infiniband/ulp/srp/ib_srp.c 	iu->cqe.done = srp_recv_done;
cqe              2108 drivers/infiniband/ulp/srp/ib_srp.c 	wr.wr_cqe   = &iu->cqe;
cqe              2250 drivers/infiniband/ulp/srp/ib_srp.c 	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
cqe               278 drivers/infiniband/ulp/srp/ib_srp.h 	struct ib_cqe		cqe;
cqe               832 drivers/infiniband/ulp/srpt/ib_srpt.c 	ioctx->ioctx.cqe.done = srpt_recv_done;
cqe               833 drivers/infiniband/ulp/srpt/ib_srpt.c 	wr.wr_cqe = &ioctx->ioctx.cqe;
cqe              1688 drivers/infiniband/ulp/srpt/ib_srpt.c 		container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);
cqe              1749 drivers/infiniband/ulp/srpt/ib_srpt.c 		container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
cqe              1864 drivers/infiniband/ulp/srpt/ib_srpt.c 		 __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
cqe              2730 drivers/infiniband/ulp/srpt/ib_srpt.c 	struct ib_cqe *cqe = &ioctx->rdma_cqe;
cqe              2750 drivers/infiniband/ulp/srpt/ib_srpt.c 	cqe->done = srpt_rdma_read_done;
cqe              2755 drivers/infiniband/ulp/srpt/ib_srpt.c 				cqe, first_wr);
cqe              2756 drivers/infiniband/ulp/srpt/ib_srpt.c 		cqe = NULL;
cqe              2862 drivers/infiniband/ulp/srpt/ib_srpt.c 	ioctx->ioctx.cqe.done = srpt_send_done;
cqe              2864 drivers/infiniband/ulp/srpt/ib_srpt.c 	send_wr.wr_cqe = &ioctx->ioctx.cqe;
cqe               175 drivers/infiniband/ulp/srpt/ib_srpt.h 	struct ib_cqe		cqe;
cqe               265 drivers/mmc/core/core.c 			     bool cqe)
cqe               275 drivers/mmc/core/core.c 			 mmc_hostname(host), cqe ? "CQE direct " : "",
cqe               277 drivers/mmc/core/core.c 	} else if (cqe) {
cqe               459 drivers/net/ethernet/amazon/ena/ena_com.c 						   struct ena_admin_acq_entry *cqe)
cqe               464 drivers/net/ethernet/amazon/ena/ena_com.c 	cmd_id = cqe->acq_common_descriptor.command &
cqe               475 drivers/net/ethernet/amazon/ena/ena_com.c 	comp_ctx->comp_status = cqe->acq_common_descriptor.status;
cqe               478 drivers/net/ethernet/amazon/ena/ena_com.c 		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
cqe               486 drivers/net/ethernet/amazon/ena/ena_com.c 	struct ena_admin_acq_entry *cqe = NULL;
cqe               494 drivers/net/ethernet/amazon/ena/ena_com.c 	cqe = &admin_queue->cq.entries[head_masked];
cqe               497 drivers/net/ethernet/amazon/ena/ena_com.c 	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
cqe               503 drivers/net/ethernet/amazon/ena/ena_com.c 		ena_com_handle_single_admin_completion(admin_queue, cqe);
cqe               512 drivers/net/ethernet/amazon/ena/ena_com.c 		cqe = &admin_queue->cq.entries[head_masked];
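Note: the ena_com.c loop above polls the admin completion queue with a phase bit: an entry is valid once bit 0 of its flags matches the queue's current phase, and the expected phase flips on every ring wrap. A compilable sketch of that scheme with made-up names and a fixed depth; real code also needs a read barrier (the kernel's dma_rmb()) between the flags check and reading the rest of the entry:

    #include <stdint.h>

    #define Q_DEPTH 16                      /* assumed power-of-two depth */

    struct entry {
            uint8_t flags;                  /* bit 0: phase */
            uint8_t status;
    };

    struct cq {
            struct entry entries[Q_DEPTH];
            unsigned int head;
            uint8_t phase;                  /* phase expected for valid entries */
    };

    void poll_completions(struct cq *q)
    {
            struct entry *e = &q->entries[q->head & (Q_DEPTH - 1)];

            while ((e->flags & 1) == q->phase) {
                    /* ... handle e->status, wake the submitter ... */
                    q->head++;
                    if ((q->head & (Q_DEPTH - 1)) == 0)
                            q->phase ^= 1;  /* wrapped: expect the other phase */
                    e = &q->entries[q->head & (Q_DEPTH - 1)];
            }
    }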
cqe               809 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h #define BNX2X_RX_SUM_FIX(cqe) \
cqe               810 drivers/net/ethernet/broadcom/bnx2x/bnx2x.h 	BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
cqe               356 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					 struct eth_end_agg_rx_cqe *cqe)
cqe               369 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
cqe               372 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
cqe               377 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
cqe               411 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			    const struct eth_fast_path_rx_cqe *cqe,
cqe               416 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
cqe               419 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
cqe               424 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		return le32_to_cpu(cqe->rss_hash_result);
cqe               432 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			    struct eth_fast_path_rx_cqe *cqe)
cqe               475 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		le16_to_cpu(cqe->pars_flags.flags);
cqe               476 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
cqe               478 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
cqe               479 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	tpa_info->placement_offset = cqe->placement_offset;
cqe               480 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
cqe               482 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
cqe               589 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			       struct eth_end_agg_rx_cqe *cqe,
cqe               598 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
cqe               608 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				     le16_to_cpu(cqe->pkt_len),
cqe               609 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				     le16_to_cpu(cqe->num_of_coalesced_segs));
cqe               615 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
cqe               623 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
cqe               756 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			   struct eth_end_agg_rx_cqe *cqe,
cqe               802 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 					 skb, cqe, cqe_idx)) {
cqe               857 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
cqe               866 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (cqe->fast_path_cqe.status_flags &
cqe               872 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	if (cqe->fast_path_cqe.type_error_flags &
cqe               886 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	union eth_rx_cqe *cqe;
cqe               903 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	cqe = &fp->rx_comp_ring[comp_ring_cons];
cqe               904 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 	cqe_fp = &cqe->fast_path_cqe;
cqe               952 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			bnx2x_sp_event(fp, cqe);
cqe               983 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			queue = cqe->end_agg_cqe.queue_index;
cqe               989 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
cqe              1000 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 				       &cqe->end_agg_cqe, comp_ring_cons);
cqe              1006 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
cqe              1076 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 			bnx2x_csum_validate(skb, cqe, fp,
cqe              1082 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		if (unlikely(cqe->fast_path_cqe.type_error_flags &
cqe              1110 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		cqe = &fp->rx_comp_ring[comp_ring_cons];
cqe              1111 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c 		cqe_fp = &cqe->fast_path_cqe;
cqe               781 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 	union eth_rx_cqe *cqe;
cqe               785 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 	cqe = &fp->rx_comp_ring[cons];
cqe               786 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h 	cqe_fp = &cqe->fast_path_cqe;
cqe              2506 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	union eth_rx_cqe *cqe;
cqe              2658 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
cqe              2659 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
cqe              2664 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
cqe              2672 drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 	data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
cqe              1130 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
cqe              1133 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c 				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
cqe              1617 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 				   union event_ring_elem *cqe,
cqe              1637 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c 	if (cqe->message.error)
cqe               438 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h 			union event_ring_elem *cqe,
cqe              2917 drivers/net/ethernet/broadcom/cnic.c 	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
cqe              2933 drivers/net/ethernet/broadcom/cnic.c 		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
cqe              2934 drivers/net/ethernet/broadcom/cnic.c 		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
cqe              2936 drivers/net/ethernet/broadcom/cnic.c 			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
cqe               319 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	cqe_size = wq->q_depth * sizeof(*rq->cqe);
cqe               320 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	rq->cqe = vzalloc(cqe_size);
cqe               321 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	if (!rq->cqe)
cqe               330 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 		rq->cqe[i] = dma_alloc_coherent(&pdev->dev,
cqe               331 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 						sizeof(*rq->cqe[i]),
cqe               333 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 		if (!rq->cqe[i])
cqe               341 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 		dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[j]), rq->cqe[j],
cqe               347 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	vfree(rq->cqe);
cqe               363 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 		dma_free_coherent(&pdev->dev, sizeof(*rq->cqe[i]), rq->cqe[i],
cqe               367 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	vfree(rq->cqe);
cqe               840 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_rq_cqe *cqe;
cqe               848 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	cqe = rq->cqe[*cons_idx];
cqe               850 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	status = be32_to_cpu(cqe->status);
cqe               900 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
cqe               901 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	u32 status = be32_to_cpu(cqe->status);
cqe               906 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	cqe->status = cpu_to_be32(status);
cqe               923 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_rq_cqe *cqe = rq->cqe[cons_idx];
cqe               924 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	u32 len = be32_to_cpu(cqe->len);
cqe               943 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	struct hinic_rq_cqe *cqe = rq->cqe[prod_idx];
cqe               955 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c 	hinic_set_sge(&cqe_sect->sge, cqe_dma, sizeof(*cqe));
cqe               105 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h 	struct hinic_rq_cqe     **cqe;
cqe                45 drivers/net/ethernet/huawei/hinic/hinic_rx.c #define LRO_PKT_HDR_LEN(cqe)		\
cqe                46 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	(HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
cqe               334 drivers/net/ethernet/huawei/hinic/hinic_rx.c 	struct hinic_rq_cqe *cqe;
cqe               356 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		cqe = rq->cqe[ci];
cqe               357 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		status =  be32_to_cpu(cqe->status);
cqe               379 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		offload_type = be32_to_cpu(cqe->offload_type);
cqe               380 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		vlan_len = be32_to_cpu(cqe->len);
cqe               398 drivers/net/ethernet/huawei/hinic/hinic_rx.c 				     LRO_PKT_HDR_LEN(cqe));
cqe               405 drivers/net/ethernet/huawei/hinic/hinic_rx.c 		cqe->status = 0;
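Note: the hinic RX path above reads a big-endian status word from the CQE and, once the packet is handled, writes status back to 0 so the slot reads as empty until the NIC fills it again. A sketch of that consume-then-clear step; the byte-order macro below is a stand-in assuming a little-endian host, and done_bit is an illustrative parameter rather than the driver's real status field accessor:

    #include <stdint.h>

    #define be32_to_cpu(x) __builtin_bswap32(x)     /* little-endian host assumed */

    struct rq_cqe {
            uint32_t status;
            uint32_t len;
    };

    int try_consume(struct rq_cqe *cqe, uint32_t done_bit)
    {
            uint32_t status = be32_to_cpu(cqe->status);

            if (!(status & done_bit))
                    return 0;               /* slot not filled yet */

            /* ... process be32_to_cpu(cqe->len) bytes ... */

            cqe->status = 0;                /* hand the slot back to hardware */
            return 1;
    }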
cqe               528 drivers/net/ethernet/ibm/ehea/ehea_main.c static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
cqe               530 drivers/net/ethernet/ibm/ehea/ehea_main.c 	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
cqe               531 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
cqe               533 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
cqe               534 drivers/net/ethernet/ibm/ehea/ehea_main.c 	    (cqe->header_length == 0))
cqe               540 drivers/net/ethernet/ibm/ehea/ehea_main.c 				 struct sk_buff *skb, struct ehea_cqe *cqe,
cqe               543 drivers/net/ethernet/ibm/ehea/ehea_main.c 	int length = cqe->num_bytes_transfered - 4;	/*remove CRC */
cqe               550 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
cqe               552 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb->csum = csum_unfold(~cqe->inet_checksum_value);
cqe               561 drivers/net/ethernet/ibm/ehea/ehea_main.c 					       struct ehea_cqe *cqe)
cqe               563 drivers/net/ethernet/ibm/ehea/ehea_main.c 	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
cqe               614 drivers/net/ethernet/ibm/ehea/ehea_main.c 				 struct ehea_cqe *cqe, int *processed_rq2,
cqe               619 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
cqe               621 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
cqe               623 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
cqe               628 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
cqe               632 drivers/net/ethernet/ibm/ehea/ehea_main.c 		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
cqe               636 drivers/net/ethernet/ibm/ehea/ehea_main.c 	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
cqe               640 drivers/net/ethernet/ibm/ehea/ehea_main.c 			ehea_dump(cqe, sizeof(*cqe), "CQE");
cqe               655 drivers/net/ethernet/ibm/ehea/ehea_main.c 	struct ehea_cqe *cqe;
cqe               670 drivers/net/ethernet/ibm/ehea/ehea_main.c 	cqe = ehea_poll_rq1(qp, &wqe_index);
cqe               671 drivers/net/ethernet/ibm/ehea/ehea_main.c 	while ((processed < budget) && cqe) {
cqe               676 drivers/net/ethernet/ibm/ehea/ehea_main.c 			ehea_dump(cqe, sizeof(*cqe), "CQE");
cqe               680 drivers/net/ethernet/ibm/ehea/ehea_main.c 		if (!ehea_check_cqe(cqe, &rq)) {
cqe               695 drivers/net/ethernet/ibm/ehea/ehea_main.c 				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
cqe               696 drivers/net/ethernet/ibm/ehea/ehea_main.c 						 cqe->num_bytes_transfered - 4);
cqe               697 drivers/net/ethernet/ibm/ehea/ehea_main.c 				ehea_fill_skb(dev, skb, cqe, pr);
cqe               701 drivers/net/ethernet/ibm/ehea/ehea_main.c 						       skb_arr_rq2_len, cqe);
cqe               707 drivers/net/ethernet/ibm/ehea/ehea_main.c 				ehea_fill_skb(dev, skb, cqe, pr);
cqe               712 drivers/net/ethernet/ibm/ehea/ehea_main.c 						       skb_arr_rq3_len, cqe);
cqe               718 drivers/net/ethernet/ibm/ehea/ehea_main.c 				ehea_fill_skb(dev, skb, cqe, pr);
cqe               724 drivers/net/ethernet/ibm/ehea/ehea_main.c 			if (cqe->status & EHEA_CQE_VLAN_TAG_XTRACT)
cqe               726 drivers/net/ethernet/ibm/ehea/ehea_main.c 						       cqe->vlan_tag);
cqe               731 drivers/net/ethernet/ibm/ehea/ehea_main.c 			port_reset = ehea_treat_poll_error(pr, rq, cqe,
cqe               737 drivers/net/ethernet/ibm/ehea/ehea_main.c 		cqe = ehea_poll_rq1(qp, &wqe_index);
cqe               801 drivers/net/ethernet/ibm/ehea/ehea_main.c 	struct ehea_cqe *cqe;
cqe               809 drivers/net/ethernet/ibm/ehea/ehea_main.c 	cqe = ehea_poll_cq(send_cq);
cqe               810 drivers/net/ethernet/ibm/ehea/ehea_main.c 	while (cqe && (quota > 0)) {
cqe               816 drivers/net/ethernet/ibm/ehea/ehea_main.c 		if (cqe->wr_id == SWQE_RESTART_CHECK) {
cqe               822 drivers/net/ethernet/ibm/ehea/ehea_main.c 		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
cqe               824 drivers/net/ethernet/ibm/ehea/ehea_main.c 			       cqe->status);
cqe               827 drivers/net/ethernet/ibm/ehea/ehea_main.c 				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
cqe               829 drivers/net/ethernet/ibm/ehea/ehea_main.c 			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
cqe               837 drivers/net/ethernet/ibm/ehea/ehea_main.c 			ehea_dump(cqe, sizeof(*cqe), "CQE");
cqe               839 drivers/net/ethernet/ibm/ehea/ehea_main.c 		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
cqe               842 drivers/net/ethernet/ibm/ehea/ehea_main.c 			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
cqe               848 drivers/net/ethernet/ibm/ehea/ehea_main.c 		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
cqe               851 drivers/net/ethernet/ibm/ehea/ehea_main.c 		cqe = ehea_poll_cq(send_cq);
cqe               868 drivers/net/ethernet/ibm/ehea/ehea_main.c 	return cqe;
cqe               878 drivers/net/ethernet/ibm/ehea/ehea_main.c 	struct ehea_cqe *cqe;
cqe               893 drivers/net/ethernet/ibm/ehea/ehea_main.c 		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
cqe               896 drivers/net/ethernet/ibm/ehea/ehea_main.c 		if (!cqe && !cqe_skb)
cqe               364 drivers/net/ethernet/ibm/ehea/ehea_qmr.h struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
cqe                50 drivers/net/ethernet/mellanox/mlx4/en_clock.c u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe)
cqe                53 drivers/net/ethernet/mellanox/mlx4/en_clock.c 	struct mlx4_ts_cqe *ts_cqe = (struct mlx4_ts_cqe *)cqe;
cqe              1667 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			struct mlx4_cqe *cqe = NULL;
cqe              1669 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +
cqe              1671 drivers/net/ethernet/mellanox/mlx4/en_netdev.c 			cqe->owner_sr_opcode = MLX4_CQE_OWNER_MASK;
cqe               625 drivers/net/ethernet/mellanox/mlx4/en_rx.c static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
cqe               643 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
cqe               645 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
cqe               652 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
cqe               672 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	struct mlx4_cqe *cqe;
cqe               692 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
cqe               695 drivers/net/ethernet/mellanox/mlx4/en_rx.c 	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cqe               714 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
cqe               717 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
cqe               718 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			       ((struct mlx4_err_cqe *)cqe)->syndrome);
cqe               721 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
cqe               763 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		length = be32_to_cpu(cqe->byte_cnt);
cqe               827 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			u64 timestamp = mlx4_en_get_cqe_ts(cqe);
cqe               840 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
cqe               842 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
cqe               843 drivers/net/ethernet/mellanox/mlx4/en_rx.c 			    cqe->checksum == cpu_to_be16(0xffff)) {
cqe               847 drivers/net/ethernet/mellanox/mlx4/en_rx.c 					(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
cqe               855 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				      (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IP_ANY))))
cqe               857 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				if (check_csum(cqe, skb, va, dev->features))
cqe               872 drivers/net/ethernet/mellanox/mlx4/en_rx.c 				     be32_to_cpu(cqe->immed_rss_invalid),
cqe               875 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		if ((cqe->vlan_my_qpn &
cqe               879 drivers/net/ethernet/mellanox/mlx4/en_rx.c 					       be16_to_cpu(cqe->sl_vid));
cqe               880 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		else if ((cqe->vlan_my_qpn &
cqe               884 drivers/net/ethernet/mellanox/mlx4/en_rx.c 					       be16_to_cpu(cqe->sl_vid));
cqe               899 drivers/net/ethernet/mellanox/mlx4/en_rx.c 		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
cqe               401 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	struct mlx4_cqe *cqe;
cqe               423 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
cqe               430 drivers/net/ethernet/mellanox/mlx4/en_tx.c 	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
cqe               440 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
cqe               442 drivers/net/ethernet/mellanox/mlx4/en_tx.c 			struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
cqe               450 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
cqe               459 drivers/net/ethernet/mellanox/mlx4/en_tx.c 				timestamp = mlx4_en_get_cqe_ts(cqe);
cqe               479 drivers/net/ethernet/mellanox/mlx4/en_tx.c 		cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;
cqe               803 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h u64 mlx4_en_get_cqe_ts(struct mlx4_cqe *cqe);
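Note: the mlx4 RX and TX loops above gate on XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK, cons_index & size): a CQE belongs to software when its owner bit agrees with which pass of the ring the consumer index is on. A one-function sketch of that test; the 0x80 owner mask mirrors the driver, the rest is illustrative:

    #include <stdbool.h>
    #include <stdint.h>

    #define CQE_OWNER_MASK 0x80

    bool cqe_is_sw_owned(uint8_t owner_sr_opcode,
                         uint32_t cons_index, uint32_t cq_size)
    {
            /*
             * cq_size is a power of two, so (cons_index & cq_size) flips
             * once per wrap; XNOR means the owner bit must match it.
             */
            return !!(owner_sr_opcode & CQE_OWNER_MASK) ==
                   !!(cons_index & cq_size);
    }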
cqe               949 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
cqe               950 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
cqe               963 drivers/net/ethernet/mellanox/mlx5/core/en.h mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
cqe               966 drivers/net/ethernet/mellanox/mlx5/core/en.h mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
cqe                 9 drivers/net/ethernet/mellanox/mlx5/core/en/health.h #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
cqe               390 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 	struct mlx5_cqe64 *cqe;
cqe               400 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
cqe               401 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 	if (!cqe)
cqe               416 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
cqe               418 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 		if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ))
cqe               421 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 					 get_cqe_opcode(cqe));
cqe               435 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
cqe               152 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 					      struct mlx5_cqe64 *cqe,
cqe               176 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c 	if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
cqe                24 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h 					      struct mlx5_cqe64 *cqe,
cqe                46 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
cqe              1563 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
cqe              1565 drivers/net/ethernet/mellanox/mlx5/core/en_main.c 		cqe->op_own = 0xf1;
cqe               192 drivers/net/ethernet/mellanox/mlx5/core/en_rep.h void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
cqe                96 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
cqe                98 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		cqe->op_own = op_own;
cqe               104 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
cqe               106 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			cqe->op_own = op_own;
cqe               593 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5_cqe64 *cqe;
cqe               600 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
cqe               601 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (likely(!cqe))
cqe               616 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
cqe               618 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
cqe               620 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 					 "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
cqe               644 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
cqe               711 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
cqe               713 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
cqe               718 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	tcp->psh                        = get_cqe_lro_tcppsh(cqe);
cqe               722 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		tcp->ack_seq            = cqe->lro_ack_seq_num;
cqe               723 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		tcp->window             = cqe->lro_tcp_win;
cqe               727 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
cqe               749 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		ipv4->ttl               = cqe->lro_min_ttl;
cqe               755 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_lro_update_tcp_hdr(cqe, tcp);
cqe               757 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				     csum_unfold((__force __sum16)cqe->check_sum));
cqe               769 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		ipv6->hop_limit         = cqe->lro_min_ttl;
cqe               772 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_lro_update_tcp_hdr(cqe, tcp);
cqe               774 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				     csum_unfold((__force __sum16)cqe->check_sum));
cqe               781 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
cqe               784 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	u8 cht = cqe->rss_hash_type;
cqe               788 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
cqe               907 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				     struct mlx5_cqe64 *cqe,
cqe               946 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
cqe               957 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
cqe               958 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		   (cqe->hds_ip_ext & CQE_L4_OK))) {
cqe               960 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		if (cqe_is_tunneled(cqe)) {
cqe               976 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
cqe               981 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
cqe               992 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
cqe              1004 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
cqe              1009 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_skb_set_hash(cqe, skb);
cqe              1011 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (cqe_has_vlan(cqe)) {
cqe              1013 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				       be16_to_cpu(cqe->vlan_info));
cqe              1017 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
cqe              1019 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
cqe              1021 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
cqe              1028 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 					 struct mlx5_cqe64 *cqe,
cqe              1036 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
cqe              1058 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
cqe              1094 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
cqe              1137 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
cqe              1139 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
cqe              1146 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
cqe              1154 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
cqe              1156 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
cqe              1158 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
cqe              1159 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		trigger_report(rq, cqe);
cqe              1167 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			      rq, cqe, wi, cqe_bcnt);
cqe              1179 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
cqe              1189 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
cqe              1201 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
cqe              1203 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
cqe              1205 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
cqe              1210 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt);
cqe              1222 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
cqe              1328 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
cqe              1330 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
cqe              1331 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
cqe              1333 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
cqe              1344 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
cqe              1345 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		trigger_report(rq, cqe);
cqe              1350 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
cqe              1358 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
cqe              1367 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
cqe              1377 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
cqe              1384 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5_cqe64 *cqe;
cqe              1396 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	cqe = mlx5_cqwq_get_cqe(cqwq);
cqe              1397 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (!cqe) {
cqe              1404 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
cqe              1414 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				mlx5e_handle_rx_cqe, rq, cqe);
cqe              1415 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
cqe              1435 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 					 struct mlx5_cqe64 *cqe,
cqe              1448 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
cqe              1465 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
cqe              1484 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
cqe              1493 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 				mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
cqe              1498 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 		mlx5e_skb_set_hash(cqe, skb);
cqe              1512 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
cqe              1520 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
cqe              1522 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
cqe              1524 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
cqe              1532 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			      rq, cqe, wi, cqe_bcnt);
cqe              1536 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
cqe              1552 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c void mlx5e_ipsec_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
cqe              1560 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
cqe              1562 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
cqe              1564 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
cqe              1572 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 			      rq, cqe, wi, cqe_bcnt);
cqe              1580 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
cqe               423 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	struct mlx5_cqe64 *cqe;
cqe               435 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
cqe               436 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	if (!cqe)
cqe               459 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
cqe               461 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
cqe               465 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 						     (struct mlx5_err_cqe *)cqe);
cqe               496 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 								  get_cqe_ts(cqe));
cqe               513 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
cqe               251 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 				  struct mlx5_cqe64 *cqe, u8 status)
cqe               256 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.rq.size - 1);
cqe               276 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	buf->sg[0].size = be32_to_cpu(cqe->byte_cnt);
cqe               291 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 				  struct mlx5_cqe64 *cqe, u8 status)
cqe               299 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);
cqe               333 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 				      struct mlx5_cqe64 *cqe)
cqe               337 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	opcode = get_cqe_opcode(cqe);
cqe               341 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		status = ((struct mlx5_err_cqe *)cqe)->syndrome;
cqe               344 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		mlx5_fpga_conn_sq_cqe(conn, cqe, status);
cqe               348 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		status = ((struct mlx5_err_cqe *)cqe)->syndrome;
cqe               351 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		mlx5_fpga_conn_rq_cqe(conn, cqe, status);
cqe               385 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	struct mlx5_cqe64 *cqe;
cqe               388 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		cqe = mlx5_cqwq_get_cqe(&conn->cq.wq);
cqe               389 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		if (!cqe)
cqe               394 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		mlx5_fpga_conn_handle_cqe(conn, cqe);
cqe               435 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 	struct mlx5_cqe64 *cqe;
cqe               454 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i);
cqe               455 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c 		cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
cqe               126 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
cqe               705 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	struct mlx5_cqe64 *cqe;
cqe               730 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 		cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
cqe               731 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 		cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
cqe               185 drivers/net/ethernet/mellanox/mlx5/core/wq.h 	struct mlx5_cqe64 *cqe = mlx5_frag_buf_get_wqe(&wq->fbc, ix);
cqe               188 drivers/net/ethernet/mellanox/mlx5/core/wq.h 	cqe += wq->fbc.log_stride == 7;
cqe               190 drivers/net/ethernet/mellanox/mlx5/core/wq.h 	return cqe;
cqe               216 drivers/net/ethernet/mellanox/mlx5/core/wq.h 	struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
cqe               217 drivers/net/ethernet/mellanox/mlx5/core/wq.h 	u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
cqe               226 drivers/net/ethernet/mellanox/mlx5/core/wq.h 	return cqe;
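Note: the wq.h lines above implement mlx5's version of the same ownership handshake: mlx5_cqwq_get_cqe() compares bit 0 of op_own with the wrap-count parity of the consumer counter and returns NULL when they differ, while en_main.c, fpga/conn.c and dr_send.c seed fresh CQ buffers with MLX5_CQE_INVALID plus the owner bit so unwritten entries fail the test on the first pass. A compilable sketch of the validity check; types and names are simplified, and the real function also issues a dma_rmb() before the CQE body is read:

    #include <stdbool.h>
    #include <stdint.h>

    struct cqe64 {
            uint8_t op_own;         /* opcode in the high nibble, owner in bit 0 */
    };

    bool cqe_is_valid(const struct cqe64 *cqe, uint32_t cc, uint8_t log_sz)
    {
            uint8_t sw_own = (cc >> log_sz) & 1;    /* wrap-count parity */

            return (cqe->op_own & 1) == sw_own;
    }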
cqe               507 drivers/net/ethernet/mellanox/mlxsw/pci.c 				     char *cqe)
cqe               543 drivers/net/ethernet/mellanox/mlxsw/pci.c 				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
cqe               563 drivers/net/ethernet/mellanox/mlxsw/pci.c 	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
cqe               565 drivers/net/ethernet/mellanox/mlxsw/pci.c 		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
cqe               567 drivers/net/ethernet/mellanox/mlxsw/pci.c 			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
cqe               570 drivers/net/ethernet/mellanox/mlxsw/pci.c 		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
cqe               573 drivers/net/ethernet/mellanox/mlxsw/pci.c 	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
cqe               575 drivers/net/ethernet/mellanox/mlxsw/pci.c 	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
cqe               576 drivers/net/ethernet/mellanox/mlxsw/pci.c 	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
cqe               611 drivers/net/ethernet/mellanox/mlxsw/pci.c 	char *cqe;
cqe               615 drivers/net/ethernet/mellanox/mlxsw/pci.c 	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
cqe               616 drivers/net/ethernet/mellanox/mlxsw/pci.c 		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
cqe               617 drivers/net/ethernet/mellanox/mlxsw/pci.c 		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
cqe               618 drivers/net/ethernet/mellanox/mlxsw/pci.c 		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
cqe               621 drivers/net/ethernet/mellanox/mlxsw/pci.c 		memcpy(ncqe, cqe, q->elem_size);
cqe               111 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe)	\
cqe               116 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h 		return mlxsw_pci_cqe##v0##_##name##_get(cqe);			\
cqe               118 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h 		return mlxsw_pci_cqe##v1##_##name##_get(cqe);			\
cqe               120 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h 		return mlxsw_pci_cqe##v2##_##name##_get(cqe);			\
cqe               124 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h 					      char *cqe, u32 val)		\
cqe               129 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h 		mlxsw_pci_cqe##v0##_##name##_set(cqe, val);			\
cqe               132 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h 		mlxsw_pci_cqe##v1##_##name##_set(cqe, val);			\
cqe               135 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h 		mlxsw_pci_cqe##v2##_##name##_set(cqe, val);			\
cqe               153 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
cqe               164 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h MLXSW_ITEM32(pci, cqe, wqe_counter, 0x04, 16, 16);
cqe               171 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14);
cqe               176 drivers/net/ethernet/mellanox/mlxsw/pci_hw.h MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9);
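Note: the MLXSW_ITEM32() lines above treat the CQE as an opaque char buffer and generate typed getters from (offset, shift, width) triples; the versioned wrapper shown from pci_hw.h then dispatches between the v0/v1/v2 layouts. A sketch of what such a generated getter amounts to, using a hypothetical CQE_ITEM32_GET macro rather than the real MLXSW_ITEM32 machinery:

    #include <arpa/inet.h>          /* ntohl */
    #include <stdint.h>
    #include <string.h>

    #define CQE_ITEM32_GET(name, offset, shift, width)                     \
    static inline uint32_t cqe_##name##_get(const char *cqe)               \
    {                                                                      \
            uint32_t w;                                                    \
                                                                           \
            memcpy(&w, cqe + (offset), sizeof(w));  /* 32-bit BE word */   \
            return (ntohl(w) >> (shift)) & ((1u << (width)) - 1);          \
    }

    /* mirrors "MLXSW_ITEM32(pci, cqe, byte_count, 0x04, 0, 14)" above */
    CQE_ITEM32_GET(byte_count, 0x04, 0, 14)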
cqe              2879 drivers/net/ethernet/qlogic/qed/qed_l2.c 				 u8 rss_id, struct eth_slow_path_rx_cqe *cqe)
cqe              2882 drivers/net/ethernet/qlogic/qed/qed_l2.c 				      cqe);
cqe               524 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	union core_rx_cqe_union *cqe = NULL;
cqe               536 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		cqe =
cqe               544 drivers/net/ethernet/qlogic/qed/qed_ll2.c 			   cq_old_idx, cq_new_idx, cqe->rx_cqe_sp.type);
cqe               546 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		switch (cqe->rx_cqe_sp.type) {
cqe               549 drivers/net/ethernet/qlogic/qed/qed_ll2.c 						     cqe, &flags);
cqe               554 drivers/net/ethernet/qlogic/qed/qed_ll2.c 							   cqe, &flags,
cqe               638 drivers/net/ethernet/qlogic/qed/qed_ll2.c 	union core_rx_cqe_union *cqe = NULL;
cqe               653 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		cqe = qed_chain_consume(&p_rx->rcq_chain);
cqe               655 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		cqe_type = cqe->rx_cqe_sp.type;
cqe               659 drivers/net/ethernet/qlogic/qed/qed_ll2.c 							    &cqe->rx_cqe_sp))
cqe               668 drivers/net/ethernet/qlogic/qed/qed_ll2.c 		p_cqe_fp = &cqe->rx_cqe_fp;
cqe                69 drivers/net/ethernet/qlogic/qed/qed_sp.h 			   struct eth_slow_path_rx_cqe *cqe);
cqe               456 drivers/net/ethernet/qlogic/qed/qed_spq.c 			      struct eth_slow_path_rx_cqe *cqe,
cqe               466 drivers/net/ethernet/qlogic/qed/qed_spq.c 	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
cqe               470 drivers/net/ethernet/qlogic/qed/qed_spq.c 			   struct eth_slow_path_rx_cqe *cqe)
cqe               474 drivers/net/ethernet/qlogic/qed/qed_spq.c 	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
cqe               478 drivers/net/ethernet/qlogic/qed/qed_spq.c 			  cqe->ramrod_cmd_id);
cqe              1573 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 	union eth_rx_cqe *cqe;
cqe              1611 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 		cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
cqe              1616 drivers/net/ethernet/qlogic/qede/qede_ethtool.c 		fp_cqe = &cqe->fast_path_regular;
cqe               627 drivers/net/ethernet/qlogic/qede/qede_fp.c 				struct eth_fast_path_rx_tpa_start_cqe *cqe)
cqe               629 drivers/net/ethernet/qlogic/qede/qede_fp.c 	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);
cqe               637 drivers/net/ethernet/qlogic/qede/qede_fp.c 	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
cqe               638 drivers/net/ethernet/qlogic/qede/qede_fp.c 				    cqe->header_len;
cqe               807 drivers/net/ethernet/qlogic/qede/qede_fp.c 			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
cqe               809 drivers/net/ethernet/qlogic/qede/qede_fp.c 	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
cqe               814 drivers/net/ethernet/qlogic/qede/qede_fp.c 	pad = cqe->placement_offset + rxq->rx_headroom;
cqe               817 drivers/net/ethernet/qlogic/qede/qede_fp.c 					      le16_to_cpu(cqe->len_on_first_bd),
cqe               838 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if ((le16_to_cpu(cqe->pars_flags.flags) >>
cqe               841 drivers/net/ethernet/qlogic/qede/qede_fp.c 		tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
cqe               845 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);
cqe               848 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_set_gro_params(edev, tpa_info->skb, cqe);
cqe               851 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (likely(cqe->ext_bd_len_list[0]))
cqe               852 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
cqe               853 drivers/net/ethernet/qlogic/qede/qede_fp.c 				   le16_to_cpu(cqe->ext_bd_len_list[0]));
cqe               855 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (unlikely(cqe->ext_bd_len_list[1])) {
cqe               933 drivers/net/ethernet/qlogic/qede/qede_fp.c 				 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
cqe               937 drivers/net/ethernet/qlogic/qede/qede_fp.c 	for (i = 0; cqe->len_list[i]; i++)
cqe               938 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
cqe               939 drivers/net/ethernet/qlogic/qede/qede_fp.c 				   le16_to_cpu(cqe->len_list[i]));
cqe               948 drivers/net/ethernet/qlogic/qede/qede_fp.c 			struct eth_fast_path_rx_tpa_end_cqe *cqe)
cqe               955 drivers/net/ethernet/qlogic/qede/qede_fp.c 	tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
cqe               962 drivers/net/ethernet/qlogic/qede/qede_fp.c 	for (i = 0; cqe->len_list[i]; i++)
cqe               963 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
cqe               964 drivers/net/ethernet/qlogic/qede/qede_fp.c 				   le16_to_cpu(cqe->len_list[i]));
cqe               973 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
cqe               976 drivers/net/ethernet/qlogic/qede/qede_fp.c 		       cqe->num_of_bds, tpa_info->frag_id);
cqe               977 drivers/net/ethernet/qlogic/qede/qede_fp.c 	if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
cqe               980 drivers/net/ethernet/qlogic/qede/qede_fp.c 		       le16_to_cpu(cqe->total_packet_len), skb->len);
cqe               989 drivers/net/ethernet/qlogic/qede/qede_fp.c 	NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
cqe              1038 drivers/net/ethernet/qlogic/qede/qede_fp.c static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
cqe              1041 drivers/net/ethernet/qlogic/qede/qede_fp.c 	u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
cqe              1058 drivers/net/ethernet/qlogic/qede/qede_fp.c 			struct eth_fast_path_rx_reg_cqe *cqe,
cqe              1118 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
cqe              1127 drivers/net/ethernet/qlogic/qede/qede_fp.c 			       struct eth_fast_path_rx_reg_cqe *cqe,
cqe              1130 drivers/net/ethernet/qlogic/qede/qede_fp.c 	u16 pkt_len = le16_to_cpu(cqe->pkt_len);
cqe              1138 drivers/net/ethernet/qlogic/qede/qede_fp.c 	for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
cqe              1184 drivers/net/ethernet/qlogic/qede/qede_fp.c 				   union eth_rx_cqe *cqe,
cqe              1189 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
cqe              1192 drivers/net/ethernet/qlogic/qede/qede_fp.c 		qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
cqe              1195 drivers/net/ethernet/qlogic/qede/qede_fp.c 		return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
cqe              1209 drivers/net/ethernet/qlogic/qede/qede_fp.c 	union eth_rx_cqe *cqe;
cqe              1216 drivers/net/ethernet/qlogic/qede/qede_fp.c 	cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
cqe              1217 drivers/net/ethernet/qlogic/qede/qede_fp.c 	cqe_type = cqe->fast_path_regular.type;
cqe              1223 drivers/net/ethernet/qlogic/qede/qede_fp.c 		sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
cqe              1230 drivers/net/ethernet/qlogic/qede/qede_fp.c 		return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
cqe              1238 drivers/net/ethernet/qlogic/qede/qede_fp.c 	fp_cqe = &cqe->fast_path_regular;
cqe              1249 drivers/net/ethernet/qlogic/qede/qede_fp.c 	flags = cqe->fast_path_regular.pars_flags.flags;
cqe              1289 drivers/net/ethernet/qlogic/qede/qede_fp.c 	qede_ptp_record_rx_ts(edev, cqe, skb);
cqe                48 drivers/net/ethernet/qlogic/qede/qede_ptp.h 					 union eth_rx_cqe *cqe,
cqe                52 drivers/net/ethernet/qlogic/qede/qede_ptp.h 	if (unlikely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags) &
cqe                54 drivers/net/ethernet/qlogic/qede/qede_ptp.h 		if (likely(le16_to_cpu(cqe->fast_path_regular.pars_flags.flags)
cqe              1561 drivers/nvme/host/fc.c 	struct nvme_completion *cqe = &op->rsp_iu.cqe;
cqe              1666 drivers/nvme/host/fc.c 			     sqe->common.command_id != cqe->command_id)) {
cqe              1677 drivers/nvme/host/fc.c 				cqe->command_id);
cqe              1680 drivers/nvme/host/fc.c 		result = cqe->result;
cqe              1681 drivers/nvme/host/fc.c 		status = cqe->status;
cqe               949 drivers/nvme/host/pci.c 	volatile struct nvme_completion *cqe = &nvmeq->cqes[idx];
cqe               952 drivers/nvme/host/pci.c 	if (unlikely(cqe->command_id >= nvmeq->q_depth)) {
cqe               955 drivers/nvme/host/pci.c 			cqe->command_id, le16_to_cpu(cqe->sq_id));
cqe               966 drivers/nvme/host/pci.c 			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
cqe               968 drivers/nvme/host/pci.c 				cqe->status, &cqe->result);
cqe               972 drivers/nvme/host/pci.c 	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
cqe               973 drivers/nvme/host/pci.c 	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
cqe               974 drivers/nvme/host/pci.c 	nvme_end_request(req, cqe->status, cqe->result);
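Note: the nvme/host/pci.c lines above show the core of NVMe completion handling: each 16-byte CQE carries the command_id of the submission it completes, which is range-checked against the queue depth and then used to look up the request. A self-contained model of that step; the struct layout follows the NVMe spec completion entry, while the function and variable names are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    struct nvme_cqe {                       /* 16 bytes, per the NVMe spec */
            uint32_t result;                /* command-specific (DW0) */
            uint32_t rsvd;                  /* DW1 */
            uint16_t sq_head;               /* controller's SQ head pointer */
            uint16_t sq_id;
            uint16_t command_id;            /* ties the CQE to its command */
            uint16_t status;                /* bit 0 is the phase tag */
    };

    void handle_cqe(const struct nvme_cqe *cqe, uint16_t q_depth)
    {
            if (cqe->command_id >= q_depth) {       /* same check as above */
                    fprintf(stderr, "invalid id %u completed on queue %u\n",
                            (unsigned)cqe->command_id, (unsigned)cqe->sq_id);
                    return;
            }
            /* ... map command_id back to the request and complete it ... */
            printf("cid %u done, status 0x%x\n",
                   (unsigned)cqe->command_id, (unsigned)(cqe->status >> 1));
    }

The status >> 1 strips the phase tag, matching the nvme/target/trace.h entry earlier in this listing.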
cqe                46 drivers/nvme/host/rdma.c 	struct ib_cqe		cqe;
cqe              1328 drivers/nvme/host/rdma.c 		container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
cqe              1354 drivers/nvme/host/rdma.c 	wr.wr_cqe     = &qe->cqe;
cqe              1384 drivers/nvme/host/rdma.c 	qe->cqe.done = nvme_rdma_recv_done;
cqe              1387 drivers/nvme/host/rdma.c 	wr.wr_cqe   = &qe->cqe;
cqe              1432 drivers/nvme/host/rdma.c 	sqe->cqe.done = nvme_rdma_async_done;
cqe              1442 drivers/nvme/host/rdma.c 		struct nvme_completion *cqe, struct ib_wc *wc)
cqe              1447 drivers/nvme/host/rdma.c 	rq = blk_mq_tag_to_rq(nvme_rdma_tagset(queue), cqe->command_id);
cqe              1451 drivers/nvme/host/rdma.c 			cqe->command_id, queue->qp->qp_num);
cqe              1457 drivers/nvme/host/rdma.c 	req->status = cqe->status;
cqe              1458 drivers/nvme/host/rdma.c 	req->result = cqe->result;
cqe              1488 drivers/nvme/host/rdma.c 		container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
cqe              1491 drivers/nvme/host/rdma.c 	struct nvme_completion *cqe = qe->data;
cqe              1507 drivers/nvme/host/rdma.c 			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
cqe              1508 drivers/nvme/host/rdma.c 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
cqe              1509 drivers/nvme/host/rdma.c 				&cqe->result);
cqe              1511 drivers/nvme/host/rdma.c 		nvme_rdma_process_nvme_rsp(queue, cqe, wc);
cqe              1777 drivers/nvme/host/rdma.c 	sqe->cqe.done = nvme_rdma_send_done;
cqe               427 drivers/nvme/host/tcp.c 		struct nvme_completion *cqe)
cqe               431 drivers/nvme/host/tcp.c 	rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue), cqe->command_id);
cqe               435 drivers/nvme/host/tcp.c 			nvme_tcp_queue_id(queue), cqe->command_id);
cqe               440 drivers/nvme/host/tcp.c 	nvme_end_request(rq, cqe->status, cqe->result);
cqe               483 drivers/nvme/host/tcp.c 	struct nvme_completion *cqe = &pdu->cqe;
cqe               493 drivers/nvme/host/tcp.c 	    cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
cqe               494 drivers/nvme/host/tcp.c 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
cqe               495 drivers/nvme/host/tcp.c 				&cqe->result);
cqe               497 drivers/nvme/host/tcp.c 		ret = nvme_tcp_process_nvme_cqe(queue, cqe);
cqe               672 drivers/nvme/target/core.c 	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
cqe               681 drivers/nvme/target/core.c 	req->cqe->status = cpu_to_le16(status << 1);
cqe               701 drivers/nvme/target/core.c 	req->cqe->status |= cpu_to_le16(1 << 14);
cqe               708 drivers/nvme/target/core.c 	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
cqe               709 drivers/nvme/target/core.c 	req->cqe->command_id = req->cmd->common.command_id;
cqe               869 drivers/nvme/target/core.c 	req->cqe->status = 0;
cqe               870 drivers/nvme/target/core.c 	req->cqe->sq_head = 0;
cqe              1099 drivers/nvme/target/core.c 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
cqe              1120 drivers/nvme/target/core.c 	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
cqe              1219 drivers/nvme/target/core.c 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
cqe              1228 drivers/nvme/target/core.c 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
cqe                75 drivers/nvme/target/fabrics-cmd.c 	req->cqe->result.u64 = cpu_to_le64(val);
cqe               129 drivers/nvme/target/fabrics-cmd.c 		req->cqe->sq_head = cpu_to_le16(0xffff);
cqe               166 drivers/nvme/target/fabrics-cmd.c 	req->cqe->result.u32 = 0;
cqe               180 drivers/nvme/target/fabrics-cmd.c 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
cqe               203 drivers/nvme/target/fabrics-cmd.c 	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
cqe               230 drivers/nvme/target/fabrics-cmd.c 	req->cqe->result.u32 = 0;
cqe               248 drivers/nvme/target/fabrics-cmd.c 		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
cqe               255 drivers/nvme/target/fabrics-cmd.c 		req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
cqe              1778 drivers/nvme/target/fc.c 	struct nvme_completion *cqe = &ersp->cqe;
cqe              1779 drivers/nvme/target/fc.c 	u32 *cqewd = (u32 *)cqe;
cqe              1811 drivers/nvme/target/fc.c 	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
cqe              1813 drivers/nvme/target/fc.c 	    queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
cqe              2084 drivers/nvme/target/fc.c 	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
cqe              2094 drivers/nvme/target/fc.c 		fod->queue->sqhd = cqe->sq_head;
cqe              2104 drivers/nvme/target/fc.c 		memset(cqe, 0, sizeof(*cqe));
cqe              2105 drivers/nvme/target/fc.c 		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
cqe              2106 drivers/nvme/target/fc.c 		cqe->sq_id = cpu_to_le16(fod->queue->qid);
cqe              2107 drivers/nvme/target/fc.c 		cqe->command_id = sqe->command_id;
cqe              2108 drivers/nvme/target/fc.c 		cqe->status = cpu_to_le16(status);
cqe              2187 drivers/nvme/target/fc.c 	fod->req.cqe = &fod->rspiubuf.cqe;
cqe                21 drivers/nvme/target/loop.c 	struct nvme_completion	cqe;
cqe                97 drivers/nvme/target/loop.c 	struct nvme_completion *cqe = req->cqe;
cqe               106 drivers/nvme/target/loop.c 			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
cqe               107 drivers/nvme/target/loop.c 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
cqe               108 drivers/nvme/target/loop.c 				&cqe->result);
cqe               112 drivers/nvme/target/loop.c 		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
cqe               116 drivers/nvme/target/loop.c 				cqe->command_id, nvme_loop_queue_idx(queue));
cqe               120 drivers/nvme/target/loop.c 		nvme_end_request(rq, cqe->status, cqe->result);
cqe               198 drivers/nvme/target/loop.c 	iod->req.cqe = &iod->cqe;
cqe               289 drivers/nvme/target/nvmet.h 	struct nvme_completion	*cqe;
cqe               327 drivers/nvme/target/nvmet.h 	req->cqe->result.u32 = cpu_to_le32(result);
cqe                36 drivers/nvme/target/rdma.c 	struct ib_cqe		cqe;
cqe               163 drivers/nvme/target/rdma.c 		!rsp->req.cqe->status &&
cqe               298 drivers/nvme/target/rdma.c 	c->cqe.done = nvmet_rdma_recv_done;
cqe               300 drivers/nvme/target/rdma.c 	c->wr.wr_cqe = &c->cqe;
cqe               367 drivers/nvme/target/rdma.c 	r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
cqe               368 drivers/nvme/target/rdma.c 	if (!r->req.cqe)
cqe               371 drivers/nvme/target/rdma.c 	r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
cqe               372 drivers/nvme/target/rdma.c 			sizeof(*r->req.cqe), DMA_TO_DEVICE);
cqe               377 drivers/nvme/target/rdma.c 	r->send_sge.length = sizeof(*r->req.cqe);
cqe               392 drivers/nvme/target/rdma.c 	kfree(r->req.cqe);
cqe               401 drivers/nvme/target/rdma.c 				sizeof(*r->req.cqe), DMA_TO_DEVICE);
cqe               402 drivers/nvme/target/rdma.c 	kfree(r->req.cqe);
cqe               790 drivers/nvme/target/rdma.c 		container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);
cqe              1032 drivers/nvme/target/rdma.c 		 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge,
cqe               164 drivers/nvme/target/tcp.c 	return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
cqe               171 drivers/nvme/target/tcp.c 		!cmd->req.cqe->status;
cqe               381 drivers/nvme/target/tcp.c 	pdu->command_id = cmd->req.cqe->command_id;
cqe              1227 drivers/nvme/target/tcp.c 	c->req.cqe = &c->rsp_pdu->cqe;
cqe               121 drivers/nvme/target/trace.h 		__entry->cid = req->cqe->command_id;
cqe               122 drivers/nvme/target/trace.h 		__entry->result = le64_to_cpu(req->cqe->result.u64);
cqe               123 drivers/nvme/target/trace.h 		__entry->status = le16_to_cpu(req->cqe->status) >> 1;
cqe              1029 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	struct fcoe_cqe *cqe;
cqe              1048 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	cqe = &cq[cq_cons];
cqe              1050 drivers/scsi/bnx2fc/bnx2fc_hwi.c 	while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
cqe              1062 drivers/scsi/bnx2fc/bnx2fc_hwi.c 		cqe++;
cqe              1068 drivers/scsi/bnx2fc/bnx2fc_hwi.c 			cqe = cq;
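
The bnx2fc_hwi.c hits show a toggle-bit consumer loop: the hardware flips a toggle bit in each CQE on every pass through the ring, so a new entry is recognized by comparing that bit against the consumer's expected value, and the pointer wraps back to the ring base at the end. A self-contained sketch of the technique; the ring size, field names, and the fake producer are all invented for illustration.

/* Toggle-bit ring consumption, modelled on the bnx2fc loop above.
 * Everything here is a stand-in to make the technique runnable. */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE  8
#define TOGGLE_BIT 0x8000u

struct ring_cqe { uint16_t wqe; };  /* low bits: payload, top bit: toggle */

int main(void)
{
	struct ring_cqe cq[RING_SIZE] = { { 0 } };
	unsigned int cons = 0;
	uint16_t expect = TOGGLE_BIT;   /* first pass: toggle bit set */

	/* Fake producer: publish three entries with the toggle bit set. */
	for (int i = 0; i < 3; i++)
		cq[i].wqe = (uint16_t)(TOGGLE_BIT | (i + 1));

	/* Consumer: only entries whose toggle bit matches are valid. */
	struct ring_cqe *cqe = &cq[cons];
	while ((cqe->wqe & TOGGLE_BIT) == expect) {
		printf("consume wqe %#x\n", cqe->wqe & ~TOGGLE_BIT);
		cqe++;
		if (++cons == RING_SIZE) {  /* wrap and flip expectation */
			cons = 0;
			cqe = cq;
			expect ^= TOGGLE_BIT;
		}
	}
	return 0;
}
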
cqe               650 drivers/scsi/bnx2i/bnx2i.h 	struct cqe *cq_virt;
cqe               654 drivers/scsi/bnx2i/bnx2i.h 	struct cqe *cq_prod_qe;
cqe               655 drivers/scsi/bnx2i/bnx2i.h 	struct cqe *cq_cons_qe;
cqe               656 drivers/scsi/bnx2i/bnx2i.h 	struct cqe *cq_first_qe;
cqe               657 drivers/scsi/bnx2i/bnx2i.h 	struct cqe *cq_last_qe;
cqe               774 drivers/scsi/bnx2i/bnx2i.h 	struct cqe cqe;
cqe               881 drivers/scsi/bnx2i/bnx2i.h 				       struct cqe *cqe);
cqe              1337 drivers/scsi/bnx2i/bnx2i_hwi.c 				struct cqe *cqe)
cqe              1347 drivers/scsi/bnx2i/bnx2i_hwi.c 	resp_cqe = (struct bnx2i_cmd_response *)cqe;
cqe              1382 drivers/scsi/bnx2i/bnx2i_hwi.c 	resp_cqe = (struct bnx2i_cmd_response *)cqe;
cqe              1434 drivers/scsi/bnx2i/bnx2i_hwi.c 				    struct cqe *cqe)
cqe              1443 drivers/scsi/bnx2i/bnx2i_hwi.c 	login = (struct bnx2i_login_response *) cqe;
cqe              1502 drivers/scsi/bnx2i/bnx2i_hwi.c 				   struct cqe *cqe)
cqe              1511 drivers/scsi/bnx2i/bnx2i_hwi.c 	text = (struct bnx2i_text_response *) cqe;
cqe              1563 drivers/scsi/bnx2i/bnx2i_hwi.c 				  struct cqe *cqe)
cqe              1570 drivers/scsi/bnx2i/bnx2i_hwi.c 	tmf_cqe = (struct bnx2i_tmf_response *)cqe;
cqe              1602 drivers/scsi/bnx2i/bnx2i_hwi.c 				     struct cqe *cqe)
cqe              1609 drivers/scsi/bnx2i/bnx2i_hwi.c 	logout = (struct bnx2i_logout_response *) cqe;
cqe              1648 drivers/scsi/bnx2i/bnx2i_hwi.c 					   struct cqe *cqe)
cqe              1654 drivers/scsi/bnx2i/bnx2i_hwi.c 	nop_in = (struct bnx2i_nop_in_msg *)cqe;
cqe              1689 drivers/scsi/bnx2i/bnx2i_hwi.c 				     struct cqe *cqe)
cqe              1697 drivers/scsi/bnx2i/bnx2i_hwi.c 	nop_in = (struct bnx2i_nop_in_msg *)cqe;
cqe              1741 drivers/scsi/bnx2i/bnx2i_hwi.c 				     struct cqe *cqe)
cqe              1749 drivers/scsi/bnx2i/bnx2i_hwi.c 	async_cqe = (struct bnx2i_async_msg *)cqe;
cqe              1791 drivers/scsi/bnx2i/bnx2i_hwi.c 				      struct cqe *cqe)
cqe              1797 drivers/scsi/bnx2i/bnx2i_hwi.c 	reject = (struct bnx2i_reject_msg *) cqe;
cqe              1828 drivers/scsi/bnx2i/bnx2i_hwi.c 					   struct cqe *cqe)
cqe              1834 drivers/scsi/bnx2i/bnx2i_hwi.c 	cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
cqe              1870 drivers/scsi/bnx2i/bnx2i_hwi.c 							    &work->cqe);
cqe              1899 drivers/scsi/bnx2i/bnx2i_hwi.c 				     struct bnx2i_nop_in_msg *cqe)
cqe              1909 drivers/scsi/bnx2i/bnx2i_hwi.c 				 cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
cqe              1930 drivers/scsi/bnx2i/bnx2i_hwi.c 		memcpy(&bnx2i_work->cqe, cqe, sizeof(struct cqe));
cqe              1940 drivers/scsi/bnx2i/bnx2i_hwi.c 	bnx2i_process_scsi_cmd_resp(session, bnx2i_conn, (struct cqe *)cqe);
cqe               447 drivers/scsi/bnx2i/bnx2i_init.c 					    work->bnx2i_conn, &work->cqe);
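
bnx2i passes completions around as an opaque struct cqe and lets each handler reinterpret it as the response type matching the iSCSI opcode (bnx2i_cmd_response, bnx2i_login_response, and so on); when processing must be deferred, the entry is memcpy'd into a work element so the ring slot can be reused immediately. A sketch of that cast-and-copy pattern; the opcode value and response layout below are invented.

/* Opaque-CQE dispatch in the style of the bnx2i handlers above;
 * the opcode and response layout are made up for the sketch. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct cqe { uint8_t bytes[32]; };        /* opaque ring entry */

struct login_resp { uint8_t opcode; uint8_t status; };

struct work_item { struct cqe cqe; };     /* deferred private copy */

static void handle_cqe(const struct cqe *cqe)
{
	uint8_t opcode = cqe->bytes[0];     /* assumed: first byte = opcode */

	switch (opcode) {
	case 0x23: {   /* invented login-response opcode */
		const struct login_resp *r = (const struct login_resp *)cqe;
		printf("login status %u\n", (unsigned)r->status);
		break;
	}
	default:
		printf("unhandled opcode %#x\n", (unsigned)opcode);
	}
}

int main(void)
{
	struct cqe entry = { .bytes = { 0x23, 0x00 } };
	struct work_item work;

	/* Defer: copy the whole entry out of the ring, then process. */
	memcpy(&work.cqe, &entry, sizeof(struct cqe));
	handle_cqe(&work.cqe);
	return 0;
}
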
cqe               893 drivers/scsi/lpfc/lpfc_init.c 		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
cqe              5849 drivers/scsi/lpfc/lpfc_init.c 		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
cqe              5852 drivers/scsi/lpfc/lpfc_init.c 						 &cq_event->cqe.acqe_link);
cqe              5855 drivers/scsi/lpfc/lpfc_init.c 			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
cqe              5859 drivers/scsi/lpfc/lpfc_init.c 						 &cq_event->cqe.acqe_dcbx);
cqe              5863 drivers/scsi/lpfc/lpfc_init.c 						 &cq_event->cqe.acqe_grp5);
cqe              5866 drivers/scsi/lpfc/lpfc_init.c 			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
cqe              5869 drivers/scsi/lpfc/lpfc_init.c 			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
cqe              5875 drivers/scsi/lpfc/lpfc_init.c 					&cq_event->cqe.mcqe_cmpl));
cqe              9934 drivers/scsi/lpfc/lpfc_init.c 	struct lpfc_cq_event *cqe;
cqe              9951 drivers/scsi/lpfc/lpfc_init.c 		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
cqe              9952 drivers/scsi/lpfc/lpfc_init.c 		lpfc_sli4_cq_event_release(phba, cqe);
cqe              1053 drivers/scsi/lpfc/lpfc_nvme.c 		ptr = (uint32_t *)&ep->cqe.result.u64;
cqe              1057 drivers/scsi/lpfc/lpfc_nvme.c 		ep->cqe.sq_head = sqhd;
cqe              1058 drivers/scsi/lpfc/lpfc_nvme.c 		ep->cqe.sq_id =  nCmd->sqid;
cqe              1059 drivers/scsi/lpfc/lpfc_nvme.c 		ep->cqe.command_id = cid;
cqe              1060 drivers/scsi/lpfc/lpfc_nvme.c 		ep->cqe.status = 0;
cqe              1976 drivers/scsi/lpfc/lpfc_nvmet.c 	size = bf_get(lpfc_rcqe_length,  &nvmebuf->cq_event.cqe.rcqe_cmpl);
cqe                82 drivers/scsi/lpfc/lpfc_sli.c 				   struct lpfc_queue *cq, struct lpfc_cqe *cqe);
cqe               541 drivers/scsi/lpfc/lpfc_sli.c 	struct lpfc_cqe *cqe;
cqe               546 drivers/scsi/lpfc/lpfc_sli.c 	cqe = lpfc_sli4_qe(q, q->host_index);
cqe               549 drivers/scsi/lpfc/lpfc_sli.c 	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
cqe               561 drivers/scsi/lpfc/lpfc_sli.c 	return cqe;
cqe               566 drivers/scsi/lpfc/lpfc_sli.c 			struct lpfc_cqe *cqe)
cqe               569 drivers/scsi/lpfc/lpfc_sli.c 		bf_set_le32(lpfc_cqe_valid, cqe, 0);
cqe              3887 drivers/scsi/lpfc/lpfc_sli.c 		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
cqe              12848 drivers/scsi/lpfc/lpfc_sli.c 		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
cqe              12994 drivers/scsi/lpfc/lpfc_sli.c 	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
cqe              13034 drivers/scsi/lpfc/lpfc_sli.c 	memcpy(&cq_event->cqe, entry, size);
cqe              13204 drivers/scsi/lpfc/lpfc_sli.c 			 struct lpfc_cqe *cqe)
cqe              13212 drivers/scsi/lpfc/lpfc_sli.c 	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
cqe              13271 drivers/scsi/lpfc/lpfc_sli.c 	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
cqe              13415 drivers/scsi/lpfc/lpfc_sli.c 		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
cqe              13475 drivers/scsi/lpfc/lpfc_sli.c 			 struct lpfc_cqe *cqe)
cqe              13481 drivers/scsi/lpfc/lpfc_sli.c 	lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
cqe              13590 drivers/scsi/lpfc/lpfc_sli.c 	struct lpfc_cqe *cqe;
cqe              13603 drivers/scsi/lpfc/lpfc_sli.c 	cqe = lpfc_sli4_cq_get(cq);
cqe              13604 drivers/scsi/lpfc/lpfc_sli.c 	while (cqe) {
cqe              13605 drivers/scsi/lpfc/lpfc_sli.c 		workposted |= handler(phba, cq, cqe);
cqe              13606 drivers/scsi/lpfc/lpfc_sli.c 		__lpfc_sli4_consume_cqe(phba, cq, cqe);
cqe              13621 drivers/scsi/lpfc/lpfc_sli.c 		cqe = lpfc_sli4_cq_get(cq);
cqe              13981 drivers/scsi/lpfc/lpfc_sli.c 			 struct lpfc_cqe *cqe)
cqe              13987 drivers/scsi/lpfc/lpfc_sli.c 	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
cqe              17796 drivers/scsi/lpfc/lpfc_sli.c 				       &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
cqe              17827 drivers/scsi/lpfc/lpfc_sli.c 				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
cqe              17852 drivers/scsi/lpfc/lpfc_sli.c 				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
cqe              17936 drivers/scsi/lpfc/lpfc_sli.c 	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
cqe              18049 drivers/scsi/lpfc/lpfc_sli.c 		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
cqe              18051 drivers/scsi/lpfc/lpfc_sli.c 			      &dmabuf->cq_event.cqe.rcqe_cmpl);
cqe              18054 drivers/scsi/lpfc/lpfc_sli.c 			      &dmabuf->cq_event.cqe.rcqe_cmpl);
cqe              18061 drivers/scsi/lpfc/lpfc_sli.c 				       &dmabuf->cq_event.cqe.rcqe_cmpl));
cqe                52 drivers/scsi/lpfc/lpfc_sli.h 	} cqe;
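
The lpfc_sli.c hits sketch a valid-bit CQ: lpfc_sli4_cq_get() returns NULL unless the entry's valid bit matches the queue's expected qe_valid, the processing loop calls a per-CQ handler and then consumes each entry, and lpfc_cq_event wraps the raw entry in a union so later code can view it as a work, mailbox, or receive completion. A compact userspace model of the get/handle/consume loop, with stand-in structures:

/* Valid-bit CQ polling modelled on the lpfc loop above; the queue
 * structure and handler are stand-ins. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEPTH 4

struct cqe { uint32_t valid; uint32_t payload; };

struct cq {
	struct cqe entries[DEPTH];
	unsigned int host_index;
	uint32_t qe_valid;          /* expected valid value this pass */
};

static struct cqe *cq_get(struct cq *q)
{
	struct cqe *cqe = &q->entries[q->host_index];

	if (cqe->valid != q->qe_valid)
		return NULL;        /* hardware has not written it yet */
	return cqe;
}

static void cq_consume(struct cq *q, struct cqe *cqe)
{
	cqe->valid = !q->qe_valid;  /* mark consumed */
	q->host_index = (q->host_index + 1) % DEPTH;
}

static bool handler(struct cqe *cqe)
{
	printf("cqe payload %u\n", cqe->payload);
	return true;                /* "work posted" */
}

int main(void)
{
	struct cq q = { .qe_valid = 1 };
	bool workposted = false;

	q.entries[0] = (struct cqe){ .valid = 1, .payload = 11 };
	q.entries[1] = (struct cqe){ .valid = 1, .payload = 22 };

	for (struct cqe *cqe = cq_get(&q); cqe; cqe = cq_get(&q)) {
		workposted |= handler(cqe);
		cq_consume(&q, cqe);
	}
	return workposted ? 0 : 1;
}
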
cqe               253 drivers/scsi/qedf/qedf.h 	struct fcoe_cqe cqe;
cqe               483 drivers/scsi/qedf/qedf.h extern void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
cqe               486 drivers/scsi/qedf/qedf.h 	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
cqe               488 drivers/scsi/qedf/qedf.h 	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
cqe               493 drivers/scsi/qedf/qedf.h extern void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
cqe               506 drivers/scsi/qedf/qedf.h extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
cqe               513 drivers/scsi/qedf/qedf.h 	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
cqe               515 drivers/scsi/qedf/qedf.h extern void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
cqe               517 drivers/scsi/qedf/qedf.h extern void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe);
cqe               526 drivers/scsi/qedf/qedf.h 	struct fcoe_cqe *cqe);
cqe               532 drivers/scsi/qedf/qedf.h 	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
cqe               142 drivers/scsi/qedf/qedf_els.c void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
cqe               156 drivers/scsi/qedf/qedf_els.c 	mp_info = &cqe->cqe_info.midpath_info;
cqe               729 drivers/scsi/qedf/qedf_els.c 	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req)
cqe               737 drivers/scsi/qedf/qedf_els.c 	if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) {
cqe              1126 drivers/scsi/qedf/qedf_io.c void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
cqe              1140 drivers/scsi/qedf/qedf_io.c 	if (!cqe)
cqe              1155 drivers/scsi/qedf/qedf_io.c 	fcp_rsp = &cqe->cqe_info.rsp_info;
cqe              1215 drivers/scsi/qedf/qedf_io.c 	fw_residual_flag = GET_FIELD(cqe->cqe_info.rsp_info.fw_error_flags,
cqe              1222 drivers/scsi/qedf/qedf_io.c 			 cqe->cqe_info.rsp_info.fw_residual, sc_cmd->cmnd[2],
cqe              1432 drivers/scsi/qedf/qedf_io.c void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
cqe              1440 drivers/scsi/qedf/qedf_io.c 	if (!cqe) {
cqe              1451 drivers/scsi/qedf/qedf_io.c 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
cqe              1452 drivers/scsi/qedf/qedf_io.c 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
cqe              1455 drivers/scsi/qedf/qedf_io.c 		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
cqe              1456 drivers/scsi/qedf/qedf_io.c 		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
cqe              1457 drivers/scsi/qedf/qedf_io.c 		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));
cqe              1461 drivers/scsi/qedf/qedf_io.c 	    ((u64)cqe->cqe_info.err_info.err_warn_bitmap_hi << 32) |
cqe              1462 drivers/scsi/qedf/qedf_io.c 	    (u64)cqe->cqe_info.err_info.err_warn_bitmap_lo;
cqe              1477 drivers/scsi/qedf/qedf_io.c 				    cqe->cqe_info.err_info.rx_buf_off;
cqe              1479 drivers/scsi/qedf/qedf_io.c 				    cqe->cqe_info.err_info.tx_buf_off;
cqe              1480 drivers/scsi/qedf/qedf_io.c 				io_req->rx_id = cqe->cqe_info.err_info.rx_id;
cqe              1502 drivers/scsi/qedf/qedf_io.c void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
cqe              1507 drivers/scsi/qedf/qedf_io.c 	if (!cqe) {
cqe              1517 drivers/scsi/qedf/qedf_io.c 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_hi),
cqe              1518 drivers/scsi/qedf/qedf_io.c 		  le32_to_cpu(cqe->cqe_info.err_info.err_warn_bitmap_lo));
cqe              1521 drivers/scsi/qedf/qedf_io.c 		  le32_to_cpu(cqe->cqe_info.err_info.tx_buf_off),
cqe              1522 drivers/scsi/qedf/qedf_io.c 		  le32_to_cpu(cqe->cqe_info.err_info.rx_buf_off),
cqe              1523 drivers/scsi/qedf/qedf_io.c 		  le32_to_cpu(cqe->cqe_info.err_info.rx_id));
cqe              1935 drivers/scsi/qedf/qedf_io.c void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
cqe              1947 drivers/scsi/qedf/qedf_io.c 	r_ctl = cqe->cqe_info.abts_info.r_ctl;
cqe              2256 drivers/scsi/qedf/qedf_io.c void qedf_process_cleanup_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
cqe              2488 drivers/scsi/qedf/qedf_io.c void qedf_process_tmf_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
cqe              2495 drivers/scsi/qedf/qedf_io.c 	fcp_rsp = &cqe->cqe_info.rsp_info;
cqe              2503 drivers/scsi/qedf/qedf_io.c 	struct fcoe_cqe *cqe)
cqe              2507 drivers/scsi/qedf/qedf_io.c 	uint16_t pktlen = cqe->cqe_info.unsolic_info.pkt_len;
cqe              2516 drivers/scsi/qedf/qedf_io.c 	p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
cqe              2587 drivers/scsi/qedf/qedf_io.c 	memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
cqe              2095 drivers/scsi/qedf/qedf_main.c 	struct fcoe_cqe *cqe;
cqe              2121 drivers/scsi/qedf/qedf_main.c 		cqe = &que->cq[que->cq_cons_idx];
cqe              2123 drivers/scsi/qedf/qedf_main.c 		comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
cqe              2133 drivers/scsi/qedf/qedf_main.c 			qedf_process_unsol_compl(qedf, fp->sb_id, cqe);
cqe              2141 drivers/scsi/qedf/qedf_main.c 		xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
cqe              2169 drivers/scsi/qedf/qedf_main.c 		memcpy(&io_work->cqe, cqe, sizeof(struct fcoe_cqe));
cqe              2568 drivers/scsi/qedf/qedf_main.c 	comp_type = (io_work->cqe.cqe_data >>
cqe              2575 drivers/scsi/qedf/qedf_main.c 		qedf_process_cqe(io_work->qedf, &io_work->cqe);
cqe              2673 drivers/scsi/qedf/qedf_main.c void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe)
cqe              2680 drivers/scsi/qedf/qedf_main.c 	comp_type = (cqe->cqe_data >> FCOE_CQE_CQE_TYPE_SHIFT) &
cqe              2683 drivers/scsi/qedf/qedf_main.c 	xid = cqe->cqe_data & FCOE_CQE_TASK_ID_MASK;
cqe              2718 drivers/scsi/qedf/qedf_main.c 			qedf_scsi_completion(qedf, cqe, io_req);
cqe              2721 drivers/scsi/qedf/qedf_main.c 			qedf_process_els_compl(qedf, cqe, io_req);
cqe              2724 drivers/scsi/qedf/qedf_main.c 			qedf_process_tmf_compl(qedf, cqe, io_req);
cqe              2727 drivers/scsi/qedf/qedf_main.c 			qedf_process_seq_cleanup_compl(qedf, cqe, io_req);
cqe              2735 drivers/scsi/qedf/qedf_main.c 		qedf_process_error_detect(qedf, cqe, io_req);
cqe              2741 drivers/scsi/qedf/qedf_main.c 		qedf_process_cleanup_compl(qedf, cqe, io_req);
cqe              2747 drivers/scsi/qedf/qedf_main.c 		qedf_process_abts_compl(qedf, cqe, io_req);
cqe              2763 drivers/scsi/qedf/qedf_main.c 		qedf_process_warning_compl(qedf, cqe, io_req);
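
qedf packs both the completion type and the task id into a single cqe_data word and recovers them with shift-and-mask before dispatching on the type, as in qedf_process_cqe() above. A sketch of the decode; the bit positions below are invented, since the driver's FCOE_CQE_* constants define the real layout.

/* Shift/mask decode of a packed CQE word, after qedf_process_cqe().
 * The bit layout here is invented for the sketch. */
#include <stdint.h>
#include <stdio.h>

#define TASK_ID_MASK   0xFFFFu   /* assumed: low 16 bits = task id */
#define TYPE_SHIFT     16        /* assumed: next 3 bits = type */
#define TYPE_MASK      0x7u

enum comp_type { COMP_IO = 0, COMP_ELS = 1, COMP_ABTS = 2 };

static void process_cqe(uint32_t cqe_data)
{
	uint32_t type = (cqe_data >> TYPE_SHIFT) & TYPE_MASK;
	uint32_t xid  = cqe_data & TASK_ID_MASK;

	switch (type) {
	case COMP_IO:   printf("scsi completion, xid %u\n", xid); break;
	case COMP_ELS:  printf("els completion, xid %u\n", xid);  break;
	case COMP_ABTS: printf("abts completion, xid %u\n", xid); break;
	default:        printf("unknown type %u\n", type);        break;
	}
}

int main(void)
{
	process_cqe((COMP_ELS << TYPE_SHIFT) | 0x1234);
	return 0;
}
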
cqe               177 drivers/scsi/qedi/qedi.h 	struct iscsi_cqe_solicited cqe;
cqe               363 drivers/scsi/qedi/qedi.h 	union iscsi_cqe cqe;
cqe                31 drivers/scsi/qedi/qedi_fw.c 				     union iscsi_cqe *cqe,
cqe                42 drivers/scsi/qedi/qedi_fw.c 	cqe_logout_response = &cqe->cqe_common.iscsi_hdr.logout_response;
cqe                50 drivers/scsi/qedi/qedi_fw.c 	resp_hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
cqe                81 drivers/scsi/qedi/qedi_fw.c 				   union iscsi_cqe *cqe,
cqe                96 drivers/scsi/qedi/qedi_fw.c 	cqe_text_response = &cqe->cqe_common.iscsi_hdr.text_response;
cqe               108 drivers/scsi/qedi/qedi_fw.c 	resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
cqe               181 drivers/scsi/qedi/qedi_fw.c 				  union iscsi_cqe *cqe,
cqe               193 drivers/scsi/qedi/qedi_fw.c 	cqe_tmp_response = &cqe->cqe_common.iscsi_hdr.tmf_response;
cqe               217 drivers/scsi/qedi/qedi_fw.c 	resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
cqe               252 drivers/scsi/qedi/qedi_fw.c 				    union iscsi_cqe *cqe,
cqe               266 drivers/scsi/qedi/qedi_fw.c 	cqe_login_response = &cqe->cqe_common.iscsi_hdr.login_response;
cqe               279 drivers/scsi/qedi/qedi_fw.c 	resp_hdr_ptr->itt = build_itt(cqe->cqe_solicited.itid,
cqe               313 drivers/scsi/qedi/qedi_fw.c 				struct iscsi_cqe_unsolicited *cqe,
cqe               324 drivers/scsi/qedi/qedi_fw.c 	idx = cqe->rqe_opaque;
cqe               333 drivers/scsi/qedi/qedi_fw.c 		  "rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
cqe               336 drivers/scsi/qedi/qedi_fw.c 		  "unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
cqe               337 drivers/scsi/qedi/qedi_fw.c 	switch (cqe->unsol_cqe_type) {
cqe               352 drivers/scsi/qedi/qedi_fw.c 				struct iscsi_cqe_unsolicited *cqe,
cqe               360 drivers/scsi/qedi/qedi_fw.c 	idx = cqe->rqe_opaque;
cqe               391 drivers/scsi/qedi/qedi_fw.c 				      struct iscsi_cqe_unsolicited *cqe,
cqe               398 drivers/scsi/qedi/qedi_fw.c 	qedi_get_rq_bdq_buf(qedi, cqe, bdq_data, pdu_len);
cqe               399 drivers/scsi/qedi/qedi_fw.c 	qedi_put_rq_bdq_buf(qedi, cqe, (num_bdqs + 1));
cqe               403 drivers/scsi/qedi/qedi_fw.c 				   union iscsi_cqe *cqe,
cqe               419 drivers/scsi/qedi/qedi_fw.c 	cqe_nop_in = &cqe->cqe_common.iscsi_hdr.nop_in;
cqe               433 drivers/scsi/qedi/qedi_fw.c 	if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
cqe               435 drivers/scsi/qedi/qedi_fw.c 		qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
cqe               447 drivers/scsi/qedi/qedi_fw.c 		hdr->itt = build_itt(cqe->cqe_solicited.itid,
cqe               475 drivers/scsi/qedi/qedi_fw.c 				    union iscsi_cqe *cqe,
cqe               491 drivers/scsi/qedi/qedi_fw.c 	cqe_async_msg = &cqe->cqe_common.iscsi_hdr.async_msg;
cqe               496 drivers/scsi/qedi/qedi_fw.c 	if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
cqe               498 drivers/scsi/qedi/qedi_fw.c 		qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
cqe               529 drivers/scsi/qedi/qedi_fw.c 				     union iscsi_cqe *cqe,
cqe               542 drivers/scsi/qedi/qedi_fw.c 	cqe_reject = &cqe->cqe_common.iscsi_hdr.reject;
cqe               547 drivers/scsi/qedi/qedi_fw.c 	if (cqe->cqe_common.cqe_type == ISCSI_CQE_TYPE_UNSOLICITED) {
cqe               549 drivers/scsi/qedi/qedi_fw.c 		qedi_unsol_pdu_adjust_bdq(qedi, &cqe->cqe_unsolicited,
cqe               571 drivers/scsi/qedi/qedi_fw.c 				 union iscsi_cqe *cqe,
cqe               585 drivers/scsi/qedi/qedi_fw.c 	iscsi_cid  = cqe->cqe_common.conn_id;
cqe               588 drivers/scsi/qedi/qedi_fw.c 	cqe_data_in = &cqe->cqe_common.iscsi_hdr.data_in;
cqe               590 drivers/scsi/qedi/qedi_fw.c 		cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
cqe               627 drivers/scsi/qedi/qedi_fw.c 	hdr->itt = build_itt(cqe->cqe_solicited.itid, conn->session->age);
cqe               677 drivers/scsi/qedi/qedi_fw.c 				  union iscsi_cqe *cqe,
cqe               684 drivers/scsi/qedi/qedi_fw.c 	hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
cqe               690 drivers/scsi/qedi/qedi_fw.c 		qedi_scsi_completion(qedi, cqe, task, iscsi_conn);
cqe               693 drivers/scsi/qedi/qedi_fw.c 		qedi_process_login_resp(qedi, cqe, task, conn);
cqe               696 drivers/scsi/qedi/qedi_fw.c 		qedi_process_tmf_resp(qedi, cqe, task, conn);
cqe               699 drivers/scsi/qedi/qedi_fw.c 		qedi_process_text_resp(qedi, cqe, task, conn);
cqe               702 drivers/scsi/qedi/qedi_fw.c 		qedi_process_logout_resp(qedi, cqe, task, conn);
cqe               705 drivers/scsi/qedi/qedi_fw.c 		qedi_process_nopin_mesg(qedi, cqe, task, conn, que_idx);
cqe               713 drivers/scsi/qedi/qedi_fw.c 					  struct iscsi_cqe_solicited *cqe,
cqe               723 drivers/scsi/qedi/qedi_fw.c 		  cqe->itid, cmd->task_id);
cqe               734 drivers/scsi/qedi/qedi_fw.c 					  struct iscsi_cqe_solicited *cqe,
cqe               739 drivers/scsi/qedi/qedi_fw.c 	u32 proto_itt = cqe->itid;
cqe               751 drivers/scsi/qedi/qedi_fw.c 	iscsi_cid = cqe->conn_id;
cqe               755 drivers/scsi/qedi/qedi_fw.c 			  "icid not found 0x%x\n", cqe->conn_id);
cqe               828 drivers/scsi/qedi/qedi_fw.c 		qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
cqe               833 drivers/scsi/qedi/qedi_fw.c 			  cqe->itid, protoitt, qedi_conn->cmd_cleanup_cmpl,
cqe               840 drivers/scsi/qedi/qedi_fw.c 				    cqe->itid, qedi_conn->iscsi_conn_id);
cqe               848 drivers/scsi/qedi/qedi_fw.c 			  cqe->itid, qedi_conn->iscsi_conn_id);
cqe               849 drivers/scsi/qedi/qedi_fw.c 		qedi_clear_task_idx(qedi_conn->qedi, cqe->itid);
cqe               852 drivers/scsi/qedi/qedi_fw.c 		qedi_get_proto_itt(qedi, cqe->itid, &ptmp_itt);
cqe               857 drivers/scsi/qedi/qedi_fw.c 			 protoitt, cqe->itid, qedi_conn->iscsi_conn_id, task);
cqe               864 drivers/scsi/qedi/qedi_fw.c 	union iscsi_cqe *cqe = &work->cqe;
cqe               876 drivers/scsi/qedi/qedi_fw.c 	comp_type = cqe->cqe_common.cqe_type;
cqe               877 drivers/scsi/qedi/qedi_fw.c 	hdr_opcode = cqe->cqe_common.iscsi_hdr.common.hdr_first_byte;
cqe               879 drivers/scsi/qedi/qedi_fw.c 		cqe->cqe_common.error_bitmap.error_bits.cqe_error_status_bits;
cqe               883 drivers/scsi/qedi/qedi_fw.c 		  cqe->cqe_common.conn_id, comp_type, hdr_opcode);
cqe               890 drivers/scsi/qedi/qedi_fw.c 	iscsi_cid  = cqe->cqe_common.conn_id;
cqe               921 drivers/scsi/qedi/qedi_fw.c 		    (cqe->cqe_solicited.itid != (u16)RESERVED_ITT)) {
cqe               922 drivers/scsi/qedi/qedi_fw.c 			qedi_process_nopin_local_cmpl(qedi, &cqe->cqe_solicited,
cqe               925 drivers/scsi/qedi/qedi_fw.c 			cqe->cqe_solicited.itid =
cqe               926 drivers/scsi/qedi/qedi_fw.c 					       qedi_get_itt(cqe->cqe_solicited);
cqe               928 drivers/scsi/qedi/qedi_fw.c 			qedi_mtask_completion(qedi, cqe, task, q_conn, que_idx);
cqe               934 drivers/scsi/qedi/qedi_fw.c 			qedi_process_nopin_mesg(qedi, cqe, task, q_conn,
cqe               938 drivers/scsi/qedi/qedi_fw.c 			qedi_process_async_mesg(qedi, cqe, task, q_conn,
cqe               942 drivers/scsi/qedi/qedi_fw.c 			qedi_process_reject_mesg(qedi, cqe, task, q_conn,
cqe               952 drivers/scsi/qedi/qedi_fw.c 		qedi_process_cmd_cleanup_resp(qedi, &cqe->cqe_solicited, task,
cqe               231 drivers/scsi/qedi/qedi_iscsi.h #define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16)
cqe              1138 drivers/scsi/qedi/qedi_main.c static int qedi_queue_cqe(struct qedi_ctx *qedi, union iscsi_cqe *cqe,
cqe              1148 drivers/scsi/qedi/qedi_main.c 	iscsi_cid  = cqe->cqe_common.conn_id;
cqe              1158 drivers/scsi/qedi/qedi_main.c 	switch (cqe->cqe_common.cqe_type) {
cqe              1161 drivers/scsi/qedi/qedi_main.c 		qedi_cmd = qedi_get_cmd_from_tid(qedi, cqe->cqe_solicited.itid);
cqe              1168 drivers/scsi/qedi/qedi_main.c 		memcpy(&qedi_cmd->cqe_work.cqe, cqe, sizeof(union iscsi_cqe));
cqe              1183 drivers/scsi/qedi/qedi_main.c 		memcpy(&qedi_work->cqe, cqe, sizeof(union iscsi_cqe));
cqe              1204 drivers/scsi/qedi/qedi_main.c 	union iscsi_cqe *cqe;
cqe              1228 drivers/scsi/qedi/qedi_main.c 		cqe = &que->cq[que->cq_cons_idx];
cqe              1232 drivers/scsi/qedi/qedi_main.c 			  cqe, prod_idx, que->cq_cons_idx);
cqe              1234 drivers/scsi/qedi/qedi_main.c 		ret = qedi_queue_cqe(qedi, cqe, fp->sb_id, p);
cqe              1238 drivers/scsi/qedi/qedi_main.c 				  que->cq_cons_idx, cqe->cqe_common.conn_id);
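
qedi models the completion as a union iscsi_cqe: cqe_common carries the type, connection id, and embedded iSCSI header; cqe_solicited adds the task id (itid) for command responses; cqe_unsolicited carries the RQ buffer handle (rqe_opaque) for NOP-In, async, and reject PDUs. qedi_queue_cqe() copies the entire union into a work element before deferring. A sketch of that union plus two-level dispatch, with invented field widths and type codes:

/* Two-level dispatch over a CQE union, after the qedi code above;
 * field widths and type codes are invented. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { CQE_SOLICITED = 1, CQE_UNSOLICITED = 2 };

struct cqe_common      { uint8_t cqe_type; uint16_t conn_id; uint8_t opcode; };
struct cqe_solicited   { struct cqe_common c; uint16_t itid; };
struct cqe_unsolicited { struct cqe_common c; uint32_t rqe_opaque; };

union iscsi_cqe_sketch {
	struct cqe_common      common;
	struct cqe_solicited   solicited;
	struct cqe_unsolicited unsolicited;
};

static void queue_cqe(const union iscsi_cqe_sketch *cqe)
{
	union iscsi_cqe_sketch work;

	memcpy(&work, cqe, sizeof(work));   /* defer a private copy */

	switch (work.common.cqe_type) {
	case CQE_SOLICITED:
		printf("response for task %u on conn %u\n",
		       (unsigned)work.solicited.itid,
		       (unsigned)work.common.conn_id);
		break;
	case CQE_UNSOLICITED:
		printf("unsolicited PDU, rq buffer %u\n",
		       work.unsolicited.rqe_opaque);
		break;
	}
}

int main(void)
{
	union iscsi_cqe_sketch cqe = {
		.solicited = { .c = { CQE_SOLICITED, 5, 0x21 }, .itid = 9 },
	};
	queue_cqe(&cqe);
	return 0;
}
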
cqe               270 fs/cifs/smbdirect.c 		container_of(wc->wr_cqe, struct smbd_request, cqe);
cqe               486 fs/cifs/smbdirect.c 		container_of(wc->wr_cqe, struct smbd_response, cqe);
cqe               746 fs/cifs/smbdirect.c 	request->cqe.done = send_done;
cqe               749 fs/cifs/smbdirect.c 	send_wr.wr_cqe = &request->cqe;
cqe               945 fs/cifs/smbdirect.c 	request->cqe.done = send_done;
cqe               948 fs/cifs/smbdirect.c 	send_wr.wr_cqe = &request->cqe;
cqe              1104 fs/cifs/smbdirect.c 	response->cqe.done = recv_done;
cqe              1106 fs/cifs/smbdirect.c 	recv_wr.wr_cqe = &response->cqe;
cqe              2247 fs/cifs/smbdirect.c 	struct ib_cqe *cqe;
cqe              2251 fs/cifs/smbdirect.c 		cqe = wc->wr_cqe;
cqe              2252 fs/cifs/smbdirect.c 		mr = container_of(cqe, struct smbd_mr, cqe);
cqe              2512 fs/cifs/smbdirect.c 	smbdirect_mr->cqe.done = register_mr_done;
cqe              2513 fs/cifs/smbdirect.c 	reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;
cqe              2552 fs/cifs/smbdirect.c 	struct ib_cqe *cqe;
cqe              2554 fs/cifs/smbdirect.c 	cqe = wc->wr_cqe;
cqe              2555 fs/cifs/smbdirect.c 	smbdirect_mr = container_of(cqe, struct smbd_mr, cqe);
cqe              2580 fs/cifs/smbdirect.c 		smbdirect_mr->cqe.done = local_inv_done;
cqe              2581 fs/cifs/smbdirect.c 		wr->wr_cqe = &smbdirect_mr->cqe;
cqe               236 fs/cifs/smbdirect.h 	struct ib_cqe cqe;
cqe               252 fs/cifs/smbdirect.h 	struct ib_cqe cqe;
cqe               300 fs/cifs/smbdirect.h 	struct ib_cqe		cqe;
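
fs/cifs/smbdirect.c illustrates the RDMA-core completion idiom: embed a struct ib_cqe in the per-request context, point its .done at the handler, hang the cqe off the work request's wr_cqe, and recover the context in the handler with container_of(). A userspace model of that round trip; the ib_cqe, ib_wc, and request types below are simplified stand-ins so it compiles on its own.

/* container_of-based completion context recovery, as in smbdirect.
 * The ib_* types here are simplified stand-ins. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_wc;                                /* forward declaration */
struct ib_cqe { void (*done)(struct ib_wc *wc); };
struct ib_wc  { struct ib_cqe *wr_cqe; int status; };

struct smbd_request_sketch {
	int tag;
	struct ib_cqe cqe;                   /* embedded completion hook */
};

static void send_done(struct ib_wc *wc)
{
	struct smbd_request_sketch *req =
		container_of(wc->wr_cqe, struct smbd_request_sketch, cqe);

	printf("request %d completed, status %d\n", req->tag, wc->status);
}

int main(void)
{
	struct smbd_request_sketch req = { .tag = 7 };
	struct ib_wc wc;

	req.cqe.done = send_done;            /* post side */
	wc.wr_cqe = &req.cqe;                /* what the CQ would hand back */
	wc.status = 0;
	wc.wr_cqe->done(&wc);                /* completion side */
	return 0;
}
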
cqe               573 fs/io_uring.c  	struct io_uring_cqe *cqe;
cqe               580 fs/io_uring.c  	cqe = io_get_cqring(ctx);
cqe               581 fs/io_uring.c  	if (cqe) {
cqe               582 fs/io_uring.c  		WRITE_ONCE(cqe->user_data, ki_user_data);
cqe               583 fs/io_uring.c  		WRITE_ONCE(cqe->res, res);
cqe               584 fs/io_uring.c  		WRITE_ONCE(cqe->flags, 0);
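
The fs/io_uring.c hits show the producer side of the CQ ring: grab the next free CQE slot and publish user_data, res, and flags with WRITE_ONCE() so the stores cannot be torn or reordered by the compiler relative to the tail update that later makes the entry visible to userspace. Sketched here with a volatile store standing in for WRITE_ONCE() and simplified ring bookkeeping:

/* CQE publication in the io_uring style; WRITE_ONCE is modelled
 * with a volatile store, and the ring bookkeeping is simplified. */
#include <stdint.h>
#include <stdio.h>

#define WRITE_ONCE_SKETCH(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

struct uring_cqe { uint64_t user_data; int32_t res; uint32_t flags; };

#define CQ_ENTRIES 4
static struct uring_cqe cqes[CQ_ENTRIES];
static unsigned int cq_tail;

static void post_cqe(uint64_t user_data, int32_t res)
{
	struct uring_cqe *cqe = &cqes[cq_tail & (CQ_ENTRIES - 1)];

	WRITE_ONCE_SKETCH(cqe->user_data, user_data);
	WRITE_ONCE_SKETCH(cqe->res, res);
	WRITE_ONCE_SKETCH(cqe->flags, 0);
	cq_tail++;   /* the kernel publishes this with a release barrier */
}

int main(void)
{
	post_cqe(0xdeadbeef, 4096);
	printf("cqe[0]: data %#llx res %d\n",
	       (unsigned long long)cqes[0].user_data, cqes[0].res);
	return 0;
}
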
cqe               810 include/linux/mlx5/device.h static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
cqe               812 include/linux/mlx5/device.h 	return (cqe->op_own >> 2) & 0x3;
cqe               815 include/linux/mlx5/device.h static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
cqe               817 include/linux/mlx5/device.h 	return cqe->op_own >> 4;
cqe               820 include/linux/mlx5/device.h static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
cqe               822 include/linux/mlx5/device.h 	return (cqe->lro_tcppsh_abort_dupack >> 6) & 1;
cqe               825 include/linux/mlx5/device.h static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
cqe               827 include/linux/mlx5/device.h 	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
cqe               830 include/linux/mlx5/device.h static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
cqe               832 include/linux/mlx5/device.h 	return (cqe->l4_l3_hdr_type >> 2) & 0x3;
cqe               835 include/linux/mlx5/device.h static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
cqe               837 include/linux/mlx5/device.h 	return cqe->outer_l3_tunneled & 0x1;
cqe               840 include/linux/mlx5/device.h static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
cqe               842 include/linux/mlx5/device.h 	return cqe->l4_l3_hdr_type & 0x1;
cqe               845 include/linux/mlx5/device.h static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
cqe               849 include/linux/mlx5/device.h 	hi = be32_to_cpu(cqe->timestamp_h);
cqe               850 include/linux/mlx5/device.h 	lo = be32_to_cpu(cqe->timestamp_l);
cqe               863 include/linux/mlx5/device.h static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
cqe               865 include/linux/mlx5/device.h 	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
cqe               875 include/linux/mlx5/device.h static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
cqe               877 include/linux/mlx5/device.h 	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
cqe               882 include/linux/mlx5/device.h static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
cqe               884 include/linux/mlx5/device.h 	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;
cqe               889 include/linux/mlx5/device.h static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
cqe               891 include/linux/mlx5/device.h 	return be16_to_cpu(cqe->wqe_counter);
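
The mlx5 helpers above decode a 64-byte CQE by masking subfields out of shared bytes: op_own holds the opcode in the high nibble and the format in bits 3:2, l4_l3_hdr_type multiplexes the L4/L3 header types plus a VLAN flag in bit 0, and the timestamp arrives as two big-endian 32-bit halves (presumably recombined as hi << 32 | lo; the recombination is an assumption here). A standalone rendering of a few of those accessors:

/* mlx5-style CQE field extraction, reproduced from the accessors
 * above with stand-in types; the hi/lo recombination is assumed. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* ntohl()/htonl() as be32 conversion stand-ins */

struct cqe64_sketch {
	uint8_t  op_own;          /* opcode in bits 7:4, format in 3:2 */
	uint8_t  l4_l3_hdr_type;  /* L4 type 6:4, L3 type 3:2, VLAN bit 0 */
	uint32_t timestamp_h;     /* big-endian high half */
	uint32_t timestamp_l;     /* big-endian low half */
};

static uint8_t get_opcode(const struct cqe64_sketch *cqe)
{
	return cqe->op_own >> 4;
}

static int has_vlan(const struct cqe64_sketch *cqe)
{
	return cqe->l4_l3_hdr_type & 0x1;
}

static uint64_t get_ts(const struct cqe64_sketch *cqe)
{
	uint64_t hi = ntohl(cqe->timestamp_h);
	uint64_t lo = ntohl(cqe->timestamp_l);

	return (hi << 32) | lo;
}

int main(void)
{
	struct cqe64_sketch cqe = {
		.op_own = 0x20,                 /* opcode 2 */
		.l4_l3_hdr_type = 0x01,         /* VLAN present */
		.timestamp_h = htonl(1),
		.timestamp_l = htonl(2),
	};

	printf("opcode %u vlan %d ts %llu\n", (unsigned)get_opcode(&cqe),
	       has_vlan(&cqe), (unsigned long long)get_ts(&cqe));
	return 0;
}
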
cqe                51 include/linux/nvme-fc.h 	struct nvme_completion	cqe;
cqe               141 include/linux/nvme-tcp.h 	struct nvme_completion	cqe;
cqe               348 include/linux/qed/eth_common.h 	union eth_rx_cqe cqe;
cqe               350 include/linux/qed/qed_eth_if.h 				  struct eth_slow_path_rx_cqe *cqe);
cqe               368 include/rdma/ib_verbs.h 	unsigned int	cqe;
cqe              1548 include/rdma/ib_verbs.h 	int               	cqe;
cqe              2393 include/rdma/ib_verbs.h 	int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
cqe              3833 include/rdma/ib_verbs.h int ib_resize_cq(struct ib_cq *cq, int cqe);
cqe                63 include/rdma/rw.h 		u8 port_num, struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
cqe                65 include/rdma/rw.h 		struct ib_cqe *cqe, struct ib_send_wr *chain_wr);
cqe              1612 include/trace/events/rpcrdma.h 		__field(const void *, cqe)
cqe              1618 include/trace/events/rpcrdma.h 		__entry->cqe = wc->wr_cqe;
cqe              1627 include/trace/events/rpcrdma.h 		__entry->cqe, rdma_show_wc_status(__entry->status),
cqe              1647 include/trace/events/rpcrdma.h 		__field(const void *, cqe)
cqe              1653 include/trace/events/rpcrdma.h 		__entry->cqe = wr->wr_cqe;
cqe              1660 include/trace/events/rpcrdma.h 		__entry->cqe, __entry->num_sge,
cqe              1676 include/trace/events/rpcrdma.h 		__field(const void *, cqe)
cqe              1681 include/trace/events/rpcrdma.h 		__entry->cqe = wr->wr_cqe;
cqe              1686 include/trace/events/rpcrdma.h 		__entry->cqe, __entry->status
cqe              1698 include/trace/events/rpcrdma.h 		__field(const void *, cqe)
cqe              1705 include/trace/events/rpcrdma.h 		__entry->cqe = wc->wr_cqe;
cqe              1717 include/trace/events/rpcrdma.h 		__entry->cqe, __entry->byte_len,
cqe              1725 include/trace/events/rpcrdma.h 		const void *cqe,
cqe              1729 include/trace/events/rpcrdma.h 	TP_ARGS(cqe, sqecount),
cqe              1732 include/trace/events/rpcrdma.h 		__field(const void *, cqe)
cqe              1737 include/trace/events/rpcrdma.h 		__entry->cqe = cqe;
cqe              1742 include/trace/events/rpcrdma.h 		__entry->cqe, __entry->sqecount
cqe               407 include/uapi/rdma/ib_user_verbs.h 	__u32 cqe;
cqe               421 include/uapi/rdma/ib_user_verbs.h 	__u32 cqe;
cqe               431 include/uapi/rdma/ib_user_verbs.h 	__u32 cqe;
cqe               444 include/uapi/rdma/ib_user_verbs.h 	__u32 cqe;
cqe               449 include/uapi/rdma/ib_user_verbs.h 	__u32 cqe;
cqe               106 net/9p/trans_rdma.c 	struct ib_cqe cqe;
cqe               295 net/9p/trans_rdma.c 		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
cqe               346 net/9p/trans_rdma.c 		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
cqe               395 net/9p/trans_rdma.c 	c->cqe.done = recv_done;
cqe               402 net/9p/trans_rdma.c 	wr.wr_cqe = &c->cqe;
cqe               487 net/9p/trans_rdma.c 	c->cqe.done = send_done;
cqe               494 net/9p/trans_rdma.c 	wr.wr_cqe = &c->cqe;
cqe               487 net/rds/ib_cm.c 	cq_attr.cqe = ic->i_send_ring.w_nr + fr_queue_space + 1;
cqe               501 net/rds/ib_cm.c 	cq_attr.cqe = ic->i_recv_ring.w_nr;
cqe               473 net/smc/smc_ib.c 		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
cqe               481 net/smc/smc_ib.c 		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
cqe               403 net/sunrpc/xprtrdma/frwr_ops.c 	struct ib_cqe *cqe = wc->wr_cqe;
cqe               405 net/sunrpc/xprtrdma/frwr_ops.c 		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
cqe               485 net/sunrpc/xprtrdma/frwr_ops.c 	struct ib_cqe *cqe = wc->wr_cqe;
cqe               487 net/sunrpc/xprtrdma/frwr_ops.c 		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
cqe               504 net/sunrpc/xprtrdma/frwr_ops.c 	struct ib_cqe *cqe = wc->wr_cqe;
cqe               506 net/sunrpc/xprtrdma/frwr_ops.c 		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
cqe               606 net/sunrpc/xprtrdma/frwr_ops.c 	struct ib_cqe *cqe = wc->wr_cqe;
cqe               608 net/sunrpc/xprtrdma/frwr_ops.c 		container_of(cqe, struct rpcrdma_frwr, fr_cqe);
cqe               308 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	struct ib_cqe *cqe = wc->wr_cqe;
cqe               314 net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
cqe               204 net/sunrpc/xprtrdma/svc_rdma_rw.c 	struct ib_cqe *cqe = wc->wr_cqe;
cqe               206 net/sunrpc/xprtrdma/svc_rdma_rw.c 			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
cqe               262 net/sunrpc/xprtrdma/svc_rdma_rw.c 	struct ib_cqe *cqe = wc->wr_cqe;
cqe               264 net/sunrpc/xprtrdma/svc_rdma_rw.c 			container_of(cqe, struct svc_rdma_chunk_ctxt, cc_cqe);
cqe               305 net/sunrpc/xprtrdma/svc_rdma_rw.c 	struct ib_cqe *cqe;
cqe               312 net/sunrpc/xprtrdma/svc_rdma_rw.c 	cqe = &cc->cc_cqe;
cqe               318 net/sunrpc/xprtrdma/svc_rdma_rw.c 					   rdma->sc_port_num, cqe, first_wr);
cqe               319 net/sunrpc/xprtrdma/svc_rdma_rw.c 		cqe = NULL;
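
The svc_rdma_rw.c hits appear to show a single-signal chaining trick: the caller attaches its cc_cqe to the first chunk context's WR chain via rdma_rw_ctx_wrs() and then sets the pointer to NULL, so every later context in the chain posts unsignalled and the whole set generates exactly one completion callback. A toy model of "signal only one link in the chain", with invented types:

/* Single-completion chaining modelled on the svc_rdma loop above:
 * only one link carries a cqe, the rest pass NULL. */
#include <stddef.h>
#include <stdio.h>

struct cqe_hook { void (*done)(void); };

static void chain_done(void) { printf("whole chain complete\n"); }

/* Stand-in for posting one context's WR chain; a non-NULL hook
 * means this chain's last WR will signal a completion. */
static const struct cqe_hook *post_ctx_chain(int idx,
					     const struct cqe_hook *hook)
{
	printf("ctx %d posted, signalled=%s\n", idx, hook ? "yes" : "no");
	return hook;
}

int main(void)
{
	struct cqe_hook cc_cqe = { .done = chain_done };
	const struct cqe_hook *cqe = &cc_cqe;
	const struct cqe_hook *signalled = NULL;

	for (int i = 0; i < 3; i++) {
		if (post_ctx_chain(i, cqe))
			signalled = cqe;
		cqe = NULL;          /* later contexts post unsignalled */
	}
	if (signalled)
		signalled->done();   /* one completion for the whole set */
	return 0;
}
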
cqe               261 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	struct ib_cqe *cqe = wc->wr_cqe;
cqe               269 net/sunrpc/xprtrdma/svc_rdma_sendto.c 	ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
cqe               134 net/sunrpc/xprtrdma/verbs.c 	struct ib_cqe *cqe = wc->wr_cqe;
cqe               136 net/sunrpc/xprtrdma/verbs.c 		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);
cqe               152 net/sunrpc/xprtrdma/verbs.c 	struct ib_cqe *cqe = wc->wr_cqe;
cqe               153 net/sunrpc/xprtrdma/verbs.c 	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
cqe               247 tools/io_uring/io_uring-bench.c 	struct io_uring_cqe *cqe;
cqe               257 tools/io_uring/io_uring-bench.c 		cqe = &ring->cqes[head & cq_ring_mask];
cqe               259 tools/io_uring/io_uring-bench.c 			f = (struct file *) (uintptr_t) cqe->user_data;
cqe               261 tools/io_uring/io_uring-bench.c 			if (cqe->res != BS) {
cqe               262 tools/io_uring/io_uring-bench.c 				printf("io: unexpected ret=%d\n", cqe->res);
cqe               263 tools/io_uring/io_uring-bench.c 				if (polled && cqe->res == -EOPNOTSUPP)
cqe               126 tools/io_uring/io_uring-cp.c 	struct io_uring_cqe *cqe;
cqe               175 tools/io_uring/io_uring-cp.c 				ret = io_uring_wait_cqe(ring, &cqe);
cqe               178 tools/io_uring/io_uring-cp.c 				ret = io_uring_peek_cqe(ring, &cqe);
cqe               184 tools/io_uring/io_uring-cp.c 			if (!cqe)
cqe               187 tools/io_uring/io_uring-cp.c 			data = io_uring_cqe_get_data(cqe);
cqe               188 tools/io_uring/io_uring-cp.c 			if (cqe->res < 0) {
cqe               189 tools/io_uring/io_uring-cp.c 				if (cqe->res == -EAGAIN) {
cqe               191 tools/io_uring/io_uring-cp.c 					io_uring_cqe_seen(ring, cqe);
cqe               195 tools/io_uring/io_uring-cp.c 						strerror(-cqe->res));
cqe               197 tools/io_uring/io_uring-cp.c 			} else if ((size_t) cqe->res != data->iov.iov_len) {
cqe               199 tools/io_uring/io_uring-cp.c 				data->iov.iov_base += cqe->res;
cqe               200 tools/io_uring/io_uring-cp.c 				data->iov.iov_len -= cqe->res;
cqe               201 tools/io_uring/io_uring-cp.c 				data->offset += cqe->res;
cqe               203 tools/io_uring/io_uring-cp.c 				io_uring_cqe_seen(ring, cqe);
cqe               220 tools/io_uring/io_uring-cp.c 			io_uring_cqe_seen(ring, cqe);
cqe                80 tools/io_uring/liburing.h 				     struct io_uring_cqe *cqe)
cqe                82 tools/io_uring/liburing.h 	if (cqe) {
cqe               102 tools/io_uring/liburing.h static inline void *io_uring_cqe_get_data(struct io_uring_cqe *cqe)
cqe               104 tools/io_uring/liburing.h 	return (void *) (uintptr_t) cqe->user_data;
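
The tools/io_uring hits close the loop from userspace: a consumer waits for or peeks at a CQE, recovers its per-I/O context from user_data via io_uring_cqe_get_data(), handles errors and short transfers, and finally marks the entry consumed with io_uring_cqe_seen(). A condensed round trip in that style using the consumer calls shown above; the submission half (io_uring_queue_init, io_uring_get_sqe, io_uring_prep_nop, io_uring_submit) is standard liburing, assumed available. Build with -luring.

/* Minimal liburing round trip exercising the consumer-side calls
 * listed above; error handling is pared down for brevity. */
#include <stdio.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int tag = 42;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);               /* no-op request */
	io_uring_sqe_set_data(sqe, &tag);     /* lands in cqe->user_data */
	io_uring_submit(&ring);

	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		int *data = io_uring_cqe_get_data(cqe);

		printf("tag %d completed, res %d\n", *data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);  /* advance the CQ head */
	}
	io_uring_queue_exit(&ring);
	return 0;
}
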