Lines Matching refs:cqe

85 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
88 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
91 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
92 return cqe; in get_sw_cqe()
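
Taken together, the get_sw_cqe() matches show the software-ownership test: the consumer index is masked with cq->ibcq.cqe (ring size minus one), 128-byte CQEs keep their reporting half in the upper 64 bytes, and a CQE belongs to software only when its owner bit matches the parity of the index's wrap count. A minimal userspace sketch of that test; the types and names here are simplified stand-ins, not the kernel's:

	#include <stdbool.h>
	#include <stdint.h>

	#define CQE_OWNER_MASK 0x1              /* bit 0 of op_own */

	struct cqe64 {
		uint8_t rsvd[63];
		uint8_t op_own;                 /* opcode in high nibble, owner in bit 0 */
	};

	/* ci is the free-running consumer index; cqe_cnt is a power of two.
	 * ci & cqe_cnt extracts the wrap-parity bit that hardware toggles
	 * each time it laps the ring. */
	static bool cqe_is_sw_owned(const struct cqe64 *cqe, uint32_t ci,
				    uint32_t cqe_cnt)
	{
		bool owner  = cqe->op_own & CQE_OWNER_MASK;
		bool parity = !!(ci & cqe_cnt);

		return owner == parity;         /* XOR == 0 in the kernel's test */
	}
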
121 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_good_req() argument
125 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { in handle_good_req()
139 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_good_req()
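
The handle_good_req() matches pull the send opcode out of the top byte of sop_drop_qpn and the transfer length out of byte_cnt, both stored big-endian. A sketch of that extraction; be32toh() from <endian.h> stands in for the kernel's be32_to_cpu():

	#include <stdint.h>
	#include <endian.h>

	/* sop_drop_qpn packs the opcode in bits 31..24 and the QP number
	 * in bits 23..0 */
	static inline uint8_t cqe_send_opcode(uint32_t sop_drop_qpn_be)
	{
		return be32toh(sop_drop_qpn_be) >> 24;
	}

	static inline uint32_t cqe_byte_len(uint32_t byte_cnt_be)
	{
		return be32toh(byte_cnt_be);
	}
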
171 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_responder() argument
185 be32_to_cpu(cqe->srqn)); in handle_responder()
191 wqe_ctr = be16_to_cpu(cqe->wqe_counter); in handle_responder()
202 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_responder()
204 switch (cqe->op_own >> 4) { in handle_responder()
208 wc->ex.imm_data = cqe->imm_inval_pkey; in handle_responder()
217 wc->ex.imm_data = cqe->imm_inval_pkey; in handle_responder()
222 wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); in handle_responder()
225 wc->slid = be16_to_cpu(cqe->slid); in handle_responder()
226 wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; in handle_responder()
227 wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; in handle_responder()
228 wc->dlid_path_bits = cqe->ml_path; in handle_responder()
229 g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; in handle_responder()
232 u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff; in handle_responder()
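
On the receive side, handle_responder() reads the opcode from the high nibble of op_own, takes imm_inval_pkey either as raw big-endian immediate data or as a byte-swapped invalidate rkey/pkey, and unpacks flags_rqpn: source QP in the low 24 bits, service level in bits 27..24, and the GRH/flags pair in bits 29..28. A sketch of the flags_rqpn unpack with illustrative names:

	#include <stdint.h>
	#include <endian.h>

	struct resp_fields {
		uint32_t src_qp;        /* bits 23..0  */
		uint8_t  sl;            /* bits 27..24 */
		uint8_t  g;             /* bits 29..28: GRH present + flags */
	};

	static struct resp_fields unpack_flags_rqpn(uint32_t flags_rqpn_be)
	{
		uint32_t v = be32toh(flags_rqpn_be);

		return (struct resp_fields){
			.src_qp = v & 0xffffff,
			.sl     = (v >> 24) & 0xf,
			.g      = (v >> 28) & 3,
		};
	}
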
241 static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe) in dump_cqe() argument
243 __be32 *p = (__be32 *)cqe; in dump_cqe()
247 for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4) in dump_cqe()
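
dump_cqe() walks the 64-byte error CQE as big-endian 32-bit words and prints four per line, 16 bytes per iteration. The same loop in standalone form:

	#include <stdio.h>
	#include <stdint.h>
	#include <endian.h>

	#define ERR_CQE_SIZE 64

	static void dump_err_cqe(const void *cqe)
	{
		const uint32_t *p = cqe;
		unsigned int i;

		for (i = 0; i < ERR_CQE_SIZE / 16; i++, p += 4)
			printf("%08x %08x %08x %08x\n",
			       be32toh(p[0]), be32toh(p[1]),
			       be32toh(p[2]), be32toh(p[3]));
	}
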
254 struct mlx5_err_cqe *cqe, in mlx5_handle_error_cqe() argument
259 switch (cqe->syndrome) { in mlx5_handle_error_cqe()
307 wc->vendor_err = cqe->vendor_err_synd; in mlx5_handle_error_cqe()
309 dump_cqe(dev, cqe); in mlx5_handle_error_cqe()
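
mlx5_handle_error_cqe() is essentially a lookup table: switch on the hardware syndrome, set the matching work-completion status, copy the raw vendor syndrome through, and dump the CQE for diagnosis. A trimmed sketch of the pattern; the syndrome values and status names below are illustrative placeholders, not the kernel's full table:

	#include <stdint.h>

	enum wc_status { WC_LOC_LEN_ERR, WC_WR_FLUSH_ERR, WC_GENERAL_ERR };

	struct wc { enum wc_status status; uint8_t vendor_err; };

	static void handle_error_cqe(struct wc *wc, uint8_t syndrome,
				     uint8_t vendor_err_synd)
	{
		switch (syndrome) {
		case 0x01: wc->status = WC_LOC_LEN_ERR;  break;  /* illustrative value */
		case 0x05: wc->status = WC_WR_FLUSH_ERR; break;  /* illustrative value */
		default:   wc->status = WC_GENERAL_ERR;  break;
		}
		wc->vendor_err = vendor_err_synd;   /* raw vendor syndrome, uninterpreted */
	}
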
378 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe, in get_sig_err_item() argument
381 u16 syndrome = be16_to_cpu(cqe->syndrome); in get_sig_err_item()
389 item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16; in get_sig_err_item()
390 item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16; in get_sig_err_item()
394 item->expected = be32_to_cpu(cqe->expected_reftag); in get_sig_err_item()
395 item->actual = be32_to_cpu(cqe->actual_reftag); in get_sig_err_item()
399 item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff; in get_sig_err_item()
400 item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff; in get_sig_err_item()
406 item->sig_err_offset = be64_to_cpu(cqe->err_offset); in get_sig_err_item()
407 item->key = be32_to_cpu(cqe->mkey); in get_sig_err_item()
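
The get_sig_err_item() matches show how a T10-DIF signature error is decoded: depending on which check failed, expected/actual come from the high 16 bits of the transport-signature words (guard), the full 32-bit reftag words, or the low 16 bits (application tag), with the error offset and mkey filled in unconditionally. A sketch with simplified types; the kernel derives the error type from syndrome bits, whereas here it is passed in to keep the sketch small:

	#include <stdint.h>
	#include <endian.h>

	enum sig_type { SIG_GUARD, SIG_REFTAG, SIG_APPTAG };

	struct sig_err_cqe {            /* fields big-endian; layout simplified */
		uint16_t syndrome;
		uint32_t expected_trans_sig, actual_trans_sig;
		uint32_t expected_reftag, actual_reftag;
		uint64_t err_offset;
		uint32_t mkey;
	};

	struct sig_err_item {
		enum sig_type type;
		uint32_t expected, actual;
		uint64_t sig_err_offset;
		uint32_t key;
	};

	static void get_sig_err(const struct sig_err_cqe *cqe, enum sig_type type,
				struct sig_err_item *item)
	{
		item->type = type;
		switch (type) {
		case SIG_GUARD:   /* CRC guard in the high 16 bits */
			item->expected = be32toh(cqe->expected_trans_sig) >> 16;
			item->actual   = be32toh(cqe->actual_trans_sig) >> 16;
			break;
		case SIG_REFTAG:  /* full 32-bit reference tag */
			item->expected = be32toh(cqe->expected_reftag);
			item->actual   = be32toh(cqe->actual_reftag);
			break;
		case SIG_APPTAG:  /* application tag in the low 16 bits */
			item->expected = be32toh(cqe->expected_trans_sig) & 0xffff;
			item->actual   = be32toh(cqe->actual_trans_sig) & 0xffff;
			break;
		}
		item->sig_err_offset = be64toh(cqe->err_offset);
		item->key            = be32toh(cqe->mkey);
	}
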
425 void *cqe; in mlx5_poll_one() local
429 cqe = next_cqe_sw(cq); in mlx5_poll_one()
430 if (!cqe) in mlx5_poll_one()
433 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in mlx5_poll_one()
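
The mlx5_poll_one() matches show the entry to the poll loop: bail out when no software-owned CQE is pending, otherwise locate the 64-byte reporting half (the upper half when 128-byte CQEs are in use) and decode it. A self-contained skeleton; next_sw_cqe() is a stand-in for the kernel's next_cqe_sw() and applies the ownership test from the get_sw_cqe() sketch above:

	#include <stdint.h>
	#include <stddef.h>

	struct cqe64 { uint8_t rsvd[63]; uint8_t op_own; };

	struct cq {
		uint8_t *buf;       /* cqe_cnt entries of cqe_sz bytes each */
		uint32_t cqe_sz;    /* 64 or 128 */
		uint32_t cqe_cnt;   /* power of two */
		uint32_t ci;        /* free-running consumer index */
	};

	static void *next_sw_cqe(struct cq *cq)
	{
		uint8_t *cqe = cq->buf +
			(size_t)(cq->ci & (cq->cqe_cnt - 1)) * cq->cqe_sz;
		struct cqe64 *c64 =
			(struct cqe64 *)(cqe + (cq->cqe_sz == 64 ? 0 : 64));
		uint8_t owner = c64->op_own & 1;

		return owner == !!(cq->ci & cq->cqe_cnt) ? (void *)cqe : NULL;
	}

	static int poll_one(struct cq *cq)
	{
		void *cqe = next_sw_cqe(cq);
		struct cqe64 *cqe64;

		if (!cqe)
			return -1;      /* nothing pending */

		cqe64 = (struct cqe64 *)((uint8_t *)cqe +
					 (cq->cqe_sz == 64 ? 0 : 64));
		/* ... decode cqe64 into a work completion, then cq->ci++ ... */
		(void)cqe64;
		return 0;
	}
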
688 void *cqe; in init_cq_buf() local
692 cqe = get_cqe_from_buf(buf, i, buf->cqe_size); in init_cq_buf()
693 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64; in init_cq_buf()
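
init_cq_buf() visits every slot and stamps the 64-byte reporting half with the "invalid" opcode, so the validity/ownership test rejects the entry until hardware writes a real completion there. A sketch reusing the struct cq and struct cqe64 types from the poll skeleton above; the constant is illustrative (the kernel uses MLX5_CQE_INVALID):

	#define CQE_OPCODE_INVALID 0xf

	static void init_cq_ring(struct cq *cq)
	{
		uint32_t i;

		for (i = 0; i < cq->cqe_cnt; i++) {
			uint8_t *cqe = cq->buf + (size_t)i * cq->cqe_sz;
			struct cqe64 *c64 =
				(struct cqe64 *)(cqe + (cq->cqe_sz == 64 ? 0 : 64));

			c64->op_own = CQE_OPCODE_INVALID << 4;  /* opcode in high nibble */
		}
	}
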
751 int entries = attr->cqe; in mlx5_ib_create_cq()
777 cq->ibcq.cqe = entries - 1; in mlx5_ib_create_cq()
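
The create path takes the requested depth from attr->cqe and reports entries - 1 back through ibcq.cqe, the size-minus-one convention that lets get_sw_cqe() mask indices instead of taking a modulo. A sketch of the arithmetic, assuming (as the driver does) that the ring depth is rounded up to a power of two:

	#include <stdint.h>

	static uint32_t roundup_pow2(uint32_t n)
	{
		uint32_t p = 1;

		while (p < n)
			p <<= 1;
		return p;
	}

	/* e.g. a request for 100 entries yields a 128-slot ring and a
	 * reported depth of 127, usable directly as an index mask */
	static uint32_t cq_reported_depth(uint32_t requested)
	{
		return roundup_pow2(requested) - 1;
	}
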
871 void *cqe, *dest; in __mlx5_ib_cq_clean() local
886 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) in __mlx5_ib_cq_clean()
893 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
894 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in __mlx5_ib_cq_clean()
900 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
903 memcpy(dest, cqe, cq->mcq.cqe_sz); in __mlx5_ib_cq_clean()
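
The __mlx5_ib_cq_clean() matches outline a compaction pass: find the last produced entry (bounded by one full ring, cons_index + ibcq.cqe), then walk back toward the consumer index, counting CQEs that belong to the QP being cleaned and sliding every survivor forward by that count while preserving the destination slot's owner bit. A simplified sketch on the struct cq above; the victim test is passed in as a predicate because the real check reads the QPN out of the CQE:

	#include <string.h>

	static void cq_clean(struct cq *cq, uint32_t prod_index,
			     int (*is_victim)(const struct cqe64 *))
	{
		uint32_t mask = cq->cqe_cnt - 1;
		uint32_t nfreed = 0;

		/* newest -> oldest, stopping at the consumer index */
		while ((int)--prod_index - (int)cq->ci >= 0) {
			uint8_t *cqe = cq->buf +
				(size_t)(prod_index & mask) * cq->cqe_sz;
			struct cqe64 *c64 =
				(struct cqe64 *)(cqe + (cq->cqe_sz == 64 ? 0 : 64));

			if (is_victim(c64)) {
				nfreed++;               /* slot is reclaimed */
			} else if (nfreed) {
				uint8_t *dest = cq->buf +
					(size_t)((prod_index + nfreed) & mask) *
					cq->cqe_sz;
				struct cqe64 *d64 =
					(struct cqe64 *)(dest +
						(cq->cqe_sz == 64 ? 0 : 64));
				uint8_t owner = d64->op_own & 1;

				memcpy(dest, cqe, cq->cqe_sz);
				/* keep dest's owner bit, take the copied opcode */
				d64->op_own = owner | (d64->op_own & ~1);
			}
		}
		cq->ci += nfreed;   /* consumer skips the reclaimed slots */
	}
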
1104 if (entries == ibcq->cqe + 1) in mlx5_ib_resize_cq()
1151 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1170 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
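
mlx5_ib_resize_cq() keeps the same size-minus-one convention: because ibcq->cqe stores the depth as size - 1, a request for entries == ibcq->cqe + 1 asks for the current size and returns early, and both resize paths finish by storing entries - 1 again. A sketch of that early-out with illustrative names:

	#include <stdint.h>

	static int resize_cq(uint32_t *reported_cqe, uint32_t entries)
	{
		if (entries == *reported_cqe + 1)
			return 0;               /* already this deep: nothing to do */

		/* ... allocate the new ring, migrate unpolled CQEs ... */
		*reported_cqe = entries - 1;    /* report size - 1, as on create */
		return 0;
	}
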