Lines matching refs:cqe (drivers/infiniband/hw/mlx5/cq.c)
84 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
87 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
90 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
91 return cqe; in get_sw_cqe()
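
For orientation, a minimal userspace sketch of the ownership test these lines implement; the names and struct layout are illustrative, not the kernel's definitions. The ring holds ibcq.cqe + 1 entries (a power of two), and software owns a CQE only when its owner bit matches the parity of the consumer index's wrap count.

    #include <stddef.h>
    #include <stdint.h>

    #define CQE_OWNER_MASK 0x1  /* stand-in for MLX5_CQE_OWNER_MASK */

    struct cqe64 {
        uint8_t pad[63];
        uint8_t op_own;         /* opcode in high nibble, owner bit in bit 0 */
    };

    /* Return the CQE at consumer index n if software owns it, else NULL.
     * mask == ring_size - 1 with ring_size a power of two, so
     * n & (mask + 1) flips on every wrap of the ring; hardware toggles
     * the owner bit the same way, and the two must agree. */
    static struct cqe64 *sw_cqe(struct cqe64 *ring, uint32_t mask, uint32_t n)
    {
        struct cqe64 *cqe = &ring[n & mask];
        uint8_t hw = cqe->op_own & CQE_OWNER_MASK;
        uint8_t sw = !!(n & (mask + 1));

        return (hw == sw) ? cqe : NULL;
    }
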
120 static void handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_good_req() argument
124 switch (be32_to_cpu(cqe->sop_drop_qpn) >> 24) { in handle_good_req()
138 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_good_req()
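
A hedged sketch of the requester-side dispatch above: the WQE opcode sits in the top byte of the big-endian sop_drop_qpn, and byte_cnt is reported for operations that return data. The opcode values and struct below are stand-ins, not the real MLX5_OPCODE_* constants.

    #include <stdint.h>
    #include <arpa/inet.h>   /* ntohl() stands in for be32_to_cpu() */

    enum { OP_SEND = 0x0a, OP_RDMA_READ = 0x10 };  /* assumed values */

    struct req_cqe {
        uint32_t byte_cnt;       /* big-endian */
        uint32_t sop_drop_qpn;   /* big-endian; opcode in the top byte */
    };

    static void handle_req(const struct req_cqe *cqe,
                           int *opcode, uint32_t *byte_len)
    {
        *byte_len = 0;
        switch (ntohl(cqe->sop_drop_qpn) >> 24) {
        case OP_RDMA_READ:
            *opcode   = OP_RDMA_READ;
            *byte_len = ntohl(cqe->byte_cnt);  /* reads report bytes moved */
            break;
        default:
            *opcode = OP_SEND;
            break;
        }
    }
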
170 static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe, in handle_responder() argument
184 be32_to_cpu(cqe->srqn)); in handle_responder()
190 wqe_ctr = be16_to_cpu(cqe->wqe_counter); in handle_responder()
201 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in handle_responder()
203 switch (cqe->op_own >> 4) { in handle_responder()
207 wc->ex.imm_data = cqe->imm_inval_pkey; in handle_responder()
216 wc->ex.imm_data = cqe->imm_inval_pkey; in handle_responder()
221 wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey); in handle_responder()
224 wc->slid = be16_to_cpu(cqe->slid); in handle_responder()
225 wc->sl = (be32_to_cpu(cqe->flags_rqpn) >> 24) & 0xf; in handle_responder()
226 wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff; in handle_responder()
227 wc->dlid_path_bits = cqe->ml_path; in handle_responder()
228 g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3; in handle_responder()
230 wc->pkey_index = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff; in handle_responder()
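
The responder path unpacks several packed big-endian fields; note that imm_inval_pkey is left big-endian when used as immediate data (lines 207 and 216) but byte-swapped when used as an invalidated rkey or a pkey index (lines 221 and 230). A sketch under an assumed field layout, mirroring only what the lines above touch:

    #include <stdint.h>
    #include <arpa/inet.h>   /* ntohs()/ntohl() stand in for be16/32_to_cpu() */

    struct resp_cqe {
        uint32_t imm_inval_pkey;  /* big-endian; immediate, rkey, or pkey */
        uint16_t slid;            /* big-endian source LID */
        uint8_t  ml_path;         /* DLID path bits */
        uint32_t flags_rqpn;      /* big-endian; GRH bits, SL, remote QPN */
    };

    static void unpack_resp(const struct resp_cqe *cqe,
                            uint16_t *slid, uint8_t *sl, uint32_t *src_qp,
                            int *grh, uint16_t *pkey_index)
    {
        uint32_t flags = ntohl(cqe->flags_rqpn);

        *slid       = ntohs(cqe->slid);
        *sl         = (flags >> 24) & 0xf;      /* service level */
        *src_qp     = flags & 0xffffff;         /* remote QPN, low 24 bits */
        *grh        = ((flags >> 28) & 3) != 0; /* GRH-present flags */
        *pkey_index = ntohl(cqe->imm_inval_pkey) & 0xffff;
    }
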
233 static void dump_cqe(struct mlx5_ib_dev *dev, struct mlx5_err_cqe *cqe) in dump_cqe() argument
235 __be32 *p = (__be32 *)cqe; in dump_cqe()
239 for (i = 0; i < sizeof(*cqe) / 16; i++, p += 4) in dump_cqe()
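
dump_cqe() prints the error CQE as rows of four big-endian 32-bit words, sizeof(*cqe)/16 rows of 16 bytes each. An equivalent userspace sketch:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>   /* ntohl() stands in for be32_to_cpu() */

    static void dump_words(const void *buf, size_t len)
    {
        const uint32_t *p = buf;

        for (size_t i = 0; i < len / 16; i++, p += 4)
            printf("%08x %08x %08x %08x\n",
                   ntohl(p[0]), ntohl(p[1]), ntohl(p[2]), ntohl(p[3]));
    }
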
246 struct mlx5_err_cqe *cqe, in mlx5_handle_error_cqe() argument
251 switch (cqe->syndrome) { in mlx5_handle_error_cqe()
299 wc->vendor_err = cqe->vendor_err_synd; in mlx5_handle_error_cqe()
301 dump_cqe(dev, cqe); in mlx5_handle_error_cqe()
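
mlx5_handle_error_cqe() maps the hardware syndrome to a work-completion status, passes the vendor syndrome through unmodified, and dumps the raw CQE. A reduced sketch with assumed syndrome values (the real MLX5_CQE_SYNDROME_* table is much longer):

    #include <stdint.h>

    enum { SYND_LOCAL_LENGTH = 0x01, SYND_REMOTE_ACCESS = 0x13 }; /* assumed */
    enum wc_status { WC_LOC_LEN_ERR, WC_REM_ACCESS_ERR, WC_GENERAL_ERR };

    struct err_cqe { uint8_t syndrome; uint8_t vendor_err_synd; };
    struct wc { enum wc_status status; uint8_t vendor_err; };

    static void handle_error(const struct err_cqe *cqe, struct wc *wc)
    {
        switch (cqe->syndrome) {
        case SYND_LOCAL_LENGTH:  wc->status = WC_LOC_LEN_ERR;    break;
        case SYND_REMOTE_ACCESS: wc->status = WC_REM_ACCESS_ERR; break;
        default:                 wc->status = WC_GENERAL_ERR;    break;
        }
        wc->vendor_err = cqe->vendor_err_synd;  /* vendor code, no decoding */
    }
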
370 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe, in get_sig_err_item() argument
373 u16 syndrome = be16_to_cpu(cqe->syndrome); in get_sig_err_item()
381 item->expected = be32_to_cpu(cqe->expected_trans_sig) >> 16; in get_sig_err_item()
382 item->actual = be32_to_cpu(cqe->actual_trans_sig) >> 16; in get_sig_err_item()
386 item->expected = be32_to_cpu(cqe->expected_reftag); in get_sig_err_item()
387 item->actual = be32_to_cpu(cqe->actual_reftag); in get_sig_err_item()
391 item->expected = be32_to_cpu(cqe->expected_trans_sig) & 0xffff; in get_sig_err_item()
392 item->actual = be32_to_cpu(cqe->actual_trans_sig) & 0xffff; in get_sig_err_item()
398 item->sig_err_offset = be64_to_cpu(cqe->err_offset); in get_sig_err_item()
399 item->key = be32_to_cpu(cqe->mkey); in get_sig_err_item()
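
A self-contained sketch of the signature-error decode above, assuming the usual T10-DIF split: guard in the high 16 bits of the transport signature, application tag in the low 16, and a 32-bit reference tag. The GUARD/APPTAG/REFTAG bit positions and the struct layout are assumptions mirrored from the checks visible above; be64toh() from <endian.h> (glibc) stands in for be64_to_cpu().

    #include <stdint.h>
    #include <endian.h>
    #include <arpa/inet.h>

    #define GUARD_ERR  (1 << 13)   /* assumed bit positions */
    #define APPTAG_ERR (1 << 12)
    #define REFTAG_ERR (1 << 11)

    struct sig_err_cqe {
        uint16_t syndrome;            /* big-endian bitmask */
        uint32_t expected_trans_sig;  /* guard high 16, app tag low 16 */
        uint32_t actual_trans_sig;
        uint32_t expected_reftag;
        uint32_t actual_reftag;
        uint64_t err_offset;
        uint32_t mkey;
    };

    enum sig_err_type { SIG_BAD_GUARD, SIG_BAD_REFTAG, SIG_BAD_APPTAG };

    struct sig_err {
        enum sig_err_type type;
        uint32_t expected, actual;
        uint64_t offset;
        uint32_t key;
    };

    static void decode_sig_err(const struct sig_err_cqe *cqe,
                               struct sig_err *item)
    {
        uint16_t syndrome = ntohs(cqe->syndrome);

        if (syndrome & GUARD_ERR) {              /* data guard mismatch */
            item->type     = SIG_BAD_GUARD;
            item->expected = ntohl(cqe->expected_trans_sig) >> 16;
            item->actual   = ntohl(cqe->actual_trans_sig) >> 16;
        } else if (syndrome & REFTAG_ERR) {      /* reference tag mismatch */
            item->type     = SIG_BAD_REFTAG;
            item->expected = ntohl(cqe->expected_reftag);
            item->actual   = ntohl(cqe->actual_reftag);
        } else if (syndrome & APPTAG_ERR) {      /* application tag mismatch */
            item->type     = SIG_BAD_APPTAG;
            item->expected = ntohl(cqe->expected_trans_sig) & 0xffff;
            item->actual   = ntohl(cqe->actual_trans_sig) & 0xffff;
        }
        item->offset = be64toh(cqe->err_offset); /* byte offset of the error */
        item->key    = ntohl(cqe->mkey);         /* memory key that failed */
    }
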
417 void *cqe; in mlx5_poll_one() local
421 cqe = next_cqe_sw(cq); in mlx5_poll_one()
422 if (!cqe) in mlx5_poll_one()
425 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in mlx5_poll_one()
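
The recurring "cqe + 64" in mlx5_poll_one(), get_sw_cqe(), init_cq_buf(), and __mlx5_ib_cq_clean() reflects that with 128-byte CQEs the meaningful 64-byte CQE occupies the second half of each entry. A small helper capturing that:

    static inline void *cqe64_of(void *cqe, int cqe_sz)
    {
        /* 64-byte CQEs: decode in place; 128-byte: skip the first half */
        return (cqe_sz == 64) ? cqe : (char *)cqe + 64;
    }
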
681 void *cqe; in init_cq_buf() local
685 cqe = get_cqe_from_buf(buf, i, buf->cqe_size); in init_cq_buf()
686 cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64; in init_cq_buf()
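
init_cq_buf() stamps every entry of a fresh ring; a sketch of the intent, assuming each CQE is marked with an invalid opcode (the 0xf value is an assumption) so the poller's opcode check rejects it until hardware completes into it:

    #include <stdint.h>

    #define CQE_INVALID 0xf  /* assumed stand-in for MLX5_CQE_INVALID */

    struct cqe64 { uint8_t pad[63]; uint8_t op_own; };

    static void init_ring(struct cqe64 *ring, int nent)
    {
        for (int i = 0; i < nent; i++)
            ring[i].op_own = CQE_INVALID << 4;  /* opcode in the high nibble */
    }
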
764 cq->ibcq.cqe = entries - 1; in mlx5_ib_create_cq()
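
Line 764 publishes the CQ depth as entries - 1 which, assuming the driver rounds the requested depth up to a power of two, doubles as the ring index mask used by get_cqe(cq, n & cq->ibcq.cqe) at line 84. A stand-in round-up helper:

    #include <stdint.h>

    static uint32_t roundup_pow2(uint32_t n)
    {
        uint32_t p = 1;

        while (p < n)
            p <<= 1;        /* smallest power of two >= n */
        return p;
    }
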
858 void *cqe, *dest; in __mlx5_ib_cq_clean() local
873 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) in __mlx5_ib_cq_clean()
880 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
881 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in __mlx5_ib_cq_clean()
887 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
890 memcpy(dest, cqe, cq->mcq.cqe_sz); in __mlx5_ib_cq_clean()
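
A sketch of the compaction loop in __mlx5_ib_cq_clean(): walk backward from the last software-owned entry toward the consumer index, count entries belonging to the destroyed QP, and slide survivors forward by that count while preserving each destination slot's owner bit. The struct layout and qpn field are illustrative only.

    #include <stdint.h>
    #include <string.h>

    #define OWNER 0x1

    struct cqe {
        uint8_t  data[60];
        uint32_t qpn;       /* illustrative; the kernel masks sop_drop_qpn */
        uint8_t  op_own;
    };

    static void clean_cq(struct cqe *ring, uint32_t mask,
                         uint32_t cons, uint32_t prod, uint32_t rsn)
    {
        uint32_t nfreed = 0;

        while ((int)--prod - (int)cons >= 0) {
            struct cqe *cqe = &ring[prod & mask];

            if (cqe->qpn == rsn) {
                nfreed++;                      /* drop: belongs to dead QP */
            } else if (nfreed) {
                struct cqe *dest = &ring[(prod + nfreed) & mask];
                uint8_t owner = dest->op_own & OWNER;

                memcpy(dest, cqe, sizeof(*dest));
                /* keep the destination slot's original owner bit */
                dest->op_own = owner | (dest->op_own & ~OWNER);
            }
        }
    }
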
1091 if (entries == ibcq->cqe + 1) in mlx5_ib_resize_cq()
1138 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1157 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
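
Lines 1091, 1138, and 1157 use the same depth - 1 convention: a resize to entries is a no-op when entries == ibcq->cqe + 1, and the new depth is republished as entries - 1 once the new buffer is live. A trivial restatement:

    #include <stdint.h>

    static int resize_is_noop(uint32_t cur_cqe /* ibcq->cqe */, uint32_t entries)
    {
        return entries == cur_cqe + 1;  /* requested size already in effect */
    }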