Lines matching refs: cqe (drivers/infiniband/hw/mlx4/cq.c)
80 struct mlx4_cqe *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe() local
81 struct mlx4_cqe *tcqe = ((cq->buf.entry_size == 64) ? (cqe + 1) : cqe); in get_sw_cqe()
84 !!(n & (cq->ibcq.cqe + 1))) ? NULL : cqe; in get_sw_cqe()
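
The three get_sw_cqe() lines above implement the ownership handshake between
hardware and software: the ring holds cq->ibcq.cqe + 1 entries (a power of
two), so "n & cq->ibcq.cqe" is the ring index and "n & (cq->ibcq.cqe + 1)"
is the pass-parity bit of the consumer counter; hardware flips each CQE's
owner bit once per pass, and a CQE is software-owned only when the two
agree. (The "cqe + 1" on line 81 points at the second half of a 64-byte
CQE, which is where the owner bit lives in that layout.) A minimal
user-space model of the bit arithmetic, with hypothetical _model names and
OWNER_MASK standing in for MLX4_CQE_OWNER_MASK:

    #include <stdio.h>
    #include <stdint.h>

    #define OWNER_MASK 0x80  /* stand-in for MLX4_CQE_OWNER_MASK */

    struct cqe_model { uint8_t owner_sr_opcode; };

    /* Return the CQE if software owns it for consumer counter n, else NULL. */
    static struct cqe_model *get_sw_cqe_model(struct cqe_model *ring,
                                              unsigned ibcq_cqe, unsigned n)
    {
            struct cqe_model *cqe = &ring[n & ibcq_cqe];

            /* n & (ibcq_cqe + 1) isolates the pass-parity bit; hardware
             * toggles the owner bit once per full pass of the ring. */
            return (!!(cqe->owner_sr_opcode & OWNER_MASK) ^
                    !!(n & (ibcq_cqe + 1))) ? NULL : cqe;
    }

    int main(void)
    {
            struct cqe_model ring[4] = { {0}, {0}, {OWNER_MASK}, {OWNER_MASK} };
            unsigned n;

            for (n = 0; n < 8; n++)
                    printf("n=%u -> %s\n", n,
                           get_sw_cqe_model(ring, 3, n) ? "sw" : "hw");
            return 0;
    }
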
132 static void mlx4_ib_free_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int cqe) in mlx4_ib_free_cq_buf() argument
134 mlx4_buf_free(dev->dev, (cqe + 1) * buf->entry_size, &buf->buf); in mlx4_ib_free_cq_buf()
139 u64 buf_addr, int cqe) in mlx4_ib_get_cq_umem() argument
144 *umem = ib_umem_get(context, buf_addr, cqe * cqe_size, in mlx4_ib_get_cq_umem()
175 int entries = attr->cqe; in mlx4_ib_create_cq()
193 cq->ibcq.cqe = entries - 1; in mlx4_ib_create_cq()
271 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); in mlx4_ib_create_cq()
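
Lines 175/193 show the sizing convention that lines 134 and 271 rely on:
ibcq.cqe stores the ring size minus one, so the same value serves as the
depth reported back to the user and as the AND-mask for ring indexing, and
buffer sizes come out as (cqe + 1) * entry_size. In the mainline driver the
requested depth is first rounded up to a power of two with one spare entry;
a small sketch of that arithmetic, where roundup_pow2() is a local stand-in
for the kernel helper:

    #include <stdio.h>

    static unsigned roundup_pow2(unsigned v)
    {
            unsigned p = 1;

            while (p < v)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            unsigned requested = 100;                    /* attr->cqe, line 175 */
            unsigned ring = roundup_pow2(requested + 1); /* 128, one spare slot */
            unsigned ibcq_cqe = ring - 1;                /* 127: size and mask  */
            unsigned entry_size = 32;                    /* 64 on newer HCAs    */

            printf("ring=%u mask=0x%x bytes=%u\n",
                   ring, ibcq_cqe, (ibcq_cqe + 1) * entry_size);
            return 0;
    }
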
302 cq->resize_buf->cqe = entries - 1; in mlx4_alloc_resize_buf()
331 cq->resize_buf->cqe = entries - 1; in mlx4_alloc_resize_umem()
349 struct mlx4_cqe *cqe, *new_cqe; in mlx4_ib_cq_resize_copy_cqes() local
355 cqe = get_cqe(cq, i & cq->ibcq.cqe); in mlx4_ib_cq_resize_copy_cqes()
356 cqe += cqe_inc; in mlx4_ib_cq_resize_copy_cqes()
358 while ((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) != MLX4_CQE_OPCODE_RESIZE) { in mlx4_ib_cq_resize_copy_cqes()
360 (i + 1) & cq->resize_buf->cqe); in mlx4_ib_cq_resize_copy_cqes()
361 memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size); in mlx4_ib_cq_resize_copy_cqes()
364 new_cqe->owner_sr_opcode = (cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK) | in mlx4_ib_cq_resize_copy_cqes()
365 (((i + 1) & (cq->resize_buf->cqe + 1)) ? MLX4_CQE_OWNER_MASK : 0); in mlx4_ib_cq_resize_copy_cqes()
366 cqe = get_cqe(cq, ++i & cq->ibcq.cqe); in mlx4_ib_cq_resize_copy_cqes()
367 cqe += cqe_inc; in mlx4_ib_cq_resize_copy_cqes()
387 if (entries == ibcq->cqe + 1) { in mlx4_ib_resize_cq()
423 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
438 tmp_cqe = cq->ibcq.cqe; in mlx4_ib_resize_cq()
440 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_resize_cq()
457 cq->resize_buf->cqe); in mlx4_ib_resize_cq()
485 mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe); in mlx4_ib_destroy_cq()
494 static void dump_cqe(void *cqe) in dump_cqe() argument
496 __be32 *buf = cqe; in dump_cqe()
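
dump_cqe() (lines 494-496) just prints the raw CQE for debugging; a
user-space equivalent, assuming a 32-byte CQE dumped as eight big-endian
32-bit words, with ntohl() standing in for be32_to_cpu():

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    static void dump_cqe_model(const void *cqe)
    {
            const uint32_t *buf = cqe;
            int i;

            for (i = 0; i < 8; i++)
                    printf("%08x%c", ntohl(buf[i]), i == 7 ? '\n' : ' ');
    }

    int main(void)
    {
            uint32_t cqe[8] = { htonl(0xdeadbeef) };

            dump_cqe_model(cqe);
            return 0;
    }
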
504 static void mlx4_ib_handle_error_cqe(struct mlx4_err_cqe *cqe, in mlx4_ib_handle_error_cqe() argument
507 if (cqe->syndrome == MLX4_CQE_SYNDROME_LOCAL_QP_OP_ERR) { in mlx4_ib_handle_error_cqe()
511 be32_to_cpu(cqe->my_qpn), be16_to_cpu(cqe->wqe_index), in mlx4_ib_handle_error_cqe()
512 cqe->vendor_err_syndrome, in mlx4_ib_handle_error_cqe()
513 cqe->owner_sr_opcode & ~MLX4_CQE_OWNER_MASK); in mlx4_ib_handle_error_cqe()
514 dump_cqe(cqe); in mlx4_ib_handle_error_cqe()
517 switch (cqe->syndrome) { in mlx4_ib_handle_error_cqe()
562 wc->vendor_err = cqe->vendor_err_syndrome; in mlx4_ib_handle_error_cqe()
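
mlx4_ib_handle_error_cqe() (lines 504-562) has two jobs: log and dump local
QP operation errors (lines 507-514), and translate the hardware syndrome
into an ib_wc_status while passing the vendor syndrome through (lines 517
and 562). A sketch of the shape of that translation; the enum values here
are illustrative, not the driver's constants:

    #include <stdio.h>

    enum syn { SYN_LOCAL_LENGTH_ERR, SYN_LOCAL_QP_OP_ERR, SYN_WR_FLUSH_ERR };
    enum wc_status { WC_LOC_LEN_ERR, WC_LOC_QP_OP_ERR, WC_WR_FLUSH_ERR,
                     WC_GENERAL_ERR };

    struct err_cqe_model { enum syn syndrome; unsigned char vendor_err_syndrome; };
    struct wc_model { enum wc_status status; unsigned vendor_err; };

    static void handle_error_cqe_model(const struct err_cqe_model *cqe,
                                       struct wc_model *wc)
    {
            switch (cqe->syndrome) {                     /* line 517 */
            case SYN_LOCAL_LENGTH_ERR: wc->status = WC_LOC_LEN_ERR;   break;
            case SYN_LOCAL_QP_OP_ERR:  wc->status = WC_LOC_QP_OP_ERR; break;
            case SYN_WR_FLUSH_ERR:     wc->status = WC_WR_FLUSH_ERR;  break;
            default:                   wc->status = WC_GENERAL_ERR;   break;
            }
            wc->vendor_err = cqe->vendor_err_syndrome;   /* line 562 */
    }

    int main(void)
    {
            struct err_cqe_model cqe = { SYN_WR_FLUSH_ERR, 0x32 };
            struct wc_model wc;

            handle_error_cqe_model(&cqe, &wc);
            printf("status=%d vendor_err=0x%x\n", wc.status, wc.vendor_err);
            return 0;
    }
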
580 unsigned tail, struct mlx4_cqe *cqe, int is_eth) in use_tunnel_data() argument
660 struct mlx4_cqe *cqe; in mlx4_ib_poll_one() local
673 cqe = next_cqe_sw(cq); in mlx4_ib_poll_one()
674 if (!cqe) in mlx4_ib_poll_one()
678 cqe++; in mlx4_ib_poll_one()
688 is_send = cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK; in mlx4_ib_poll_one()
689 is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == in mlx4_ib_poll_one()
692 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP && in mlx4_ib_poll_one()
699 if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_CQE_OPCODE_RESIZE)) { in mlx4_ib_poll_one()
703 mlx4_ib_free_cq_buf(dev, &cq->buf, cq->ibcq.cqe); in mlx4_ib_poll_one()
705 cq->ibcq.cqe = cq->resize_buf->cqe; in mlx4_ib_poll_one()
715 (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) { in mlx4_ib_poll_one()
722 be32_to_cpu(cqe->vlan_my_qpn)); in mlx4_ib_poll_one()
725 cq->mcq.cqn, be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK); in mlx4_ib_poll_one()
736 g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); in mlx4_ib_poll_one()
751 wqe_ctr = be16_to_cpu(cqe->wqe_index); in mlx4_ib_poll_one()
758 wqe_ctr = be16_to_cpu(cqe->wqe_index); in mlx4_ib_poll_one()
763 wqe_ctr = be16_to_cpu(cqe->wqe_index); in mlx4_ib_poll_one()
774 mlx4_ib_handle_error_cqe((struct mlx4_err_cqe *) cqe, wc); in mlx4_ib_poll_one()
782 switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) { in mlx4_ib_poll_one()
796 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in mlx4_ib_poll_one()
828 wc->byte_len = be32_to_cpu(cqe->byte_cnt); in mlx4_ib_poll_one()
830 switch (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) { in mlx4_ib_poll_one()
834 wc->ex.imm_data = cqe->immed_rss_invalid; in mlx4_ib_poll_one()
839 wc->ex.invalidate_rkey = be32_to_cpu(cqe->immed_rss_invalid); in mlx4_ib_poll_one()
848 wc->ex.imm_data = cqe->immed_rss_invalid; in mlx4_ib_poll_one()
860 cqe, is_eth); in mlx4_ib_poll_one()
863 wc->slid = be16_to_cpu(cqe->rlid); in mlx4_ib_poll_one()
864 g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); in mlx4_ib_poll_one()
868 wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f; in mlx4_ib_poll_one()
869 wc->wc_flags |= mlx4_ib_ipoib_csum_ok(cqe->status, in mlx4_ib_poll_one()
870 cqe->checksum) ? IB_WC_IP_CSUM_OK : 0; in mlx4_ib_poll_one()
872 wc->sl = be16_to_cpu(cqe->sl_vid) >> 13; in mlx4_ib_poll_one()
873 if (be32_to_cpu(cqe->vlan_my_qpn) & in mlx4_ib_poll_one()
875 wc->vlan_id = be16_to_cpu(cqe->sl_vid) & in mlx4_ib_poll_one()
880 memcpy(wc->smac, cqe->smac, ETH_ALEN); in mlx4_ib_poll_one()
883 wc->sl = be16_to_cpu(cqe->sl_vid) >> 12; in mlx4_ib_poll_one()
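
The mlx4_ib_poll_one() lines above trace the per-completion flow: fetch the
next software-owned CQE (line 673), step past the first half of a 64-byte
CQE (line 678), classify send vs. receive and error vs. success from
owner_sr_opcode (lines 688-689), handle the NOP and RESIZE special cases,
look up the QP from vlan_my_qpn, recover the WQE index, then fill the work
completion from the opcode-specific fields. A condensed user-space model of
that skeleton (mask values are illustrative; byte_cnt is big-endian in the
real CQE):

    #include <stdio.h>
    #include <stdint.h>

    #define OWNER_MASK   0x80
    #define IS_SEND_MASK 0x40
    #define OPCODE_MASK  0x1f
    #define OP_ERROR     0x1e  /* stand-in for MLX4_CQE_OPCODE_ERROR */

    struct cqe_model { uint8_t owner_sr_opcode; uint32_t byte_cnt; };
    struct wc_model  { int is_send; int is_error; uint32_t byte_len; };

    /* Poll one completion; returns 0 on success, -1 if the CQ is empty. */
    static int poll_one_model(struct cqe_model *ring, unsigned mask,
                              unsigned *cons_index, struct wc_model *wc)
    {
            struct cqe_model *cqe = &ring[*cons_index & mask];

            /* Ownership test, as in get_sw_cqe() (line 80). */
            if (!!(cqe->owner_sr_opcode & OWNER_MASK) ^
                !!(*cons_index & (mask + 1)))
                    return -1;

            ++*cons_index;
            /* In the driver an rmb() sits here so the CQE body is not
             * read before the ownership check is known to have passed. */

            wc->is_send  = !!(cqe->owner_sr_opcode & IS_SEND_MASK);  /* 688 */
            wc->is_error = (cqe->owner_sr_opcode & OPCODE_MASK) == OP_ERROR;
            wc->byte_len = cqe->byte_cnt;    /* be32_to_cpu() in the driver */
            return 0;
    }

    int main(void)
    {
            struct cqe_model ring[4] = { { IS_SEND_MASK | 0x02, 64 } };
            struct wc_model wc;
            unsigned cons = 0;

            if (!poll_one_model(ring, 3, &cons, &wc))
                    printf("send=%d error=%d bytes=%u\n",
                           wc.is_send, wc.is_error, wc.byte_len);
            return 0;
    }
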
938 struct mlx4_cqe *cqe, *dest; in __mlx4_ib_cq_clean() local
950 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) in __mlx4_ib_cq_clean()
958 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx4_ib_cq_clean()
959 cqe += cqe_inc; in __mlx4_ib_cq_clean()
961 if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) { in __mlx4_ib_cq_clean()
962 if (srq && !(cqe->owner_sr_opcode & MLX4_CQE_IS_SEND_MASK)) in __mlx4_ib_cq_clean()
963 mlx4_ib_free_srq_wqe(srq, be16_to_cpu(cqe->wqe_index)); in __mlx4_ib_cq_clean()
966 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); in __mlx4_ib_cq_clean()
970 memcpy(dest, cqe, sizeof *cqe); in __mlx4_ib_cq_clean()
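
__mlx4_ib_cq_clean() (lines 938-970) is the compaction pass run when a QP
is destroyed: it scans backwards from the producer index, discards CQEs
that belong to the dying QP (returning their SRQ WQEs first, line 963), and
slides the survivors toward the producer side; in the driver the
destination slot's owner bit is preserved across the memcpy on line 970. A
simplified model of the backward walk:

    #include <stdio.h>
    #include <stdint.h>

    #define OWNER_MASK 0x80

    struct cqe_model { uint32_t qpn; uint8_t owner_sr_opcode; };

    static void cq_clean_model(struct cqe_model *ring, unsigned mask,
                               unsigned *cons_index, unsigned prod_index,
                               uint32_t qpn)
    {
            unsigned nfreed = 0;

            while ((int)--prod_index - (int)*cons_index >= 0) {
                    struct cqe_model *cqe = &ring[prod_index & mask];

                    if (cqe->qpn == qpn) {
                            nfreed++;  /* drop: this QP is going away */
                    } else if (nfreed) {
                            struct cqe_model *dest =
                                    &ring[(prod_index + nfreed) & mask];
                            uint8_t owner = dest->owner_sr_opcode & OWNER_MASK;

                            *dest = *cqe;  /* line 970 */
                            dest->owner_sr_opcode = owner |
                                    (dest->owner_sr_opcode & ~OWNER_MASK);
                    }
            }
            *cons_index += nfreed;  /* freed slots rejoin the ring */
    }

    int main(void)
    {
            struct cqe_model ring[4] = { {1, 0}, {7, 0}, {1, 0}, {7, 0} };
            unsigned cons = 0;

            cq_clean_model(ring, 3, &cons, 4, 7);
            printf("cons=%u ring[2].qpn=%u ring[3].qpn=%u\n",
                   cons, ring[2].qpn, ring[3].qpn);
            return 0;
    }
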