err_cqe            62 drivers/infiniband/hw/cxgb4/ev.c static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
err_cqe            64 drivers/infiniband/hw/cxgb4/ev.c 	__be64 *p = (void *)err_cqe;
err_cqe            69 drivers/infiniband/hw/cxgb4/ev.c 		CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
err_cqe            70 drivers/infiniband/hw/cxgb4/ev.c 		CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
err_cqe            71 drivers/infiniband/hw/cxgb4/ev.c 		CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
err_cqe            82 drivers/infiniband/hw/cxgb4/ev.c 	if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE ||
err_cqe            83 drivers/infiniband/hw/cxgb4/ev.c 				 CQE_OPCODE(err_cqe) == FW_RI_READ_RESP))
err_cqe            84 drivers/infiniband/hw/cxgb4/ev.c 		print_tpte(dev, CQE_WRID_STAG(err_cqe));
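
The ev.c hits above (lines 62-84) form the cxgb4 error-CQE dump path: dump_err_cqe() aliases the CQE as __be64 words for raw printing, logs the decoded QPID, opcode, status, type, length, and WRID fields, and, for RQ-type completions of RDMA WRITE or READ RESPONSE, also dumps the TPT entry for the STAG carried in the WRID. A minimal sketch of the word-dump idiom, assuming a hypothetical dump_cqe_words() helper (not a cxgb4 symbol):

    /* Sketch: print a hardware CQE as big-endian 64-bit words, mirroring
     * the "__be64 *p = (void *)err_cqe" idiom above. dump_cqe_words() and
     * the caller-supplied word count are assumptions, not driver code.
     */
    #include <linux/kernel.h>
    #include <linux/types.h>
    #include <linux/printk.h>

    static void dump_cqe_words(const void *cqe, int nwords)
    {
            const __be64 *p = cqe;
            int i;

            for (i = 0; i < nwords; i++)
                    pr_debug("cqe[%2d]: %016llx\n", i, be64_to_cpu(p[i]));
    }
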
err_cqe            89 drivers/infiniband/hw/cxgb4/ev.c 			  struct t4_cqe *err_cqe,
err_cqe            96 drivers/infiniband/hw/cxgb4/ev.c 	dump_err_cqe(dev, err_cqe);
err_cqe           120 drivers/infiniband/hw/cxgb4/ev.c void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
err_cqe           127 drivers/infiniband/hw/cxgb4/ev.c 	qhp = xa_load(&dev->qps, CQE_QPID(err_cqe));
err_cqe           130 drivers/infiniband/hw/cxgb4/ev.c 		       CQE_QPID(err_cqe),
err_cqe           131 drivers/infiniband/hw/cxgb4/ev.c 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
err_cqe           132 drivers/infiniband/hw/cxgb4/ev.c 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
err_cqe           133 drivers/infiniband/hw/cxgb4/ev.c 		       CQE_WRID_LOW(err_cqe));
err_cqe           138 drivers/infiniband/hw/cxgb4/ev.c 	if (SQ_TYPE(err_cqe))
err_cqe           145 drivers/infiniband/hw/cxgb4/ev.c 		       cqid, CQE_QPID(err_cqe),
err_cqe           146 drivers/infiniband/hw/cxgb4/ev.c 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
err_cqe           147 drivers/infiniband/hw/cxgb4/ev.c 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
err_cqe           148 drivers/infiniband/hw/cxgb4/ev.c 		       CQE_WRID_LOW(err_cqe));
err_cqe           158 drivers/infiniband/hw/cxgb4/ev.c 	if (RQ_TYPE(err_cqe) &&
err_cqe           159 drivers/infiniband/hw/cxgb4/ev.c 	    (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) {
err_cqe           160 drivers/infiniband/hw/cxgb4/ev.c 		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR);
err_cqe           164 drivers/infiniband/hw/cxgb4/ev.c 	switch (CQE_STATUS(err_cqe)) {
err_cqe           179 drivers/infiniband/hw/cxgb4/ev.c 		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR);
err_cqe           186 drivers/infiniband/hw/cxgb4/ev.c 		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL);
err_cqe           206 drivers/infiniband/hw/cxgb4/ev.c 		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
err_cqe           211 drivers/infiniband/hw/cxgb4/ev.c 		       CQE_STATUS(err_cqe), qhp->wq.sq.qid);
err_cqe           212 drivers/infiniband/hw/cxgb4/ev.c 		post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL);
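
c4iw_ev_dispatch() (lines 120-212) looks up the QP for the QPID carried in the error CQE via xa_load() on dev->qps, logs and bails out if the QP or its CQ has already gone away, flags an RQ-side RDMA WRITE error as IB_EVENT_QP_REQ_ERR, and otherwise switches on CQE_STATUS() to pick the IB async event to post. A condensed sketch of that mapping, using a hypothetical t4_status_to_ib_event() helper; the grouping is abbreviated, and the driver's switch enumerates more statuses per class (T4_ERR_* come from the driver-local t4.h):

    /* Sketch: map a T4 error status to the IB async event that
     * post_qp_event() raises. Condensed from the ev.c switch above.
     */
    #include <rdma/ib_verbs.h>
    #include "t4.h"        /* driver-local T4_ERR_* status codes */

    static enum ib_event_type t4_status_to_ib_event(u8 status)
    {
            switch (status) {
            case T4_ERR_STAG:
            case T4_ERR_PDID:
            case T4_ERR_ACCESS:
            case T4_ERR_BOUND:
                    return IB_EVENT_QP_ACCESS_ERR;  /* protection violations */
            case T4_ERR_ECC:
            case T4_ERR_INTERNAL_ERR:
                    return IB_EVENT_DEVICE_FATAL;   /* device-level errors */
            default:
                    return IB_EVENT_QP_FATAL;       /* unrecoverable QP errors */
            }
    }
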
err_cqe          1027 drivers/infiniband/hw/cxgb4/iw_cxgb4.h int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
err_cqe          1034 drivers/infiniband/hw/cxgb4/iw_cxgb4.h void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
err_cqe          1423 drivers/infiniband/hw/cxgb4/qp.c static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
err_cqe          1432 drivers/infiniband/hw/cxgb4/qp.c 	if (!err_cqe) {
err_cqe          1438 drivers/infiniband/hw/cxgb4/qp.c 	status = CQE_STATUS(err_cqe);
err_cqe          1439 drivers/infiniband/hw/cxgb4/qp.c 	opcode = CQE_OPCODE(err_cqe);
err_cqe          1440 drivers/infiniband/hw/cxgb4/qp.c 	rqtype = RQ_TYPE(err_cqe);
err_cqe          1559 drivers/infiniband/hw/cxgb4/qp.c static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
err_cqe          1588 drivers/infiniband/hw/cxgb4/qp.c 		build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
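
In qp.c the same error CQE drives TERM message construction: build_term_codes() (lines 1423-1440) falls back to a generic code when err_cqe is NULL and otherwise decodes the status, opcode, and RQ type that select the TERM codes, and post_terminate() (lines 1559-1588) copies the resulting layer/etype and ecode into the TERM WR. A sketch of the entry path under a hypothetical sketch_term_codes() name; the default constants shown (LAYER_RDMAP, RDMAP_LOCAL_CATA) are an assumption about the names in the driver's FW interface header:

    /* Sketch of the build_term_codes() entry path: with no CQE, report a
     * local catastrophic error; otherwise decode the fields that pick the
     * TERM layer/etype and ecode. Constant names are illustrative.
     */
    static void sketch_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
                                  u8 *ecode)
    {
            u8 status, opcode, rqtype;

            if (!err_cqe) {
                    /* no CQE available: generic local catastrophic code */
                    *layer_type = LAYER_RDMAP | RDMAP_LOCAL_CATA;
                    *ecode = 0;
                    return;
            }

            status = CQE_STATUS(err_cqe);
            opcode = CQE_OPCODE(err_cqe);
            rqtype = RQ_TYPE(err_cqe);
            /* ... (status, opcode, rqtype) select the layer/etype and ecode ... */
    }
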
err_cqe           444 drivers/infiniband/hw/mlx5/cq.c 	struct mlx5_err_cqe *err_cqe;
err_cqe           517 drivers/infiniband/hw/mlx5/cq.c 		err_cqe = (struct mlx5_err_cqe *)cqe64;
err_cqe           518 drivers/infiniband/hw/mlx5/cq.c 		mlx5_handle_error_cqe(dev, err_cqe, wc);
err_cqe           523 drivers/infiniband/hw/mlx5/cq.c 			    err_cqe->syndrome, err_cqe->vendor_err_synd);
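
On the mlx5 verbs side, the cq.c hits (lines 444-523) show the poll path recasting the 64-byte CQE as struct mlx5_err_cqe for error completions, decoding it into the ib_wc via mlx5_handle_error_cqe(), and debug-printing the two syndrome bytes. A minimal sketch of reading those fields; struct mlx5_err_cqe and its syndrome/vendor_err_synd members are the real layout from include/linux/mlx5/device.h, while sketch_report_err_cqe() is hypothetical:

    /* Sketch: pull the error syndromes out of an mlx5 error CQE. */
    #include <linux/mlx5/device.h>
    #include <linux/printk.h>

    static void sketch_report_err_cqe(struct mlx5_cqe64 *cqe64)
    {
            struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe64;

            pr_debug("error CQE: syndrome 0x%x, vendor syndrome 0x%x\n",
                     err_cqe->syndrome, err_cqe->vendor_err_synd);
    }
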
err_cqe          1139 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
err_cqe          1141 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 	if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
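
The en_rx.c hit (lines 1139-1141) is the Ethernet RX recovery gate: the CQE is reinterpreted as an error CQE, and queue-recovery work is scheduled only when cqe_syndrome_needs_recover(), a driver-local helper, accepts the syndrome. A sketch of such a predicate; the exact syndrome set is an assumption, though the MLX5_CQE_SYNDROME_* constants themselves are real:

    /* Sketch: decide whether an RX error syndrome warrants queue
     * recovery. The syndrome set here is illustrative, not verified
     * against the driver's cqe_syndrome_needs_recover().
     */
    #include <linux/mlx5/device.h>

    static bool sketch_syndrome_needs_recover(u8 syndrome)
    {
            return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
                   syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR ||
                   syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
    }
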
err_cqe           404 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 				 struct mlx5_err_cqe *err_cqe)
err_cqe           414 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
err_cqe           415 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 		   err_cqe->syndrome, err_cqe->vendor_err_synd);
err_cqe           416 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c 	mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
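
The en_tx.c hits (lines 404-416) show the TX error report: decode the opcode from the error CQE, log both syndromes, then hex-dump the raw CQE through mlx5_dump_err_cqe(). A sketch of that pattern under a hypothetical sketch_dump_tx_err_cqe() name; the real function logs via netdev_err() with SQ and CQ identifiers, plumbing that is omitted here:

    /* Sketch of the en_tx.c reporting pattern: opcode + syndromes in the
     * log line, then the raw CQE as a hex dump.
     */
    #include <linux/mlx5/cq.h>
    #include <linux/mlx5/device.h>

    static void sketch_dump_tx_err_cqe(struct mlx5_core_dev *mdev,
                                       struct mlx5_err_cqe *err_cqe)
    {
            pr_err("Error CQE: opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
                   get_cqe_opcode((struct mlx5_cqe64 *)err_cqe),
                   err_cqe->syndrome, err_cqe->vendor_err_synd);
            mlx5_dump_err_cqe(mdev, err_cqe);
    }
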
err_cqe           198 include/linux/mlx5/cq.h 				     struct mlx5_err_cqe *err_cqe)
err_cqe           200 include/linux/mlx5/cq.h 	print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe,
err_cqe           201 include/linux/mlx5/cq.h 		       sizeof(*err_cqe), false);
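
Lines 198-201 of include/linux/mlx5/cq.h show mlx5_dump_err_cqe() nearly in full; assembled from those fragments, the helper is a single print_hex_dump() of the error CQE, 16 bytes per row with offset prefixes (the dev parameter goes unused in the fragments shown):

    static inline void mlx5_dump_err_cqe(struct mlx5_core_dev *dev,
                                         struct mlx5_err_cqe *err_cqe)
    {
            /* dump the whole 64-byte error CQE as hex, offsets prefixed */
            print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, err_cqe,
                           sizeof(*err_cqe), false);
    }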