cqe64              81 drivers/infiniband/hw/mlx5/cq.c 	struct mlx5_cqe64 *cqe64;
cqe64              83 drivers/infiniband/hw/mlx5/cq.c 	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
cqe64              85 drivers/infiniband/hw/mlx5/cq.c 	if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
cqe64              86 drivers/infiniband/hw/mlx5/cq.c 	    !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
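
The hits at cq.c lines 81-86 are get_sw_cqe(): with 128-byte CQEs the 64-byte hardware CQE occupies the second half of each entry (hence cqe + 64), and an entry counts as software-owned only when its opcode is valid and its owner bit matches the wrap parity of the consumer index. A minimal sketch of that test, assuming cqe_cnt is the power-of-two queue size (ibcq.cqe + 1 in the driver):

	/* Sketch: the owner bit must equal the wrap parity of index n. */
	static bool cqe_is_sw_owned(struct mlx5_cqe64 *cqe64, int n, int cqe_cnt)
	{
		u8 sw_parity = !!(n & cqe_cnt);	/* flips on every queue wrap */
		u8 hw_owner = cqe64->op_own & MLX5_CQE_OWNER_MASK;

		return get_cqe_opcode(cqe64) != MLX5_CQE_INVALID &&
		       !(hw_owner ^ sw_parity);
	}
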
cqe64             333 drivers/infiniband/hw/mlx5/cq.c static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
cqe64             445 drivers/infiniband/hw/mlx5/cq.c 	struct mlx5_cqe64 *cqe64;
cqe64             462 drivers/infiniband/hw/mlx5/cq.c 	cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
cqe64             471 drivers/infiniband/hw/mlx5/cq.c 	opcode = get_cqe_opcode(cqe64);
cqe64             484 drivers/infiniband/hw/mlx5/cq.c 	qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;
cqe64             498 drivers/infiniband/hw/mlx5/cq.c 		wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
cqe64             500 drivers/infiniband/hw/mlx5/cq.c 		handle_good_req(wc, cqe64, wq, idx);
cqe64             501 drivers/infiniband/hw/mlx5/cq.c 		handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
cqe64             510 drivers/infiniband/hw/mlx5/cq.c 		handle_responder(wc, cqe64, *cur_qp);
cqe64             517 drivers/infiniband/hw/mlx5/cq.c 		err_cqe = (struct mlx5_err_cqe *)cqe64;
cqe64             526 drivers/infiniband/hw/mlx5/cq.c 			wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
cqe64             535 drivers/infiniband/hw/mlx5/cq.c 				wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
cqe64             546 drivers/infiniband/hw/mlx5/cq.c 		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
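
Lines 445-546 are mlx5_poll_one(), with the handle_atomics() definition at line 333 feeding its requestor path: the 64-byte CQE is located, its opcode read via get_cqe_opcode(), the QP number taken from the low 24 bits of sop_drop_qpn, and the completion dispatched by opcode. A hedged sketch of that dispatch shape; the commented calls stand in for the driver's handle_good_req()/handle_atomics()/handle_responder() and error handling:

	static void sketch_dispatch(struct mlx5_cqe64 *cqe64)
	{
		u8 opcode = get_cqe_opcode(cqe64);
		u32 qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff;	/* low 24 bits */
		u16 wqe_ctr = be16_to_cpu(cqe64->wqe_counter);		/* ring slot */

		switch (opcode) {
		case MLX5_CQE_REQ:		/* requestor: completed send WQE at wqe_ctr */
			/* handle_good_req(); handle_atomics(); */
			break;
		case MLX5_CQE_RESP_WR_IMM:
		case MLX5_CQE_RESP_SEND:
		case MLX5_CQE_RESP_SEND_IMM:
		case MLX5_CQE_RESP_SEND_INV:	/* responder: completed receive on qpn */
			/* handle_responder(); */
			break;
		case MLX5_CQE_REQ_ERR:
		case MLX5_CQE_RESP_ERR:		/* view the CQE as struct mlx5_err_cqe */
			break;
		case MLX5_CQE_SIG_ERR:		/* view the CQE as struct mlx5_sig_err_cqe */
			break;
		}
	}
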
cqe64             836 drivers/infiniband/hw/mlx5/cq.c 	struct mlx5_cqe64 *cqe64;
cqe64             840 drivers/infiniband/hw/mlx5/cq.c 		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
cqe64             841 drivers/infiniband/hw/mlx5/cq.c 		cqe64->op_own = MLX5_CQE_INVALID << 4;
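
Lines 836-841 are the CQ-buffer init helper: every entry is stamped with MLX5_CQE_INVALID in the opcode nibble of op_own (bits 7:4), so the ownership test above skips slots hardware has not written yet. A sketch, where get_buf_cqe() is a hypothetical stand-in for the driver's per-buffer accessor:

	for (i = 0; i < nent; i++) {
		void *cqe = get_buf_cqe(buf, i);	/* hypothetical accessor */
		struct mlx5_cqe64 *cqe64;

		cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
		cqe64->op_own = MLX5_CQE_INVALID << 4;	/* opcode nibble = 0xf */
	}
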
cqe64            1029 drivers/infiniband/hw/mlx5/cq.c static int is_equal_rsn(struct mlx5_cqe64 *cqe64, u32 rsn)
cqe64            1031 drivers/infiniband/hw/mlx5/cq.c 	return rsn == (ntohl(cqe64->sop_drop_qpn) & 0xffffff);
cqe64            1036 drivers/infiniband/hw/mlx5/cq.c 	struct mlx5_cqe64 *cqe64, *dest64;
cqe64            1060 drivers/infiniband/hw/mlx5/cq.c 		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
cqe64            1061 drivers/infiniband/hw/mlx5/cq.c 		if (is_equal_rsn(cqe64, rsn)) {
cqe64            1062 drivers/infiniband/hw/mlx5/cq.c 			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
cqe64            1063 drivers/infiniband/hw/mlx5/cq.c 				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
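
Lines 1029-1063 are __mlx5_ib_cq_clean() and its helper is_equal_rsn(): a CQE belongs to the QP/SRQ being torn down when the low 24 bits of sop_drop_qpn equal rsn, and matching SRQ completions must return their WQE via mlx5_ib_free_srq_wqe() before the queue is compacted. The scan, in outline:

	/* Outline of the clean scan: walk from the newest software-owned
	 * CQE back to cons_index, freeing matches and sliding the rest. */
	while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) {
		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
		cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
		if (is_equal_rsn(cqe64, rsn)) {
			if (srq && (ntohl(cqe64->srqn) & 0xffffff))
				mlx5_ib_free_srq_wqe(srq, be16_to_cpu(cqe64->wqe_counter));
			++nfreed;
		} else if (nfreed) {
			/* copy this CQE nfreed slots forward (the dest64
			 * pointer), preserving the destination's owner bit */
		}
	}
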
cqe64              50 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
cqe64              55 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	opcode = get_cqe_opcode(cqe64);
cqe64              57 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 		idx = be16_to_cpu(cqe64->wqe_counter) &
cqe64              63 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 		idx = be16_to_cpu(cqe64->wqe_counter) &
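
In dr_send.c, dr_parse_cqe() recovers the send-queue slot from the completion for both the success and error opcodes: wqe_counter is a free-running big-endian counter, so the mask assumes a power-of-two SQ. A two-line sketch, with sq_wqe_cnt standing in for the driver's queue-size field:

	u16 wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
	unsigned int idx = wqe_ctr & (sq_wqe_cnt - 1);	/* ring slot */
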
cqe64              75 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	struct mlx5_cqe64 *cqe64;
cqe64              78 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	cqe64 = mlx5_cqwq_get_cqe(&dr_cq->wq);
cqe64              79 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	if (!cqe64)
cqe64              83 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c 	err = dr_parse_cqe(dr_cq, cqe64);
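
dr_cq_poll_one() (lines 75-83) is the consuming side: mlx5_cqwq_get_cqe() returns NULL when no fresh CQE is available, otherwise the entry is popped from the mlx5_cqwq and parsed. A sketch of that shape, assuming the CQ_EMPTY status defined locally in dr_send.c and omitting the doorbell-record update the driver performs after parsing:

	static int sketch_poll_one(struct mlx5dr_cq *dr_cq)
	{
		struct mlx5_cqe64 *cqe64 = mlx5_cqwq_get_cqe(&dr_cq->wq);

		if (!cqe64)
			return CQ_EMPTY;	/* nothing new from hardware */

		mlx5_cqwq_pop(&dr_cq->wq);	/* consume before parsing */
		return dr_parse_cqe(dr_cq, cqe64);
	}
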
cqe64             956 include/linux/mlx5/device.h 	struct mlx5_cqe64	cqe64;
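
The device.h hit is the struct member behind the recurring cqe + 64 arithmetic: in the 128-byte format, struct mlx5_cqe128 carries up to 64 bytes of inline data ahead of the embedded 64-byte CQE, so the hardware CQE always sits in the second half of the entry:

	struct mlx5_cqe128 {
		u8			inl_data[64];	/* up to 64 bytes inline data */
		struct mlx5_cqe64	cqe64;
	};
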