Lines Matching refs:send_cq

50 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
52 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
863 mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq), in create_qp_common()
872 mcq = to_mcq(init_attr->send_cq); in create_qp_common()
876 mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq), in create_qp_common()
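These references all point into the QP code of the Linux kernel's mlx4 InfiniBand driver. Lines 863-876 bracket the point in create_qp_common() where a freshly created QP is registered with its CQs; a sketch of that section, in which the list bookkeeping between lock and unlock (the qps_list/cq_send_list/cq_recv_list names) and the recv_cq arguments are assumptions inferred from the visible send_cq calls:

	/* Sketch: attach the new QP to the device and to both CQs while
	 * holding both CQ locks. */
	mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
			 to_mcq(init_attr->recv_cq));

	list_add_tail(&qp->qps_list, &dev->qp_list);

	mcq = to_mcq(init_attr->send_cq);
	list_add_tail(&qp->cq_send_list, &mcq->send_qp_list);
	mcq = to_mcq(init_attr->recv_cq);
	list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);

	mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
			   to_mcq(init_attr->recv_cq));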
933 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) in mlx4_ib_lock_cqs() argument
934 __acquires(&send_cq->lock) __acquires(&recv_cq->lock) in mlx4_ib_lock_cqs()
936 if (send_cq == recv_cq) { in mlx4_ib_lock_cqs()
937 spin_lock(&send_cq->lock); in mlx4_ib_lock_cqs()
939 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { in mlx4_ib_lock_cqs()
940 spin_lock(&send_cq->lock); in mlx4_ib_lock_cqs()
944 spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING); in mlx4_ib_lock_cqs()
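The listing only surfaces the send_cq side of each branch of mlx4_ib_lock_cqs(); the helper's deadlock-avoidance pattern, with the recv_cq counterparts and the sparse annotation filled in as assumptions, is roughly:

static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		/* One CQ serves both directions: take its lock once. */
		spin_lock(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		/* Always lock the CQ with the lower CQN first, so any two
		 * CQs are acquired in a globally consistent order and an
		 * ABBA deadlock between QPs sharing them cannot occur. */
		spin_lock(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}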
948 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq) in mlx4_ib_unlock_cqs() argument
949 __releases(&send_cq->lock) __releases(&recv_cq->lock) in mlx4_ib_unlock_cqs()
951 if (send_cq == recv_cq) { in mlx4_ib_unlock_cqs()
953 spin_unlock(&send_cq->lock); in mlx4_ib_unlock_cqs()
954 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) { in mlx4_ib_unlock_cqs()
956 spin_unlock(&send_cq->lock); in mlx4_ib_unlock_cqs()
958 spin_unlock(&send_cq->lock); in mlx4_ib_unlock_cqs()
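The matching unlock helper (lines 948-958) releases in the reverse of the same CQN order; the recv_cq lines are again assumed from the pattern:

static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
		/* Release in reverse acquisition order: higher CQN first. */
		spin_unlock(&recv_cq->lock);
		spin_unlock(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock(&recv_cq->lock);
	}
}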
982 struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq) in get_cqs() argument
986 *send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq); in get_cqs()
987 *recv_cq = *send_cq; in get_cqs()
990 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
991 *recv_cq = *send_cq; in get_cqs()
994 *send_cq = to_mcq(qp->ibqp.send_cq); in get_cqs()
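Lines 982-994 belong to the helper that resolves which CQs a QP actually uses; a sketch with the branches that do not mention send_cq filled in as assumptions:

static void get_cqs(struct mlx4_ib_qp *qp,
		    struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		/* XRC target QPs have no CQs of their own; the XRC
		 * domain's CQ stands in for both directions. */
		*send_cq = to_mcq(to_mxrcd(qp->ibqp.xrcd)->cq);
		*recv_cq = *send_cq;
		break;
	case IB_QPT_XRC_INI:
		/* Assumed case label: an XRC initiator has no receive
		 * side, so alias the receive CQ to the send CQ. */
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = *send_cq;
		break;
	default:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);	/* assumed counterpart */
		break;
	}
}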
1003 struct mlx4_ib_cq *send_cq, *recv_cq; in destroy_qp_common() local
1034 get_cqs(qp, &send_cq, &recv_cq); in destroy_qp_common()
1037 mlx4_ib_lock_cqs(send_cq, recv_cq); in destroy_qp_common()
1046 if (send_cq != recv_cq) in destroy_qp_common()
1047 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
1052 mlx4_ib_unlock_cqs(send_cq, recv_cq); in destroy_qp_common()
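Lines 1034-1052 give the teardown ordering in destroy_qp_common(): resolve both CQs, take both locks, flush completions still queued for this QPN, then unlock. A condensed sketch, with the receive-side clean and the list removal assumed:

	get_cqs(qp, &send_cq, &recv_cq);

	mlx4_ib_lock_cqs(send_cq, recv_cq);

	/* Assumed: unlink the QP from the device/CQ lists under both locks,
	 * then drop any completions that still reference this QPN. */
	__mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
			   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
	if (send_cq != recv_cq)
		__mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

	mlx4_ib_unlock_cqs(send_cq, recv_cq);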
1145 init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq; in mlx4_ib_create_qp()
1150 init_attr->recv_cq = init_attr->send_cq; in mlx4_ib_create_qp()
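Lines 1145 and 1150 are the XRC-target special case in mlx4_ib_create_qp(): such a QP owns no CQs, so both attributes are aliased to the XRC domain's CQ before the common creation path runs. A minimal sketch, with the surrounding control flow assumed:

	if (init_attr->qp_type == IB_QPT_XRC_TGT) {
		/* Borrow the XRC domain's CQ for both directions. */
		init_attr->send_cq = to_mxrcd(init_attr->xrcd)->cq;
		init_attr->recv_cq = init_attr->send_cq;
	}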
1517 struct mlx4_ib_cq *send_cq, *recv_cq; in __mlx4_ib_modify_qp() local
1704 get_cqs(qp, &send_cq, &recv_cq); in __mlx4_ib_modify_qp()
1706 context->cqn_send = cpu_to_be32(send_cq->mcq.cqn); in __mlx4_ib_modify_qp()
1915 if (send_cq != recv_cq) in __mlx4_ib_modify_qp()
1916 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in __mlx4_ib_modify_qp()
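In __mlx4_ib_modify_qp() the pair appears twice: the CQ numbers are programmed into the hardware QP context (line 1706), and a transition to RESET flushes both CQs of completions for this QPN (lines 1915-1916). A sketch with the receive-side counterparts assumed:

	get_cqs(qp, &send_cq, &recv_cq);

	context->cqn_send = cpu_to_be32(send_cq->mcq.cqn);
	context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);	/* assumed */

	/* ... state-transition setup and the firmware command omitted ... */

	/* On a kernel QP entering RESET, drop stale completions; only the
	 * send_cq half of this appears in the listing above. */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
	}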
2783 if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { in mlx4_ib_post_send()
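Line 2783 is the lone post-send reference: the work-queue overflow check consults the send CQ when the ring looks full, taking the CQ lock to get a head/tail snapshot consistent with concurrent completion polling. The helper's body is outside this listing; a sketch of that pattern, reconstructed as an assumption:

static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx4_ib_cq *cq;
	unsigned int cur;

	/* Fast path: plenty of room, no lock needed. */
	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	/* Re-check under the CQ lock so completions polled concurrently
	 * (which advance wq->tail) are taken into account. */
	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}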