Lines Matching refs:mqp
121 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) in to_msqp() argument
123 return container_of(mqp, struct mlx4_ib_sqp, qp); in to_msqp()
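to_msqp() at lines 121-123 is the usual embedded-member idiom: struct mlx4_ib_qp lives inside struct mlx4_ib_sqp, and container_of() steps back from the member pointer to the enclosing object. A minimal standalone sketch of the same idiom, with simplified stand-in structs and a userspace container_of (the kernel gets the real macro from <linux/kernel.h>):

#include <stdio.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins for struct mlx4_ib_qp / struct mlx4_ib_sqp. */
struct qp  { unsigned int qpn; };
struct sqp { char header_buf[64]; struct qp qp; };

static struct sqp *to_sqp(struct qp *qp)
{
        return container_of(qp, struct sqp, qp);
}

int main(void)
{
        struct sqp s = { .qp = { .qpn = 0x40 } };

        /* Recovers &s from a pointer to its embedded member. */
        printf("qpn via container: 0x%x\n", to_sqp(&s.qp)->qp.qpn);
        return 0;
}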
131 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
132 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
143 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
144 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
150 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || in is_sqp()
151 qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { in is_sqp()
168 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_qp0()
169 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); in is_qp0()
175 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) { in is_qp0()
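is_tunnel_qp(), is_sqp() and is_qp0() classify a QP entirely by its number: either qp->mqp.qpn falls inside a firmware-reported base range (base_sqpn, base_tunnel_sqpn) or it matches one of the per-port proxy QPN tables. A hedged standalone sketch of that classification style, using made-up bases and a hypothetical proxy table rather than the driver's phys_caps values:

#include <stdbool.h>
#include <stdio.h>

#define BASE_SQPN  0x40   /* hypothetical; the real value comes from phys_caps.base_sqpn */
#define NUM_PORTS  2

static const unsigned int qp0_proxy[NUM_PORTS] = { 0x80, 0x82 };  /* hypothetical table */

/* Native special QPs occupy the four slots base_sqpn .. base_sqpn + 3. */
static bool is_real_sqp(unsigned int qpn)
{
        return qpn >= BASE_SQPN && qpn <= BASE_SQPN + 3;
}

/* Proxy special QPs are found by matching a per-port proxy QPN table instead. */
static bool is_proxy_qp0(unsigned int qpn)
{
        for (int i = 0; i < NUM_PORTS; i++)
                if (qpn == qp0_proxy[i])
                        return true;
        return false;
}

int main(void)
{
        printf("0x41 real sqp: %d, 0x82 proxy qp0: %d\n",
               is_real_sqp(0x41), is_proxy_qp0(0x82));
        return 0;
}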
844 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp); in create_qp_common()
849 qp->mqp.qpn |= (1 << 23); in create_qp_common()
856 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
858 qp->mqp.event = mlx4_ib_qp_event; in create_qp_common()
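Lines 844-858 cover the hardware side of creation: the QPN comes back from mlx4_qp_alloc(), bit 23 is OR'd in for one QP flavor, the send doorbell value is precomputed as swab32(qpn << 8), and the async event callback is hooked up. A standalone sketch of just the doorbell encoding, using __builtin_bswap32 as a stand-in for the kernel's swab32():

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t qpn = 0x00abcd;        /* example 24-bit QP number */

        /*
         * qp->doorbell_qpn = swab32(qp->mqp.qpn << 8): the QPN is moved
         * into the top 24 bits and byte-swapped once at create time, so
         * the send path can write the value without converting it again
         * on every post.
         */
        uint32_t doorbell_qpn = __builtin_bswap32(qpn << 8);

        printf("qpn 0x%06x -> doorbell 0x%08x\n", qpn, doorbell_qpn);
        return 0;
}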
1008 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) in destroy_qp_common()
1010 qp->mqp.qpn); in destroy_qp_common()
1044 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in destroy_qp_common()
1047 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
1050 mlx4_qp_remove(dev->dev, &qp->mqp); in destroy_qp_common()
1055 mlx4_qp_free(dev->dev, &qp->mqp); in destroy_qp_common()
1059 mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); in destroy_qp_common()
1061 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); in destroy_qp_common()
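Teardown at lines 1008-1061 follows a strict order: force the QP to RESET (warning rather than failing if the firmware command errors), purge both CQs of completions carrying this QPN via __mlx4_ib_cq_clean() (the recv-side call at line 1044 also takes the SRQ so its WQEs can be returned), detach and free the mlx4_qp, and only then hand back either the steering entry or the reserved QPN range. The CQ purge is essentially "drop every ring entry that matches this QPN"; a hedged standalone sketch of that filtering step over a toy completion ring, not the driver's CQE layout:

#include <stdio.h>

struct toy_cqe { unsigned int qpn; int wrid; };

/* Drop every completion belonging to dead_qpn, compacting the rest in place. */
static int cq_clean(struct toy_cqe *ring, int n, unsigned int dead_qpn)
{
        int out = 0;

        for (int i = 0; i < n; i++)
                if (ring[i].qpn != dead_qpn)
                        ring[out++] = ring[i];
        return out;     /* new number of valid entries */
}

int main(void)
{
        struct toy_cqe ring[] = {
                { 0x41, 1 }, { 0x55, 2 }, { 0x41, 3 }, { 0x60, 4 },
        };
        int n = cq_clean(ring, 4, 0x41);

        for (int i = 0; i < n; i++)
                printf("qpn 0x%x wrid %d\n", ring[i].qpn, ring[i].wrid);
        return 0;
}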
1168 qp->ibqp.qp_num = qp->mqp.qpn; in mlx4_ib_create_qp()
1202 struct mlx4_ib_qp *mqp = to_mqp(qp); in mlx4_ib_destroy_qp() local
1205 if (is_qp0(dev, mqp)) in mlx4_ib_destroy_qp()
1206 mlx4_CLOSE_PORT(dev->dev, mqp->port); in mlx4_ib_destroy_qp()
1208 if (dev->qp1_proxy[mqp->port - 1] == mqp) { in mlx4_ib_destroy_qp()
1209 mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]); in mlx4_ib_destroy_qp()
1210 dev->qp1_proxy[mqp->port - 1] = NULL; in mlx4_ib_destroy_qp()
1211 mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]); in mlx4_ib_destroy_qp()
1214 if (mqp->counter_index) in mlx4_ib_destroy_qp()
1215 mlx4_ib_free_qp_counter(dev, mqp); in mlx4_ib_destroy_qp()
1217 pd = get_pd(mqp); in mlx4_ib_destroy_qp()
1218 destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); in mlx4_ib_destroy_qp()
1220 if (is_sqp(dev, mqp)) in mlx4_ib_destroy_qp()
1221 kfree(to_msqp(mqp)); in mlx4_ib_destroy_qp()
1223 kfree(mqp); in mlx4_ib_destroy_qp()
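mlx4_ib_destroy_qp() closes the loop on the embedding trick behind to_msqp(): a special QP was allocated as a struct mlx4_ib_sqp, so the memory to release is the container, hence kfree(to_msqp(mqp)) rather than kfree(mqp). A standalone sketch of why the distinction matters, with malloc/free standing in for the kernel allocator and the same simplified structs as above:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct qp  { unsigned int qpn; };
struct sqp { char header_buf[64]; struct qp qp; };   /* qp embedded mid-struct */

int main(void)
{
        /* A "special" QP is allocated as the larger container... */
        struct sqp *s = calloc(1, sizeof(*s));
        if (!s)
                return 1;

        /* ...but most of the driver only ever holds this inner pointer. */
        struct qp *qp = &s->qp;

        /*
         * free(qp) would pass a pointer that no allocator ever returned;
         * the allocation to release is the container.
         */
        free(container_of(qp, struct sqp, qp));
        return 0;
}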
1418 struct mlx4_ib_qp *mqp, in mlx4_set_path() argument
1425 path, &mqp->pri, port); in mlx4_set_path()
1431 struct mlx4_ib_qp *mqp, in mlx4_set_alt_path() argument
1437 path, &mqp->alt, port); in mlx4_set_alt_path()
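mlx4_set_path() and mlx4_set_alt_path() at lines 1418-1437 are thin wrappers around one worker; the only difference is whether it is handed &mqp->pri or &mqp->alt, i.e. which per-QP path-state slot it should fill in. A hedged sketch of that "one worker, caller picks the sub-struct" shape, with invented field names rather than the driver's:

#include <stdio.h>

struct path_state { int smac_index; int vid; };      /* invented fields */

struct qp_state {
        struct path_state pri;   /* primary path   */
        struct path_state alt;   /* alternate path */
};

/* One worker fills whichever slot the caller points it at. */
static void set_path(struct path_state *slot, int smac_index, int vid)
{
        slot->smac_index = smac_index;
        slot->vid = vid;
}

static void set_pri_path(struct qp_state *qp) { set_path(&qp->pri, 3, 100); }
static void set_alt_path(struct qp_state *qp) { set_path(&qp->alt, 7, 200); }

int main(void)
{
        struct qp_state qp = { { 0, 0 }, { 0, 0 } };

        set_pri_path(&qp);
        set_alt_path(&qp);
        printf("pri smac %d, alt smac %d\n", qp.pri.smac_index, qp.alt.smac_index);
        return 0;
}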
1872 sqd_event, &qp->mqp); in __mlx4_ib_modify_qp()
1913 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, in __mlx4_ib_modify_qp()
1916 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in __mlx4_ib_modify_qp()
2200 if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) in build_sriov_qp0_header()
2203 if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) in build_sriov_qp0_header()
2207 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); in build_sriov_qp0_header()
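build_sriov_qp0_header() first fetches the paravirtualized qkey (lines 2200-2203), then stamps the proxy's own QPN into the DETH source QPN field with cpu_to_be32(), so the header carries the number in network byte order regardless of host endianness. A standalone sketch of that conversion, using htonl() as the userspace equivalent and an invented stand-in struct for the unpacked header fields:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct toy_deth {             /* stand-in, not the real DETH layout */
        uint32_t qkey;
        uint32_t source_qpn;  /* stored big-endian, as it goes on the wire */
};

int main(void)
{
        struct toy_deth deth;
        uint32_t qpn = 0x00abcd;

        deth.qkey = htonl(0x11111111);          /* hypothetical qkey value */
        /* deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn) in the driver. */
        deth.source_qpn = htonl(qpn);

        const unsigned char *b = (const unsigned char *)&deth.source_qpn;
        printf("source_qpn bytes on the wire: %02x %02x %02x %02x\n",
               b[0], b[1], b[2], b[3]);
        return 0;
}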
3245 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); in mlx4_ib_query_qp()