Searched refs:mqp (Results 1 - 12 of 12) sorted by relevance

/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_verbs_mcast.c
55 struct ipath_mcast_qp *mqp; ipath_mcast_qp_alloc() local
57 mqp = kmalloc(sizeof *mqp, GFP_KERNEL); ipath_mcast_qp_alloc()
58 if (!mqp) ipath_mcast_qp_alloc()
61 mqp->qp = qp; ipath_mcast_qp_alloc()
65 return mqp; ipath_mcast_qp_alloc()
68 static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp) ipath_mcast_qp_free() argument
70 struct ipath_qp *qp = mqp->qp; ipath_mcast_qp_free()
76 kfree(mqp); ipath_mcast_qp_free()
157 * @mqp: the QP to attach
165 struct ipath_mcast_qp *mqp) ipath_mcast_add()
193 if (p->qp == mqp->qp) { ipath_mcast_add()
205 list_add_tail_rcu(&mqp->list, &tmcast->qp_list); ipath_mcast_add()
222 list_add_tail_rcu(&mqp->list, &mcast->qp_list); ipath_mcast_add()
241 struct ipath_mcast_qp *mqp; ipath_multicast_attach() local
253 mqp = ipath_mcast_qp_alloc(qp); ipath_multicast_attach()
254 if (mqp == NULL) { ipath_multicast_attach()
259 switch (ipath_mcast_add(dev, mcast, mqp)) { ipath_multicast_attach()
262 ipath_mcast_qp_free(mqp); ipath_multicast_attach()
271 ipath_mcast_qp_free(mqp); ipath_multicast_attach()
163 ipath_mcast_add(struct ipath_ibdev *dev, struct ipath_mcast *mcast, struct ipath_mcast_qp *mqp) ipath_mcast_add() argument
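The ipath hits above show the wrapper-allocation idiom used throughout this
file: a small struct ties one QP to one multicast group through a list_head,
so a single QP can sit on several groups' RCU-protected lists. A minimal
sketch of that idiom, with demo_* names standing in for the driver's types:

    #include <linux/slab.h>
    #include <linux/list.h>

    struct demo_qp;                          /* stand-in for ipath_qp */

    struct demo_mcast_qp {
            struct list_head list;           /* links into a group's qp_list */
            struct demo_qp *qp;              /* the attached QP */
    };

    static struct demo_mcast_qp *demo_mcast_qp_alloc(struct demo_qp *qp)
    {
            struct demo_mcast_qp *mqp;

            mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
            if (!mqp)
                    return NULL;
            mqp->qp = qp;                    /* the real driver also takes a
                                              * reference on qp here */
            return mqp;
    }

    static void demo_mcast_qp_free(struct demo_mcast_qp *mqp)
    {
            /* the real driver drops the qp reference and wakes waiters */
            kfree(mqp);
    }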
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_verbs_mcast.c
44 struct qib_mcast_qp *mqp; qib_mcast_qp_alloc() local
46 mqp = kmalloc(sizeof(*mqp), GFP_KERNEL); qib_mcast_qp_alloc()
47 if (!mqp) qib_mcast_qp_alloc()
50 mqp->qp = qp; qib_mcast_qp_alloc()
54 return mqp; qib_mcast_qp_alloc()
57 static void qib_mcast_qp_free(struct qib_mcast_qp *mqp) qib_mcast_qp_free() argument
59 struct qib_qp *qp = mqp->qp; qib_mcast_qp_free()
65 kfree(mqp); qib_mcast_qp_free()
147 * @mqp: the QP to attach
154 struct qib_mcast *mcast, struct qib_mcast_qp *mqp) qib_mcast_add()
182 if (p->qp == mqp->qp) { qib_mcast_add()
194 list_add_tail_rcu(&mqp->list, &tmcast->qp_list); qib_mcast_add()
211 list_add_tail_rcu(&mqp->list, &mcast->qp_list); qib_mcast_add()
231 struct qib_mcast_qp *mqp; qib_multicast_attach() local
248 mqp = qib_mcast_qp_alloc(qp); qib_multicast_attach()
249 if (mqp == NULL) { qib_multicast_attach()
255 switch (qib_mcast_add(dev, ibp, mcast, mqp)) { qib_multicast_attach()
258 qib_mcast_qp_free(mqp); qib_multicast_attach()
268 qib_mcast_qp_free(mqp); qib_multicast_attach()
153 qib_mcast_add(struct qib_ibdev *dev, struct qib_ibport *ibp, struct qib_mcast *mcast, struct qib_mcast_qp *mqp) qib_mcast_add() argument
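qib duplicates the ipath pattern almost line for line; the interesting part
is the list_add_tail_rcu() at lines 194 and 211, where writers insert under
a lock while readers walk the list under rcu_read_lock() only. A hedged
sketch of that reader/writer split, reusing the demo_* types from the
previous sketch:

    #include <linux/rculist.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    static DEFINE_SPINLOCK(demo_lock);       /* serializes writers */
    static LIST_HEAD(demo_qp_list);

    static void demo_attach(struct demo_mcast_qp *mqp)
    {
            spin_lock(&demo_lock);
            list_add_tail_rcu(&mqp->list, &demo_qp_list);
            spin_unlock(&demo_lock);
    }

    static bool demo_is_attached(struct demo_qp *qp)
    {
            struct demo_mcast_qp *p;
            bool found = false;

            rcu_read_lock();                 /* readers take no lock */
            list_for_each_entry_rcu(p, &demo_qp_list, list) {
                    if (p->qp == qp) {
                            found = true;
                            break;
                    }
            }
            rcu_read_unlock();
            return found;
    }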
/linux-4.1.27/drivers/scsi/arm/
msgqueue.c
124 struct msgqueue_entry **mqp; msgqueue_addmsg() local
136 mqp = &msgq->qe; msgqueue_addmsg()
137 while (*mqp) msgqueue_addmsg()
138 mqp = &(*mqp)->next; msgqueue_addmsg()
140 *mqp = mq; msgqueue_addmsg()
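msgqueue_addmsg() appends with the classic pointer-to-pointer walk: mqp
holds the address of the pointer to patch, so the empty-list case needs no
special handling. A standalone sketch with a hypothetical entry type:

    struct entry {
            struct entry *next;
    };

    static void append(struct entry **head, struct entry *mq)
    {
            struct entry **mqp = head;

            while (*mqp)
                    mqp = &(*mqp)->next;     /* advance to the tail's next slot */
            mq->next = NULL;
            *mqp = mq;                       /* patches head or tail->next alike */
    }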
/linux-4.1.27/drivers/infiniband/hw/mlx4/
main.c
809 struct mlx4_ib_qp *mqp = to_mqp(ibqp); add_gid_entry() local
818 if (mlx4_ib_add_mc(mdev, mqp, gid)) { add_gid_entry()
819 ge->port = mqp->port; add_gid_entry()
823 mutex_lock(&mqp->mutex); add_gid_entry()
824 list_add_tail(&ge->list, &mqp->gid_list); add_gid_entry()
825 mutex_unlock(&mqp->mutex); add_gid_entry()
830 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, mlx4_ib_add_mc() argument
836 if (!mqp->port) mlx4_ib_add_mc()
840 ndev = mdev->iboe.netdevs[mqp->port - 1]; mlx4_ib_add_mc()
1271 struct mlx4_ib_qp *mqp = to_mqp(ibqp); mlx4_ib_mcg_attach() local
1283 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port, mlx4_ib_mcg_attach()
1284 !!(mqp->flags & mlx4_ib_mcg_attach()
1294 err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mlx4_ib_mcg_attach()
1295 (mqp->port == 1) ? 2 : 1, mlx4_ib_mcg_attach()
1296 !!(mqp->flags & mlx4_ib_mcg_attach()
1310 mutex_lock(&mqp->mutex); mlx4_ib_mcg_attach()
1311 list_add(&ib_steering->list, &mqp->steering_rules); mlx4_ib_mcg_attach()
1312 mutex_unlock(&mqp->mutex); mlx4_ib_mcg_attach()
1317 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, mlx4_ib_mcg_attach()
1320 mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, mlx4_ib_mcg_attach()
1349 struct mlx4_ib_qp *mqp = to_mqp(ibqp); mlx4_ib_mcg_detach() local
1359 mutex_lock(&mqp->mutex); mlx4_ib_mcg_detach()
1360 list_for_each_entry(ib_steering, &mqp->steering_rules, list) { mlx4_ib_mcg_detach()
1366 mutex_unlock(&mqp->mutex); mlx4_ib_mcg_detach()
1367 if (&ib_steering->list == &mqp->steering_rules) { mlx4_ib_mcg_detach()
1375 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, mlx4_ib_mcg_detach()
1381 err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, mlx4_ib_mcg_detach()
1387 mutex_lock(&mqp->mutex); mlx4_ib_mcg_detach()
1388 ge = find_gid_entry(mqp, gid->raw); mlx4_ib_mcg_detach()
1402 mutex_unlock(&mqp->mutex); mlx4_ib_mcg_detach()
1795 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, mlx4_ib_update_qps()
2491 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, mlx4_ib_steer_qp_reg() argument
2505 flow->port = mqp->port; mlx4_ib_steer_qp_reg()
2514 err = __mlx4_ib_create_flow(&mqp->ibqp, flow, mlx4_ib_steer_qp_reg()
2517 &mqp->reg_id); mlx4_ib_steer_qp_reg()
2519 err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id); mlx4_ib_steer_qp_reg()
2628 struct mlx4_ib_qp *mqp; mlx4_ib_handle_catas_error() local
2642 list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { mlx4_ib_handle_catas_error()
2643 spin_lock_irqsave(&mqp->sq.lock, flags_qp); mlx4_ib_handle_catas_error()
2644 if (mqp->sq.tail != mqp->sq.head) { mlx4_ib_handle_catas_error()
2645 send_mcq = to_mcq(mqp->ibqp.send_cq); mlx4_ib_handle_catas_error()
2648 mqp->ibqp.send_cq->comp_handler) { mlx4_ib_handle_catas_error()
2657 spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); mlx4_ib_handle_catas_error()
2659 spin_lock_irqsave(&mqp->rq.lock, flags_qp); mlx4_ib_handle_catas_error()
2661 if (!mqp->ibqp.srq) { mlx4_ib_handle_catas_error()
2662 if (mqp->rq.tail != mqp->rq.head) { mlx4_ib_handle_catas_error()
2663 recv_mcq = to_mcq(mqp->ibqp.recv_cq); mlx4_ib_handle_catas_error()
2666 mqp->ibqp.recv_cq->comp_handler) { mlx4_ib_handle_catas_error()
2677 spin_unlock_irqrestore(&mqp->rq.lock, flags_qp); mlx4_ib_handle_catas_error()
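The detach path at lines 1360-1367 above leans on a list_for_each_entry()
property: if the loop finishes without a break, the cursor's embedded
list_head aliases the list head itself, which is how "not found" is
detected. A minimal sketch of that exhausted-loop test (demo types only):

    #include <linux/list.h>
    #include <linux/types.h>

    struct rule {
            struct list_head list;
            u64 id;
    };

    static struct rule *find_rule(struct list_head *rules, u64 id)
    {
            struct rule *r;

            list_for_each_entry(r, rules, list)
                    if (r->id == id)
                            break;
            if (&r->list == rules)           /* loop ran to completion */
                    return NULL;
            return r;
    }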
qp.c
120 static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) to_msqp() argument
122 return container_of(mqp, struct mlx4_ib_sqp, qp); to_msqp()
130 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && is_tunnel_qp()
131 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + is_tunnel_qp()
142 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && is_sqp()
143 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); is_sqp()
149 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || is_sqp()
150 qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { is_sqp()
167 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && is_qp0()
168 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); is_qp0()
174 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) { is_qp0()
825 err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp); create_qp_common()
830 qp->mqp.qpn |= (1 << 23); create_qp_common()
837 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); create_qp_common()
839 qp->mqp.event = mlx4_ib_qp_event; create_qp_common()
989 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) destroy_qp_common()
991 qp->mqp.qpn); destroy_qp_common()
1025 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, destroy_qp_common()
1028 __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); destroy_qp_common()
1031 mlx4_qp_remove(dev->dev, &qp->mqp); destroy_qp_common()
1036 mlx4_qp_free(dev->dev, &qp->mqp); destroy_qp_common()
1040 mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1); destroy_qp_common()
1042 mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1); destroy_qp_common()
1146 qp->ibqp.qp_num = qp->mqp.qpn; mlx4_ib_create_qp()
1180 struct mlx4_ib_qp *mqp = to_mqp(qp); mlx4_ib_destroy_qp() local
1183 if (is_qp0(dev, mqp)) mlx4_ib_destroy_qp()
1184 mlx4_CLOSE_PORT(dev->dev, mqp->port); mlx4_ib_destroy_qp()
1186 if (dev->qp1_proxy[mqp->port - 1] == mqp) { mlx4_ib_destroy_qp()
1187 mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]); mlx4_ib_destroy_qp()
1188 dev->qp1_proxy[mqp->port - 1] = NULL; mlx4_ib_destroy_qp()
1189 mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]); mlx4_ib_destroy_qp()
1192 pd = get_pd(mqp); mlx4_ib_destroy_qp()
1193 destroy_qp_common(dev, mqp, !!pd->ibpd.uobject); mlx4_ib_destroy_qp()
1195 if (is_sqp(dev, mqp)) mlx4_ib_destroy_qp()
1196 kfree(to_msqp(mqp)); mlx4_ib_destroy_qp()
1198 kfree(mqp); mlx4_ib_destroy_qp()
1389 struct mlx4_ib_qp *mqp, mlx4_set_path()
1395 path, &mqp->pri, port); mlx4_set_path()
1401 struct mlx4_ib_qp *mqp, mlx4_set_alt_path()
1408 path, &mqp->alt, port); mlx4_set_alt_path()
1765 sqd_event, &qp->mqp); __mlx4_ib_modify_qp()
1806 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn, __mlx4_ib_modify_qp()
1809 mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); __mlx4_ib_modify_qp()
2091 if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) build_sriov_qp0_header()
2094 if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey)) build_sriov_qp0_header()
2098 sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn); build_sriov_qp0_header()
3131 err = mlx4_qp_query(dev->dev, &qp->mqp, &context); mlx4_ib_query_qp()
1387 mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, enum ib_qp_attr_mask qp_attr_mask, struct mlx4_ib_qp *mqp, struct mlx4_qp_path *path, u8 port) mlx4_set_path() argument
1398 mlx4_set_alt_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, enum ib_qp_attr_mask qp_attr_mask, struct mlx4_ib_qp *mqp, struct mlx4_qp_path *path, u8 port) mlx4_set_alt_path() argument
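The is_sqp()/is_qp0() hits (lines 142-143 and 167-168) are plain range
checks: the special QPs occupy a fixed four-entry window above base_sqpn,
two QP0s then two QP1s, one of each per port. A sketch of that test:

    #include <linux/types.h>

    /* QP0 for ports 1..2 at base_sqpn+0..1, QP1 at base_sqpn+2..3 */
    static bool demo_is_sqp(u32 qpn, u32 base_sqpn)
    {
            return qpn >= base_sqpn && qpn <= base_sqpn + 3;
    }

    static bool demo_is_qp0(u32 qpn, u32 base_sqpn)
    {
            return qpn >= base_sqpn && qpn <= base_sqpn + 1;
    }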
mlx4_ib.h
273 struct mlx4_qp mqp; member in struct:mlx4_ib_qp
618 static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp) to_mibqp() argument
620 return container_of(mqp, struct mlx4_ib_qp, mqp); to_mibqp()
754 int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
812 int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
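mlx4_ib.h is where the naming pays off: the core struct mlx4_qp is embedded
as the mqp member of the driver's mlx4_ib_qp, and to_mibqp() recovers the
wrapper from a core pointer with container_of(). mlx5_ib.h further down
repeats the same pattern. A sketch with stand-in types:

    #include <linux/kernel.h>                /* container_of() */
    #include <linux/types.h>

    struct core_qp {                         /* stand-in for mlx4_qp */
            u32 qpn;
    };

    struct drv_qp {                          /* stand-in for mlx4_ib_qp */
            struct core_qp mqp;              /* embedded, not a pointer */
            int drv_state;
    };

    static inline struct drv_qp *to_drv_qp(struct core_qp *mqp)
    {
            return container_of(mqp, struct drv_qp, mqp);
    }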
cq.c
652 struct mlx4_qp *mqp; mlx4_ib_poll_one() local
706 (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) { mlx4_ib_poll_one()
712 mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev, mlx4_ib_poll_one()
714 if (unlikely(!mqp)) { mlx4_ib_poll_one()
720 *cur_qp = to_mibqp(mqp); mlx4_ib_poll_one()
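mlx4_ib_poll_one() (and mlx5_poll_one() at the bottom of this page) caches
the QP resolved for the previous CQE and redoes the table lookup only when
the QPN changes, since completions usually arrive in runs from one QP. A
sketch reusing the stand-in types above; demo_qp_lookup() is hypothetical:

    #include <linux/errno.h>

    struct demo_dev;                         /* stand-in device */
    struct core_qp *demo_qp_lookup(struct demo_dev *dev, u32 qpn);

    static int demo_resolve_qp(struct demo_dev *dev, u32 qpn,
                               struct drv_qp **cur_qp)
    {
            if (!*cur_qp || qpn != (*cur_qp)->mqp.qpn) {
                    struct core_qp *mqp = demo_qp_lookup(dev, qpn);

                    if (unlikely(!mqp))
                            return -EINVAL;  /* CQE raced with QP destroy */
                    *cur_qp = to_drv_qp(mqp);
            }
            return 0;
    }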
/linux-4.1.27/arch/sparc/kernel/
pci_msi.c
334 } *mqp; sparc64_pbm_msi_init() local
354 mqp = of_get_property(pbm->op->dev.of_node, sparc64_pbm_msi_init()
356 if (!mqp) sparc64_pbm_msi_init()
357 mqp = of_get_property(pbm->op->dev.of_node, sparc64_pbm_msi_init()
359 if (!mqp || len != sizeof(struct msiq_prop)) sparc64_pbm_msi_init()
362 pbm->msiq_first = mqp->first_msiq; sparc64_pbm_msi_init()
363 pbm->msiq_first_devino = mqp->first_devino; sparc64_pbm_msi_init()
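The sparc64 hit reads a firmware property into a struct, falling back to a
second (older) property name when the first is absent, and validates the
length before trusting the data. The real property names are truncated in
the output above, so the names and layout below are hypothetical:

    #include <linux/of.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct msiq_prop {                       /* stand-in layout */
            u32 first_msiq;
            u32 num_msiq;
            u32 first_devino;
    };

    static int demo_read_msiq_prop(struct device_node *np,
                                   struct msiq_prop *out)
    {
            const struct msiq_prop *mqp;
            int len;

            mqp = of_get_property(np, "demo,msi-eq", &len);
            if (!mqp)
                    mqp = of_get_property(np, "demo,msi-eq-legacy", &len);
            if (!mqp || len != sizeof(*mqp))
                    return -ENODEV;          /* missing or wrong-sized */

            *out = *mqp;
            return 0;
    }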
/linux-4.1.27/drivers/infiniband/hw/mlx5/
odp.c
165 int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn, mlx5_ib_page_fault_resume()
170 qp->mqp.qpn); mlx5_ib_page_fault_resume()
411 wqe_index, qp->mqp.qpn); mlx5_ib_mr_initiator_pfault_handler()
421 wqe_index, qp->mqp.qpn, mlx5_ib_mr_initiator_pfault_handler()
428 if (qp->mqp.qpn != ctrl_qpn) { mlx5_ib_mr_initiator_pfault_handler()
430 wqe_index, qp->mqp.qpn, mlx5_ib_mr_initiator_pfault_handler()
559 -ret, wqe_index, qp->mqp.qpn); mlx5_ib_mr_wqe_pfault_handler()
596 qp->mqp.qpn, resume_with_error, pfault->mpfault.flags); mlx5_ib_mr_wqe_pfault_handler()
763 qp->mqp.pfault_handler = mlx5_ib_pfault_handler; mlx5_ib_odp_create_qp()
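odp.c wires a per-QP callback into the core object at create time (line
763) so the core driver can dispatch page faults back into the IB layer,
which resolves them and then tells the hardware to resume the QP. A sketch
of that registration shape, all names ours:

    #include <linux/types.h>

    struct demo_core_qp {
            u32 qpn;
            void (*pfault_handler)(struct demo_core_qp *qp, int fault);
    };

    static void demo_pfault_handler(struct demo_core_qp *qp, int fault)
    {
            /* fix up the faulting pages, then resume or error the QP */
    }

    static void demo_odp_create_qp(struct demo_core_qp *qp)
    {
            qp->pfault_handler = demo_pfault_handler;
    }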
qp.c
1029 err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen); create_qp_common()
1040 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); create_qp_common()
1042 qp->mqp.event = mlx5_ib_qp_event; create_qp_common()
1166 MLX5_QP_STATE_RST, in, 0, &qp->mqp)) destroy_qp_common()
1168 qp->mqp.qpn); destroy_qp_common()
1175 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, destroy_qp_common()
1178 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); destroy_qp_common()
1182 err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp); destroy_qp_common()
1184 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn); destroy_qp_common()
1285 qp->ibqp.qp_num = qp->mqp.qpn; mlx5_ib_create_qp()
1288 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn, mlx5_ib_create_qp()
1312 struct mlx5_ib_qp *mqp = to_mqp(qp); mlx5_ib_destroy_qp() local
1314 destroy_qp_common(dev, mqp); mlx5_ib_destroy_qp()
1316 kfree(mqp); mlx5_ib_destroy_qp()
1731 &qp->mqp); __mlx5_ib_modify_qp()
1754 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, __mlx5_ib_modify_qp()
1757 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); __mlx5_ib_modify_qp()
2574 ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8)); finish_wqe()
3058 err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb)); mlx5_ib_query_qp()
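The mlx5 destroy path (lines 1166-1186) mirrors mlx4's, and the ordering is
the point: force the QP to RESET so hardware stops touching it, scrub its
stale CQEs out of both CQs so pollers never see a freed QP, and only then
release the object and its number. A condensed sketch; every helper and
field here is a stand-in, not the driver's API:

    struct demo_cq;

    struct demo_ib_qp {
            struct core_qp mqp;              /* embedded core QP, as above */
            struct demo_cq *send_cq;
            struct demo_cq *recv_cq;
    };

    void demo_qp_to_reset(struct demo_dev *dev, struct core_qp *mqp);
    void demo_cq_clean(struct demo_cq *cq, u32 qpn);
    void demo_qp_release(struct demo_dev *dev, struct core_qp *mqp);

    static void demo_destroy_qp(struct demo_dev *dev, struct demo_ib_qp *qp)
    {
            demo_qp_to_reset(dev, &qp->mqp);          /* 1. quiesce hardware */
            demo_cq_clean(qp->recv_cq, qp->mqp.qpn);  /* 2. drop stale CQEs */
            if (qp->send_cq != qp->recv_cq)
                    demo_cq_clean(qp->send_cq, qp->mqp.qpn);
            demo_qp_release(dev, &qp->mqp);           /* 3. free QP + QPN */
    }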
mlx5_ib.h
177 struct mlx5_core_qp mqp; member in struct:mlx5_ib_qp
471 static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp) to_mibqp() argument
473 return container_of(mqp, struct mlx5_ib_qp, mqp); to_mibqp()
cq.c
409 struct mlx5_core_qp *mqp; mlx5_poll_one() local
453 mqp = __mlx5_qp_lookup(dev->mdev, qpn); mlx5_poll_one()
454 if (unlikely(!mqp)) { mlx5_poll_one()
460 *cur_qp = to_mibqp(mqp); mlx5_poll_one()

Completed in 258 milliseconds