/linux-4.1.27/drivers/infiniband/hw/ipath/
D  ipath_verbs_mcast.c
       55  struct ipath_mcast_qp *mqp;  in ipath_mcast_qp_alloc() local
       57  mqp = kmalloc(sizeof *mqp, GFP_KERNEL);  in ipath_mcast_qp_alloc()
       58  if (!mqp)  in ipath_mcast_qp_alloc()
       61  mqp->qp = qp;  in ipath_mcast_qp_alloc()
       65  return mqp;  in ipath_mcast_qp_alloc()
       68  static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)  in ipath_mcast_qp_free() argument
       70  struct ipath_qp *qp = mqp->qp;  in ipath_mcast_qp_free()
       76  kfree(mqp);  in ipath_mcast_qp_free()
      165  struct ipath_mcast_qp *mqp)  in ipath_mcast_add() argument
      193  if (p->qp == mqp->qp) {  in ipath_mcast_add()
      [all …]
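The ipath hits above (and the near-identical qib entry that follows) show the usual pattern of wrapping a QP pointer in a small heap-allocated node so it can be linked onto a multicast group's list. A minimal sketch of that pattern, with placeholder type names (mcast_qp and my_qp are illustrative, not the driver's own types):

    #include <linux/list.h>
    #include <linux/slab.h>

    /* Hypothetical stand-ins for the driver's ipath_qp / ipath_mcast_qp types. */
    struct my_qp {
        int qpn;
    };

    struct mcast_qp {
        struct list_head list;      /* links the node into a multicast group */
        struct my_qp *qp;           /* the wrapped queue pair */
    };

    /* Allocate a list node referring to @qp; returns NULL on failure. */
    static struct mcast_qp *mcast_qp_alloc(struct my_qp *qp)
    {
        struct mcast_qp *mqp;

        mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
        if (!mqp)
            return NULL;

        mqp->qp = qp;
        return mqp;
    }

    /* Release the node; the wrapped QP itself is owned elsewhere. */
    static void mcast_qp_free(struct mcast_qp *mqp)
    {
        kfree(mqp);
    }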
/linux-4.1.27/drivers/infiniband/hw/qib/
D  qib_verbs_mcast.c
       44  struct qib_mcast_qp *mqp;  in qib_mcast_qp_alloc() local
       46  mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);  in qib_mcast_qp_alloc()
       47  if (!mqp)  in qib_mcast_qp_alloc()
       50  mqp->qp = qp;  in qib_mcast_qp_alloc()
       54  return mqp;  in qib_mcast_qp_alloc()
       57  static void qib_mcast_qp_free(struct qib_mcast_qp *mqp)  in qib_mcast_qp_free() argument
       59  struct qib_qp *qp = mqp->qp;  in qib_mcast_qp_free()
       65  kfree(mqp);  in qib_mcast_qp_free()
      154  struct qib_mcast *mcast, struct qib_mcast_qp *mqp)  in qib_mcast_add() argument
      182  if (p->qp == mqp->qp) {  in qib_mcast_add()
      [all …]
/linux-4.1.27/drivers/scsi/arm/
D  msgqueue.c
      124  struct msgqueue_entry **mqp;  in msgqueue_addmsg() local
      136  mqp = &msgq->qe;  in msgqueue_addmsg()
      137  while (*mqp)  in msgqueue_addmsg()
      138  mqp = &(*mqp)->next;  in msgqueue_addmsg()
      140  *mqp = mq;  in msgqueue_addmsg()
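Here mqp is not a queue pair at all: msgqueue_addmsg() uses the classic pointer-to-pointer walk to append to a singly linked message queue without special-casing the empty list, since mqp starts at the address of the head pointer and ends at the address of the last ->next field. A standalone sketch of the same idiom, with a generic entry type rather than the driver's msgqueue_entry:

    #include <stddef.h>

    struct entry {
        struct entry *next;
        int data;
    };

    /* Append @e to the list whose head pointer lives at *headp.
     * Walking a pointer-to-pointer means an empty list needs no special case. */
    static void list_append(struct entry **headp, struct entry *e)
    {
        struct entry **pp = headp;

        while (*pp)                 /* advance to the final ->next slot */
            pp = &(*pp)->next;

        e->next = NULL;
        *pp = e;                    /* link the new tail in place */
    }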
/linux-4.1.27/drivers/infiniband/hw/mlx4/
D  main.c
      809  struct mlx4_ib_qp *mqp = to_mqp(ibqp);  in add_gid_entry() local
      818  if (mlx4_ib_add_mc(mdev, mqp, gid)) {  in add_gid_entry()
      819  ge->port = mqp->port;  in add_gid_entry()
      823  mutex_lock(&mqp->mutex);  in add_gid_entry()
      824  list_add_tail(&ge->list, &mqp->gid_list);  in add_gid_entry()
      825  mutex_unlock(&mqp->mutex);  in add_gid_entry()
      830  int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,  in mlx4_ib_add_mc() argument
      836  if (!mqp->port)  in mlx4_ib_add_mc()
      840  ndev = mdev->iboe.netdevs[mqp->port - 1];  in mlx4_ib_add_mc()
     1271  struct mlx4_ib_qp *mqp = to_mqp(ibqp);  in mlx4_ib_mcg_attach() local
      [all …]
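In add_gid_entry() the mlx4 driver fills in a gid-list element and appends it to the QP's gid_list with the QP mutex held. A hedged sketch of that add-under-lock shape; struct gid_entry, qp_ctx and their fields are illustrative placeholders, not the exact mlx4_ib definitions:

    #include <linux/errno.h>
    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct gid_entry {
        struct list_head list;
        u8 port;
    };

    struct qp_ctx {                     /* stands in for struct mlx4_ib_qp */
        struct mutex mutex;
        struct list_head gid_list;
        u8 port;
    };

    /* Append a new gid entry to the QP's list; the mutex serialises writers. */
    static int add_gid_entry(struct qp_ctx *mqp)
    {
        struct gid_entry *ge;

        ge = kzalloc(sizeof(*ge), GFP_KERNEL);
        if (!ge)
            return -ENOMEM;

        ge->port = mqp->port;

        mutex_lock(&mqp->mutex);
        list_add_tail(&ge->list, &mqp->gid_list);
        mutex_unlock(&mqp->mutex);

        return 0;
    }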
D  qp.c
      120  static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)  in to_msqp() argument
      122  return container_of(mqp, struct mlx4_ib_sqp, qp);  in to_msqp()
      130  return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&  in is_tunnel_qp()
      131  qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +  in is_tunnel_qp()
      142  qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&  in is_sqp()
      143  qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);  in is_sqp()
      149  if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] ||  in is_sqp()
      150  qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) {  in is_sqp()
      167  qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&  in is_qp0()
      168  qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);  in is_qp0()
      [all …]
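is_tunnel_qp(), is_sqp() and is_qp0() above all classify a QP purely by comparing qp->mqp.qpn against a per-device base number. A minimal sketch of that range test; the window sizes mirror the comparisons visible in the hits (base_sqpn .. +3 and .. +1), and the surrounding types are placeholders:

    #include <linux/types.h>

    /* Placeholder for the per-device capability block. */
    struct dev_caps {
        u32 base_sqpn;              /* first special QP number */
    };

    /* The special QPs sit in the window base_sqpn .. base_sqpn + 3. */
    static bool is_special_qp(const struct dev_caps *caps, u32 qpn)
    {
        return qpn >= caps->base_sqpn && qpn <= caps->base_sqpn + 3;
    }

    /* QP0 occupies the first two slots of that window. */
    static bool is_qp0(const struct dev_caps *caps, u32 qpn)
    {
        return qpn >= caps->base_sqpn && qpn <= caps->base_sqpn + 1;
    }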
D  mlx4_ib.h
      273  struct mlx4_qp mqp;  member
      618  static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)  in to_mibqp() argument
      620  return container_of(mqp, struct mlx4_ib_qp, mqp);  in to_mibqp()
      754  int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
      812  int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
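to_mibqp() recovers the driver-private mlx4_ib_qp from a pointer to the mlx4_qp embedded inside it as the mqp member, via container_of(); the mlx5 header further down (mlx5_ib.h) repeats the same idiom. The idiom in isolation, as standalone C with a local container_of definition so it compiles outside the kernel; core_qp and ib_qp_priv are placeholder names:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct core_qp {                /* stands in for struct mlx4_qp */
        int qpn;
    };

    struct ib_qp_priv {             /* stands in for struct mlx4_ib_qp */
        int flags;
        struct core_qp mqp;         /* embedded core object */
    };

    /* Given a pointer to the embedded member, get back the enclosing struct. */
    static inline struct ib_qp_priv *to_priv(struct core_qp *mqp)
    {
        return container_of(mqp, struct ib_qp_priv, mqp);
    }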
D  cq.c
      652  struct mlx4_qp *mqp;  in mlx4_ib_poll_one() local
      706  (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {  in mlx4_ib_poll_one()
      712  mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,  in mlx4_ib_poll_one()
      714  if (unlikely(!mqp)) {  in mlx4_ib_poll_one()
      720  *cur_qp = to_mibqp(mqp);  in mlx4_ib_poll_one()
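mlx4_ib_poll_one() keeps the last-resolved QP in *cur_qp and only repeats the QPN lookup when the QPN carried in the next CQE differs, saving a table lookup for back-to-back completions on the same QP; mlx5_poll_one() at the end of this listing does the same. A sketch of that caching step, where qp_table_lookup() is a hypothetical stand-in for __mlx4_qp_lookup():

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    struct core_qp { u32 qpn; };
    struct priv_qp { struct core_qp mqp; };

    /* Hypothetical lookup helper standing in for __mlx4_qp_lookup(). */
    struct core_qp *qp_table_lookup(u32 qpn);

    static inline struct priv_qp *to_priv(struct core_qp *q)
    {
        return container_of(q, struct priv_qp, mqp);
    }

    /* Resolve the QP for a completion, reusing *cur_qp when the QPN matches. */
    static int resolve_cqe_qp(u32 cqe_qpn, struct priv_qp **cur_qp)
    {
        struct core_qp *mqp;

        if (*cur_qp && (*cur_qp)->mqp.qpn == cqe_qpn)
            return 0;               /* same QP as the previous CQE */

        mqp = qp_table_lookup(cqe_qpn);
        if (!mqp)
            return -EINVAL;         /* CQE for a QP that no longer exists */

        *cur_qp = to_priv(mqp);
        return 0;
    }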
/linux-4.1.27/arch/sparc/kernel/
D  pci_msi.c
      334  } *mqp;  in sparc64_pbm_msi_init() local
      354  mqp = of_get_property(pbm->op->dev.of_node,  in sparc64_pbm_msi_init()
      356  if (!mqp)  in sparc64_pbm_msi_init()
      357  mqp = of_get_property(pbm->op->dev.of_node,  in sparc64_pbm_msi_init()
      359  if (!mqp || len != sizeof(struct msiq_prop))  in sparc64_pbm_msi_init()
      362  pbm->msiq_first = mqp->first_msiq;  in sparc64_pbm_msi_init()
      363  pbm->msiq_first_devino = mqp->first_devino;  in sparc64_pbm_msi_init()
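sparc64_pbm_msi_init() reads a device-tree property describing the MSI event queues, falls back to a second property name if the first is absent, and rejects the data unless its length matches the expected structure. The property names are not visible in the hits above, so the ones below are purely illustrative; the whole function is a sketch of the flow, not the actual sparc code:

    #include <linux/errno.h>
    #include <linux/of.h>
    #include <linux/types.h>

    /* Layout expected for the (hypothetical) "msi-eq-prop" property.
     * The sketch ignores endianness conversion for brevity. */
    struct msiq_prop {
        u32 first_msiq;
        u32 num_msiq;
        u32 first_devino;
    };

    static int read_msiq_prop(struct device_node *np,
                              u32 *first_msiq, u32 *first_devino)
    {
        const struct msiq_prop *mqp;
        int len;

        /* Try the primary name, then a hypothetical legacy fallback. */
        mqp = of_get_property(np, "msi-eq-prop", &len);
        if (!mqp)
            mqp = of_get_property(np, "msi-eq-prop-legacy", &len);
        if (!mqp || len != sizeof(struct msiq_prop))
            return -ENODEV;

        *first_msiq = mqp->first_msiq;
        *first_devino = mqp->first_devino;
        return 0;
    }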
/linux-4.1.27/drivers/infiniband/hw/mlx5/
D  odp.c
      165  int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn,  in mlx5_ib_page_fault_resume()
      170  qp->mqp.qpn);  in mlx5_ib_page_fault_resume()
      411  wqe_index, qp->mqp.qpn);  in mlx5_ib_mr_initiator_pfault_handler()
      421  wqe_index, qp->mqp.qpn,  in mlx5_ib_mr_initiator_pfault_handler()
      428  if (qp->mqp.qpn != ctrl_qpn) {  in mlx5_ib_mr_initiator_pfault_handler()
      430  wqe_index, qp->mqp.qpn,  in mlx5_ib_mr_initiator_pfault_handler()
      559  -ret, wqe_index, qp->mqp.qpn);  in mlx5_ib_mr_wqe_pfault_handler()
      596  qp->mqp.qpn, resume_with_error, pfault->mpfault.flags);  in mlx5_ib_mr_wqe_pfault_handler()
      763  qp->mqp.pfault_handler = mlx5_ib_pfault_handler;  in mlx5_ib_odp_create_qp()
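For on-demand-paging QPs, mlx5_ib_odp_create_qp() points the core QP's pfault_handler at the driver's handler (line 763), and the fault path later resumes the stalled QP by its qpn via mlx5_core_page_fault_resume() (line 165). A shape-only sketch of that callback registration, with placeholder types; the real mlx5 structures and handler arguments carry much more state:

    #include <linux/types.h>

    /* Placeholder fault descriptor. */
    struct pfault_info {
        unsigned int wqe_index;
    };

    struct core_qp {
        u32 qpn;
        /* Callback invoked by the core driver when this QP takes a page fault. */
        void (*pfault_handler)(struct core_qp *qp, struct pfault_info *pf);
    };

    static void my_pfault_handler(struct core_qp *qp, struct pfault_info *pf)
    {
        /* ...fault the pages in, then ask the HW to resume the QP by qpn... */
    }

    /* At QP creation time, hook the per-QP fault callback. */
    static void odp_init_qp(struct core_qp *qp)
    {
        qp->pfault_handler = my_pfault_handler;
    }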
D  mlx5_ib.h
      177  struct mlx5_core_qp mqp;  member
      471  static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)  in to_mibqp() argument
      473  return container_of(mqp, struct mlx5_ib_qp, mqp);  in to_mibqp()
D  qp.c
     1029  err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);  in create_qp_common()
     1040  qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);  in create_qp_common()
     1042  qp->mqp.event = mlx5_ib_qp_event;  in create_qp_common()
     1166  MLX5_QP_STATE_RST, in, 0, &qp->mqp))  in destroy_qp_common()
     1168  qp->mqp.qpn);  in destroy_qp_common()
     1175  __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,  in destroy_qp_common()
     1178  __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);  in destroy_qp_common()
     1182  err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);  in destroy_qp_common()
     1184  mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);  in destroy_qp_common()
     1285  qp->ibqp.qp_num = qp->mqp.qpn;  in mlx5_ib_create_qp()
      [all …]
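create_qp_common() and destroy_qp_common() bracket the firmware object's lifetime: creation registers the QP with the core driver and wires up the event callback, while teardown (lines 1166 to 1184) moves the QP towards RESET, scrubs any of its completions still sitting in the CQs, and only then destroys the firmware object. A shape-only sketch of the teardown half, using hypothetical helpers rather than the mlx5 API:

    #include <linux/printk.h>
    #include <linux/types.h>

    struct hw_qp { u32 qpn; };

    /* Hypothetical firmware/CQ helpers standing in for the mlx5_core/mlx5_ib calls. */
    int  fw_qp_to_reset(struct hw_qp *qp);
    void cq_clean(void *cq, u32 qpn);
    int  fw_qp_destroy(struct hw_qp *qp);

    /* Teardown order mirrored from destroy_qp_common(): quiesce, scrub CQs, destroy. */
    static void qp_teardown(struct hw_qp *qp, void *send_cq, void *recv_cq)
    {
        if (fw_qp_to_reset(qp))
            pr_warn("modify QP 0x%x to RESET failed\n", qp->qpn);

        cq_clean(recv_cq, qp->qpn);     /* drop stale receive completions */
        if (send_cq != recv_cq)
            cq_clean(send_cq, qp->qpn);

        if (fw_qp_destroy(qp))
            pr_warn("failed to destroy QP 0x%x\n", qp->qpn);
    }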
D  cq.c
      409  struct mlx5_core_qp *mqp;  in mlx5_poll_one() local
      453  mqp = __mlx5_qp_lookup(dev->mdev, qpn);  in mlx5_poll_one()
      454  if (unlikely(!mqp)) {  in mlx5_poll_one()
      460  *cur_qp = to_mibqp(mqp);  in mlx5_poll_one()