mqp 335 arch/sparc/kernel/pci_msi.c } *mqp;
mqp 355 arch/sparc/kernel/pci_msi.c mqp = of_get_property(pbm->op->dev.of_node,
mqp 357 arch/sparc/kernel/pci_msi.c if (!mqp)
mqp 358 arch/sparc/kernel/pci_msi.c mqp = of_get_property(pbm->op->dev.of_node,
mqp 360 arch/sparc/kernel/pci_msi.c if (!mqp || len != sizeof(struct msiq_prop))
mqp 363 arch/sparc/kernel/pci_msi.c pbm->msiq_first = mqp->first_msiq;
mqp 364 arch/sparc/kernel/pci_msi.c pbm->msiq_first_devino = mqp->first_devino;
mqp 666 drivers/infiniband/hw/mlx4/cq.c struct mlx4_qp *mqp;
mqp 714 drivers/infiniband/hw/mlx4/cq.c (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) {
mqp 720 drivers/infiniband/hw/mlx4/cq.c mqp = __mlx4_qp_lookup(to_mdev(cq->ibcq.device)->dev,
mqp 722 drivers/infiniband/hw/mlx4/cq.c *cur_qp = to_mibqp(mqp);
mqp 1270 drivers/infiniband/hw/mlx4/main.c struct mlx4_ib_qp *mqp = to_mqp(ibqp);
mqp 1279 drivers/infiniband/hw/mlx4/main.c if (mlx4_ib_add_mc(mdev, mqp, gid)) {
mqp 1280 drivers/infiniband/hw/mlx4/main.c ge->port = mqp->port;
mqp 1284 drivers/infiniband/hw/mlx4/main.c mutex_lock(&mqp->mutex);
mqp 1285 drivers/infiniband/hw/mlx4/main.c list_add_tail(&ge->list, &mqp->gid_list);
mqp 1286 drivers/infiniband/hw/mlx4/main.c mutex_unlock(&mqp->mutex);
mqp 1307 drivers/infiniband/hw/mlx4/main.c int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
mqp 1313 drivers/infiniband/hw/mlx4/main.c if (!mqp->port)
mqp 1317 drivers/infiniband/hw/mlx4/main.c ndev = mdev->iboe.netdevs[mqp->port - 1];
mqp 1861 drivers/infiniband/hw/mlx4/main.c struct mlx4_ib_qp *mqp = to_mqp(ibqp);
mqp 1873 drivers/infiniband/hw/mlx4/main.c err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
mqp 1874 drivers/infiniband/hw/mlx4/main.c !!(mqp->flags &
mqp 1884 drivers/infiniband/hw/mlx4/main.c err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
mqp 1885 drivers/infiniband/hw/mlx4/main.c (mqp->port == 1) ? 2 : 1,
mqp 1886 drivers/infiniband/hw/mlx4/main.c !!(mqp->flags &
mqp 1900 drivers/infiniband/hw/mlx4/main.c mutex_lock(&mqp->mutex);
mqp 1901 drivers/infiniband/hw/mlx4/main.c list_add(&ib_steering->list, &mqp->steering_rules);
mqp 1902 drivers/infiniband/hw/mlx4/main.c mutex_unlock(&mqp->mutex);
mqp 1907 drivers/infiniband/hw/mlx4/main.c mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
mqp 1910 drivers/infiniband/hw/mlx4/main.c mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
mqp 1939 drivers/infiniband/hw/mlx4/main.c struct mlx4_ib_qp *mqp = to_mqp(ibqp);
mqp 1949 drivers/infiniband/hw/mlx4/main.c mutex_lock(&mqp->mutex);
mqp 1950 drivers/infiniband/hw/mlx4/main.c list_for_each_entry(ib_steering, &mqp->steering_rules, list) {
mqp 1956 drivers/infiniband/hw/mlx4/main.c mutex_unlock(&mqp->mutex);
mqp 1957 drivers/infiniband/hw/mlx4/main.c if (&ib_steering->list == &mqp->steering_rules) {
mqp 1965 drivers/infiniband/hw/mlx4/main.c err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
mqp 1971 drivers/infiniband/hw/mlx4/main.c err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
mqp 1977 drivers/infiniband/hw/mlx4/main.c mutex_lock(&mqp->mutex);
mqp 1978 drivers/infiniband/hw/mlx4/main.c ge = find_gid_entry(mqp, gid->raw);
mqp 1992 drivers/infiniband/hw/mlx4/main.c mutex_unlock(&mqp->mutex);
mqp 2308 drivers/infiniband/hw/mlx4/main.c if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
mqp 2979 drivers/infiniband/hw/mlx4/main.c int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
mqp 2993 drivers/infiniband/hw/mlx4/main.c flow->port = mqp->port;
mqp 3002 drivers/infiniband/hw/mlx4/main.c err = __mlx4_ib_create_flow(&mqp->ibqp, flow,
mqp 3005 drivers/infiniband/hw/mlx4/main.c &mqp->reg_id);
mqp 3007 drivers/infiniband/hw/mlx4/main.c err = __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
mqp 3105 drivers/infiniband/hw/mlx4/main.c struct mlx4_ib_qp *mqp;
mqp 3119 drivers/infiniband/hw/mlx4/main.c list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
mqp 3120 drivers/infiniband/hw/mlx4/main.c spin_lock_irqsave(&mqp->sq.lock, flags_qp);
mqp 3121 drivers/infiniband/hw/mlx4/main.c if (mqp->sq.tail != mqp->sq.head) {
mqp 3122 drivers/infiniband/hw/mlx4/main.c send_mcq = to_mcq(mqp->ibqp.send_cq);
mqp 3125 drivers/infiniband/hw/mlx4/main.c mqp->ibqp.send_cq->comp_handler) {
mqp 3134 drivers/infiniband/hw/mlx4/main.c spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
mqp 3136 drivers/infiniband/hw/mlx4/main.c spin_lock_irqsave(&mqp->rq.lock, flags_qp);
mqp 3138 drivers/infiniband/hw/mlx4/main.c if (!mqp->ibqp.srq) {
mqp 3139 drivers/infiniband/hw/mlx4/main.c if (mqp->rq.tail != mqp->rq.head) {
mqp 3140 drivers/infiniband/hw/mlx4/main.c recv_mcq = to_mcq(mqp->ibqp.recv_cq);
mqp 3143 drivers/infiniband/hw/mlx4/main.c mqp->ibqp.recv_cq->comp_handler) {
mqp 3154 drivers/infiniband/hw/mlx4/main.c spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
mqp 311 drivers/infiniband/hw/mlx4/mlx4_ib.h struct mlx4_qp mqp;
mqp 696 drivers/infiniband/hw/mlx4/mlx4_ib.h static inline struct mlx4_ib_qp *to_mibqp(struct mlx4_qp *mqp)
mqp 698 drivers/infiniband/hw/mlx4/mlx4_ib.h return container_of(mqp, struct mlx4_ib_qp, mqp);
mqp 834 drivers/infiniband/hw/mlx4/mlx4_ib.h int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
mqp 892 drivers/infiniband/hw/mlx4/mlx4_ib.h int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
mqp 126 drivers/infiniband/hw/mlx4/qp.c static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp)
mqp 128 drivers/infiniband/hw/mlx4/qp.c return container_of(mqp, struct mlx4_ib_sqp, qp);
mqp 136 drivers/infiniband/hw/mlx4/qp.c return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn &&
mqp 137 drivers/infiniband/hw/mlx4/qp.c qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn +
mqp 148 drivers/infiniband/hw/mlx4/qp.c qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
mqp 149 drivers/infiniband/hw/mlx4/qp.c qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3);
mqp 155 drivers/infiniband/hw/mlx4/qp.c if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy ||
mqp 156 drivers/infiniband/hw/mlx4/qp.c qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp1_proxy) {
mqp 176 drivers/infiniband/hw/mlx4/qp.c qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn &&
mqp 177 drivers/infiniband/hw/mlx4/qp.c qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1);
mqp 183 drivers/infiniband/hw/mlx4/qp.c if (qp->mqp.qpn == dev->dev->caps.spec_qps[i].qp0_proxy) {
mqp 649 drivers/infiniband/hw/mlx4/qp.c qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
mqp 651 drivers/infiniband/hw/mlx4/qp.c err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn, 0, qp->mqp.usage);
mqp 655 drivers/infiniband/hw/mlx4/qp.c err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
mqp 691 drivers/infiniband/hw/mlx4/qp.c mlx4_qp_remove(dev->dev, &qp->mqp);
mqp 692 drivers/infiniband/hw/mlx4/qp.c mlx4_qp_free(dev->dev, &qp->mqp);
mqp 770 drivers/infiniband/hw/mlx4/qp.c qp->ibqp.qp_num = qp->mqp.qpn;
mqp 801 drivers/infiniband/hw/mlx4/qp.c qp->mqp.usage);
mqp 939 drivers/infiniband/hw/mlx4/qp.c qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
mqp 945 drivers/infiniband/hw/mlx4/qp.c err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
mqp 954 drivers/infiniband/hw/mlx4/qp.c qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
mqp 956 drivers/infiniband/hw/mlx4/qp.c qp->mqp.event = mlx4_ib_wq_event;
mqp 1136 drivers/infiniband/hw/mlx4/qp.c qp->mqp.usage = MLX4_RES_USAGE_USER_VERBS;
mqp 1191 drivers/infiniband/hw/mlx4/qp.c qp->mqp.usage = MLX4_RES_USAGE_DRIVER;
mqp 1212 drivers/infiniband/hw/mlx4/qp.c qp->mqp.usage);
mqp 1218 drivers/infiniband/hw/mlx4/qp.c &qpn, 0, qp->mqp.usage);
mqp 1226 drivers/infiniband/hw/mlx4/qp.c err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
mqp 1231 drivers/infiniband/hw/mlx4/qp.c qp->mqp.qpn |= (1 << 23);
mqp 1238 drivers/infiniband/hw/mlx4/qp.c qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
mqp 1240 drivers/infiniband/hw/mlx4/qp.c qp->mqp.event = mlx4_ib_qp_event;
mqp 1404 drivers/infiniband/hw/mlx4/qp.c MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
mqp 1406 drivers/infiniband/hw/mlx4/qp.c qp->mqp.qpn);
mqp 1409 drivers/infiniband/hw/mlx4/qp.c mlx4_qp_remove(dev->dev, &qp->mqp);
mqp 1410 drivers/infiniband/hw/mlx4/qp.c mlx4_qp_free(dev->dev, &qp->mqp);
mqp 1411 drivers/infiniband/hw/mlx4/qp.c mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
mqp 1425 drivers/infiniband/hw/mlx4/qp.c MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
mqp 1427 drivers/infiniband/hw/mlx4/qp.c qp->mqp.qpn);
mqp 1461 drivers/infiniband/hw/mlx4/qp.c __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
mqp 1464 drivers/infiniband/hw/mlx4/qp.c __mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
mqp 1467 drivers/infiniband/hw/mlx4/qp.c mlx4_qp_remove(dev->dev, &qp->mqp);
mqp 1472 drivers/infiniband/hw/mlx4/qp.c mlx4_qp_free(dev->dev, &qp->mqp);
mqp 1476 drivers/infiniband/hw/mlx4/qp.c mlx4_ib_steer_qp_free(dev, qp->mqp.qpn, 1);
mqp 1485 drivers/infiniband/hw/mlx4/qp.c mlx4_qp_release_range(dev->dev, qp->mqp.qpn, 1);
mqp 1604 drivers/infiniband/hw/mlx4/qp.c qp->ibqp.qp_num = qp->mqp.qpn;
mqp 1682 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_qp *mqp = to_mqp(qp);
mqp 1684 drivers/infiniband/hw/mlx4/qp.c if (is_qp0(dev, mqp))
mqp 1685 drivers/infiniband/hw/mlx4/qp.c mlx4_CLOSE_PORT(dev->dev, mqp->port);
mqp 1687 drivers/infiniband/hw/mlx4/qp.c if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI &&
mqp 1688 drivers/infiniband/hw/mlx4/qp.c dev->qp1_proxy[mqp->port - 1] == mqp) {
mqp 1689 drivers/infiniband/hw/mlx4/qp.c mutex_lock(&dev->qp1_proxy_lock[mqp->port - 1]);
mqp 1690 drivers/infiniband/hw/mlx4/qp.c dev->qp1_proxy[mqp->port - 1] = NULL;
mqp 1691 drivers/infiniband/hw/mlx4/qp.c mutex_unlock(&dev->qp1_proxy_lock[mqp->port - 1]);
mqp 1694 drivers/infiniband/hw/mlx4/qp.c if (mqp->counter_index)
mqp 1695 drivers/infiniband/hw/mlx4/qp.c mlx4_ib_free_qp_counter(dev, mqp);
mqp 1698 drivers/infiniband/hw/mlx4/qp.c destroy_qp_rss(dev, mqp);
mqp 1700 drivers/infiniband/hw/mlx4/qp.c destroy_qp_common(dev, mqp, MLX4_IB_QP_SRC, udata);
mqp 1703 drivers/infiniband/hw/mlx4/qp.c if (is_sqp(dev, mqp))
mqp 1704 drivers/infiniband/hw/mlx4/qp.c kfree(to_msqp(mqp));
mqp 1706 drivers/infiniband/hw/mlx4/qp.c kfree(mqp);
mqp 1713 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_qp *mqp = to_mqp(qp);
mqp 1715 drivers/infiniband/hw/mlx4/qp.c if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
mqp 1716 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_sqp *sqp = to_msqp(mqp);
mqp 1915 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_qp *mqp,
mqp 1922 drivers/infiniband/hw/mlx4/qp.c path, &mqp->pri, port);
mqp 1928 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_qp *mqp,
mqp 1934 drivers/infiniband/hw/mlx4/qp.c path, &mqp->alt, port);
mqp 2560 drivers/infiniband/hw/mlx4/qp.c sqd_event, &qp->mqp);
mqp 2601 drivers/infiniband/hw/mlx4/qp.c mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
mqp 2604 drivers/infiniband/hw/mlx4/qp.c mlx4_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
mqp 2849 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_qp *mqp = to_mqp(ibqp);
mqp 2854 drivers/infiniband/hw/mlx4/qp.c if (mqp->mlx4_ib_qp_type == MLX4_IB_QPT_GSI) {
mqp 2855 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_sqp *sqp = to_msqp(mqp);
mqp 2941 drivers/infiniband/hw/mlx4/qp.c if (mlx4_get_parav_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
mqp 2944 drivers/infiniband/hw/mlx4/qp.c if (vf_get_qp0_qkey(mdev->dev, sqp->qp.mqp.qpn, &qkey))
mqp 2948 drivers/infiniband/hw/mlx4/qp.c sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.mqp.qpn);
mqp 4041 drivers/infiniband/hw/mlx4/qp.c err = mlx4_qp_query(dev->dev, &qp->mqp, &context);
mqp 4195 drivers/infiniband/hw/mlx4/qp.c qp->ibwq.wq_num = qp->mqp.qpn;
mqp 446 drivers/infiniband/hw/mlx5/cq.c struct mlx5_core_qp *mqp;
mqp 490 drivers/infiniband/hw/mlx5/cq.c mqp = __mlx5_qp_lookup(dev->mdev, qpn);
mqp 491 drivers/infiniband/hw/mlx5/cq.c *cur_qp = to_mibqp(mqp);
mqp 619 drivers/infiniband/hw/mlx5/devx.c rq->base.mqp.qpn) == obj_id ||
mqp 621 drivers/infiniband/hw/mlx5/devx.c sq->base.mqp.qpn) == obj_id ||
mqp 630 drivers/infiniband/hw/mlx5/devx.c qp->dct.mdct.mqp.qpn) == obj_id;
mqp 129 drivers/infiniband/hw/mlx5/flow.c struct mlx5_ib_qp *mqp;
mqp 139 drivers/infiniband/hw/mlx5/flow.c mqp = to_mqp(qp);
mqp 140 drivers/infiniband/hw/mlx5/flow.c if (mqp->flags & MLX5_IB_QP_RSS)
mqp 141 drivers/infiniband/hw/mlx5/flow.c dest_id = mqp->rss_qp.tirn;
mqp 143 drivers/infiniband/hw/mlx5/flow.c dest_id = mqp->raw_packet_qp.rq.tirn;
mqp 155 drivers/infiniband/hw/mlx5/ib_rep.c sq->base.mqp.qpn);
mqp 3788 drivers/infiniband/hw/mlx5/main.c struct mlx5_ib_qp *mqp = to_mqp(qp);
mqp 3877 drivers/infiniband/hw/mlx5/main.c if (mqp->flags & MLX5_IB_QP_RSS)
mqp 3878 drivers/infiniband/hw/mlx5/main.c dst->tir_num = mqp->rss_qp.tirn;
mqp 3880 drivers/infiniband/hw/mlx5/main.c dst->tir_num = mqp->raw_packet_qp.rq.tirn;
mqp 3888 drivers/infiniband/hw/mlx5/main.c underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
mqp 3889 drivers/infiniband/hw/mlx5/main.c mqp->underlay_qpn : 0;
mqp 4343 drivers/infiniband/hw/mlx5/main.c struct mlx5_ib_qp *mqp = to_mqp(ibqp);
mqp 4350 drivers/infiniband/hw/mlx5/main.c if (mqp->flags & MLX5_IB_QP_UNDERLAY) {
mqp 4469 drivers/infiniband/hw/mlx5/main.c struct mlx5_ib_qp *mqp;
mqp 4481 drivers/infiniband/hw/mlx5/main.c list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
mqp 4482 drivers/infiniband/hw/mlx5/main.c spin_lock_irqsave(&mqp->sq.lock, flags_qp);
mqp 4483 drivers/infiniband/hw/mlx5/main.c if (mqp->sq.tail != mqp->sq.head) {
mqp 4484 drivers/infiniband/hw/mlx5/main.c send_mcq = to_mcq(mqp->ibqp.send_cq);
mqp 4487 drivers/infiniband/hw/mlx5/main.c mqp->ibqp.send_cq->comp_handler) {
mqp 4496 drivers/infiniband/hw/mlx5/main.c spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
mqp 4497 drivers/infiniband/hw/mlx5/main.c spin_lock_irqsave(&mqp->rq.lock, flags_qp);
mqp 4499 drivers/infiniband/hw/mlx5/main.c if (!mqp->ibqp.srq) {
mqp 4500 drivers/infiniband/hw/mlx5/main.c if (mqp->rq.tail != mqp->rq.head) {
mqp 4501 drivers/infiniband/hw/mlx5/main.c recv_mcq = to_mcq(mqp->ibqp.recv_cq);
mqp 4504 drivers/infiniband/hw/mlx5/main.c mqp->ibqp.recv_cq->comp_handler) {
mqp 4515 drivers/infiniband/hw/mlx5/main.c spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
mqp 348 drivers/infiniband/hw/mlx5/mlx5_ib.h struct mlx5_core_qp mqp;
mqp 1033 drivers/infiniband/hw/mlx5/mlx5_ib.h static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
mqp 1035 drivers/infiniband/hw/mlx5/mlx5_ib.h return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
mqp 1033 drivers/infiniband/hw/mlx5/odp.c u32 qpn = qp->trans_qp.base.mqp.qpn;
mqp 1152 drivers/infiniband/hw/mlx5/odp.c struct mlx5_core_qp *mqp = (struct mlx5_core_qp *)res;
mqp 1154 drivers/infiniband/hw/mlx5/odp.c return to_mibqp(mqp);
mqp 1277 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);
mqp 1297 drivers/infiniband/hw/mlx5/qp.c mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
mqp 1320 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
mqp 1349 drivers/infiniband/hw/mlx5/qp.c if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
mqp 1367 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);
mqp 1377 drivers/infiniband/hw/mlx5/qp.c mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
mqp 1418 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
mqp 1478 drivers/infiniband/hw/mlx5/qp.c resp->sqn = sq->base.mqp.qpn;
mqp 1483 drivers/infiniband/hw/mlx5/qp.c sq->base.mqp.event = mlx5_ib_qp_event;
mqp 1504 drivers/infiniband/hw/mlx5/qp.c resp->rqn = rq->base.mqp.qpn;
mqp 1525 drivers/infiniband/hw/mlx5/qp.c qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
mqp 1526 drivers/infiniband/hw/mlx5/qp.c rq->base.mqp.qpn;
mqp 1831 drivers/infiniband/hw/mlx5/qp.c qp->trans_qp.base.mqp.qpn = 0;
mqp 2287 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
mqp 2298 drivers/infiniband/hw/mlx5/qp.c base->mqp.event = mlx5_ib_qp_event;
mqp 2455 drivers/infiniband/hw/mlx5/qp.c NULL, &base->mqp);
mqp 2465 drivers/infiniband/hw/mlx5/qp.c base->mqp.qpn);
mqp 2482 drivers/infiniband/hw/mlx5/qp.c __mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
mqp 2485 drivers/infiniband/hw/mlx5/qp.c __mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
mqp 2495 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
mqp 2498 drivers/infiniband/hw/mlx5/qp.c base->mqp.qpn);
mqp 2721 drivers/infiniband/hw/mlx5/qp.c qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
mqp 2724 drivers/infiniband/hw/mlx5/qp.c qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
mqp 2751 drivers/infiniband/hw/mlx5/qp.c static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
mqp 2753 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_dev *dev = to_mdev(mqp->ibqp.device);
mqp 2755 drivers/infiniband/hw/mlx5/qp.c if (mqp->state == IB_QPS_RTR) {
mqp 2758 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
mqp 2765 drivers/infiniband/hw/mlx5/qp.c kfree(mqp->dct.in);
mqp 2766 drivers/infiniband/hw/mlx5/qp.c kfree(mqp);
mqp 2773 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_qp *mqp = to_mqp(qp);
mqp 2778 drivers/infiniband/hw/mlx5/qp.c if (mqp->qp_sub_type == MLX5_IB_QPT_DCT)
mqp 2779 drivers/infiniband/hw/mlx5/qp.c return mlx5_ib_destroy_dct(mqp);
mqp 2781 drivers/infiniband/hw/mlx5/qp.c destroy_qp_common(dev, mqp, udata);
mqp 2783 drivers/infiniband/hw/mlx5/qp.c kfree(mqp);
mqp 3180 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
mqp 3243 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
mqp 3371 drivers/infiniband/hw/mlx5/qp.c tx_port_affinity, qp_base->mqp.qpn, ucontext);
mqp 3379 drivers/infiniband/hw/mlx5/qp.c tx_port_affinity, qp_base->mqp.qpn);
mqp 3389 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_qp *mqp = to_mqp(qp);
mqp 3397 drivers/infiniband/hw/mlx5/qp.c set_id = mlx5_ib_get_counters_id(dev, mqp->port - 1);
mqp 3399 drivers/infiniband/hw/mlx5/qp.c base = &mqp->trans_qp.base;
mqp 3405 drivers/infiniband/hw/mlx5/qp.c &context, &base->mqp);
mqp 3693 drivers/infiniband/hw/mlx5/qp.c &base->mqp);
mqp 3716 drivers/infiniband/hw/mlx5/qp.c mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
mqp 3719 drivers/infiniband/hw/mlx5/qp.c mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);
mqp 3872 drivers/infiniband/hw/mlx5/qp.c resp.dctn = qp->dct.mdct.mqp.qpn;
mqp 4941 drivers/infiniband/hw/mlx5/qp.c ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
mqp 5519 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state);
mqp 5542 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_query_rq(dev->mdev, rq->base.mqp.qpn, out);
mqp 5589 drivers/infiniband/hw/mlx5/qp.c qp->raw_packet_qp.sq.base.mqp.qpn, sq_state,
mqp 5590 drivers/infiniband/hw/mlx5/qp.c qp->raw_packet_qp.rq.base.mqp.qpn, rq_state);
mqp 5640 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
mqp 5692 drivers/infiniband/hw/mlx5/qp.c static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
mqp 5696 drivers/infiniband/hw/mlx5/qp.c struct mlx5_core_dct *dct = &mqp->dct.mdct;
mqp 5712 drivers/infiniband/hw/mlx5/qp.c if (mqp->state != IB_QPS_RTR)
mqp 6511 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_qp *mqp = to_mqp(qp);
mqp 6514 drivers/infiniband/hw/mlx5/qp.c mutex_lock(&mqp->mutex);
mqp 6515 drivers/infiniband/hw/mlx5/qp.c if (mqp->state == IB_QPS_RESET) {
mqp 6525 drivers/infiniband/hw/mlx5/qp.c if (mqp->state == IB_QPS_RTS) {
mqp 6533 drivers/infiniband/hw/mlx5/qp.c mqp->counter_pending = 1;
mqp 6537 drivers/infiniband/hw/mlx5/qp.c mutex_unlock(&mqp->mutex);
mqp 77 drivers/infiniband/sw/rdmavt/mcast.c struct rvt_mcast_qp *mqp;
mqp 79 drivers/infiniband/sw/rdmavt/mcast.c mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
mqp 80 drivers/infiniband/sw/rdmavt/mcast.c if (!mqp)
mqp 83 drivers/infiniband/sw/rdmavt/mcast.c mqp->qp = qp;
mqp 87 drivers/infiniband/sw/rdmavt/mcast.c return mqp;
mqp 90 drivers/infiniband/sw/rdmavt/mcast.c static void rvt_mcast_qp_free(struct rvt_mcast_qp *mqp)
mqp 92 drivers/infiniband/sw/rdmavt/mcast.c struct rvt_qp *qp = mqp->qp;
mqp 97 drivers/infiniband/sw/rdmavt/mcast.c kfree(mqp);
mqp 194 drivers/infiniband/sw/rdmavt/mcast.c struct rvt_mcast *mcast, struct rvt_mcast_qp *mqp)
mqp 228 drivers/infiniband/sw/rdmavt/mcast.c if (p->qp == mqp->qp) {
mqp 241 drivers/infiniband/sw/rdmavt/mcast.c list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
mqp 258 drivers/infiniband/sw/rdmavt/mcast.c list_add_tail_rcu(&mqp->list, &mcast->qp_list);
mqp 286 drivers/infiniband/sw/rdmavt/mcast.c struct rvt_mcast_qp *mqp;
mqp 300 drivers/infiniband/sw/rdmavt/mcast.c mqp = rvt_mcast_qp_alloc(qp);
mqp 301 drivers/infiniband/sw/rdmavt/mcast.c if (!mqp)
mqp 304 drivers/infiniband/sw/rdmavt/mcast.c switch (rvt_mcast_add(rdi, ibp, mcast, mqp)) {
mqp 327 drivers/infiniband/sw/rdmavt/mcast.c rvt_mcast_qp_free(mqp);
mqp 168 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c ctrl->qpn_ds = cpu_to_be32(size | (conn->qp.mqp.qpn << 8));
mqp 374 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c static void mlx5_fpga_conn_event(struct mlx5_core_qp *mqp, int event)
mqp 378 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn = container_of(mqp, struct mlx5_fpga_conn, qp.mqp);
mqp 379 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_fpga_warn(conn->fdev, "QP event %u on QP #%u\n", event, mqp->qpn);
mqp 603 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c err = mlx5_core_create_qp(mdev, &conn->qp.mqp, in, inlen);
mqp 607 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->qp.mqp.event = mlx5_fpga_conn_event;
mqp 608 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_fpga_dbg(fdev, "Created QP #0x%x\n", conn->qp.mqp.qpn);
mqp 661 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_core_destroy_qp(conn->fdev->mdev, &conn->qp.mqp);
mqp 673 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to RST\n", conn->qp.mqp.qpn);
mqp 676 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c &conn->qp.mqp);
mqp 686 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_fpga_dbg(conn->fdev, "Modifying QP %u to INIT\n", conn->qp.mqp.qpn);
mqp 704 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c &conn->qp.mqp);
mqp 749 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c &conn->qp.mqp);
mqp 786 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c &conn->qp.mqp);
mqp 934 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c MLX5_SET(fpga_qpc, conn->fpga_qpc, remote_qpn, conn->qp.mqp.qpn);
mqp 985 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c &conn->qp.mqp);
mqp 68 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h struct mlx5_core_qp mqp;
mqp 238 drivers/net/ethernet/mellanox/mlx5/core/qp.c struct mlx5_core_qp *qp = &dct->mqp;
mqp 255 drivers/net/ethernet/mellanox/mlx5/core/qp.c destroy_resource_common(dev, &dct->mqp);
mqp 269 drivers/net/ethernet/mellanox/mlx5/core/qp.c struct mlx5_core_qp *qp = &dct->mqp;
mqp 342 drivers/net/ethernet/mellanox/mlx5/core/qp.c struct mlx5_core_qp *qp = &dct->mqp;
mqp 560 drivers/net/ethernet/mellanox/mlx5/core/qp.c struct mlx5_core_qp *qp = &dct->mqp;
mqp 103 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c static void dr_qp_event(struct mlx5_core_qp *mqp, int event)
mqp 105 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c pr_info("DR QP event %u on QP #%u\n", event, mqp->qpn);
mqp 183 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c err = mlx5_core_create_qp(mdev, &dr_qp->mqp, in, inlen);
mqp 190 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dr_qp->mqp.event = dr_qp_event;
mqp 207 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c mlx5_core_destroy_qp(mdev, &dr_qp->mqp);
mqp 245 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->mqp.qpn << 8);
mqp 589 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c &dr_qp->mqp);
mqp 601 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->mqp.qpn);
mqp 608 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c &dr_qp->mqp);
mqp 620 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->mqp.qpn);
mqp 640 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c &dr_qp->mqp);
mqp 664 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c rtr_attr.qp_num = dr_qp->mqp.qpn;
mqp 975 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h struct mlx5_core_qp mqp;
mqp 121 drivers/scsi/arm/msgqueue.c struct msgqueue_entry **mqp;
mqp 133 drivers/scsi/arm/msgqueue.c mqp = &msgq->qe;
mqp 134 drivers/scsi/arm/msgqueue.c while (*mqp)
mqp 135 drivers/scsi/arm/msgqueue.c mqp = &(*mqp)->next;
mqp 137 drivers/scsi/arm/msgqueue.c *mqp = mq;
mqp 486 include/linux/mlx5/qp.h struct mlx5_core_qp mqp;