
Searched refs:qpn (Results 1 – 78 of 78) sorted by relevance

/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
mcg.c 126 u32 qpn) in get_promisc_qp() argument
137 if (pqp->qpn == qpn) in get_promisc_qp()
150 unsigned int index, u32 qpn) in new_steering_entry() argument
177 pqp = get_promisc_qp(dev, port, steer, qpn); in new_steering_entry()
184 dqp->qpn = qpn; in new_steering_entry()
210 if (pqp->qpn == qpn) in new_steering_entry()
219 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); in new_steering_entry()
242 unsigned int index, u32 qpn) in existing_steering_entry() argument
254 pqp = get_promisc_qp(dev, port, steer, qpn); in existing_steering_entry()
273 if (qpn == dqp->qpn) in existing_steering_entry()
[all …]
qp.c 49 void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) in mlx4_qp_event() argument
56 qp = __mlx4_qp_lookup(dev, qpn); in mlx4_qp_event()
63 mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn); in mlx4_qp_event()
79 *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1; in is_master_qp0()
81 *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn && in is_master_qp0()
82 qp->qpn <= dev->phys_caps.base_sqpn + 1; in is_master_qp0()
145 ret = mlx4_cmd(dev, 0, qp->qpn, 2, in __mlx4_qp_modify()
150 port = (qp->qpn & 1) + 1; in __mlx4_qp_modify()
174 cpu_to_be32(qp->qpn); in __mlx4_qp_modify()
177 qp->qpn | (!!sqd_event << 31), in __mlx4_qp_modify()
[all …]
resource_tracker.c 221 int qpn; member
705 u8 slave, u32 qpn) in update_vport_qp_param() argument
722 if (mlx4_is_qp_reserved(dev, qpn)) in update_vport_qp_param()
736 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params); in update_vport_qp_param()
983 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) in alloc_fs_rule_tr() argument
993 ret->qpn = qpn; in alloc_fs_rule_tr()
1266 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, in qp_res_start_move_to() argument
1276 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn); in qp_res_start_move_to()
1536 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) in valid_reserved() argument
1538 return mlx4_is_qp_reserved(dev, qpn) && in valid_reserved()
[all …]
en_resources.c 41 int is_tx, int rss, int qpn, int cqn, in mlx4_en_fill_qp_context() argument
62 context->local_qpn = cpu_to_be32(qpn); in mlx4_en_fill_qp_context()
78 en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn); in mlx4_en_fill_qp_context()
en_rx.c 1108 static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, in mlx4_en_config_rss_qp() argument
1121 err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); in mlx4_en_config_rss_qp()
1123 en_err(priv, "Failed to allocate qp #%x\n", qpn); in mlx4_en_config_rss_qp()
1130 qpn, ring->cqn, -1, context); in mlx4_en_config_rss_qp()
1157 u32 qpn; in mlx4_en_create_drop_qp() local
1159 err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, in mlx4_en_create_drop_qp()
1165 err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL); in mlx4_en_create_drop_qp()
1168 mlx4_qp_release_range(priv->mdev->dev, qpn, 1); in mlx4_en_create_drop_qp()
1177 u32 qpn; in mlx4_en_destroy_drop_qp() local
1179 qpn = priv->drop_qp.qpn; in mlx4_en_destroy_drop_qp()
[all …]
en_netdev.c 189 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; in mlx4_en_filter_work()
474 int qpn, u64 *reg_id) in mlx4_en_tunnel_steer_add() argument
482 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, in mlx4_en_tunnel_steer_add()
494 unsigned char *mac, int *qpn, u64 *reg_id) in mlx4_en_uc_steer_add() argument
505 qp.qpn = *qpn; in mlx4_en_uc_steer_add()
525 rule.qpn = *qpn; in mlx4_en_uc_steer_add()
546 unsigned char *mac, int qpn, u64 reg_id) in mlx4_en_uc_steer_release() argument
556 qp.qpn = qpn; in mlx4_en_uc_steer_release()
580 int *qpn = &priv->base_qpn; in mlx4_en_get_qp() local
595 *qpn = base_qpn + index; in mlx4_en_get_qp()
[all …]
en_tx.c 116 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn, in mlx4_en_create_tx_ring()
123 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); in mlx4_en_create_tx_ring()
125 en_err(priv, "Failed allocating qp %d\n", ring->qpn); in mlx4_en_create_tx_ring()
156 mlx4_qp_release_range(mdev->dev, ring->qpn, 1); in mlx4_en_create_tx_ring()
178 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); in mlx4_en_destroy_tx_ring()
184 mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); in mlx4_en_destroy_tx_ring()
210 ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8); in mlx4_en_activate_tx_ring()
213 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, in mlx4_en_activate_tx_ring()
en_ethtool.c 1137 ring_index[n] = rss_map->qps[n % rss_rings].qpn - in mlx4_en_get_rxfh()
1482 u32 qpn; in mlx4_en_flow_replace() local
1498 qpn = priv->drop_qp.qpn; in mlx4_en_flow_replace()
1500 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); in mlx4_en_flow_replace()
1507 qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn; in mlx4_en_flow_replace()
1508 if (!qpn) { in mlx4_en_flow_replace()
1514 rule.qpn = qpn; in mlx4_en_flow_replace()
mlx4.h 295 __be32 qpn; member
456 u32 qpn; member
938 int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp);
939 void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
1185 void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
mlx4_en.h 291 int qpn; member
793 int is_tx, int rss, int qpn, int cqn, int user_prio,
main.c 666 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) in mlx4_get_parav_qkey() argument
670 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || in mlx4_get_parav_qkey()
671 qpn < dev->phys_caps.base_proxy_sqpn) in mlx4_get_parav_qkey()
674 if (qpn >= dev->phys_caps.base_tunnel_sqpn) in mlx4_get_parav_qkey()
676 qk += qpn - dev->phys_caps.base_tunnel_sqpn; in mlx4_get_parav_qkey()
678 qk += qpn - dev->phys_caps.base_proxy_sqpn; in mlx4_get_parav_qkey()
eq.c 497 be32_to_cpu(eqe->event.qp.qpn) in mlx4_eq_int()
512 mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & in mlx4_eq_int()
port.c 308 int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) in __mlx4_replace_mac() argument
312 int index = qpn - info->base_qpn; in __mlx4_replace_mac()
mr.c 865 mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP); in mlx4_mw_enable()
fw.c 2504 qp.qpn = be32_to_cpu(mgm->qp[i]); in mlx4_opreq_action()
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/
qp.c 95 int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK; in mlx5_eq_pagefault() local
96 struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn); in mlx5_eq_pagefault()
103 qpn); in mlx5_eq_pagefault()
130 qpn, pfault.rdma.r_key); in mlx5_eq_pagefault()
150 qpn, pfault.wqe.wqe_index); in mlx5_eq_pagefault()
159 eqe->sub_type, qpn); in mlx5_eq_pagefault()
170 qpn); in mlx5_eq_pagefault()
206 qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; in mlx5_core_create_qp()
207 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); in mlx5_core_create_qp()
211 err = radix_tree_insert(&table->tree, qp->qpn, qp); in mlx5_core_create_qp()
[all …]
mcg.c 42 __be32 qpn; member
54 __be32 qpn; member
64 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) in mlx5_core_attach_mcg() argument
74 in.qpn = cpu_to_be32(qpn); in mlx5_core_attach_mcg()
86 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) in mlx5_core_detach_mcg() argument
96 in.qpn = cpu_to_be32(qpn); in mlx5_core_detach_mcg()
debugfs.c 544 &qp->dbg, qp->qpn, qp_fields, in mlx5_debug_qp_add()
/linux-4.1.27/include/uapi/rdma/
ib_user_mad.h 78 __be32 qpn; member
122 __be32 qpn; member
187 __u8 qpn; member
222 __u32 qpn; member
ib_user_cm.h 133 __u32 qpn; member
158 __u32 qpn; member
211 __u32 qpn; member
295 __u32 qpn; member
ib_user_verbs.h 504 __u32 qpn; member
513 __u32 qpn; member
/linux-4.1.27/drivers/infiniband/hw/mthca/
mthca_qp.c 197 return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
198 qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
203 return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
204 qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
237 void mthca_qp_event(struct mthca_dev *dev, u32 qpn, in mthca_qp_event() argument
244 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); in mthca_qp_event()
251 event_type, qpn); in mthca_qp_event()
448 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox); in mthca_query_qp()
614 qp_context->local_qpn = cpu_to_be32(qp->qpn); in __mthca_modify_qp()
755 ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << in __mthca_modify_qp()
[all …]
mthca_eq.c 143 __be32 qpn; member
282 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
287 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
292 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
297 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
307 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
312 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
317 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
322 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
mthca_mad.c 165 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; in forward_trap() local
167 struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; in forward_trap()
172 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, in forward_trap()
mthca_dev.h 506 void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
527 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
557 int qpn,
mthca_cq.c 278 void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, in mthca_cq_clean() argument
302 qpn, cq->cqn, cq->cons_index, prod_index); in mthca_cq_clean()
310 if (cqe->my_qpn == cpu_to_be32(qpn)) { in mthca_cq_clean()
519 if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) { in mthca_poll_one()
mthca_provider.h 264 u32 qpn; member
mthca_cmd.h 313 int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn);
mthca_cmd.c 1835 int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn) in mthca_CONF_SPECIAL_QP() argument
1856 return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP, in mthca_CONF_SPECIAL_QP()
mthca_provider.c 584 qp->ibqp.qp_num = qp->qpn; in mthca_create_qp()
/linux-4.1.27/include/linux/mlx5/
qp.h 416 int qpn; member
494 __be32 qpn; member
500 __be32 qpn; member
511 __be32 qpn; member
525 __be32 qpn; member
541 __be32 qpn; member
573 static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) in __mlx5_qp_lookup() argument
575 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); in __mlx5_qp_lookup()
614 int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
driver.h 729 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
730 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
device.h 585 __be32 qpn; member
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_qp.c 127 u32 i, offset, max_scan, qpn; in alloc_qpn() local
145 qpn = qpt->last + 2; in alloc_qpn()
146 if (qpn >= QPN_MAX) in alloc_qpn()
147 qpn = 2; in alloc_qpn()
148 if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues) in alloc_qpn()
149 qpn = (qpn | qpt->mask) + 2; in alloc_qpn()
150 offset = qpn & BITS_PER_PAGE_MASK; in alloc_qpn()
151 map = &qpt->map[qpn / BITS_PER_PAGE]; in alloc_qpn()
161 qpt->last = qpn; in alloc_qpn()
162 ret = qpn; in alloc_qpn()
[all …]
qib_verbs.h 908 struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);
/linux-4.1.27/include/linux/mlx4/
device.h 721 int qpn; member
851 __be32 qpn; member
979 static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) in mlx4_is_qp_reserved() argument
981 return (qpn < dev->phys_caps.base_sqpn + 8 + in mlx4_is_qp_reserved()
983 qpn >= dev->phys_caps.base_sqpn) || in mlx4_is_qp_reserved()
984 (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]); in mlx4_is_qp_reserved()
987 static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) in mlx4_is_guest_proxy() argument
991 if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8) in mlx4_is_guest_proxy()
1069 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
1195 u32 qpn; member
[all …]
qp.h 453 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
468 static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) in __mlx4_qp_lookup() argument
470 return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1)); in __mlx4_qp_lookup()
/linux-4.1.27/include/rdma/
iw_cm.h 101 u32 qpn; member
110 int qpn);
167 struct ib_qp *iw_cm_get_qp(struct ib_device *device, int qpn);
ib_cm.h 242 u32 qpn; member
/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_qp.c 107 u32 i, offset, max_scan, qpn; in alloc_qpn() local
132 qpn = qpt->last + 1; in alloc_qpn()
133 if (qpn >= QPN_MAX) in alloc_qpn()
134 qpn = 2; in alloc_qpn()
135 offset = qpn & BITS_PER_PAGE_MASK; in alloc_qpn()
136 map = &qpt->map[qpn / BITS_PER_PAGE]; in alloc_qpn()
148 qpt->last = qpn; in alloc_qpn()
149 ret = qpn; in alloc_qpn()
153 qpn = mk_qpn(qpt, map, offset); in alloc_qpn()
162 } while (offset < BITS_PER_PAGE && qpn < QPN_MAX); in alloc_qpn()
[all …]
ipath_verbs.h 734 struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);
/linux-4.1.27/drivers/infiniband/core/
cm_msgs.h 115 static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn) in cm_req_set_local_qpn() argument
117 req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_req_set_local_qpn()
523 static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn) in cm_rep_set_local_qpn() argument
525 rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_rep_set_local_qpn()
643 static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn) in cm_dreq_set_remote_qpn() argument
645 dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_dreq_set_remote_qpn()
692 static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn) in cm_lap_set_remote_qpn() argument
694 lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_lap_set_remote_qpn()
829 __be32 qpn) in cm_sidr_rep_set_qpn() argument
831 sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_sidr_rep_set_qpn()
agent.h 49 int port_num, int qpn);
agent.c 83 int port_num, int qpn) in agent_send_response() argument
101 agent = port_priv->agent[qpn]; in agent_send_response()
user_mad.c 231 packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); in recv_handler()
522 be32_to_cpu(packet->mad.hdr.qpn), in ib_umad_write()
640 if (ureq.qpn != 0 && ureq.qpn != 1) { in ib_umad_reg_agent()
643 ureq.qpn); in ib_umad_reg_agent()
678 ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, in ib_umad_reg_agent()
742 if (ureq.qpn != 0 && ureq.qpn != 1) { in ib_umad_reg_agent2()
745 ureq.qpn); in ib_umad_reg_agent2()
794 ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, in ib_umad_reg_agent2()
mad.c 212 int ret2, qpn; in ib_register_mad_agent() local
217 qpn = get_spl_qp_index(qp_type); in ib_register_mad_agent()
218 if (qpn == -1) { in ib_register_mad_agent()
327 if (!port_priv->qp_info[qpn].qp) { in ib_register_mad_agent()
329 "ib_register_mad_agent: QP %d not supported\n", qpn); in ib_register_mad_agent()
341 mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd, in ib_register_mad_agent()
357 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_agent()
364 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_agent()
501 int qpn; in ib_register_mad_snoop() local
509 qpn = get_spl_qp_index(qp_type); in ib_register_mad_snoop()
[all …]
ucm.c 249 urep->qpn = krep->qpn; in ib_ucm_event_sidr_rep_get()
762 param.qp_num = cmd.qpn; in ib_ucm_send_req()
808 param.qp_num = cmd.qpn; in ib_ucm_send_rep()
1059 param.qp_num = cmd.qpn; in ib_ucm_send_sidr_rep()
iwcm.c 531 qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn); in iw_cm_accept()
592 qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn); in iw_cm_connect()
cma.c 2697 event.param.ud.qp_num = rep->qpn; in cma_sidr_rep_handler()
2876 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; in cma_connect_iw()
2879 iw_param.qpn = id_priv->qp_num; in cma_connect_iw()
2974 iw_param.qpn = id_priv->qp_num; in cma_accept_iw()
2976 iw_param.qpn = conn_param->qp_num; in cma_accept_iw()
uverbs_cmd.c 1755 resp.qpn = qp->qp_num; in ib_uverbs_create_qp()
1855 attr.qp_num = cmd.qpn; in ib_uverbs_open_qp()
1876 resp.qpn = qp->qp_num; in ib_uverbs_open_qp()
cm.c 3128 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg)); in cm_format_sidr_rep_event()
/linux-4.1.27/drivers/infiniband/hw/mlx5/
odp.c 165 int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn, in mlx5_ib_page_fault_resume()
170 qp->mqp.qpn); in mlx5_ib_page_fault_resume()
411 wqe_index, qp->mqp.qpn); in mlx5_ib_mr_initiator_pfault_handler()
421 wqe_index, qp->mqp.qpn, in mlx5_ib_mr_initiator_pfault_handler()
428 if (qp->mqp.qpn != ctrl_qpn) { in mlx5_ib_mr_initiator_pfault_handler()
430 wqe_index, qp->mqp.qpn, in mlx5_ib_mr_initiator_pfault_handler()
559 -ret, wqe_index, qp->mqp.qpn); in mlx5_ib_mr_wqe_pfault_handler()
596 qp->mqp.qpn, resume_with_error, pfault->mpfault.flags); in mlx5_ib_mr_wqe_pfault_handler()
cq.c 415 uint32_t qpn; in mlx5_poll_one() local
447 qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; in mlx5_poll_one()
448 if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { in mlx5_poll_one()
453 mqp = __mlx5_qp_lookup(dev->mdev, qpn); in mlx5_poll_one()
456 cq->mcq.cqn, qpn); in mlx5_poll_one()
906 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) in mlx5_ib_cq_clean() argument
912 __mlx5_ib_cq_clean(cq, qpn, srq); in mlx5_ib_cq_clean()
qp.c 212 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn); in mlx5_ib_qp_event()
1040 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
1168 qp->mqp.qpn); in destroy_qp_common()
1175 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, in destroy_qp_common()
1178 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
1184 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn); in destroy_qp_common()
1285 qp->ibqp.qp_num = qp->mqp.qpn; in mlx5_ib_create_qp()
1288 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn, in mlx5_ib_create_qp()
1754 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, in __mlx5_ib_modify_qp()
1757 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in __mlx5_ib_modify_qp()
[all …]
mlx5_ib.h 524 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
525 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
/linux-4.1.27/drivers/infiniband/hw/mlx4/
qp.c 130 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
131 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
142 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
143 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
149 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || in is_sqp()
150 qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { in is_sqp()
167 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_qp0()
168 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); in is_qp0()
174 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) { in is_qp0()
323 "on QP %06x\n", type, qp->qpn); in mlx4_ib_qp_event()
[all …]
mlx4_ib.h 677 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
678 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
810 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
811 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
cq.c 706 (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) { in mlx4_ib_poll_one()
925 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) in __mlx4_ib_cq_clean() argument
952 if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) { in __mlx4_ib_cq_clean()
978 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) in mlx4_ib_cq_clean() argument
981 __mlx4_ib_cq_clean(cq, qpn, srq); in mlx4_ib_cq_clean()
mad.c 361 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; in forward_trap() local
363 struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; in forward_trap()
368 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, in forward_trap()
1158 static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave) in is_proxy_qp0() argument
1162 return (qpn >= proxy_start && qpn <= proxy_start + 1); in is_proxy_qp0()
main.c 1064 ctrl->qpn = cpu_to_be32(qp->qp_num); in __mlx4_ib_create_flow()
1795 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, in mlx4_ib_update_qps()
2462 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn) in mlx4_ib_steer_qp_alloc() argument
2474 *qpn = dev->steer_qpn_base + offset; in mlx4_ib_steer_qp_alloc()
2478 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count) in mlx4_ib_steer_qp_free() argument
2480 if (!qpn || in mlx4_ib_steer_qp_free()
2484 BUG_ON(qpn < dev->steer_qpn_base); in mlx4_ib_steer_qp_free()
2487 qpn - dev->steer_qpn_base, in mlx4_ib_steer_qp_free()
/linux-4.1.27/drivers/infiniband/hw/amso1100/
c2_qp.c 390 qp->qpn = ret; in c2_alloc_qpn()
397 static void c2_free_qpn(struct c2_dev *c2dev, int qpn) in c2_free_qpn() argument
400 idr_remove(&c2dev->qp_table.idr, qpn); in c2_free_qpn()
404 struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn) in c2_find_qpn() argument
410 qp = idr_find(&c2dev->qp_table.idr, qpn); in c2_find_qpn()
432 qp->ibqp.qp_num = qp->qpn; in c2_alloc_qp()
570 c2_free_qpn(c2dev, qp->qpn); in c2_alloc_qp()
613 c2_free_qpn(c2dev, qp->qpn); in c2_free_qp()
c2_provider.h 113 int qpn; member
c2_cm.c 54 ibqp = c2_get_qp(cm_id->device, iw_param->qpn); in c2_llp_connect()
301 ibqp = c2_get_qp(cm_id->device, iw_param->qpn); in c2_llp_accept()
c2.h 491 extern struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
503 extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
c2_provider.c 219 struct ib_qp *c2_get_qp(struct ib_device *device, int qpn) in c2_get_qp() argument
224 qp = c2_find_qpn(c2dev, qpn); in c2_get_qp()
226 __func__, qp, qpn, device, in c2_get_qp()
/linux-4.1.27/drivers/infiniband/ulp/ipoib/
ipoib_cm.c 427 data.qpn = cpu_to_be32(priv->qp->qp_num); in ipoib_cm_send_rep()
1051 u32 qpn, in ipoib_cm_send_req() argument
1058 data.qpn = cpu_to_be32(priv->qp->qp_num); in ipoib_cm_send_req()
1063 req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn); in ipoib_cm_send_req()
1111 static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, in ipoib_cm_tx_init() argument
1146 ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec); in ipoib_cm_tx_init()
1153 p->qp->qp_num, pathrec->dgid.raw, qpn); in ipoib_cm_tx_init()
1317 u32 qpn; in ipoib_cm_tx_start() local
1326 qpn = IPOIB_QPN(neigh->daddr); in ipoib_cm_tx_start()
1332 ret = ipoib_cm_tx_init(p, qpn, &pathrec); in ipoib_cm_tx_start()
ipoib_ib.c 512 struct ib_ah *address, u32 qpn, in post_send() argument
536 priv->tx_wr.wr.ud.remote_qpn = qpn; in post_send()
551 struct ipoib_ah *address, u32 qpn) in ipoib_send() argument
582 skb->len, address, qpn); in ipoib_send()
615 address->ah, qpn, tx_req, phead, hlen); in ipoib_send()
ipoib.h 187 __be32 qpn; /* High byte MUST be ignored on receive */ member
473 struct ipoib_ah *address, u32 qpn);
/linux-4.1.27/drivers/infiniband/hw/ocrdma/
Docrdma_verbs.c1640 u32 qpn = 0, wqe_idx = 0; in ocrdma_discard_cqes() local
1663 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK; in ocrdma_discard_cqes()
1666 if (qpn == 0 || qpn != qp->id) in ocrdma_discard_cqes()
1691 cqe->cmn.qpn = 0; in ocrdma_discard_cqes()
2817 u16 qpn = 0; in ocrdma_poll_hwcq() local
2832 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK); in ocrdma_poll_hwcq()
2834 if (qpn == 0) in ocrdma_poll_hwcq()
2836 qp = dev->qp_tbl[qpn]; in ocrdma_poll_hwcq()
2851 cqe->cmn.qpn = 0; in ocrdma_poll_hwcq()
ocrdma_sli.h 1711 u32 qpn; member
1726 u32 qpn; member
/linux-4.1.27/drivers/infiniband/hw/nes/
Dnes.c362 struct ib_qp *nes_get_qp(struct ib_device *device, int qpn) in nes_get_qp() argument
368 if ((qpn < NES_FIRST_QPN) || (qpn >= (NES_FIRST_QPN + nesadapter->max_qp))) in nes_get_qp()
371 return &nesadapter->qp_table[qpn - NES_FIRST_QPN]->ibqp; in nes_get_qp()
nes_cm.c 3241 ibqp = nes_get_qp(cm_id->device, conn_param->qpn); in nes_accept()
3516 ibqp = nes_get_qp(cm_id->device, conn_param->qpn); in nes_connect()
/linux-4.1.27/drivers/net/ethernet/ibm/ehea/
Dehea_phyp.h111 u32 qpn; /* 00 */ member
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
iwch_provider.c 1098 static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn) in iwch_get_qp() argument
1100 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn); in iwch_get_qp()
1101 return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn); in iwch_get_qp()
iwch_cm.c 1801 struct iwch_qp *qp = get_qhp(h, conn_param->qpn); in iwch_accept_cr()
1929 ep->com.qp = get_qhp(h, conn_param->qpn); in iwch_connect()
1931 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, in iwch_connect()
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
qp.c 1865 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn) in c4iw_get_qp() argument
1867 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn); in c4iw_get_qp()
1868 return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn); in c4iw_get_qp()
iw_cxgb4.h 1009 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
cm.c 2822 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); in c4iw_accept_cr()
3027 ep->com.qp = get_qhp(dev, conn_param->qpn); in c4iw_connect()
3029 PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn); in c4iw_connect()
3034 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, in c4iw_connect()