Searched refs:qpn (Results 1 – 87 of 87) sorted by relevance

/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
mcg.c
126 u32 qpn) in get_promisc_qp() argument
137 if (pqp->qpn == qpn) in get_promisc_qp()
150 unsigned int index, u32 qpn) in new_steering_entry() argument
177 pqp = get_promisc_qp(dev, port, steer, qpn); in new_steering_entry()
184 dqp->qpn = qpn; in new_steering_entry()
210 if (pqp->qpn == qpn) in new_steering_entry()
219 mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK); in new_steering_entry()
242 unsigned int index, u32 qpn) in existing_steering_entry() argument
254 pqp = get_promisc_qp(dev, port, steer, qpn); in existing_steering_entry()
273 if (qpn == dqp->qpn) in existing_steering_entry()
[all …]
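The mcg.c hits above all orbit one idiom: a QPN is compared or stored only after masking to its low 24 bits, since hardware MGM member entries hold big-endian 24-bit QPNs (see line 219). A minimal standalone model of that store; the value of MGM_QPN_MASK is assumed here, not taken from the source:

    /* Model of the masked, big-endian member store seen at mcg.c:219. */
    #include <stdint.h>
    #include <arpa/inet.h>          /* htonl() */

    #define MGM_QPN_MASK 0x00ffffffu  /* assumed conventional 24-bit mask */

    static void mgm_add_member(uint32_t *members, unsigned int *count,
                               uint32_t qpn)
    {
            members[(*count)++] = htonl(qpn & MGM_QPN_MASK);
    }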
qp.c
49 void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type) in mlx4_qp_event() argument
56 qp = __mlx4_qp_lookup(dev, qpn); in mlx4_qp_event()
63 mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn); in mlx4_qp_event()
79 *proxy_qp0 = qp->qpn >= pf_proxy_offset && qp->qpn <= pf_proxy_offset + 1; in is_master_qp0()
81 *real_qp0 = qp->qpn >= dev->phys_caps.base_sqpn && in is_master_qp0()
82 qp->qpn <= dev->phys_caps.base_sqpn + 1; in is_master_qp0()
145 ret = mlx4_cmd(dev, 0, qp->qpn, 2, in __mlx4_qp_modify()
150 port = (qp->qpn & 1) + 1; in __mlx4_qp_modify()
174 cpu_to_be32(qp->qpn); in __mlx4_qp_modify()
177 qp->qpn | (!!sqd_event << 31), in __mlx4_qp_modify()
[all …]
en_resources.c
41 int is_tx, int rss, int qpn, int cqn, in mlx4_en_fill_qp_context() argument
62 context->local_qpn = cpu_to_be32(qpn); in mlx4_en_fill_qp_context()
87 en_dbg(HW, priv, "Setting RX qp %x tunnel mode to RX tunneled & non-tunneled\n", qpn); in mlx4_en_fill_qp_context()
102 ret = mlx4_update_qp(priv->mdev->dev, qp->qpn, in mlx4_en_change_mcast_lb()
resource_tracker.c
224 int qpn; member
732 u8 slave, u32 qpn) in update_vport_qp_param() argument
753 if (mlx4_is_qp_reserved(dev, qpn)) in update_vport_qp_param()
767 err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params); in update_vport_qp_param()
1095 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn) in alloc_fs_rule_tr() argument
1105 ret->qpn = qpn; in alloc_fs_rule_tr()
1427 static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, in qp_res_start_move_to() argument
1437 r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn); in qp_res_start_move_to()
1697 static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn) in valid_reserved() argument
1699 return mlx4_is_qp_reserved(dev, qpn) && in valid_reserved()
[all …]
en_rx.c
1122 static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, in mlx4_en_config_rss_qp() argument
1135 err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); in mlx4_en_config_rss_qp()
1137 en_err(priv, "Failed to allocate qp #%x\n", qpn); in mlx4_en_config_rss_qp()
1144 qpn, ring->cqn, -1, context); in mlx4_en_config_rss_qp()
1171 u32 qpn; in mlx4_en_create_drop_qp() local
1173 err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn, in mlx4_en_create_drop_qp()
1179 err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL); in mlx4_en_create_drop_qp()
1182 mlx4_qp_release_range(priv->mdev->dev, qpn, 1); in mlx4_en_create_drop_qp()
1191 u32 qpn; in mlx4_en_destroy_drop_qp() local
1193 qpn = priv->drop_qp.qpn; in mlx4_en_destroy_drop_qp()
[all …]
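The drop-QP fragments show the full mlx4 QPN lifecycle: reserve a number range, allocate the QP at that number, release the range if allocation fails, and release it again on destroy (lines 1191 and 1193). A sketch of that flow only, assuming the trailing reserve-flags argument may be 0; error handling and the destroy path are trimmed:

    /* Sketch: mirrors the reserve/alloc/release calls shown above. */
    static int create_drop_qp(struct mlx4_dev *dev, struct mlx4_qp *qp)
    {
            int qpn, err;

            err = mlx4_qp_reserve_range(dev, 1, 1, &qpn, 0); /* 1 QPN, align 1 */
            if (err)
                    return err;
            err = mlx4_qp_alloc(dev, qpn, qp, GFP_KERNEL);
            if (err)
                    mlx4_qp_release_range(dev, qpn, 1);      /* undo reservation */
            return err;
    }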
en_netdev.c
189 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; in mlx4_en_filter_work()
474 int qpn, u64 *reg_id) in mlx4_en_tunnel_steer_add() argument
482 err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, in mlx4_en_tunnel_steer_add()
494 unsigned char *mac, int *qpn, u64 *reg_id) in mlx4_en_uc_steer_add() argument
505 qp.qpn = *qpn; in mlx4_en_uc_steer_add()
525 rule.qpn = *qpn; in mlx4_en_uc_steer_add()
546 unsigned char *mac, int qpn, u64 reg_id) in mlx4_en_uc_steer_release() argument
556 qp.qpn = qpn; in mlx4_en_uc_steer_release()
578 int *qpn = &priv->base_qpn; in mlx4_en_get_qp() local
593 *qpn = base_qpn + index; in mlx4_en_get_qp()
[all …]
en_tx.c
116 err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn, in mlx4_en_create_tx_ring()
123 err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL); in mlx4_en_create_tx_ring()
125 en_err(priv, "Failed allocating qp %d\n", ring->qpn); in mlx4_en_create_tx_ring()
156 mlx4_qp_release_range(mdev->dev, ring->qpn, 1); in mlx4_en_create_tx_ring()
178 en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn); in mlx4_en_destroy_tx_ring()
184 mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); in mlx4_en_destroy_tx_ring()
210 ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8); in mlx4_en_activate_tx_ring()
213 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, in mlx4_en_activate_tx_ring()
en_ethtool.c
1152 ring_index[n] = rss_map->qps[n % rss_rings].qpn - in mlx4_en_get_rxfh()
1497 u32 qpn; in mlx4_en_flow_replace() local
1513 qpn = priv->drop_qp.qpn; in mlx4_en_flow_replace()
1515 qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); in mlx4_en_flow_replace()
1522 qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn; in mlx4_en_flow_replace()
1523 if (!qpn) { in mlx4_en_flow_replace()
1529 rule.qpn = qpn; in mlx4_en_flow_replace()
mlx4.h
303 __be32 qpn; member
467 u32 qpn; member
954 int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp);
955 void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
1203 void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
mlx4_en.h
292 int qpn; member
796 int is_tx, int rss, int qpn, int cqn, int user_prio,
en_main.c
130 priv->rss_map.indir_qp.qpn) { in mlx4_en_update_loopback_state()
main.c
689 int mlx4_get_parav_qkey(struct mlx4_dev *dev, u32 qpn, u32 *qkey) in mlx4_get_parav_qkey() argument
693 if (qpn >= dev->phys_caps.base_tunnel_sqpn + 8 * MLX4_MFUNC_MAX || in mlx4_get_parav_qkey()
694 qpn < dev->phys_caps.base_proxy_sqpn) in mlx4_get_parav_qkey()
697 if (qpn >= dev->phys_caps.base_tunnel_sqpn) in mlx4_get_parav_qkey()
699 qk += qpn - dev->phys_caps.base_tunnel_sqpn; in mlx4_get_parav_qkey()
701 qk += qpn - dev->phys_caps.base_proxy_sqpn; in mlx4_get_parav_qkey()
port.c
308 int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) in __mlx4_replace_mac() argument
312 int index = qpn - info->base_qpn; in __mlx4_replace_mac()
eq.c
525 be32_to_cpu(eqe->event.qp.qpn) in mlx4_eq_int()
540 mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & in mlx4_eq_int()
mr.c
865 mpt_entry->qpn = cpu_to_be32(MLX4_MPT_QP_FLAG_BOUND_QP); in mlx4_mw_enable()
fw.c
2532 qp.qpn = be32_to_cpu(mgm->qp[i]); in mlx4_opreq_action()
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/
qp.c
95 int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK; in mlx5_eq_pagefault() local
96 struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn); in mlx5_eq_pagefault()
103 qpn); in mlx5_eq_pagefault()
130 qpn, pfault.rdma.r_key); in mlx5_eq_pagefault()
150 qpn, pfault.wqe.wqe_index); in mlx5_eq_pagefault()
159 eqe->sub_type, qpn); in mlx5_eq_pagefault()
170 qpn); in mlx5_eq_pagefault()
213 qp->qpn = be32_to_cpu(out.qpn) & 0xffffff; in mlx5_core_create_qp()
214 mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn); in mlx5_core_create_qp()
218 err = radix_tree_insert(&table->tree, qp->qpn, qp); in mlx5_core_create_qp()
[all …]
mcg.c
42 __be32 qpn; member
54 __be32 qpn; member
64 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) in mlx5_core_attach_mcg() argument
74 in.qpn = cpu_to_be32(qpn); in mlx5_core_attach_mcg()
86 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn) in mlx5_core_detach_mcg() argument
96 in.qpn = cpu_to_be32(qpn); in mlx5_core_detach_mcg()
debugfs.c
544 &qp->dbg, qp->qpn, qp_fields, in mlx5_debug_qp_add()
/linux-4.4.14/drivers/staging/rdma/hfi1/
qp.h
88 static inline u32 qpn_hash(struct hfi1_qp_ibdev *dev, u32 qpn) in qpn_hash() argument
90 return hash_32(qpn, dev->qp_table_bits); in qpn_hash()
102 u32 qpn) __must_hold(RCU) in hfi1_lookup_qpn() argument
106 if (unlikely(qpn <= 1)) { in hfi1_lookup_qpn()
107 qp = rcu_dereference(ibp->qp[qpn]); in hfi1_lookup_qpn()
110 u32 n = qpn_hash(dev->qp_dev, qpn); in hfi1_lookup_qpn()
114 if (qp->ibqp.qp_num == qpn) in hfi1_lookup_qpn()
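hfi1 resolves a QPN in two tiers: QPNs 0 and 1 (the special SMI/GSI QPs) come straight from a per-port array, and everything else is chased down a hash bucket keyed by hash_32(qpn, table_bits). A standalone model of that shape, with illustrative names, a multiply-shift hash standing in for hash_32(), and the RCU protection visible above omitted:

    #include <stdint.h>
    #include <stddef.h>

    #define TABLE_BITS 10

    struct qp { uint32_t qp_num; struct qp *next; };

    static inline uint32_t qpn_hash(uint32_t qpn)
    {
            return (qpn * 0x9e370001u) >> (32 - TABLE_BITS);  /* multiply-shift */
    }

    static struct qp *lookup_qpn(struct qp *special[2],
                                 struct qp *table[1 << TABLE_BITS], uint32_t qpn)
    {
            struct qp *qp;

            if (qpn <= 1)                     /* QP0/QP1: direct per-port slot */
                    return special[qpn];
            for (qp = table[qpn_hash(qpn)]; qp; qp = qp->next)
                    if (qp->qp_num == qpn)
                            return qp;
            return NULL;                      /* not found */
    }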
qp.c
144 u32 i, offset, max_scan, qpn; in alloc_qpn() local
162 qpn = qpt->last + qpt->incr; in alloc_qpn()
163 if (qpn >= QPN_MAX) in alloc_qpn()
164 qpn = qpt->incr | ((qpt->last & 1) ^ 1); in alloc_qpn()
166 offset = qpn & BITS_PER_PAGE_MASK; in alloc_qpn()
167 map = &qpt->map[qpn / BITS_PER_PAGE]; in alloc_qpn()
177 qpt->last = qpn; in alloc_qpn()
178 ret = qpn; in alloc_qpn()
186 qpn = mk_qpn(qpt, map, offset); in alloc_qpn()
187 } while (offset < BITS_PER_PAGE && qpn < QPN_MAX); in alloc_qpn()
[all …]
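alloc_qpn() here (and its qib and ipath cousins further down) hands out QPNs from a bitmap: start scanning just past the last allocation, wrap below QPN_MAX, and skip 0 and 1, which belong to the special QPs. The drivers page the map, use atomic bit ops, and differ in the stride past qpt->last; this flat, single-threaded sketch keeps only the search shape:

    #include <stdint.h>

    #define QPN_MAX (1 << 24)                 /* 24-bit QPN space */

    static int alloc_qpn(uint8_t bitmap[QPN_MAX / 8], uint32_t *last)
    {
            uint32_t qpn = *last + 1, scanned;

            for (scanned = 0; scanned < QPN_MAX; scanned++, qpn++) {
                    if (qpn >= QPN_MAX)
                            qpn = 2;          /* 0 and 1 are reserved */
                    if (!(bitmap[qpn / 8] & (1u << (qpn % 8)))) {
                            bitmap[qpn / 8] |= 1u << (qpn % 8);
                            *last = qpn;
                            return (int)qpn;
                    }
            }
            return -1;                        /* QPN space exhausted */
    }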
trace.h
301 __field(u32, qpn)
308 __entry->qpn = qp->ibqp.qp_num;
314 __entry->qpn,
335 __field(u32, qpn)
340 __entry->qpn = qp->ibqp.qp_num;
346 __entry->qpn,
452 __field(u32, qpn)
501 __entry->qpn =
533 __entry->qpn,
570 __field(u32, qpn)
[all …]
driver.c
574 u32 etype = rhf_rcv_type(rhf), qpn; in prescan_rxq() local
604 qpn = be32_to_cpu(ohdr->bth[1]) & HFI1_QPN_MASK; in prescan_rxq()
606 qp = hfi1_lookup_qpn(ibp, qpn); in prescan_rxq()
diag.c
1290 u32 qpn = 0; in hfi1_filter_mad_mgmt_class() local
1311 qpn = be32_to_cpu(ohdr->bth[1]) & 0x00FFFFFF; in hfi1_filter_mad_mgmt_class()
1312 if (qpn <= 1) { in hfi1_filter_mad_mgmt_class()
chip.c
10065 unsigned qpns_per_vl, ctxt, i, qpn, n = 1, m; in init_qos() local
10095 for (qpn = 0, tctxt = ctxt; in init_qos()
10096 krcvqs[i] && qpn < qpns_per_vl; qpn++) { in init_qos()
10100 idx = (qpn << n) ^ i; in init_qos()
mad.c
92 u32 qpn = ppd_from_ibp(ibp)->sm_trap_qp; in send_trap() local
113 send_buf = ib_create_send_mad(agent, qpn, pkey_idx, 0, in send_trap()
/linux-4.4.14/include/uapi/rdma/
ib_user_mad.h
78 __be32 qpn; member
122 __be32 qpn; member
187 __u8 qpn; member
222 __u32 qpn; member
ib_user_cm.h
133 __u32 qpn; member
158 __u32 qpn; member
211 __u32 qpn; member
295 __u32 qpn; member
ib_user_verbs.h
543 __u32 qpn; member
552 __u32 qpn; member
/linux-4.4.14/drivers/infiniband/hw/mthca/
mthca_qp.c
197 return qp->qpn >= dev->qp_table.sqp_start && in is_sqp()
198 qp->qpn <= dev->qp_table.sqp_start + 3; in is_sqp()
203 return qp->qpn >= dev->qp_table.sqp_start && in is_qp0()
204 qp->qpn <= dev->qp_table.sqp_start + 1; in is_qp0()
237 void mthca_qp_event(struct mthca_dev *dev, u32 qpn, in mthca_qp_event() argument
244 qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1)); in mthca_qp_event()
251 event_type, qpn); in mthca_qp_event()
448 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox); in mthca_query_qp()
614 qp_context->local_qpn = cpu_to_be32(qp->qpn); in __mthca_modify_qp()
755 ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE << in __mthca_modify_qp()
[all …]
mthca_eq.c
143 __be32 qpn; member
282 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
287 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
292 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
297 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
307 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
312 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
317 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
322 mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff, in mthca_eq_int()
mthca_mad.c
165 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; in forward_trap() local
167 struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; in forward_trap()
172 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, in forward_trap()
mthca_dev.h
506 void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
527 void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
557 int qpn,
mthca_cq.c
278 void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn, in mthca_cq_clean() argument
302 qpn, cq->cqn, cq->cons_index, prod_index); in mthca_cq_clean()
310 if (cqe->my_qpn == cpu_to_be32(qpn)) { in mthca_cq_clean()
519 if (!*cur_qp || be32_to_cpu(cqe->my_qpn) != (*cur_qp)->qpn) { in mthca_poll_one()
mthca_provider.h
264 u32 qpn; member
mthca_cmd.h
313 int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn);
mthca_cmd.c
1835 int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn) in mthca_CONF_SPECIAL_QP() argument
1856 return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP, in mthca_CONF_SPECIAL_QP()
mthca_provider.c
588 qp->ibqp.qp_num = qp->qpn; in mthca_create_qp()
/linux-4.4.14/include/linux/mlx5/
qp.h
441 int qpn; member
519 __be32 qpn; member
525 __be32 qpn; member
536 __be32 qpn; member
550 __be32 qpn; member
566 __be32 qpn; member
598 static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn) in __mlx5_qp_lookup() argument
600 return radix_tree_lookup(&dev->priv.qp_table.tree, qpn); in __mlx5_qp_lookup()
639 int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
mlx5_ifc.h
1588 u8 qpn[0x18]; member
2178 u8 qpn[0x18]; member
2602 u8 qpn[0x18]; member
2632 u8 qpn[0x18]; member
2822 u8 qpn[0x18]; member
2852 u8 qpn[0x18]; member
2882 u8 qpn[0x18]; member
3261 u8 qpn[0x18]; member
3930 u8 qpn[0x18]; member
3952 u8 qpn[0x18]; member
[all …]
driver.h
767 int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
768 int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
device.h
659 __be32 qpn; member
/linux-4.4.14/drivers/infiniband/hw/qib/
qib_qp.c
127 u32 i, offset, max_scan, qpn; in alloc_qpn() local
145 qpn = qpt->last + 2; in alloc_qpn()
146 if (qpn >= QPN_MAX) in alloc_qpn()
147 qpn = 2; in alloc_qpn()
148 if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues) in alloc_qpn()
149 qpn = (qpn | qpt->mask) + 2; in alloc_qpn()
150 offset = qpn & BITS_PER_PAGE_MASK; in alloc_qpn()
151 map = &qpt->map[qpn / BITS_PER_PAGE]; in alloc_qpn()
161 qpt->last = qpn; in alloc_qpn()
162 ret = qpn; in alloc_qpn()
[all …]
qib_verbs.h
918 struct qib_qp *qib_lookup_qpn(struct qib_ibport *ibp, u32 qpn);
/linux-4.4.14/include/linux/mlx4/
device.h
738 int qpn; member
883 __be32 qpn; member
1012 static inline int mlx4_is_qp_reserved(struct mlx4_dev *dev, u32 qpn) in mlx4_is_qp_reserved() argument
1014 return (qpn < dev->phys_caps.base_sqpn + 8 + in mlx4_is_qp_reserved()
1016 qpn >= dev->phys_caps.base_sqpn) || in mlx4_is_qp_reserved()
1017 (qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW]); in mlx4_is_qp_reserved()
1020 static inline int mlx4_is_guest_proxy(struct mlx4_dev *dev, int slave, u32 qpn) in mlx4_is_guest_proxy() argument
1024 if (qpn >= guest_proxy_base && qpn < guest_proxy_base + 8) in mlx4_is_guest_proxy()
1102 int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
1228 u32 qpn; member
[all …]
qp.h
468 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
483 static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) in __mlx4_qp_lookup() argument
485 return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1)); in __mlx4_qp_lookup()
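__mlx4_qp_lookup() appears in full just above: the wire QPN is masked by num_qps - 1 (a power of two) and used as the radix-tree key, which is how a 24-bit QPN from an event queue entry lands on the driver's table. A sketch of the async-event path built on it; the qp_table locking and refcounting the real mlx4_qp_event() takes are omitted, and the event callback member of struct mlx4_qp is assumed:

    static void handle_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
    {
            struct mlx4_qp *qp = __mlx4_qp_lookup(dev, qpn);

            if (!qp)                  /* stale event: QP already freed */
                    return;
            qp->event(qp, (enum mlx4_event)event_type);  /* assumed member */
    }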
/linux-4.4.14/include/rdma/
iw_cm.h
102 u32 qpn; member
111 int qpn);
168 struct ib_qp *iw_cm_get_qp(struct ib_device *device, int qpn);
ib_cm.h
247 u32 qpn; member
/linux-4.4.14/drivers/staging/rdma/ipath/
ipath_qp.c
106 u32 i, offset, max_scan, qpn; in alloc_qpn() local
131 qpn = qpt->last + 1; in alloc_qpn()
132 if (qpn >= QPN_MAX) in alloc_qpn()
133 qpn = 2; in alloc_qpn()
134 offset = qpn & BITS_PER_PAGE_MASK; in alloc_qpn()
135 map = &qpt->map[qpn / BITS_PER_PAGE]; in alloc_qpn()
147 qpt->last = qpn; in alloc_qpn()
148 ret = qpn; in alloc_qpn()
152 qpn = mk_qpn(qpt, map, offset); in alloc_qpn()
161 } while (offset < BITS_PER_PAGE && qpn < QPN_MAX); in alloc_qpn()
[all …]
ipath_verbs.h
742 struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);
/linux-4.4.14/drivers/infiniband/core/
cm_msgs.h
115 static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn) in cm_req_set_local_qpn() argument
117 req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_req_set_local_qpn()
523 static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn) in cm_rep_set_local_qpn() argument
525 rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_rep_set_local_qpn()
643 static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn) in cm_dreq_set_remote_qpn() argument
645 dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_dreq_set_remote_qpn()
692 static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn) in cm_lap_set_remote_qpn() argument
694 lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_lap_set_remote_qpn()
829 __be32 qpn) in cm_sidr_rep_set_qpn() argument
831 sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) | in cm_sidr_rep_set_qpn()
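Every setter in cm_msgs.h shares one packing idiom: the 24-bit QPN occupies bits 31:8 of a big-endian word, with the low byte left for a neighbouring field. The kernel helpers shift an already-big-endian qpn; this standalone round trip works in host order to show just the layout:

    #include <assert.h>
    #include <stdint.h>
    #include <arpa/inet.h>            /* htonl()/ntohl() */

    static uint32_t pack_qpn(uint32_t qpn, uint8_t low)  /* host-order qpn */
    {
            return htonl((qpn << 8) | low);   /* QPN in bits 31:8, BE word */
    }

    static uint32_t unpack_qpn(uint32_t field)
    {
            return ntohl(field) >> 8;         /* back to host-order QPN */
    }

    int main(void)
    {
            assert(unpack_qpn(pack_qpn(0xabcdef, 0x17)) == 0xabcdef);
            return 0;
    }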
agent.h
49 int port_num, int qpn, size_t resp_mad_len, bool opa);
agent.c
83 int port_num, int qpn, size_t resp_mad_len, bool opa) in agent_send_response() argument
101 agent = port_priv->agent[qpn]; in agent_send_response()
user_mad.c
230 packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); in recv_handler()
526 be32_to_cpu(packet->mad.hdr.qpn), in ib_umad_write()
645 if (ureq.qpn != 0 && ureq.qpn != 1) { in ib_umad_reg_agent()
648 ureq.qpn); in ib_umad_reg_agent()
683 ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, in ib_umad_reg_agent()
747 if (ureq.qpn != 0 && ureq.qpn != 1) { in ib_umad_reg_agent2()
750 ureq.qpn); in ib_umad_reg_agent2()
799 ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI, in ib_umad_reg_agent2()
mad.c
212 int ret2, qpn; in ib_register_mad_agent() local
217 qpn = get_spl_qp_index(qp_type); in ib_register_mad_agent()
218 if (qpn == -1) { in ib_register_mad_agent()
327 if (!port_priv->qp_info[qpn].qp) { in ib_register_mad_agent()
329 "ib_register_mad_agent: QP %d not supported\n", qpn); in ib_register_mad_agent()
350 mad_agent_priv->qp_info = &port_priv->qp_info[qpn]; in ib_register_mad_agent()
357 mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp; in ib_register_mad_agent()
492 int qpn; in ib_register_mad_snoop() local
500 qpn = get_spl_qp_index(qp_type); in ib_register_mad_snoop()
501 if (qpn == -1) { in ib_register_mad_snoop()
[all …]
ucm.c
249 urep->qpn = krep->qpn; in ib_ucm_event_sidr_rep_get()
761 param.qp_num = cmd.qpn; in ib_ucm_send_req()
807 param.qp_num = cmd.qpn; in ib_ucm_send_rep()
1058 param.qp_num = cmd.qpn; in ib_ucm_send_sidr_rep()
iwcm.c
531 qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn); in iw_cm_accept()
592 qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn); in iw_cm_connect()
cma.c
3087 event.param.ud.qp_num = rep->qpn; in cma_sidr_rep_handler()
3267 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; in cma_connect_iw()
3270 iw_param.qpn = id_priv->qp_num; in cma_connect_iw()
3360 iw_param.qpn = id_priv->qp_num; in cma_accept_iw()
3362 iw_param.qpn = conn_param->qp_num; in cma_accept_iw()
uverbs_cmd.c
1897 resp.base.qpn = qp->qp_num; in create_qp()
2105 attr.qp_num = cmd.qpn; in ib_uverbs_open_qp()
2126 resp.qpn = qp->qp_num; in ib_uverbs_open_qp()
cm.c
3202 param->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(sidr_rep_msg)); in cm_format_sidr_rep_event()
/linux-4.4.14/drivers/infiniband/hw/mlx5/
odp.c
158 int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn, in mlx5_ib_page_fault_resume()
163 qp->mqp.qpn); in mlx5_ib_page_fault_resume()
404 wqe_index, qp->mqp.qpn); in mlx5_ib_mr_initiator_pfault_handler()
414 wqe_index, qp->mqp.qpn, in mlx5_ib_mr_initiator_pfault_handler()
421 if (qp->mqp.qpn != ctrl_qpn) { in mlx5_ib_mr_initiator_pfault_handler()
423 wqe_index, qp->mqp.qpn, in mlx5_ib_mr_initiator_pfault_handler()
552 -ret, wqe_index, qp->mqp.qpn); in mlx5_ib_mr_wqe_pfault_handler()
589 qp->mqp.qpn, resume_with_error, pfault->mpfault.flags); in mlx5_ib_mr_wqe_pfault_handler()
cq.c
423 uint32_t qpn; in mlx5_poll_one() local
455 qpn = ntohl(cqe64->sop_drop_qpn) & 0xffffff; in mlx5_poll_one()
456 if (!*cur_qp || (qpn != (*cur_qp)->ibqp.qp_num)) { in mlx5_poll_one()
461 mqp = __mlx5_qp_lookup(dev->mdev, qpn); in mlx5_poll_one()
464 cq->mcq.cqn, qpn); in mlx5_poll_one()
919 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) in mlx5_ib_cq_clean() argument
925 __mlx5_ib_cq_clean(cq, qpn, srq); in mlx5_ib_cq_clean()
qp.c
207 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn); in mlx5_ib_qp_event()
1032 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); in create_qp_common()
1160 qp->mqp.qpn); in destroy_qp_common()
1167 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, in destroy_qp_common()
1170 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in destroy_qp_common()
1176 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn); in destroy_qp_common()
1275 qp->ibqp.qp_num = qp->mqp.qpn; in mlx5_ib_create_qp()
1278 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn, in mlx5_ib_create_qp()
1740 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn, in __mlx5_ib_modify_qp()
1743 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL); in __mlx5_ib_modify_qp()
[all …]
mlx5_ib.h
504 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
505 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
/linux-4.4.14/drivers/infiniband/hw/mlx4/
qp.c
131 return qp->mqp.qpn >= dev->dev->phys_caps.base_tunnel_sqpn && in is_tunnel_qp()
132 qp->mqp.qpn < dev->dev->phys_caps.base_tunnel_sqpn + in is_tunnel_qp()
143 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_sqp()
144 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 3); in is_sqp()
150 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i] || in is_sqp()
151 qp->mqp.qpn == dev->dev->caps.qp1_proxy[i]) { in is_sqp()
168 qp->mqp.qpn >= dev->dev->phys_caps.base_sqpn && in is_qp0()
169 qp->mqp.qpn <= dev->dev->phys_caps.base_sqpn + 1); in is_qp0()
175 if (qp->mqp.qpn == dev->dev->caps.qp0_proxy[i]) { in is_qp0()
324 "on QP %06x\n", type, qp->qpn); in mlx4_ib_qp_event()
[all …]
mlx4_ib.h
725 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
726 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq);
860 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn);
861 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count);
cq.c
715 (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) != (*cur_qp)->mqp.qpn) { in mlx4_ib_poll_one()
934 void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) in __mlx4_ib_cq_clean() argument
961 if ((be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_QPN_MASK) == qpn) { in __mlx4_ib_cq_clean()
987 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq) in mlx4_ib_cq_clean() argument
990 __mlx4_ib_cq_clean(cq, qpn, srq); in mlx4_ib_cq_clean()
main.c
1502 ctrl->qpn = cpu_to_be32(qp->qp_num); in __mlx4_ib_create_flow()
1968 if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC, in mlx4_ib_update_qps()
2501 int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn) in mlx4_ib_steer_qp_alloc() argument
2513 *qpn = dev->steer_qpn_base + offset; in mlx4_ib_steer_qp_alloc()
2517 void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count) in mlx4_ib_steer_qp_free() argument
2519 if (!qpn || in mlx4_ib_steer_qp_free()
2523 BUG_ON(qpn < dev->steer_qpn_base); in mlx4_ib_steer_qp_free()
2526 qpn - dev->steer_qpn_base, in mlx4_ib_steer_qp_free()
mad.c
354 int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; in forward_trap() local
356 struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; in forward_trap()
361 send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, in forward_trap()
1176 static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave) in is_proxy_qp0() argument
1180 return (qpn >= proxy_start && qpn <= proxy_start + 1); in is_proxy_qp0()
/linux-4.4.14/drivers/staging/rdma/amso1100/
c2_qp.c
390 qp->qpn = ret; in c2_alloc_qpn()
397 static void c2_free_qpn(struct c2_dev *c2dev, int qpn) in c2_free_qpn() argument
400 idr_remove(&c2dev->qp_table.idr, qpn); in c2_free_qpn()
404 struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn) in c2_find_qpn() argument
410 qp = idr_find(&c2dev->qp_table.idr, qpn); in c2_find_qpn()
432 qp->ibqp.qp_num = qp->qpn; in c2_alloc_qp()
570 c2_free_qpn(c2dev, qp->qpn); in c2_alloc_qp()
613 c2_free_qpn(c2dev, qp->qpn); in c2_free_qp()
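amso1100 is the third allocation scheme in this listing: QPNs come from an IDR, which then doubles as the lookup table (c2_find_qpn() is a plain idr_find()). A hedged kernel-style sketch of that pairing; the driver's qp_table lock is left out:

    /* Sketch of IDR-backed QPN allocation/lookup as in c2_qp.c above. */
    static int qpn_alloc(struct idr *idr, struct c2_qp *qp)
    {
            int qpn = idr_alloc(idr, qp, 0, 0, GFP_KERNEL); /* end 0: no cap */

            if (qpn >= 0)
                    qp->qpn = qpn;
            return qpn;               /* negative errno on failure */
    }

    static struct c2_qp *qpn_find(struct idr *idr, int qpn)
    {
            return idr_find(idr, qpn);
    }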
c2_provider.h
113 int qpn; member
c2_cm.c
54 ibqp = c2_get_qp(cm_id->device, iw_param->qpn); in c2_llp_connect()
298 ibqp = c2_get_qp(cm_id->device, iw_param->qpn); in c2_llp_accept()
c2.h
491 extern struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
503 extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
c2_provider.c
222 struct ib_qp *c2_get_qp(struct ib_device *device, int qpn) in c2_get_qp() argument
227 qp = c2_find_qpn(c2dev, qpn); in c2_get_qp()
229 __func__, qp, qpn, device, in c2_get_qp()
/linux-4.4.14/drivers/infiniband/ulp/ipoib/
ipoib_cm.c
427 data.qpn = cpu_to_be32(priv->qp->qp_num); in ipoib_cm_send_rep()
1048 u32 qpn, in ipoib_cm_send_req() argument
1055 data.qpn = cpu_to_be32(priv->qp->qp_num); in ipoib_cm_send_req()
1060 req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn); in ipoib_cm_send_req()
1108 static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, in ipoib_cm_tx_init() argument
1143 ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec); in ipoib_cm_tx_init()
1150 p->qp->qp_num, pathrec->dgid.raw, qpn); in ipoib_cm_tx_init()
1313 u32 qpn; in ipoib_cm_tx_start() local
1322 qpn = IPOIB_QPN(neigh->daddr); in ipoib_cm_tx_start()
1328 ret = ipoib_cm_tx_init(p, qpn, &pathrec); in ipoib_cm_tx_start()
ipoib_ib.c
512 struct ib_ah *address, u32 qpn, in post_send() argument
522 priv->tx_wr.remote_qpn = qpn; in post_send()
537 struct ipoib_ah *address, u32 qpn) in ipoib_send() argument
568 skb->len, address, qpn); in ipoib_send()
601 address->ah, qpn, tx_req, phead, hlen); in ipoib_send()
ipoib.h
187 __be32 qpn; /* High byte MUST be ignored on receive */ member
472 struct ipoib_ah *address, u32 qpn);
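The ipoib.h comment ("High byte MUST be ignored on receive") is the key to the IPOIB_QPN() use in ipoib_cm.c above: an IPoIB hardware address leads with a big-endian word whose low 24 bits are the destination QPN. A standalone extraction sketch with an illustrative name:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>            /* ntohl() */

    static uint32_t ipoib_qpn_of(const uint8_t *daddr)
    {
            uint32_t be_word;

            memcpy(&be_word, daddr, sizeof(be_word)); /* unaligned-safe copy */
            return ntohl(be_word) & 0x00ffffffu;      /* drop the high byte */
    }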
/linux-4.4.14/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
1711 u32 qpn = 0, wqe_idx = 0; in ocrdma_discard_cqes() local
1734 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK; in ocrdma_discard_cqes()
1737 if (qpn == 0 || qpn != qp->id) in ocrdma_discard_cqes()
1762 cqe->cmn.qpn = 0; in ocrdma_discard_cqes()
2865 u16 qpn = 0; in ocrdma_poll_hwcq() local
2880 qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK); in ocrdma_poll_hwcq()
2882 if (qpn == 0) in ocrdma_poll_hwcq()
2884 qp = dev->qp_tbl[qpn]; in ocrdma_poll_hwcq()
2899 cqe->cmn.qpn = 0; in ocrdma_poll_hwcq()
ocrdma_sli.h
1769 u32 qpn; member
1784 u32 qpn; member
/linux-4.4.14/drivers/infiniband/hw/nes/
nes.c
362 struct ib_qp *nes_get_qp(struct ib_device *device, int qpn) in nes_get_qp() argument
368 if ((qpn < NES_FIRST_QPN) || (qpn >= (NES_FIRST_QPN + nesadapter->max_qp))) in nes_get_qp()
371 return &nesadapter->qp_table[qpn - NES_FIRST_QPN]->ibqp; in nes_get_qp()
nes_cm.c
3247 ibqp = nes_get_qp(cm_id->device, conn_param->qpn); in nes_accept()
3522 ibqp = nes_get_qp(cm_id->device, conn_param->qpn); in nes_connect()
/linux-4.4.14/drivers/net/ethernet/ibm/ehea/
ehea_phyp.h
111 u32 qpn; /* 00 */ member
/linux-4.4.14/drivers/infiniband/hw/cxgb3/
iwch_provider.c
1127 static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn) in iwch_get_qp() argument
1129 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn); in iwch_get_qp()
1130 return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn); in iwch_get_qp()
iwch_cm.c
1801 struct iwch_qp *qp = get_qhp(h, conn_param->qpn); in iwch_accept_cr()
1929 ep->com.qp = get_qhp(h, conn_param->qpn); in iwch_connect()
1931 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, in iwch_connect()
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
qp.c
1878 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn) in c4iw_get_qp() argument
1880 PDBG("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn); in c4iw_get_qp()
1881 return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn); in c4iw_get_qp()
iw_cxgb4.h
998 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn);
cm.c
2921 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); in c4iw_accept_cr()
3126 ep->com.qp = get_qhp(dev, conn_param->qpn); in c4iw_connect()
3128 PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn); in c4iw_connect()
3133 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, in c4iw_connect()