
Searched refs:cm_id (Results 1 – 47 of 47) sorted by relevance

/linux-4.1.27/drivers/infiniband/hw/amso1100/
c2_cm.c 41 int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) in c2_llp_connect() argument
43 struct c2_dev *c2dev = to_c2dev(cm_id->device); in c2_llp_connect()
49 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr; in c2_llp_connect()
51 if (cm_id->remote_addr.ss_family != AF_INET) in c2_llp_connect()
54 ibqp = c2_get_qp(cm_id->device, iw_param->qpn); in c2_llp_connect()
60 cm_id->provider_data = qp; in c2_llp_connect()
61 cm_id->add_ref(cm_id); in c2_llp_connect()
62 qp->cm_id = cm_id; in c2_llp_connect()
128 cm_id->provider_data = NULL; in c2_llp_connect()
129 qp->cm_id = NULL; in c2_llp_connect()
[all …]
c2_ae.c 187 struct iw_cm_id *cm_id = qp->cm_id; in c2_ae_event() local
190 if (!cm_id) { in c2_ae_event()
221 if (qp->cm_id) { in c2_ae_event()
222 qp->cm_id->rem_ref(qp->cm_id); in c2_ae_event()
223 qp->cm_id = NULL; in c2_ae_event()
229 if (cm_id->event_handler) in c2_ae_event()
230 cm_id->event_handler(cm_id, &cm_event); in c2_ae_event()
247 BUG_ON(cm_id->event_handler==(void*)0x6b6b6b6b); in c2_ae_event()
250 if (qp->cm_id) { in c2_ae_event()
251 qp->cm_id->rem_ref(qp->cm_id); in c2_ae_event()
[all …]
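The c2_cm.c and c2_ae.c hits above illustrate the reference discipline an iWARP provider keeps around a struct iw_cm_id: on connect it publishes the QP through provider_data, takes a reference with add_ref() and stores the id in qp->cm_id (lines 60–62); on close or error it delivers the event through cm_id->event_handler, drops the reference and clears the pointer (lines 221–230). A minimal sketch of that pairing follows; struct my_qp and the two helpers are hypothetical stand-ins for the driver's own types.

#include <rdma/iw_cm.h>

/* Hypothetical driver QP, standing in for struct c2_qp / struct nes_qp. */
struct my_qp {
	struct iw_cm_id *cm_id;
};

/* Connect path: publish the QP to the id and take a reference on it. */
static void my_bind_cm_id(struct my_qp *qp, struct iw_cm_id *cm_id)
{
	cm_id->provider_data = qp;	/* later upcalls can recover the QP */
	cm_id->add_ref(cm_id);		/* hold the id while qp->cm_id points at it */
	qp->cm_id = cm_id;
}

/* Close/error path: deliver the event, then drop the reference. */
static void my_unbind_cm_id(struct my_qp *qp, struct iw_cm_event *event)
{
	struct iw_cm_id *cm_id = qp->cm_id;

	if (!cm_id)
		return;
	if (cm_id->event_handler)
		cm_id->event_handler(cm_id, event);	/* e.g. IW_CM_EVENT_CLOSE */
	cm_id->rem_ref(cm_id);
	qp->cm_id = NULL;
}
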
c2_intr.c 200 cm_event.local_addr = req->cm_id->local_addr; in handle_vq()
201 cm_event.remote_addr = req->cm_id->remote_addr; in handle_vq()
204 req->cm_id->event_handler(req->cm_id, &cm_event); in handle_vq()
c2_qp.c 174 if (qp->cm_id && qp->state == IB_QPS_RTS) { in c2_qp_modify()
176 "qp=%p, cm_id=%p\n",qp,qp->cm_id); in c2_qp_modify()
178 vq_req->cm_id = qp->cm_id; in c2_qp_modify()
236 if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) { in c2_qp_modify()
237 qp->cm_id->rem_ref(qp->cm_id); in c2_qp_modify()
238 qp->cm_id = NULL; in c2_qp_modify()
332 if (qp->cm_id && qp->state == IB_QPS_RTS) { in destroy_qp()
334 "qp=%p, cm_id=%p\n",qp,qp->cm_id); in destroy_qp()
337 vq_req->cm_id = qp->cm_id; in destroy_qp()
369 if (qp->cm_id) { in destroy_qp()
[all …]
c2_provider.c 593 static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) in c2_connect() argument
598 return c2_llp_connect(cm_id, iw_param); in c2_connect()
601 static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) in c2_accept() argument
606 return c2_llp_accept(cm_id, iw_param); in c2_accept()
609 static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) in c2_reject() argument
615 err = c2_llp_reject(cm_id, pdata, pdata_len); in c2_reject()
619 static int c2_service_create(struct iw_cm_id *cm_id, int backlog) in c2_service_create() argument
624 err = c2_llp_service_create(cm_id, backlog); in c2_service_create()
631 static int c2_service_destroy(struct iw_cm_id *cm_id) in c2_service_destroy() argument
636 err = c2_llp_service_destroy(cm_id); in c2_service_destroy()
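The c2_provider.c hits (lines 593–636) are thin wrappers that forward the core's callbacks to the c2_llp_* helpers declared in c2.h. They take effect only once the driver plugs them into the iw_cm_verbs table on its ib_device; a hedged sketch of that registration is below (the c2_add_ref/c2_rem_ref/c2_get_qp helpers are assumed to exist in the driver, and error handling beyond the allocation check is omitted).

#include <linux/slab.h>
#include <rdma/ib_verbs.h>
#include <rdma/iw_cm.h>

/* Sketch: expose the provider's CM callbacks to the iWARP CM core. */
static int my_register_iwcm(struct ib_device *ibdev)
{
	ibdev->iwcm = kmalloc(sizeof(*ibdev->iwcm), GFP_KERNEL);
	if (!ibdev->iwcm)
		return -ENOMEM;

	ibdev->iwcm->connect        = c2_connect;
	ibdev->iwcm->accept         = c2_accept;
	ibdev->iwcm->reject         = c2_reject;
	ibdev->iwcm->create_listen  = c2_service_create;
	ibdev->iwcm->destroy_listen = c2_service_destroy;
	ibdev->iwcm->add_ref        = c2_add_ref;	/* assumed driver helpers */
	ibdev->iwcm->rem_ref        = c2_rem_ref;
	ibdev->iwcm->get_qp         = c2_get_qp;
	return 0;
}
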
c2.h 521 extern int c2_llp_connect(struct iw_cm_id *cm_id,
523 extern int c2_llp_accept(struct iw_cm_id *cm_id,
525 extern int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
527 extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
528 extern int c2_llp_service_destroy(struct iw_cm_id *cm_id);
c2_vq.h 46 struct iw_cm_id *cm_id; member
c2_provider.h 109 struct iw_cm_id *cm_id; member
c2_vq.c 112 r->cm_id = NULL; in vq_req_alloc()
/linux-4.1.27/include/rdma/
iw_cm.h 68 typedef int (*iw_cm_handler)(struct iw_cm_id *cm_id,
79 typedef int (*iw_event_handler)(struct iw_cm_id *cm_id,
112 int (*connect)(struct iw_cm_id *cm_id,
115 int (*accept)(struct iw_cm_id *cm_id,
118 int (*reject)(struct iw_cm_id *cm_id,
121 int (*create_listen)(struct iw_cm_id *cm_id,
124 int (*destroy_listen)(struct iw_cm_id *cm_id);
146 void iw_destroy_cm_id(struct iw_cm_id *cm_id);
159 void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
180 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog);
[all …]
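The iw_cm.h hits cover both sides of the interface: the provider ops table at lines 112–124 (connect/accept/reject/create_listen/destroy_listen) and the consumer API (iw_destroy_cm_id, iw_cm_unbind_qp, iw_cm_listen). For orientation, a minimal consumer-side sketch of setting up a listener follows; the handler body and the address setup are placeholders.

#include <linux/err.h>
#include <rdma/iw_cm.h>

/* Consumer callback invoked by the iWARP CM core for connection events. */
static int my_cm_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event)
{
	/* dispatch on event->event (IW_CM_EVENT_CONNECT_REQUEST, ...) */
	return 0;
}

/* Sketch: create a listening endpoint on @device and start listening. */
static int my_listen(struct ib_device *device, int backlog)
{
	struct iw_cm_id *cm_id;
	int ret;

	cm_id = iw_create_cm_id(device, my_cm_handler, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	/* cm_id->local_addr would be filled in here before listening */
	ret = iw_cm_listen(cm_id, backlog);
	if (ret)
		iw_destroy_cm_id(cm_id);
	return ret;
}
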
ib_cm.h 294 typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id,
331 void ib_destroy_cm_id(struct ib_cm_id *cm_id);
361 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
392 int ib_send_cm_req(struct ib_cm_id *cm_id,
416 int ib_send_cm_rep(struct ib_cm_id *cm_id,
427 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
440 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
455 int ib_send_cm_drep(struct ib_cm_id *cm_id,
473 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event);
487 int ib_send_cm_rej(struct ib_cm_id *cm_id,
[all …]
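The ib_cm.h hits list the IB CM verbs in protocol order: listen, REQ/REP/RTU to establish a connection, DREQ/DREP to tear it down, REJ to refuse one. A condensed active-side sketch of how those calls fit together is below; req_param setup and most error handling are elided.

#include <linux/err.h>
#include <rdma/ib_cm.h>

/* Active-side CM callback: answer the REP with an RTU, ack teardown with a DREP. */
static int my_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	switch (event->event) {
	case IB_CM_REP_RECEIVED:
		return ib_send_cm_rtu(cm_id, NULL, 0);
	case IB_CM_DREQ_RECEIVED:
		return ib_send_cm_drep(cm_id, NULL, 0);
	default:
		return 0;
	}
}

static int my_ib_connect(struct ib_device *device,
			 struct ib_cm_req_param *req_param)
{
	struct ib_cm_id *cm_id;
	int ret;

	cm_id = ib_create_cm_id(device, my_ib_cm_handler, NULL);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	ret = ib_send_cm_req(cm_id, req_param);	/* REQ; the peer answers with REP */
	if (ret)
		ib_destroy_cm_id(cm_id);
	return ret;
}
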
/linux-4.1.27/net/rds/
rdma_transport.c 68 int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, in rds_rdma_cm_event_handler() argument
72 struct rds_connection *conn = cm_id->context; in rds_rdma_cm_event_handler()
76 rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id, in rds_rdma_cm_event_handler()
79 if (cm_id->device->node_type == RDMA_NODE_RNIC) in rds_rdma_cm_event_handler()
103 ret = trans->cm_handle_connect(cm_id, event); in rds_rdma_cm_event_handler()
108 ret = rdma_resolve_route(cm_id, in rds_rdma_cm_event_handler()
114 ret = trans->cm_initiate_connect(cm_id); in rds_rdma_cm_event_handler()
150 rdsdebug("id %p event %u (%s) handling ret %d\n", cm_id, event->event, in rds_rdma_cm_event_handler()
159 struct rdma_cm_id *cm_id; in rds_rdma_listen_init() local
162 cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP, in rds_rdma_listen_init()
[all …]
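rdma_transport.c line 68 is the single rdma_cm event handler RDS registers for every connection; the hits at lines 103–114 show the usual dispatch: hand an incoming connect request to the transport, resolve the route once the address resolves, and initiate the connect once the route resolves. A compressed sketch of that shape follows; the two my_* hooks mirror the cm_handle_connect/cm_initiate_connect ops visible in the rds.h hit below, and the timeout value is an assumption.

#include <rdma/rdma_cm.h>

#define MY_RESOLVE_TIMEOUT_MS 5000	/* assumed value for the sketch */

/* Assumed transport hooks, mirroring cm_handle_connect/cm_initiate_connect. */
static int my_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
static int my_initiate_connect(struct rdma_cm_id *cm_id);

static int my_cm_event_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event)
{
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:	/* passive side */
		ret = my_handle_connect(cm_id, event);
		break;
	case RDMA_CM_EVENT_ADDR_RESOLVED:	/* active side, step 1 */
		ret = rdma_resolve_route(cm_id, MY_RESOLVE_TIMEOUT_MS);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:	/* active side, step 2 */
		ret = my_initiate_connect(cm_id);
		break;
	default:
		break;
	}
	return ret;
}
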
iw_rdma.c 47 struct rdma_cm_id *cm_id; member
93 struct rdma_cm_id **cm_id) argument
99 *cm_id = NULL;
106 src_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.src_addr;
107 dst_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.dst_addr;
137 *cm_id = i_cm_id->cm_id;
147 static int rds_iw_add_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id) argument
155 i_cm_id->cm_id = cm_id;
165 struct rdma_cm_id *cm_id) argument
171 if (i_cm_id->cm_id == cm_id) {
[all …]
iw.c 224 struct rdma_cm_id *cm_id; in rds_iw_laddr_check() local
230 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); in rds_iw_laddr_check()
231 if (IS_ERR(cm_id)) in rds_iw_laddr_check()
232 return PTR_ERR(cm_id); in rds_iw_laddr_check()
239 ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); in rds_iw_laddr_check()
242 if (ret || !cm_id->device || in rds_iw_laddr_check()
243 cm_id->device->node_type != RDMA_NODE_RNIC) in rds_iw_laddr_check()
248 cm_id->device ? cm_id->device->node_type : -1); in rds_iw_laddr_check()
250 rdma_destroy_id(cm_id); in rds_iw_laddr_check()
ib.c 323 struct rdma_cm_id *cm_id; in rds_ib_laddr_check() local
329 cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); in rds_ib_laddr_check()
330 if (IS_ERR(cm_id)) in rds_ib_laddr_check()
331 return PTR_ERR(cm_id); in rds_ib_laddr_check()
338 ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); in rds_ib_laddr_check()
341 if (ret || !cm_id->device || in rds_ib_laddr_check()
342 cm_id->device->node_type != RDMA_NODE_IB_CA) in rds_ib_laddr_check()
347 cm_id->device ? cm_id->device->node_type : -1); in rds_ib_laddr_check()
349 rdma_destroy_id(cm_id); in rds_ib_laddr_check()
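iw.c 224 and ib.c 323 are the same idiom twice: to decide whether a local IPv4 address belongs to an RNIC (iWARP) or to an IB HCA, RDS creates a throwaway rdma_cm_id, binds it to the address, checks the node_type of the device the bind landed on, and destroys the id again. A hedged sketch of that probe; it uses the 4.1-era rdma_create_id() signature, before the network-namespace argument was added.

#include <linux/err.h>
#include <linux/in.h>
#include <linux/string.h>
#include <rdma/rdma_cm.h>

/* Return 0 if @addr is served by an IB HCA on this host, -EADDRNOTAVAIL otherwise. */
static int my_laddr_check(__be32 addr)
{
	struct rdma_cm_id *cm_id;
	struct sockaddr_in sin;
	int ret;

	cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = addr;

	/* rdma_bind_addr() only succeeds for RDMA-capable local addresses */
	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
	if (ret || !cm_id->device ||
	    cm_id->device->node_type != RDMA_NODE_IB_CA)
		ret = -EADDRNOTAVAIL;	/* the iWARP variant checks RDMA_NODE_RNIC */

	rdma_destroy_id(cm_id);
	return ret;
}
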
ib_cm.c 454 int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, in rds_ib_cm_handle_connect() argument
457 __be64 lguid = cm_id->route.path_rec->sgid.global.interface_id; in rds_ib_cm_handle_connect()
458 __be64 fguid = cm_id->route.path_rec->dgid.global.interface_id; in rds_ib_cm_handle_connect()
517 BUG_ON(cm_id->context); in rds_ib_cm_handle_connect()
520 ic->i_cm_id = cm_id; in rds_ib_cm_handle_connect()
521 cm_id->context = conn; in rds_ib_cm_handle_connect()
538 err = rdma_accept(cm_id, &conn_param); in rds_ib_cm_handle_connect()
546 rdma_reject(cm_id, NULL, 0); in rds_ib_cm_handle_connect()
551 int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id) in rds_ib_cm_initiate_connect() argument
553 struct rds_connection *conn = cm_id->context; in rds_ib_cm_initiate_connect()
[all …]
iw_cm.c 377 int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id, in rds_iw_cm_handle_connect() argument
438 BUG_ON(cm_id->context); in rds_iw_cm_handle_connect()
441 ic->i_cm_id = cm_id; in rds_iw_cm_handle_connect()
442 cm_id->context = conn; in rds_iw_cm_handle_connect()
444 rds_iwdev = ib_get_client_data(cm_id->device, &rds_iw_client); in rds_iw_cm_handle_connect()
461 err = rdma_accept(cm_id, &conn_param); in rds_iw_cm_handle_connect()
471 rdma_reject(cm_id, NULL, 0); in rds_iw_cm_handle_connect()
476 int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id) in rds_iw_cm_initiate_connect() argument
478 struct rds_connection *conn = cm_id->context; in rds_iw_cm_initiate_connect()
497 ret = rdma_connect(cm_id, &conn_param); in rds_iw_cm_initiate_connect()
[all …]
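ib_cm.c 454 and iw_cm.c 377 show the two transport hooks that the dispatcher above calls into: handle_connect stores the connection in cm_id->context and accepts (or rejects) the request, while initiate_connect issues rdma_connect() for the active side. Reduced to a sketch, with conn_param setup and the connection lookup elided as assumed helpers.

#include <rdma/rdma_cm.h>

struct my_conn;						/* stand-in for struct rds_connection */
static struct my_conn *my_lookup_conn(struct rdma_cm_id *cm_id);	/* assumed, elided */

static int my_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event)
{
	struct rdma_conn_param conn_param = { };
	int err;

	cm_id->context = my_lookup_conn(cm_id);	/* later events find the connection here */

	/* responder_resources, initiator_depth, private data set up here */
	err = rdma_accept(cm_id, &conn_param);
	if (err) {
		rdma_reject(cm_id, NULL, 0);	/* tell the peer we cannot take it */
		return err;
	}
	return 0;
}

static int my_initiate_connect(struct rdma_cm_id *cm_id)
{
	struct rdma_conn_param conn_param = { };

	/* called on RDMA_CM_EVENT_ROUTE_RESOLVED on the active side */
	return rdma_connect(cm_id, &conn_param);
}
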
iw.h 175 struct rdma_cm_id *cm_id; member
289 int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
291 int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id);
300 int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
rdma_transport.h 11 int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
ib.h 287 int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
289 int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
rds.h 438 int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
440 int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
/linux-4.1.27/drivers/infiniband/core/
iwcm.c 63 struct iwcm_id_private *cm_id; member
121 list_add(&work->free_list, &work->cm_id->work_free_list); in put_work()
143 work->cm_id = cm_id_priv; in alloc_work_entries()
189 static void add_ref(struct iw_cm_id *cm_id) in add_ref() argument
192 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); in add_ref()
196 static void rem_ref(struct iw_cm_id *cm_id) in rem_ref() argument
201 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); in rem_ref()
214 static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
281 int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt) in iw_cm_disconnect() argument
288 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); in iw_cm_disconnect()
[all …]
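The iwcm.c hits at lines 189–201 show how the core recovers its private state from the public handle it hands out: struct iw_cm_id is embedded in iwcm_id_private, so add_ref()/rem_ref() are just container_of() plus a refcount. The same idiom, reduced to a sketch; the real core defers the final free behind a completion rather than calling kfree() directly.

#include <linux/atomic.h>
#include <linux/slab.h>
#include <rdma/iw_cm.h>

/* Cut-down version of iwcm_id_private: the public id is embedded here. */
struct my_id_private {
	struct iw_cm_id	id;
	atomic_t	refcount;
};

static void my_add_ref(struct iw_cm_id *cm_id)
{
	struct my_id_private *priv =
		container_of(cm_id, struct my_id_private, id);

	atomic_inc(&priv->refcount);
}

static void my_rem_ref(struct iw_cm_id *cm_id)
{
	struct my_id_private *priv =
		container_of(cm_id, struct my_id_private, id);

	if (atomic_dec_and_test(&priv->refcount))
		kfree(priv);	/* sketch only; see the note above */
}
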
ucma.c 87 struct rdma_cm_id *cm_id; member
108 struct rdma_cm_id *cm_id; member
245 static int ucma_event_handler(struct rdma_cm_id *cm_id, in ucma_event_handler() argument
249 struct ucma_context *ctx = cm_id->context; in ucma_event_handler()
257 uevent->cm_id = cm_id; in ucma_event_handler()
261 if (cm_id->qp_type == IB_QPT_UD) in ucma_event_handler()
274 } else if (!ctx->uid || ctx->cm_id != cm_id) { in ucma_event_handler()
329 ctx->cm_id = uevent->cm_id; in ucma_get_event()
330 ctx->cm_id->context = ctx; in ucma_get_event()
394 ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type); in ucma_create_id()
[all …]
cm.c 838 static void cm_destroy_id(struct ib_cm_id *cm_id, int err) in cm_destroy_id() argument
843 cm_id_priv = container_of(cm_id, struct cm_id_private, id); in cm_destroy_id()
846 switch (cm_id->state) { in cm_destroy_id()
848 cm_id->state = IB_CM_IDLE; in cm_destroy_id()
855 cm_id->state = IB_CM_IDLE; in cm_destroy_id()
872 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, in cm_destroy_id()
884 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, in cm_destroy_id()
896 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, in cm_destroy_id()
903 ib_send_cm_dreq(cm_id, NULL, 0); in cm_destroy_id()
912 ib_send_cm_drep(cm_id, NULL, 0); in cm_destroy_id()
[all …]
ucm.c 84 struct ib_cm_id *cm_id; member
96 struct ib_cm_id *cm_id; member
169 ib_destroy_cm_id(uevent->cm_id); in ib_ucm_cleanup_events()
348 static int ib_ucm_event_handler(struct ib_cm_id *cm_id, in ib_ucm_event_handler() argument
355 ctx = cm_id->context; in ib_ucm_event_handler()
362 uevent->cm_id = cm_id; in ib_ucm_event_handler()
423 ctx->cm_id = uevent->cm_id; in ib_ucm_event()
424 ctx->cm_id->context = ctx; in ib_ucm_event()
492 ctx->cm_id = ib_create_cm_id(file->device->ib_dev, in ib_ucm_create_id()
494 if (IS_ERR(ctx->cm_id)) { in ib_ucm_create_id()
[all …]
cma.c 138 } cm_id; member
740 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) in rdma_init_qp_attr()
743 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, in rdma_init_qp_attr()
750 if (!id_priv->cm_id.iw) { in rdma_init_qp_attr()
754 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, in rdma_init_qp_attr()
1059 if (id_priv->cm_id.ib) in rdma_destroy_id()
1060 ib_destroy_cm_id(id_priv->cm_id.ib); in rdma_destroy_id()
1063 if (id_priv->cm_id.iw) in rdma_destroy_id()
1064 iw_destroy_cm_id(id_priv->cm_id.iw); in rdma_destroy_id()
1097 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); in cma_rep_recv()
[all …]
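cma.c 138 is the key hit in this file: rdma_id_private keeps the transport-specific handle in a union named cm_id, so the same rdma_cm_id can ride on either the IB CM (cm_id.ib) or the iWARP CM (cm_id.iw), and teardown destroys whichever member is populated (lines 1059–1064). A hedged sketch of that shape; the real code derives the transport from the device's node_type rather than from a flag.

#include <rdma/ib_cm.h>
#include <rdma/iw_cm.h>

/* Reduced version of the rdma_cm private id: exactly one CM handle is in use. */
struct my_id_private {
	union {
		struct ib_cm_id	*ib;
		struct iw_cm_id	*iw;
	} cm_id;
	bool is_iwarp;		/* placeholder for the node_type check */
};

static void my_destroy_cm(struct my_id_private *id_priv)
{
	if (id_priv->is_iwarp) {
		if (id_priv->cm_id.iw)
			iw_destroy_cm_id(id_priv->cm_id.iw);
	} else {
		if (id_priv->cm_id.ib)
			ib_destroy_cm_id(id_priv->cm_id.ib);
	}
}
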
/linux-4.1.27/drivers/infiniband/hw/nes/
nes_cm.c 178 if (!cm_node->cm_id) in create_event()
193 event->cm_info.cm_id = cm_node->cm_id; in create_event()
897 struct iw_cm_id *cm_id = cm_node->cm_id; in nes_retrans_expired() local
908 if (cm_node->cm_id) in nes_retrans_expired()
909 cm_id->rem_ref(cm_id); in nes_retrans_expired()
922 struct iw_cm_id *cm_id = cm_node->cm_id; in handle_recv_entry() local
931 if (nesqp->cm_id) { in handle_recv_entry()
935 "to do!!!\n", nesqp->hwqp.qp_id, cm_id, in handle_recv_entry()
947 "to do!!!\n", nesqp->hwqp.qp_id, cm_id, in handle_recv_entry()
954 if (cm_node->cm_id) in handle_recv_entry()
[all …]
nes_cm.h 298 struct iw_cm_id *cm_id; member
343 struct iw_cm_id *cm_id; member
361 struct iw_cm_id *cm_id; member
nes_verbs.h 137 struct iw_cm_id *cm_id; member
nes_verbs.c 1467 struct iw_cm_id *cm_id; in nes_destroy_qp() local
1482 (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) { in nes_destroy_qp()
1483 cm_id = nesqp->cm_id; in nes_destroy_qp()
1486 cm_event.local_addr = cm_id->local_addr; in nes_destroy_qp()
1487 cm_event.remote_addr = cm_id->remote_addr; in nes_destroy_qp()
1493 nesqp->hwqp.qp_id, cm_id, atomic_read(&nesqp->refcount)); in nes_destroy_qp()
1495 cm_id->rem_ref(cm_id); in nes_destroy_qp()
1496 ret = cm_id->event_handler(cm_id, &cm_event); in nes_destroy_qp()
2983 if (nesqp->cm_id == NULL) { in nes_modify_qp()
3141 if (nesqp->cm_id && nesqp->hw_tcp_state != 0) { in nes_modify_qp()
[all …]
nes_hw.h 1213 struct iw_cm_id *cm_id; member
nes_hw.c 3604 nesqp->cm_id->add_ref(nesqp->cm_id); in nes_process_iwarp_aeqe()
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
iwch_cm.h 155 struct iw_cm_id *cm_id; member
196 static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id) in to_ep() argument
198 return cm_id->provider_data; in to_ep()
201 static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id) in to_listen_ep() argument
203 return cm_id->provider_data; in to_listen_ep()
217 int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
218 int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
219 int iwch_destroy_listen(struct iw_cm_id *cm_id);
220 int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
221 int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
iwch_cm.c 675 if (ep->com.cm_id) { in close_complete_upcall()
677 ep, ep->com.cm_id, ep->hwtid); in close_complete_upcall()
678 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in close_complete_upcall()
679 ep->com.cm_id->rem_ref(ep->com.cm_id); in close_complete_upcall()
680 ep->com.cm_id = NULL; in close_complete_upcall()
692 if (ep->com.cm_id) { in peer_close_upcall()
694 ep, ep->com.cm_id, ep->hwtid); in peer_close_upcall()
695 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in peer_close_upcall()
707 if (ep->com.cm_id) { in peer_abort_upcall()
709 ep->com.cm_id, ep->hwtid); in peer_abort_upcall()
[all …]
/linux-4.1.27/net/9p/
trans_rdma.c 93 struct rdma_cm_id *cm_id; member
279 rdma_disconnect(rdma->cm_id); in p9_cm_event_handler()
297 ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize, in handle_recv()
333 ib_dma_unmap_single(rdma->cm_id->device, in handle_send()
397 if (rdma->cm_id && !IS_ERR(rdma->cm_id)) in rdma_destroy_trans()
398 rdma_destroy_id(rdma->cm_id); in rdma_destroy_trans()
410 c->busa = ib_dma_map_single(rdma->cm_id->device, in post_recv()
413 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) in post_recv()
499 c->busa = ib_dma_map_single(rdma->cm_id->device, in rdma_request()
502 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) { in rdma_request()
[all …]
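The 9p RDMA transport keeps only a struct rdma_cm_id per connection, and the trans_rdma.c hits (lines 297, 333, 410, 499) show it reaching the underlying struct ib_device through cm_id->device for every DMA mapping. A small hedged sketch of that mapping step; the buffer, length and error code are placeholders.

#include <rdma/rdma_cm.h>
#include <rdma/ib_verbs.h>

/* Map a receive buffer for the device the connection's cm_id is bound to. */
static int my_map_recv_buf(struct rdma_cm_id *cm_id, void *buf, size_t len,
			   u64 *dma_addr)
{
	*dma_addr = ib_dma_map_single(cm_id->device, buf, len, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(cm_id->device, *dma_addr))
		return -EIO;
	return 0;
}

static void my_unmap_recv_buf(struct rdma_cm_id *cm_id, u64 dma_addr, size_t len)
{
	ib_dma_unmap_single(cm_id->device, dma_addr, len, DMA_FROM_DEVICE);
}
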
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
cm.c 1105 if (ep->com.cm_id) { in close_complete_upcall()
1107 ep, ep->com.cm_id, ep->hwtid); in close_complete_upcall()
1108 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in close_complete_upcall()
1109 ep->com.cm_id->rem_ref(ep->com.cm_id); in close_complete_upcall()
1110 ep->com.cm_id = NULL; in close_complete_upcall()
1130 if (ep->com.cm_id) { in peer_close_upcall()
1132 ep, ep->com.cm_id, ep->hwtid); in peer_close_upcall()
1133 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in peer_close_upcall()
1146 if (ep->com.cm_id) { in peer_abort_upcall()
1148 ep->com.cm_id, ep->hwtid); in peer_abort_upcall()
[all …]
iw_cxgb4.h 797 struct iw_cm_id *cm_id; member
897 static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) in to_ep() argument
899 return cm_id->provider_data; in to_ep()
902 static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id) in to_listen_ep() argument
904 return cm_id->provider_data; in to_listen_ep()
964 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
965 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
966 int c4iw_destroy_listen(struct iw_cm_id *cm_id);
967 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
968 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
device.c 567 ep, ep->com.cm_id, ep->com.qp, in dump_ep()
591 ep, ep->com.cm_id, ep->com.qp, in dump_ep()
626 ep, ep->com.cm_id, (int)ep->com.state, in dump_listen_ep()
639 ep, ep->com.cm_id, (int)ep->com.state, in dump_listen_ep()
/linux-4.1.27/drivers/infiniband/ulp/srpt/
ib_srpt.c 230 event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch)); in srpt_qp_event()
234 ib_cm_notify(ch->cm_id, event->event); in srpt_qp_event()
992 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask); in srpt_ch_qp_rtr()
1022 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask); in srpt_ch_qp_rts()
1789 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess); in srpt_handle_tsk_mgmt()
2074 qp_init->cap.max_send_wr, ch->cm_id); in srpt_create_ch_ib()
2142 ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0, in __srpt_close_ch()
2146 if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0) in __srpt_close_ch()
2203 static void srpt_drain_channel(struct ib_cm_id *cm_id) in srpt_drain_channel() argument
2212 sdev = cm_id->context; in srpt_drain_channel()
[all …]
ib_srpt.h 305 struct ib_cm_id *cm_id; member
399 struct ib_cm_id *cm_id; member
/linux-4.1.27/drivers/infiniband/ulp/ipoib/
ipoib_cm.c 77 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
273 struct ib_cm_id *cm_id, struct ib_qp *qp, in ipoib_cm_modify_rx_qp() argument
281 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); in ipoib_cm_modify_rx_qp()
292 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); in ipoib_cm_modify_rx_qp()
313 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); in ipoib_cm_modify_rx_qp()
346 static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id, in ipoib_cm_nonsrq_init_rx() argument
376 ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0); in ipoib_cm_nonsrq_init_rx()
419 static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id, in ipoib_cm_send_rep() argument
437 return ib_send_cm_rep(cm_id, &rep); in ipoib_cm_send_rep()
440 static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) in ipoib_cm_req_handler() argument
[all …]
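The ipoib_cm.c hits around ipoib_cm_modify_rx_qp (lines 273–313) show the standard recipe for driving a CM-managed QP through its states: for each target state, ask the CM for the transition attributes with ib_cm_init_qp_attr() and apply them with ib_modify_qp(). One such transition, condensed into a sketch with error handling trimmed.

#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>

/* Move @qp towards @state, letting the CM fill in the connection-specific
 * attributes (port, PSNs, timeouts) for this transition. */
static int my_modify_qp_state(struct ib_cm_id *cm_id, struct ib_qp *qp,
			      enum ib_qp_state state)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask;
	int ret;

	qp_attr.qp_state = state;	/* e.g. IB_QPS_INIT, IB_QPS_RTR, IB_QPS_RTS */
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
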
/linux-4.1.27/drivers/infiniband/hw/mlx4/
cm.c 75 static void set_local_comm_id(struct ib_mad *mad, u32 cm_id) in set_local_comm_id() argument
80 msg->request_id = cpu_to_be32(cm_id); in set_local_comm_id()
86 msg->local_comm_id = cpu_to_be32(cm_id); in set_local_comm_id()
105 static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id) in set_remote_comm_id() argument
110 msg->request_id = cpu_to_be32(cm_id); in set_remote_comm_id()
116 msg->remote_comm_id = cpu_to_be32(cm_id); in set_remote_comm_id()
/linux-4.1.27/drivers/char/
mbcs.h 79 union cm_id { union
145 uint64_t cm_id:2, // 1:0 member
342 union cm_id id;
/linux-4.1.27/drivers/infiniband/ulp/srp/
ib_srp.c 136 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
302 if (ch->cm_id) in srp_new_cm_id()
303 ib_destroy_cm_id(ch->cm_id); in srp_new_cm_id()
304 ch->cm_id = new_cm_id; in srp_new_cm_id()
609 if (ch->cm_id) { in srp_free_ch_ib()
610 ib_destroy_cm_id(ch->cm_id); in srp_free_ch_ib()
611 ch->cm_id = NULL; in srp_free_ch_ib()
789 status = ib_send_cm_req(ch->cm_id, &req->param); in srp_send_req()
823 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) { in srp_disconnect_target()
2174 static void srp_cm_rep_handler(struct ib_cm_id *cm_id, in srp_cm_rep_handler() argument
[all …]
ib_srp.h 164 struct ib_cm_id *cm_id; member
/linux-4.1.27/drivers/infiniband/ulp/isert/
ib_isert.c 86 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); in isert_qp_event_callback()
747 isert_conn->cm_id = cma_id; in isert_connect_request()
809 if (isert_conn->cm_id) in isert_connect_release()
810 rdma_destroy_id(isert_conn->cm_id); in isert_connect_release()
889 err = rdma_disconnect(isert_conn->cm_id); in isert_conn_terminate()
962 isert_conn->cm_id = NULL; in isert_connect_error()
1044 struct ib_device *ib_dev = isert_conn->cm_id->device; in isert_post_send()
1589 struct ib_device *ib_dev = isert_conn->cm_id->device; in isert_rcv_completion()
1655 struct ib_device *ib_dev = isert_conn->cm_id->device; in isert_map_data_buf()
1686 struct ib_device *ib_dev = isert_conn->cm_id->device; in isert_unmap_data_buf()
[all …]
ib_isert.h 171 struct rdma_cm_id *cm_id; member