Searched refs:cm_id (Results 1 – 47 of 47) sorted by relevance

/linux-4.4.14/drivers/staging/rdma/amso1100/
c2_cm.c:41 int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) in c2_llp_connect() argument
43 struct c2_dev *c2dev = to_c2dev(cm_id->device); in c2_llp_connect()
49 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr; in c2_llp_connect()
51 if (cm_id->remote_addr.ss_family != AF_INET) in c2_llp_connect()
54 ibqp = c2_get_qp(cm_id->device, iw_param->qpn); in c2_llp_connect()
60 cm_id->provider_data = qp; in c2_llp_connect()
61 cm_id->add_ref(cm_id); in c2_llp_connect()
62 qp->cm_id = cm_id; in c2_llp_connect()
128 cm_id->provider_data = NULL; in c2_llp_connect()
129 qp->cm_id = NULL; in c2_llp_connect()
[all …]
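The c2_cm.c hits above show the provider side of an iWARP connect: the driver stashes the QP in cm_id->provider_data, takes a reference with add_ref(), and unwinds both on failure. Below is a minimal sketch of that pattern; the my_* names are hypothetical stand-ins for the c2-specific pieces.

```c
#include <linux/errno.h>
#include <rdma/iw_cm.h>

struct my_qp {
	struct iw_cm_id *cm_id;		/* back-pointer, like c2_qp->cm_id */
};

/* Hypothetical driver-specific helpers, stubbed out for the sketch. */
static struct my_qp *my_get_qp(struct ib_device *dev, u32 qpn) { return NULL; }
static int my_start_connect(struct my_qp *qp, struct iw_cm_conn_param *p) { return 0; }

static int my_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *param)
{
	struct my_qp *qp = my_get_qp(cm_id->device, param->qpn);
	int err;

	if (!qp)
		return -EINVAL;

	cm_id->provider_data = qp;	/* lets later upcalls find the QP */
	cm_id->add_ref(cm_id);		/* pin the cm_id while the QP points at it */
	qp->cm_id = cm_id;

	err = my_start_connect(qp, param);
	if (err) {			/* unwind, as c2_llp_connect() does above */
		cm_id->provider_data = NULL;
		qp->cm_id = NULL;
		cm_id->rem_ref(cm_id);
	}
	return err;
}
```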
c2_ae.c:187 struct iw_cm_id *cm_id = qp->cm_id; in c2_ae_event() local
190 if (!cm_id) { in c2_ae_event()
221 if (qp->cm_id) { in c2_ae_event()
222 qp->cm_id->rem_ref(qp->cm_id); in c2_ae_event()
223 qp->cm_id = NULL; in c2_ae_event()
229 if (cm_id->event_handler) in c2_ae_event()
230 cm_id->event_handler(cm_id, &cm_event); in c2_ae_event()
247 BUG_ON(cm_id->event_handler==(void*)0x6b6b6b6b); in c2_ae_event()
250 if (qp->cm_id) { in c2_ae_event()
251 qp->cm_id->rem_ref(qp->cm_id); in c2_ae_event()
[all …]
c2_intr.c:200 cm_event.local_addr = req->cm_id->local_addr; in handle_vq()
201 cm_event.remote_addr = req->cm_id->remote_addr; in handle_vq()
204 req->cm_id->event_handler(req->cm_id, &cm_event); in handle_vq()
c2_qp.c:174 if (qp->cm_id && qp->state == IB_QPS_RTS) { in c2_qp_modify()
176 "qp=%p, cm_id=%p\n",qp,qp->cm_id); in c2_qp_modify()
178 vq_req->cm_id = qp->cm_id; in c2_qp_modify()
236 if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) { in c2_qp_modify()
237 qp->cm_id->rem_ref(qp->cm_id); in c2_qp_modify()
238 qp->cm_id = NULL; in c2_qp_modify()
332 if (qp->cm_id && qp->state == IB_QPS_RTS) { in destroy_qp()
334 "qp=%p, cm_id=%p\n",qp,qp->cm_id); in destroy_qp()
337 vq_req->cm_id = qp->cm_id; in destroy_qp()
369 if (qp->cm_id) { in destroy_qp()
[all …]
c2_provider.c:605 static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) in c2_connect() argument
610 return c2_llp_connect(cm_id, iw_param); in c2_connect()
613 static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param) in c2_accept() argument
618 return c2_llp_accept(cm_id, iw_param); in c2_accept()
621 static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) in c2_reject() argument
627 err = c2_llp_reject(cm_id, pdata, pdata_len); in c2_reject()
631 static int c2_service_create(struct iw_cm_id *cm_id, int backlog) in c2_service_create() argument
636 err = c2_llp_service_create(cm_id, backlog); in c2_service_create()
643 static int c2_service_destroy(struct iw_cm_id *cm_id) in c2_service_destroy() argument
648 err = c2_llp_service_destroy(cm_id); in c2_service_destroy()
c2.h:521 extern int c2_llp_connect(struct iw_cm_id *cm_id,
523 extern int c2_llp_accept(struct iw_cm_id *cm_id,
525 extern int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
527 extern int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
528 extern int c2_llp_service_destroy(struct iw_cm_id *cm_id);
c2_vq.h:46 struct iw_cm_id *cm_id; member
c2_provider.h:109 struct iw_cm_id *cm_id; member
c2_vq.c:112 r->cm_id = NULL; in vq_req_alloc()
/linux-4.4.14/net/rds/
rdma_transport.c:41 int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id, in rds_rdma_cm_event_handler() argument
45 struct rds_connection *conn = cm_id->context; in rds_rdma_cm_event_handler()
49 rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id, in rds_rdma_cm_event_handler()
52 if (cm_id->device->node_type == RDMA_NODE_RNIC) in rds_rdma_cm_event_handler()
76 ret = trans->cm_handle_connect(cm_id, event); in rds_rdma_cm_event_handler()
81 ret = rdma_resolve_route(cm_id, in rds_rdma_cm_event_handler()
93 if (ibic && ibic->i_cm_id == cm_id) in rds_rdma_cm_event_handler()
94 ret = trans->cm_initiate_connect(cm_id); in rds_rdma_cm_event_handler()
133 rdsdebug("id %p event %u (%s) handling ret %d\n", cm_id, event->event, in rds_rdma_cm_event_handler()
142 struct rdma_cm_id *cm_id; in rds_rdma_listen_init() local
[all …]
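rds_rdma_cm_event_handler() above has the usual shape of an rdma_cm event callback: recover per-connection state from cm_id->context and dispatch on event->event. A stripped-down sketch follows; my_conn, the handled events, and the 5000 ms route timeout are illustrative choices, not what RDS does.

```c
#include <rdma/rdma_cm.h>

/* Hypothetical per-connection state reached through cm_id->context. */
struct my_conn {
	bool connected;
};

static int my_cm_event_handler(struct rdma_cm_id *cm_id,
			       struct rdma_cm_event *event)
{
	struct my_conn *conn = cm_id->context;
	int ret = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		/* next step of the active-side state machine */
		ret = rdma_resolve_route(cm_id, 5000 /* ms, arbitrary */);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		conn->connected = true;
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
		conn->connected = false;
		break;
	default:
		break;
	}

	/* A non-zero return asks the CM core to destroy this cm_id. */
	return ret;
}
```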
iw_rdma.c:47 struct rdma_cm_id *cm_id; member
92 struct rdma_cm_id **cm_id) argument
98 *cm_id = NULL;
105 src_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.src_addr;
106 dst_addr = (struct sockaddr_in *)&i_cm_id->cm_id->route.addr.dst_addr;
136 *cm_id = i_cm_id->cm_id;
146 static int rds_iw_add_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id) argument
154 i_cm_id->cm_id = cm_id;
164 struct rdma_cm_id *cm_id) argument
170 if (i_cm_id->cm_id == cm_id) {
[all …]
iw.c:220 struct rdma_cm_id *cm_id; in rds_iw_laddr_check() local
226 cm_id = rdma_create_id(&init_net, NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); in rds_iw_laddr_check()
227 if (IS_ERR(cm_id)) in rds_iw_laddr_check()
228 return PTR_ERR(cm_id); in rds_iw_laddr_check()
235 ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); in rds_iw_laddr_check()
238 if (ret || !cm_id->device || in rds_iw_laddr_check()
239 cm_id->device->node_type != RDMA_NODE_RNIC) in rds_iw_laddr_check()
244 cm_id->device ? cm_id->device->node_type : -1); in rds_iw_laddr_check()
246 rdma_destroy_id(cm_id); in rds_iw_laddr_check()
ib.c:333 struct rdma_cm_id *cm_id; in rds_ib_laddr_check() local
339 cm_id = rdma_create_id(&init_net, NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); in rds_ib_laddr_check()
340 if (IS_ERR(cm_id)) in rds_ib_laddr_check()
341 return PTR_ERR(cm_id); in rds_ib_laddr_check()
348 ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); in rds_ib_laddr_check()
351 if (ret || !cm_id->device || in rds_ib_laddr_check()
352 cm_id->device->node_type != RDMA_NODE_IB_CA) in rds_ib_laddr_check()
357 cm_id->device ? cm_id->device->node_type : -1); in rds_ib_laddr_check()
359 rdma_destroy_id(cm_id); in rds_ib_laddr_check()
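rds_iw_laddr_check() and rds_ib_laddr_check() above share one idiom: bind a throw-away cm_id to the address and inspect the device it resolves to. A condensed sketch of that check; my_laddr_check is a made-up name, and the -EADDRNOTAVAIL choice mirrors the RDS code shown.

```c
#include <linux/err.h>
#include <linux/in.h>
#include <linux/string.h>
#include <net/net_namespace.h>
#include <rdma/rdma_cm.h>

/* Bind a temporary cm_id to @addr and see which device it lands on. */
static int my_laddr_check(__be32 addr)
{
	struct rdma_cm_id *cm_id;
	struct sockaddr_in sin;
	int ret;

	cm_id = rdma_create_id(&init_net, NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = addr;

	ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
	/* bind failed, or the address belongs to a non-IB device */
	if (ret || !cm_id->device ||
	    cm_id->device->node_type != RDMA_NODE_IB_CA)
		ret = -EADDRNOTAVAIL;

	rdma_destroy_id(cm_id);
	return ret;
}
```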
iw_cm.c:380 int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id, in rds_iw_cm_handle_connect() argument
442 BUG_ON(cm_id->context); in rds_iw_cm_handle_connect()
445 ic->i_cm_id = cm_id; in rds_iw_cm_handle_connect()
446 cm_id->context = conn; in rds_iw_cm_handle_connect()
448 rds_iwdev = ib_get_client_data(cm_id->device, &rds_iw_client); in rds_iw_cm_handle_connect()
465 err = rdma_accept(cm_id, &conn_param); in rds_iw_cm_handle_connect()
475 rdma_reject(cm_id, NULL, 0); in rds_iw_cm_handle_connect()
480 int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id) in rds_iw_cm_initiate_connect() argument
482 struct rds_connection *conn = cm_id->context; in rds_iw_cm_initiate_connect()
501 ret = rdma_connect(cm_id, &conn_param); in rds_iw_cm_initiate_connect()
[all …]
ib_cm.c:529 int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id, in rds_ib_cm_handle_connect() argument
532 __be64 lguid = cm_id->route.path_rec->sgid.global.interface_id; in rds_ib_cm_handle_connect()
533 __be64 fguid = cm_id->route.path_rec->dgid.global.interface_id; in rds_ib_cm_handle_connect()
593 BUG_ON(cm_id->context); in rds_ib_cm_handle_connect()
596 ic->i_cm_id = cm_id; in rds_ib_cm_handle_connect()
597 cm_id->context = conn; in rds_ib_cm_handle_connect()
614 err = rdma_accept(cm_id, &conn_param); in rds_ib_cm_handle_connect()
622 rdma_reject(cm_id, NULL, 0); in rds_ib_cm_handle_connect()
627 int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id) in rds_ib_cm_initiate_connect() argument
629 struct rds_connection *conn = cm_id->context; in rds_ib_cm_initiate_connect()
[all …]
iw.h:178 struct rdma_cm_id *cm_id; member
292 int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
294 int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id);
303 int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
rdma_transport.h:11 int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
ib.h:336 int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
338 int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
rds.h:452 int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
454 int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
/linux-4.4.14/include/rdma/
iw_cm.h:68 typedef int (*iw_cm_handler)(struct iw_cm_id *cm_id,
79 typedef int (*iw_event_handler)(struct iw_cm_id *cm_id,
113 int (*connect)(struct iw_cm_id *cm_id,
116 int (*accept)(struct iw_cm_id *cm_id,
119 int (*reject)(struct iw_cm_id *cm_id,
122 int (*create_listen)(struct iw_cm_id *cm_id,
125 int (*destroy_listen)(struct iw_cm_id *cm_id);
147 void iw_destroy_cm_id(struct iw_cm_id *cm_id);
160 void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
181 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog);
[all …]
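The iw_cm.h entries list both the provider ops and the consumer-facing calls such as iw_cm_listen() and iw_destroy_cm_id(). A hypothetical listen-side sketch follows; note that iw_create_cm_id(), assumed here as the counterpart of iw_destroy_cm_id(), is not part of the excerpt above.

```c
#include <linux/err.h>
#include <rdma/iw_cm.h>

static int my_listen_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event)
{
	/* IW_CM_EVENT_CONNECT_REQUEST etc. would be dispatched here. */
	return 0;
}

static struct iw_cm_id *my_start_listen(struct ib_device *dev, void *ctx)
{
	struct iw_cm_id *cm_id;
	int ret;

	cm_id = iw_create_cm_id(dev, my_listen_handler, ctx);	/* assumed API */
	if (IS_ERR(cm_id))
		return cm_id;

	ret = iw_cm_listen(cm_id, 128 /* backlog, arbitrary */);
	if (ret) {
		iw_destroy_cm_id(cm_id);
		return ERR_PTR(ret);
	}
	return cm_id;
}
```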
ib_cm.h:299 typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id,
336 void ib_destroy_cm_id(struct ib_cm_id *cm_id);
358 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
393 int ib_send_cm_req(struct ib_cm_id *cm_id,
417 int ib_send_cm_rep(struct ib_cm_id *cm_id,
428 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
441 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
456 int ib_send_cm_drep(struct ib_cm_id *cm_id,
474 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event);
488 int ib_send_cm_rej(struct ib_cm_id *cm_id,
[all …]
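ib_cm.h declares the ib_cm_handler callback type plus the ib_send_cm_* reply helpers listed above. A skeleton handler, with hypothetical naming, showing where those calls typically sit:

```c
#include <rdma/ib_cm.h>

static int my_ib_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		/* a passive side would set up its QP and ib_send_cm_rep() here */
		break;
	case IB_CM_DREQ_RECEIVED:
		ib_send_cm_drep(cm_id, NULL, 0);	/* acknowledge the disconnect */
		break;
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		break;
	default:
		break;
	}

	/* A non-zero return makes the CM core destroy this cm_id. */
	return 0;
}
```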
/linux-4.4.14/drivers/infiniband/core/
iwcm.c:63 struct iwcm_id_private *cm_id; member
121 list_add(&work->free_list, &work->cm_id->work_free_list); in put_work()
143 work->cm_id = cm_id_priv; in alloc_work_entries()
189 static void add_ref(struct iw_cm_id *cm_id) in add_ref() argument
192 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); in add_ref()
196 static void rem_ref(struct iw_cm_id *cm_id) in rem_ref() argument
201 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); in rem_ref()
214 static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
281 int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt) in iw_cm_disconnect() argument
288 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); in iw_cm_disconnect()
[all …]
ucma.c:89 struct rdma_cm_id *cm_id; member
117 struct rdma_cm_id *cm_id; member
165 rdma_destroy_id(uevent_close->cm_id); in ucma_close_event_id()
180 rdma_destroy_id(ctx->cm_id); in ucma_close_id()
283 static void ucma_removal_event_handler(struct rdma_cm_id *cm_id) in ucma_removal_event_handler() argument
285 struct ucma_context *ctx = cm_id->context; in ucma_removal_event_handler()
298 if (ctx->cm_id == cm_id) { in ucma_removal_event_handler()
307 if (con_req_eve->cm_id == cm_id && in ucma_removal_event_handler()
320 static int ucma_event_handler(struct rdma_cm_id *cm_id, in ucma_event_handler() argument
324 struct ucma_context *ctx = cm_id->context; in ucma_event_handler()
[all …]
cm.c:808 static void cm_destroy_id(struct ib_cm_id *cm_id, int err) in cm_destroy_id() argument
813 cm_id_priv = container_of(cm_id, struct cm_id_private, id); in cm_destroy_id()
816 switch (cm_id->state) { in cm_destroy_id()
831 cm_id->state = IB_CM_IDLE; in cm_destroy_id()
848 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, in cm_destroy_id()
860 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, in cm_destroy_id()
872 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, in cm_destroy_id()
879 ib_send_cm_dreq(cm_id, NULL, 0); in cm_destroy_id()
888 ib_send_cm_drep(cm_id, NULL, 0); in cm_destroy_id()
895 cm_free_id(cm_id->local_id); in cm_destroy_id()
[all …]
ucm.c:84 struct ib_cm_id *cm_id; member
96 struct ib_cm_id *cm_id; member
169 ib_destroy_cm_id(uevent->cm_id); in ib_ucm_cleanup_events()
348 static int ib_ucm_event_handler(struct ib_cm_id *cm_id, in ib_ucm_event_handler() argument
355 ctx = cm_id->context; in ib_ucm_event_handler()
362 uevent->cm_id = cm_id; in ib_ucm_event_handler()
423 ctx->cm_id = uevent->cm_id; in ib_ucm_event()
424 ctx->cm_id->context = ctx; in ib_ucm_event()
492 ctx->cm_id = ib_create_cm_id(file->device->ib_dev, in ib_ucm_create_id()
494 if (IS_ERR(ctx->cm_id)) { in ib_ucm_create_id()
[all …]
cma.c:220 } cm_id; member
845 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) in rdma_init_qp_attr()
848 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, in rdma_init_qp_attr()
854 if (!id_priv->cm_id.iw) { in rdma_init_qp_attr()
858 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, in rdma_init_qp_attr()
1287 const struct ib_cm_id *cm_id, in cma_find_listener() argument
1299 if (id_priv->id.device == cm_id->device && in cma_find_listener()
1305 if (id_priv_dev->id.device == cm_id->device && in cma_find_listener()
1315 static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id, in cma_id_from_event() argument
1345 id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); in cma_id_from_event()
[all …]
/linux-4.4.14/drivers/infiniband/hw/nes/
nes_cm.c:178 if (!cm_node->cm_id) in create_event()
193 event->cm_info.cm_id = cm_node->cm_id; in create_event()
897 struct iw_cm_id *cm_id = cm_node->cm_id; in nes_retrans_expired() local
908 if (cm_node->cm_id) in nes_retrans_expired()
909 cm_id->rem_ref(cm_id); in nes_retrans_expired()
922 struct iw_cm_id *cm_id = cm_node->cm_id; in handle_recv_entry() local
931 if (nesqp->cm_id) { in handle_recv_entry()
935 "to do!!!\n", nesqp->hwqp.qp_id, cm_id, in handle_recv_entry()
947 "to do!!!\n", nesqp->hwqp.qp_id, cm_id, in handle_recv_entry()
954 if (cm_node->cm_id) in handle_recv_entry()
[all …]
nes_cm.h:298 struct iw_cm_id *cm_id; member
344 struct iw_cm_id *cm_id; member
363 struct iw_cm_id *cm_id; member
nes_verbs.h:141 struct iw_cm_id *cm_id; member
nes_verbs.c:1447 struct iw_cm_id *cm_id; in nes_destroy_qp() local
1462 (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) { in nes_destroy_qp()
1463 cm_id = nesqp->cm_id; in nes_destroy_qp()
1466 cm_event.local_addr = cm_id->local_addr; in nes_destroy_qp()
1467 cm_event.remote_addr = cm_id->remote_addr; in nes_destroy_qp()
1473 nesqp->hwqp.qp_id, cm_id, atomic_read(&nesqp->refcount)); in nes_destroy_qp()
1475 cm_id->rem_ref(cm_id); in nes_destroy_qp()
1476 ret = cm_id->event_handler(cm_id, &cm_event); in nes_destroy_qp()
2975 if (nesqp->cm_id == NULL) { in nes_modify_qp()
3133 if (nesqp->cm_id && nesqp->hw_tcp_state != 0) { in nes_modify_qp()
[all …]
nes_hw.h:1207 struct iw_cm_id *cm_id; member
nes_hw.c:3604 nesqp->cm_id->add_ref(nesqp->cm_id); in nes_process_iwarp_aeqe()
/linux-4.4.14/drivers/infiniband/hw/cxgb3/
iwch_cm.h:155 struct iw_cm_id *cm_id; member
196 static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id) in to_ep() argument
198 return cm_id->provider_data; in to_ep()
201 static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id) in to_listen_ep() argument
203 return cm_id->provider_data; in to_listen_ep()
217 int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
218 int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
219 int iwch_destroy_listen(struct iw_cm_id *cm_id);
220 int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
221 int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
iwch_cm.c:675 if (ep->com.cm_id) { in close_complete_upcall()
677 ep, ep->com.cm_id, ep->hwtid); in close_complete_upcall()
678 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in close_complete_upcall()
679 ep->com.cm_id->rem_ref(ep->com.cm_id); in close_complete_upcall()
680 ep->com.cm_id = NULL; in close_complete_upcall()
692 if (ep->com.cm_id) { in peer_close_upcall()
694 ep, ep->com.cm_id, ep->hwtid); in peer_close_upcall()
695 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in peer_close_upcall()
707 if (ep->com.cm_id) { in peer_abort_upcall()
709 ep->com.cm_id, ep->hwtid); in peer_abort_upcall()
[all …]
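The iwch_cm.c upcall helpers (and the near-identical cxgb4 ones further down) all follow the same teardown sequence: deliver the event through cm_id->event_handler(), drop the reference with rem_ref(), and clear the back-pointer. Sketched generically below; my_ep is a hypothetical endpoint structure.

```c
#include <rdma/iw_cm.h>

struct my_ep {
	struct iw_cm_id *cm_id;		/* hypothetical endpoint, like iwch_ep/c4iw_ep */
};

static void my_close_complete_upcall(struct my_ep *ep)
{
	struct iw_cm_event event = {
		.event	= IW_CM_EVENT_CLOSE,
		.status	= 0,
	};

	if (!ep->cm_id)
		return;

	ep->cm_id->event_handler(ep->cm_id, &event);	/* notify the iw_cm core */
	ep->cm_id->rem_ref(ep->cm_id);			/* drop the driver's reference */
	ep->cm_id = NULL;				/* no further upcalls */
}
```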
/linux-4.4.14/net/9p/
trans_rdma.c:93 struct rdma_cm_id *cm_id; member
277 rdma_disconnect(rdma->cm_id); in p9_cm_event_handler()
295 ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize, in handle_recv()
331 ib_dma_unmap_single(rdma->cm_id->device, in handle_send()
392 if (rdma->cm_id && !IS_ERR(rdma->cm_id)) in rdma_destroy_trans()
393 rdma_destroy_id(rdma->cm_id); in rdma_destroy_trans()
405 c->busa = ib_dma_map_single(rdma->cm_id->device, in post_recv()
408 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) in post_recv()
494 c->busa = ib_dma_map_single(rdma->cm_id->device, in rdma_request()
497 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) { in rdma_request()
[all …]
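The 9p RDMA transport maps its buffers against cm_id->device with the ib_dma_* helpers, as the handle_recv()/rdma_request() hits show. A reduced sketch of that mapping step; my_ctx and the -EIO return are illustrative.

```c
#include <linux/errno.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

struct my_ctx {
	void *buf;			/* kmalloc'ed receive buffer */
	u64 busa;			/* bus address from ib_dma_map_single() */
};

static int my_map_recv_buf(struct rdma_cm_id *cm_id, struct my_ctx *c,
			   size_t size)
{
	c->busa = ib_dma_map_single(cm_id->device, c->buf, size,
				    DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(cm_id->device, c->busa))
		return -EIO;
	return 0;
}

static void my_unmap_recv_buf(struct rdma_cm_id *cm_id, struct my_ctx *c,
			      size_t size)
{
	ib_dma_unmap_single(cm_id->device, c->busa, size, DMA_FROM_DEVICE);
}
```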
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
cm.c:1158 if (ep->com.cm_id) { in close_complete_upcall()
1160 ep, ep->com.cm_id, ep->hwtid); in close_complete_upcall()
1161 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in close_complete_upcall()
1162 ep->com.cm_id->rem_ref(ep->com.cm_id); in close_complete_upcall()
1163 ep->com.cm_id = NULL; in close_complete_upcall()
1183 if (ep->com.cm_id) { in peer_close_upcall()
1185 ep, ep->com.cm_id, ep->hwtid); in peer_close_upcall()
1186 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in peer_close_upcall()
1199 if (ep->com.cm_id) { in peer_abort_upcall()
1201 ep->com.cm_id, ep->hwtid); in peer_abort_upcall()
[all …]
iw_cxgb4.h:785 struct iw_cm_id *cm_id; member
885 static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) in to_ep() argument
887 return cm_id->provider_data; in to_ep()
890 static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id) in to_listen_ep() argument
892 return cm_id->provider_data; in to_listen_ep()
952 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
953 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
954 int c4iw_destroy_listen(struct iw_cm_id *cm_id);
955 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
956 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
device.c:567 ep, ep->com.cm_id, ep->com.qp, in dump_ep()
591 ep, ep->com.cm_id, ep->com.qp, in dump_ep()
626 ep, ep->com.cm_id, (int)ep->com.state, in dump_listen_ep()
639 ep, ep->com.cm_id, (int)ep->com.state, in dump_listen_ep()
/linux-4.4.14/drivers/infiniband/ulp/srpt/
ib_srpt.c:227 event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch)); in srpt_qp_event()
231 ib_cm_notify(ch->cm_id, event->event); in srpt_qp_event()
991 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask); in srpt_ch_qp_rtr()
1021 ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask); in srpt_ch_qp_rts()
1784 srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess); in srpt_handle_tsk_mgmt()
2071 qp_init->cap.max_send_wr, ch->cm_id); in srpt_create_ch_ib()
2136 ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0, in __srpt_close_ch()
2140 if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0) in __srpt_close_ch()
2197 static void srpt_drain_channel(struct ib_cm_id *cm_id) in srpt_drain_channel() argument
2206 sdev = cm_id->context; in srpt_drain_channel()
[all …]
ib_srpt.h:304 struct ib_cm_id *cm_id; member
397 struct ib_cm_id *cm_id; member
/linux-4.4.14/drivers/infiniband/ulp/ipoib/
ipoib_cm.c:77 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
273 struct ib_cm_id *cm_id, struct ib_qp *qp, in ipoib_cm_modify_rx_qp() argument
281 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); in ipoib_cm_modify_rx_qp()
292 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); in ipoib_cm_modify_rx_qp()
313 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); in ipoib_cm_modify_rx_qp()
346 static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id, in ipoib_cm_nonsrq_init_rx() argument
376 ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0); in ipoib_cm_nonsrq_init_rx()
419 static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id, in ipoib_cm_send_rep() argument
437 return ib_send_cm_rep(cm_id, &rep); in ipoib_cm_send_rep()
440 static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event) in ipoib_cm_req_handler() argument
[all …]
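ipoib_cm_modify_rx_qp() above (like srpt_ch_qp_rtr()/srpt_ch_qp_rts() earlier) drives QP state transitions by first asking the CM for the attributes that belong to this connection. One transition, sketched with hypothetical names; real code repeats this per state.

```c
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>

/* Move @qp to RTR using attributes supplied by the CM for this connection. */
static int my_qp_to_rtr(struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_RTR;	/* target state for this transition */
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret)
		return ret;

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
}
```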
/linux-4.4.14/drivers/infiniband/hw/mlx4/
cm.c:75 static void set_local_comm_id(struct ib_mad *mad, u32 cm_id) in set_local_comm_id() argument
80 msg->request_id = cpu_to_be32(cm_id); in set_local_comm_id()
86 msg->local_comm_id = cpu_to_be32(cm_id); in set_local_comm_id()
105 static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id) in set_remote_comm_id() argument
110 msg->request_id = cpu_to_be32(cm_id); in set_remote_comm_id()
116 msg->remote_comm_id = cpu_to_be32(cm_id); in set_remote_comm_id()
/linux-4.4.14/drivers/infiniband/ulp/isert/
ib_isert.c:89 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); in isert_qp_event_callback()
702 isert_conn->cm_id = cma_id; in isert_connect_request()
762 if (isert_conn->cm_id) in isert_connect_release()
763 rdma_destroy_id(isert_conn->cm_id); in isert_connect_release()
822 struct isert_np *isert_np = isert_conn->cm_id->context; in isert_handle_unbound_conn()
861 err = rdma_disconnect(isert_conn->cm_id); in isert_conn_terminate()
879 isert_np->cm_id = NULL; in isert_np_cma_handler()
882 isert_np->cm_id = isert_setup_id(isert_np); in isert_np_cma_handler()
883 if (IS_ERR(isert_np->cm_id)) { in isert_np_cma_handler()
885 isert_np, PTR_ERR(isert_np->cm_id)); in isert_np_cma_handler()
[all …]
ib_isert.h:165 struct rdma_cm_id *cm_id; member
221 struct rdma_cm_id *cm_id; member
/linux-4.4.14/drivers/char/
mbcs.h:79 union cm_id { union
145 uint64_t cm_id:2, // 1:0 member
342 union cm_id id;
/linux-4.4.14/drivers/infiniband/ulp/srp/
Dib_srp.c137 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
301 if (ch->cm_id) in srp_new_cm_id()
302 ib_destroy_cm_id(ch->cm_id); in srp_new_cm_id()
303 ch->cm_id = new_cm_id; in srp_new_cm_id()
609 if (ch->cm_id) { in srp_free_ch_ib()
610 ib_destroy_cm_id(ch->cm_id); in srp_free_ch_ib()
611 ch->cm_id = NULL; in srp_free_ch_ib()
789 status = ib_send_cm_req(ch->cm_id, &req->param); in srp_send_req()
823 if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) { in srp_disconnect_target()
2233 static void srp_cm_rep_handler(struct ib_cm_id *cm_id, in srp_cm_rep_handler() argument
[all …]
ib_srp.h:164 struct ib_cm_id *cm_id; member