Searched refs:id_priv (Results 1 - 1 of 1) sorted by relevance

/linux-4.4.14/drivers/infiniband/core/cma.c

234 struct rdma_id_private *id_priv; member in struct:cma_multicast
291 static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp) cma_comp() argument
296 spin_lock_irqsave(&id_priv->lock, flags); cma_comp()
297 ret = (id_priv->state == comp); cma_comp()
298 spin_unlock_irqrestore(&id_priv->lock, flags); cma_comp()
302 static int cma_comp_exch(struct rdma_id_private *id_priv, cma_comp_exch() argument
308 spin_lock_irqsave(&id_priv->lock, flags); cma_comp_exch()
309 if ((ret = (id_priv->state == comp))) cma_comp_exch()
310 id_priv->state = exch; cma_comp_exch()
311 spin_unlock_irqrestore(&id_priv->lock, flags); cma_comp_exch()
315 static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv, cma_exch() argument
321 spin_lock_irqsave(&id_priv->lock, flags); cma_exch()
322 old = id_priv->state; cma_exch()
323 id_priv->state = exch; cma_exch()
324 spin_unlock_irqrestore(&id_priv->lock, flags); cma_exch()
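
The three helpers above (cma_comp, cma_comp_exch, cma_exch) form a small state machine over id_priv->state: compare, compare-and-exchange, and unconditional exchange, each under id_priv->lock so concurrent handlers observe transitions atomically. A minimal userspace sketch of the same pattern, with a pthread spinlock standing in for spin_lock_irqsave (the names here are illustrative, not from cma.c):

    #include <pthread.h>
    #include <stdbool.h>

    enum cm_state { CM_IDLE, CM_ADDR_BOUND, CM_CONNECT, CM_DESTROYING };

    struct cm_ctx {
            pthread_spinlock_t lock;   /* assume pthread_spin_init() ran */
            enum cm_state      state;
    };

    /* Enter @next only if currently in @expect; report whether we did. */
    static bool ctx_comp_exch(struct cm_ctx *c, enum cm_state expect,
                              enum cm_state next)
    {
            bool ok;

            pthread_spin_lock(&c->lock);
            ok = (c->state == expect);
            if (ok)
                    c->state = next;
            pthread_spin_unlock(&c->lock);
            return ok;
    }

    /* Unconditionally enter @next, reporting the state we left. */
    static enum cm_state ctx_exch(struct cm_ctx *c, enum cm_state next)
    {
            enum cm_state old;

            pthread_spin_lock(&c->lock);
            old = c->state;
            c->state = next;
            pthread_spin_unlock(&c->lock);
            return old;
    }
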
338 static void cma_attach_to_dev(struct rdma_id_private *id_priv, cma_attach_to_dev() argument
342 id_priv->cma_dev = cma_dev; cma_attach_to_dev()
343 id_priv->id.device = cma_dev->device; cma_attach_to_dev()
344 id_priv->id.route.addr.dev_addr.transport = cma_attach_to_dev()
346 list_add_tail(&id_priv->list, &cma_dev->id_list); cma_attach_to_dev()
363 static void cma_release_dev(struct rdma_id_private *id_priv) cma_release_dev() argument
366 list_del(&id_priv->list); cma_release_dev()
367 cma_deref_dev(id_priv->cma_dev); cma_release_dev()
368 id_priv->cma_dev = NULL; cma_release_dev()
372 static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) cma_src_addr() argument
374 return (struct sockaddr *) &id_priv->id.route.addr.src_addr; cma_src_addr()
377 static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) cma_dst_addr() argument
379 return (struct sockaddr *) &id_priv->id.route.addr.dst_addr; cma_dst_addr()
382 static inline unsigned short cma_family(struct rdma_id_private *id_priv) cma_family() argument
384 return id_priv->id.route.addr.src_addr.ss_family; cma_family()
387 static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) cma_set_qkey() argument
392 if (id_priv->qkey) { cma_set_qkey()
393 if (qkey && id_priv->qkey != qkey) cma_set_qkey()
399 id_priv->qkey = qkey; cma_set_qkey()
403 switch (id_priv->id.ps) { cma_set_qkey()
406 id_priv->qkey = RDMA_UDP_QKEY; cma_set_qkey()
409 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); cma_set_qkey()
410 ret = ib_sa_get_mcmember_rec(id_priv->id.device, cma_set_qkey()
411 id_priv->id.port_num, &rec.mgid, cma_set_qkey()
414 id_priv->qkey = be32_to_cpu(rec.qkey); cma_set_qkey()
467 static int cma_acquire_dev(struct rdma_id_private *id_priv, cma_acquire_dev() argument
470 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; cma_acquire_dev()
477 id_priv->id.ps == RDMA_PS_IPOIB) cma_acquire_dev()
481 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, cma_acquire_dev()
497 id_priv->id.port_num = port; cma_acquire_dev()
516 id_priv->id.port_num = port; cma_acquire_dev()
524 cma_attach_to_dev(id_priv, cma_dev); cma_acquire_dev()
533 static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) cma_resolve_ib_dev() argument
543 addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); cma_resolve_ib_dev()
561 id_priv->id.port_num = p; cma_resolve_ib_dev()
569 id_priv->id.port_num = p; cma_resolve_ib_dev()
579 cma_attach_to_dev(id_priv, cma_dev); cma_resolve_ib_dev()
580 addr = (struct sockaddr_ib *) cma_src_addr(id_priv); cma_resolve_ib_dev()
582 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); cma_resolve_ib_dev()
586 static void cma_deref_id(struct rdma_id_private *id_priv) cma_deref_id() argument
588 if (atomic_dec_and_test(&id_priv->refcount)) cma_deref_id()
589 complete(&id_priv->comp); cma_deref_id()
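
cma_deref_id pairs an atomic refcount with a completion: whoever drops the last reference signals id_priv->comp, and rdma_destroy_id (line 1475 below) blocks in wait_for_completion until every callback has let go. A hedged userspace sketch of that teardown handshake, using C11 atomics and a condition variable in place of the kernel completion:

    #include <pthread.h>
    #include <stdatomic.h>

    struct obj {
            atomic_int      refcount;   /* starts at 1, like id_priv */
            pthread_mutex_t mu;
            pthread_cond_t  cv;
            int             done;       /* the "completion" flag */
    };

    static void obj_deref(struct obj *o)
    {
            /* Last reference out wakes anyone parked in obj_destroy(). */
            if (atomic_fetch_sub(&o->refcount, 1) == 1) {
                    pthread_mutex_lock(&o->mu);
                    o->done = 1;
                    pthread_cond_signal(&o->cv);
                    pthread_mutex_unlock(&o->mu);
            }
    }

    static void obj_destroy(struct obj *o)
    {
            obj_deref(o);               /* drop the creation reference */
            pthread_mutex_lock(&o->mu);
            while (!o->done)            /* wait_for_completion() analogue */
                    pthread_cond_wait(&o->cv, &o->mu);
            pthread_mutex_unlock(&o->mu);
            /* now safe to free o */
    }
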
592 static int cma_disable_callback(struct rdma_id_private *id_priv, cma_disable_callback() argument
595 mutex_lock(&id_priv->handler_mutex); cma_disable_callback()
596 if (id_priv->state != state) { cma_disable_callback()
597 mutex_unlock(&id_priv->handler_mutex); cma_disable_callback()
608 struct rdma_id_private *id_priv; rdma_create_id() local
610 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL); rdma_create_id()
611 if (!id_priv) rdma_create_id()
614 id_priv->owner = task_pid_nr(current); rdma_create_id()
615 id_priv->state = RDMA_CM_IDLE; rdma_create_id()
616 id_priv->id.context = context; rdma_create_id()
617 id_priv->id.event_handler = event_handler; rdma_create_id()
618 id_priv->id.ps = ps; rdma_create_id()
619 id_priv->id.qp_type = qp_type; rdma_create_id()
620 spin_lock_init(&id_priv->lock); rdma_create_id()
621 mutex_init(&id_priv->qp_mutex); rdma_create_id()
622 init_completion(&id_priv->comp); rdma_create_id()
623 atomic_set(&id_priv->refcount, 1); rdma_create_id()
624 mutex_init(&id_priv->handler_mutex); rdma_create_id()
625 INIT_LIST_HEAD(&id_priv->listen_list); rdma_create_id()
626 INIT_LIST_HEAD(&id_priv->mc_list); rdma_create_id()
627 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); rdma_create_id()
628 id_priv->id.route.addr.dev_addr.net = get_net(net); rdma_create_id()
630 return &id_priv->id; rdma_create_id()
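
rdma_create_id allocates the private structure, initializes its lock, mutexes, lists, and refcount, and returns only the embedded struct rdma_cm_id; every later entry point recovers id_priv with container_of. The userspace mirror of this call in librdmacm looks like the following sketch (make_id is an illustrative name, error handling trimmed):

    #include <rdma/rdma_cma.h>

    int make_id(struct rdma_cm_id **id_out)
    {
            struct rdma_event_channel *ch;

            ch = rdma_create_event_channel();
            if (!ch)
                    return -1;

            /* context = NULL, TCP port space for RC-style connections */
            if (rdma_create_id(ch, id_out, NULL, RDMA_PS_TCP)) {
                    rdma_destroy_event_channel(ch);
                    return -1;
            }
            return 0;
    }
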
634 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) cma_init_ud_qp() argument
640 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); cma_init_ud_qp()
660 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) cma_init_conn_qp() argument
666 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); cma_init_conn_qp()
676 struct rdma_id_private *id_priv; rdma_create_qp() local
680 id_priv = container_of(id, struct rdma_id_private, id); rdma_create_qp()
689 ret = cma_init_ud_qp(id_priv, qp); rdma_create_qp()
691 ret = cma_init_conn_qp(id_priv, qp); rdma_create_qp()
696 id_priv->qp_num = qp->qp_num; rdma_create_qp()
697 id_priv->srq = (qp->srq != NULL); rdma_create_qp()
707 struct rdma_id_private *id_priv; rdma_destroy_qp() local
709 id_priv = container_of(id, struct rdma_id_private, id); rdma_destroy_qp()
710 mutex_lock(&id_priv->qp_mutex); rdma_destroy_qp()
711 ib_destroy_qp(id_priv->id.qp); rdma_destroy_qp()
712 id_priv->id.qp = NULL; rdma_destroy_qp()
713 mutex_unlock(&id_priv->qp_mutex); rdma_destroy_qp()
717 static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, cma_modify_qp_rtr() argument
724 mutex_lock(&id_priv->qp_mutex); cma_modify_qp_rtr()
725 if (!id_priv->id.qp) { cma_modify_qp_rtr()
732 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); cma_modify_qp_rtr()
736 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); cma_modify_qp_rtr()
741 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); cma_modify_qp_rtr()
745 ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num, cma_modify_qp_rtr()
750 BUG_ON(id_priv->cma_dev->device != id_priv->id.device); cma_modify_qp_rtr()
754 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); cma_modify_qp_rtr()
756 mutex_unlock(&id_priv->qp_mutex); cma_modify_qp_rtr()
760 static int cma_modify_qp_rts(struct rdma_id_private *id_priv, cma_modify_qp_rts() argument
766 mutex_lock(&id_priv->qp_mutex); cma_modify_qp_rts()
767 if (!id_priv->id.qp) { cma_modify_qp_rts()
773 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); cma_modify_qp_rts()
779 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); cma_modify_qp_rts()
781 mutex_unlock(&id_priv->qp_mutex); cma_modify_qp_rts()
785 static int cma_modify_qp_err(struct rdma_id_private *id_priv) cma_modify_qp_err() argument
790 mutex_lock(&id_priv->qp_mutex); cma_modify_qp_err()
791 if (!id_priv->id.qp) { cma_modify_qp_err()
797 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); cma_modify_qp_err()
799 mutex_unlock(&id_priv->qp_mutex); cma_modify_qp_err()
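
cma_modify_qp_rtr, cma_modify_qp_rts, and cma_modify_qp_err walk the QP through the verbs state machine (INIT, RTR, RTS, and ERR on teardown) under qp_mutex, letting rdma_init_qp_attr fill the transition-specific attributes. The error transition is the simple case, needing only the state bit in the mask; a hedged libibverbs equivalent:

    #include <infiniband/verbs.h>

    /* Userspace analogue of cma_modify_qp_err(): flush the QP. */
    static int qp_to_err(struct ibv_qp *qp)
    {
            struct ibv_qp_attr attr = { .qp_state = IBV_QPS_ERR };

            return ibv_modify_qp(qp, &attr, IBV_QP_STATE);
    }

The RTR and RTS transitions need far more (path MTU, destination QPN, PSNs, timeouts), which is exactly why the code above defers to rdma_init_qp_attr instead of filling the attributes by hand.
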
803 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, cma_ib_init_qp_attr() argument
806 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; cma_ib_init_qp_attr()
810 if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) cma_ib_init_qp_attr()
815 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, cma_ib_init_qp_attr()
820 qp_attr->port_num = id_priv->id.port_num; cma_ib_init_qp_attr()
823 if (id_priv->id.qp_type == IB_QPT_UD) { cma_ib_init_qp_attr()
824 ret = cma_set_qkey(id_priv, 0); cma_ib_init_qp_attr()
828 qp_attr->qkey = id_priv->qkey; cma_ib_init_qp_attr()
840 struct rdma_id_private *id_priv; rdma_init_qp_attr() local
843 id_priv = container_of(id, struct rdma_id_private, id); rdma_init_qp_attr()
845 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) rdma_init_qp_attr()
846 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); rdma_init_qp_attr()
848 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, rdma_init_qp_attr()
852 qp_attr->rq_psn = id_priv->seq_num; rdma_init_qp_attr()
854 if (!id_priv->cm_id.iw) { rdma_init_qp_attr()
858 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, rdma_init_qp_attr()
1215 static bool cma_match_private_data(struct rdma_id_private *id_priv, cma_match_private_data() argument
1218 struct sockaddr *addr = cma_src_addr(id_priv); cma_match_private_data()
1222 if (cma_any_addr(addr) && !id_priv->afonly) cma_match_private_data()
1292 struct rdma_id_private *id_priv, *id_priv_dev; cma_find_listener() local
1297 hlist_for_each_entry(id_priv, &bind_list->owners, node) { cma_find_listener()
1298 if (cma_match_private_data(id_priv, ib_event->private_data)) { cma_find_listener()
1299 if (id_priv->id.device == cm_id->device && cma_find_listener()
1300 cma_match_net_dev(&id_priv->id, net_dev, req->port)) cma_find_listener()
1301 return id_priv; cma_find_listener()
1303 &id_priv->listen_list, cma_find_listener()
1321 struct rdma_id_private *id_priv; cma_id_from_event() local
1345 id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); cma_id_from_event()
1346 if (IS_ERR(id_priv) && *net_dev) { cma_id_from_event()
1351 return id_priv; cma_id_from_event()
1354 static inline int cma_user_data_offset(struct rdma_id_private *id_priv) cma_user_data_offset() argument
1356 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr); cma_user_data_offset()
1359 static void cma_cancel_route(struct rdma_id_private *id_priv) cma_cancel_route() argument
1361 if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { cma_cancel_route()
1362 if (id_priv->query) cma_cancel_route()
1363 ib_sa_cancel_query(id_priv->query_id, id_priv->query); cma_cancel_route()
1367 static void cma_cancel_listens(struct rdma_id_private *id_priv) cma_cancel_listens() argument
1376 list_del(&id_priv->list); cma_cancel_listens()
1378 while (!list_empty(&id_priv->listen_list)) { cma_cancel_listens()
1379 dev_id_priv = list_entry(id_priv->listen_list.next, cma_cancel_listens()
1392 static void cma_cancel_operation(struct rdma_id_private *id_priv, cma_cancel_operation() argument
1397 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); cma_cancel_operation()
1400 cma_cancel_route(id_priv); cma_cancel_operation()
1403 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) cma_cancel_operation()
1404 cma_cancel_listens(id_priv); cma_cancel_operation()
1411 static void cma_release_port(struct rdma_id_private *id_priv) cma_release_port() argument
1413 struct rdma_bind_list *bind_list = id_priv->bind_list; cma_release_port()
1414 struct net *net = id_priv->id.route.addr.dev_addr.net; cma_release_port()
1420 hlist_del(&id_priv->node); cma_release_port()
1428 static void cma_leave_mc_groups(struct rdma_id_private *id_priv) cma_leave_mc_groups() argument
1432 while (!list_empty(&id_priv->mc_list)) { cma_leave_mc_groups()
1433 mc = container_of(id_priv->mc_list.next, cma_leave_mc_groups()
1436 if (rdma_cap_ib_mcast(id_priv->cma_dev->device, cma_leave_mc_groups()
1437 id_priv->id.port_num)) { cma_leave_mc_groups()
1447 struct rdma_id_private *id_priv; rdma_destroy_id() local
1450 id_priv = container_of(id, struct rdma_id_private, id); rdma_destroy_id()
1451 state = cma_exch(id_priv, RDMA_CM_DESTROYING); rdma_destroy_id()
1452 cma_cancel_operation(id_priv, state); rdma_destroy_id()
1456 * the id_priv state set to destroying and abort. rdma_destroy_id()
1458 mutex_lock(&id_priv->handler_mutex); rdma_destroy_id()
1459 mutex_unlock(&id_priv->handler_mutex); rdma_destroy_id()
1461 if (id_priv->cma_dev) { rdma_destroy_id()
1462 if (rdma_cap_ib_cm(id_priv->id.device, 1)) { rdma_destroy_id()
1463 if (id_priv->cm_id.ib) rdma_destroy_id()
1464 ib_destroy_cm_id(id_priv->cm_id.ib); rdma_destroy_id()
1465 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { rdma_destroy_id()
1466 if (id_priv->cm_id.iw) rdma_destroy_id()
1467 iw_destroy_cm_id(id_priv->cm_id.iw); rdma_destroy_id()
1469 cma_leave_mc_groups(id_priv); rdma_destroy_id()
1470 cma_release_dev(id_priv); rdma_destroy_id()
1473 cma_release_port(id_priv); rdma_destroy_id()
1474 cma_deref_id(id_priv); rdma_destroy_id()
1475 wait_for_completion(&id_priv->comp); rdma_destroy_id()
1477 if (id_priv->internal_id) rdma_destroy_id()
1478 cma_deref_id(id_priv->id.context); rdma_destroy_id()
1480 kfree(id_priv->id.route.path_rec); rdma_destroy_id()
1481 put_net(id_priv->id.route.addr.dev_addr.net); rdma_destroy_id()
1482 kfree(id_priv); rdma_destroy_id()
1486 static int cma_rep_recv(struct rdma_id_private *id_priv) cma_rep_recv() argument
1490 ret = cma_modify_qp_rtr(id_priv, NULL); cma_rep_recv()
1494 ret = cma_modify_qp_rts(id_priv, NULL); cma_rep_recv()
1498 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); cma_rep_recv()
1504 cma_modify_qp_err(id_priv); cma_rep_recv()
1505 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, cma_rep_recv()
1526 struct rdma_id_private *id_priv = cm_id->context; cma_ib_handler() local
1531 cma_disable_callback(id_priv, RDMA_CM_CONNECT)) || cma_ib_handler()
1533 cma_disable_callback(id_priv, RDMA_CM_DISCONNECT))) cma_ib_handler()
1544 if (id_priv->id.qp) { cma_ib_handler()
1545 event.status = cma_rep_recv(id_priv); cma_ib_handler()
1562 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, cma_ib_handler()
1574 cma_modify_qp_err(id_priv); cma_ib_handler()
1586 ret = id_priv->id.event_handler(&id_priv->id, &event); cma_ib_handler()
1589 id_priv->cm_id.ib = NULL; cma_ib_handler()
1590 cma_exch(id_priv, RDMA_CM_DESTROYING); cma_ib_handler()
1591 mutex_unlock(&id_priv->handler_mutex); cma_ib_handler()
1592 rdma_destroy_id(&id_priv->id); cma_ib_handler()
1596 mutex_unlock(&id_priv->handler_mutex); cma_ib_handler()
1604 struct rdma_id_private *id_priv; cma_new_conn_id() local
1618 id_priv = container_of(id, struct rdma_id_private, id); cma_new_conn_id()
1641 cma_any_addr(cma_src_addr(id_priv))) { cma_new_conn_id()
1645 } else if (!cma_any_addr(cma_src_addr(id_priv))) { cma_new_conn_id()
1646 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); cma_new_conn_id()
1653 id_priv->state = RDMA_CM_CONNECT; cma_new_conn_id()
1654 return id_priv; cma_new_conn_id()
1665 struct rdma_id_private *id_priv; cma_new_udp_id() local
1676 id_priv = container_of(id, struct rdma_id_private, id); cma_new_udp_id()
1688 if (!cma_any_addr(cma_src_addr(id_priv))) { cma_new_udp_id()
1689 ret = cma_translate_addr(cma_src_addr(id_priv), cma_new_udp_id()
1696 id_priv->state = RDMA_CM_CONNECT; cma_new_udp_id()
1697 return id_priv; cma_new_udp_id()
1829 struct rdma_id_private *id_priv = iw_id->context; cma_iw_handler() local
1835 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) cma_iw_handler()
1844 memcpy(cma_src_addr(id_priv), laddr, cma_iw_handler()
1846 memcpy(cma_dst_addr(id_priv), raddr, cma_iw_handler()
1878 ret = id_priv->id.event_handler(&id_priv->id, &event); cma_iw_handler()
1881 id_priv->cm_id.iw = NULL; cma_iw_handler()
1882 cma_exch(id_priv, RDMA_CM_DESTROYING); cma_iw_handler()
1883 mutex_unlock(&id_priv->handler_mutex); cma_iw_handler()
1884 rdma_destroy_id(&id_priv->id); cma_iw_handler()
1888 mutex_unlock(&id_priv->handler_mutex); cma_iw_handler()
1979 static int cma_ib_listen(struct rdma_id_private *id_priv) cma_ib_listen() argument
1985 addr = cma_src_addr(id_priv); cma_ib_listen()
1986 svc_id = rdma_get_service_id(&id_priv->id, addr); cma_ib_listen()
1987 id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id); cma_ib_listen()
1990 id_priv->cm_id.ib = id; cma_ib_listen()
1995 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) cma_iw_listen() argument
2000 id = iw_create_cm_id(id_priv->id.device, cma_iw_listen()
2002 id_priv); cma_iw_listen()
2006 id->tos = id_priv->tos; cma_iw_listen()
2007 id_priv->cm_id.iw = id; cma_iw_listen()
2009 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), cma_iw_listen()
2010 rdma_addr_size(cma_src_addr(id_priv))); cma_iw_listen()
2012 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); cma_iw_listen()
2015 iw_destroy_cm_id(id_priv->cm_id.iw); cma_iw_listen()
2016 id_priv->cm_id.iw = NULL; cma_iw_listen()
2025 struct rdma_id_private *id_priv = id->context; cma_listen_handler() local
2027 id->context = id_priv->id.context; cma_listen_handler()
2028 id->event_handler = id_priv->id.event_handler; cma_listen_handler()
2029 return id_priv->id.event_handler(id, event); cma_listen_handler()
2032 static void cma_listen_on_dev(struct rdma_id_private *id_priv, cma_listen_on_dev() argument
2037 struct net *net = id_priv->id.route.addr.dev_addr.net; cma_listen_on_dev()
2040 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) cma_listen_on_dev()
2043 id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps, cma_listen_on_dev()
2044 id_priv->id.qp_type); cma_listen_on_dev()
2051 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), cma_listen_on_dev()
2052 rdma_addr_size(cma_src_addr(id_priv))); cma_listen_on_dev()
2055 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); cma_listen_on_dev()
2056 atomic_inc(&id_priv->refcount); cma_listen_on_dev()
2058 dev_id_priv->afonly = id_priv->afonly; cma_listen_on_dev()
2060 ret = rdma_listen(id, id_priv->backlog); cma_listen_on_dev()
2066 static void cma_listen_on_all(struct rdma_id_private *id_priv) cma_listen_on_all() argument
2071 list_add_tail(&id_priv->list, &listen_any_list); cma_listen_on_all()
2073 cma_listen_on_dev(id_priv, cma_dev); cma_listen_on_all()
2079 struct rdma_id_private *id_priv; rdma_set_service_type() local
2081 id_priv = container_of(id, struct rdma_id_private, id); rdma_set_service_type()
2082 id_priv->tos = (u8) tos; rdma_set_service_type()
2107 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, cma_query_ib_route() argument
2110 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; cma_query_ib_route()
2122 path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); cma_query_ib_route()
2128 switch (cma_family(id_priv)) { cma_query_ib_route()
2130 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); cma_query_ib_route()
2134 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); cma_query_ib_route()
2139 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); cma_query_ib_route()
2145 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, cma_query_ib_route()
2146 id_priv->id.port_num, &path_rec, cma_query_ib_route()
2149 work, &id_priv->query); cma_query_ib_route()
2151 return (id_priv->query_id < 0) ? id_priv->query_id : 0; cma_query_ib_route()
2157 struct rdma_id_private *id_priv = work->id; cma_work_handler() local
2160 mutex_lock(&id_priv->handler_mutex); cma_work_handler()
2161 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) cma_work_handler()
2164 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { cma_work_handler()
2165 cma_exch(id_priv, RDMA_CM_DESTROYING); cma_work_handler()
2169 mutex_unlock(&id_priv->handler_mutex); cma_work_handler()
2170 cma_deref_id(id_priv); cma_work_handler()
2172 rdma_destroy_id(&id_priv->id); cma_work_handler()
2179 struct rdma_id_private *id_priv = work->id; cma_ndev_work_handler() local
2182 mutex_lock(&id_priv->handler_mutex); cma_ndev_work_handler()
2183 if (id_priv->state == RDMA_CM_DESTROYING || cma_ndev_work_handler()
2184 id_priv->state == RDMA_CM_DEVICE_REMOVAL) cma_ndev_work_handler()
2187 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { cma_ndev_work_handler()
2188 cma_exch(id_priv, RDMA_CM_DESTROYING); cma_ndev_work_handler()
2193 mutex_unlock(&id_priv->handler_mutex); cma_ndev_work_handler()
2194 cma_deref_id(id_priv); cma_ndev_work_handler()
2196 rdma_destroy_id(&id_priv->id); cma_ndev_work_handler()
2200 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) cma_resolve_ib_route() argument
2202 struct rdma_route *route = &id_priv->id.route; cma_resolve_ib_route()
2210 work->id = id_priv; cma_resolve_ib_route()
2222 ret = cma_query_ib_route(id_priv, timeout_ms, work); cma_resolve_ib_route()
2238 struct rdma_id_private *id_priv; rdma_set_ib_paths() local
2241 id_priv = container_of(id, struct rdma_id_private, id); rdma_set_ib_paths()
2242 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, rdma_set_ib_paths()
2256 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); rdma_set_ib_paths()
2261 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) cma_resolve_iw_route() argument
2269 work->id = id_priv; cma_resolve_iw_route()
2298 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) cma_resolve_iboe_route() argument
2300 struct rdma_route *route = &id_priv->id.route; cma_resolve_iboe_route()
2311 work->id = id_priv; cma_resolve_iboe_route()
2334 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, cma_resolve_iboe_route()
2336 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, cma_resolve_iboe_route()
2343 route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos); cma_resolve_iboe_route()
2374 struct rdma_id_private *id_priv; rdma_resolve_route() local
2377 id_priv = container_of(id, struct rdma_id_private, id); rdma_resolve_route()
2378 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) rdma_resolve_route()
2381 atomic_inc(&id_priv->refcount); rdma_resolve_route()
2383 ret = cma_resolve_ib_route(id_priv, timeout_ms); rdma_resolve_route()
2385 ret = cma_resolve_iboe_route(id_priv); rdma_resolve_route()
2387 ret = cma_resolve_iw_route(id_priv, timeout_ms); rdma_resolve_route()
2396 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); rdma_resolve_route()
2397 cma_deref_id(id_priv); rdma_resolve_route()
2419 static int cma_bind_loopback(struct rdma_id_private *id_priv) cma_bind_loopback() argument
2431 if (cma_family(id_priv) == AF_IB && cma_bind_loopback()
2463 id_priv->id.route.addr.dev_addr.dev_type = cma_bind_loopback()
2467 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); cma_bind_loopback()
2468 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); cma_bind_loopback()
2469 id_priv->id.port_num = p; cma_bind_loopback()
2470 cma_attach_to_dev(id_priv, cma_dev); cma_bind_loopback()
2471 cma_set_loopback(cma_src_addr(id_priv)); cma_bind_loopback()
2480 struct rdma_id_private *id_priv = context; addr_handler() local
2484 mutex_lock(&id_priv->handler_mutex); addr_handler()
2485 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, addr_handler()
2489 memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr)); addr_handler()
2490 if (!status && !id_priv->cma_dev) addr_handler()
2491 status = cma_acquire_dev(id_priv, NULL); addr_handler()
2494 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, addr_handler()
2502 if (id_priv->id.event_handler(&id_priv->id, &event)) { addr_handler()
2503 cma_exch(id_priv, RDMA_CM_DESTROYING); addr_handler()
2504 mutex_unlock(&id_priv->handler_mutex); addr_handler()
2505 cma_deref_id(id_priv); addr_handler()
2506 rdma_destroy_id(&id_priv->id); addr_handler()
2510 mutex_unlock(&id_priv->handler_mutex); addr_handler()
2511 cma_deref_id(id_priv); addr_handler()
2514 static int cma_resolve_loopback(struct rdma_id_private *id_priv) cma_resolve_loopback() argument
2524 if (!id_priv->cma_dev) { cma_resolve_loopback()
2525 ret = cma_bind_loopback(id_priv); cma_resolve_loopback()
2530 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); cma_resolve_loopback()
2531 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); cma_resolve_loopback()
2533 work->id = id_priv; cma_resolve_loopback()
2545 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) cma_resolve_ib_addr() argument
2554 if (!id_priv->cma_dev) { cma_resolve_ib_addr()
2555 ret = cma_resolve_ib_dev(id_priv); cma_resolve_ib_addr()
2560 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) cma_resolve_ib_addr()
2561 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); cma_resolve_ib_addr()
2563 work->id = id_priv; cma_resolve_ib_addr()
2598 struct rdma_id_private *id_priv; rdma_resolve_addr() local
2601 id_priv = container_of(id, struct rdma_id_private, id); rdma_resolve_addr()
2602 if (id_priv->state == RDMA_CM_IDLE) { rdma_resolve_addr()
2608 if (cma_family(id_priv) != dst_addr->sa_family) rdma_resolve_addr()
2611 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) rdma_resolve_addr()
2614 atomic_inc(&id_priv->refcount); rdma_resolve_addr()
2615 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); rdma_resolve_addr()
2617 ret = cma_resolve_loopback(id_priv); rdma_resolve_addr()
2620 ret = cma_resolve_ib_addr(id_priv); rdma_resolve_addr()
2622 ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv), rdma_resolve_addr()
2624 timeout_ms, addr_handler, id_priv); rdma_resolve_addr()
2632 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); rdma_resolve_addr()
2633 cma_deref_id(id_priv); rdma_resolve_addr()
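
rdma_resolve_addr moves the id from ADDR_BOUND to ADDR_QUERY, records the destination, and dispatches to loopback, AF_IB, or IP resolution; addr_handler later completes the transition asynchronously, and rdma_resolve_route (line 2374 above) does the same for path lookup. Driven from userspace through the event channel, the two steps look like this hedged sketch (resolve is an illustrative name):

    #include <rdma/rdma_cma.h>

    static int resolve(struct rdma_cm_id *id, struct sockaddr *dst)
    {
            struct rdma_cm_event *ev;

            if (rdma_resolve_addr(id, NULL, dst, 2000))     /* 2 s timeout */
                    return -1;
            if (rdma_get_cm_event(id->channel, &ev))
                    return -1;
            if (ev->event != RDMA_CM_EVENT_ADDR_RESOLVED) {
                    rdma_ack_cm_event(ev);
                    return -1;
            }
            rdma_ack_cm_event(ev);

            if (rdma_resolve_route(id, 2000))
                    return -1;
            if (rdma_get_cm_event(id->channel, &ev))
                    return -1;
            if (ev->event != RDMA_CM_EVENT_ROUTE_RESOLVED) {
                    rdma_ack_cm_event(ev);
                    return -1;
            }
            rdma_ack_cm_event(ev);
            return 0;
    }
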
2640 struct rdma_id_private *id_priv; rdma_set_reuseaddr() local
2644 id_priv = container_of(id, struct rdma_id_private, id); rdma_set_reuseaddr()
2645 spin_lock_irqsave(&id_priv->lock, flags); rdma_set_reuseaddr()
2646 if (reuse || id_priv->state == RDMA_CM_IDLE) { rdma_set_reuseaddr()
2647 id_priv->reuseaddr = reuse; rdma_set_reuseaddr()
2652 spin_unlock_irqrestore(&id_priv->lock, flags); rdma_set_reuseaddr()
2659 struct rdma_id_private *id_priv; rdma_set_afonly() local
2663 id_priv = container_of(id, struct rdma_id_private, id); rdma_set_afonly()
2664 spin_lock_irqsave(&id_priv->lock, flags); rdma_set_afonly()
2665 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { rdma_set_afonly()
2666 id_priv->options |= (1 << CMA_OPTION_AFONLY); rdma_set_afonly()
2667 id_priv->afonly = afonly; rdma_set_afonly()
2672 spin_unlock_irqrestore(&id_priv->lock, flags); rdma_set_afonly()
2677 cma_bind_port(struct rdma_bind_list *bind_list, struct rdma_id_private *id_priv) cma_bind_port() argument
2685 addr = cma_src_addr(id_priv); cma_bind_port()
2703 id_priv->bind_list = bind_list; cma_bind_port()
2704 hlist_add_head(&id_priv->node, &bind_list->owners); cma_bind_port()
2707 cma_alloc_port(enum rdma_port_space ps, struct rdma_id_private *id_priv, unsigned short snum) cma_alloc_port() argument
2717 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, cma_alloc_port()
2724 cma_bind_port(bind_list, id_priv); cma_alloc_port()
2731 cma_alloc_any_port(enum rdma_port_space ps, struct rdma_id_private *id_priv) cma_alloc_any_port() argument
2737 struct net *net = id_priv->id.route.addr.dev_addr.net; cma_alloc_any_port()
2745 int ret = cma_alloc_port(ps, id_priv, rover); cma_alloc_any_port()
2767 * the latter case, the provided id_priv may already be on the bind_list, but
2770 cma_check_port(struct rdma_bind_list *bind_list, struct rdma_id_private *id_priv, uint8_t reuseaddr) cma_check_port() argument
2776 addr = cma_src_addr(id_priv); cma_check_port()
2778 if (id_priv == cur_id) cma_check_port()
2786 if (id_priv->afonly && cur_id->afonly && cma_check_port()
2799 cma_use_port(enum rdma_port_space ps, struct rdma_id_private *id_priv) cma_use_port() argument
2806 snum = ntohs(cma_port(cma_src_addr(id_priv))); cma_use_port()
2810 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); cma_use_port()
2812 ret = cma_alloc_port(ps, id_priv, snum); cma_use_port()
2814 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); cma_use_port()
2816 cma_bind_port(bind_list, id_priv); cma_use_port()
2821 static int cma_bind_listen(struct rdma_id_private *id_priv) cma_bind_listen() argument
2823 struct rdma_bind_list *bind_list = id_priv->bind_list; cma_bind_listen()
2828 ret = cma_check_port(bind_list, id_priv, 0); cma_bind_listen()
2833 cma_select_inet_ps(struct rdma_id_private *id_priv) cma_select_inet_ps() argument
2836 switch (id_priv->id.ps) { cma_select_inet_ps()
2841 return id_priv->id.ps; cma_select_inet_ps()
2848 static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv) cma_select_ib_ps() argument
2854 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); cma_select_ib_ps()
2858 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { cma_select_ib_ps()
2861 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && cma_select_ib_ps()
2865 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && cma_select_ib_ps()
2879 static int cma_get_port(struct rdma_id_private *id_priv) cma_get_port() argument
2884 if (cma_family(id_priv) != AF_IB) cma_get_port()
2885 ps = cma_select_inet_ps(id_priv); cma_get_port()
2887 ps = cma_select_ib_ps(id_priv); cma_get_port()
2892 if (cma_any_port(cma_src_addr(id_priv))) cma_get_port()
2893 ret = cma_alloc_any_port(ps, id_priv); cma_get_port()
2895 ret = cma_use_port(ps, id_priv); cma_get_port()
2925 struct rdma_id_private *id_priv; rdma_listen() local
2928 id_priv = container_of(id, struct rdma_id_private, id); rdma_listen()
2929 if (id_priv->state == RDMA_CM_IDLE) { rdma_listen()
2931 ret = rdma_bind_addr(id, cma_src_addr(id_priv)); rdma_listen()
2936 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) rdma_listen()
2939 if (id_priv->reuseaddr) { rdma_listen()
2940 ret = cma_bind_listen(id_priv); rdma_listen()
2945 id_priv->backlog = backlog; rdma_listen()
2948 ret = cma_ib_listen(id_priv); rdma_listen()
2952 ret = cma_iw_listen(id_priv, backlog); rdma_listen()
2960 cma_listen_on_all(id_priv); rdma_listen()
2964 id_priv->backlog = 0; rdma_listen()
2965 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); rdma_listen()
2972 struct rdma_id_private *id_priv; rdma_bind_addr() local
2979 id_priv = container_of(id, struct rdma_id_private, id); rdma_bind_addr()
2980 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) rdma_bind_addr()
2987 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); rdma_bind_addr()
2993 ret = cma_acquire_dev(id_priv, NULL); rdma_bind_addr()
2998 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { rdma_bind_addr()
3000 id_priv->afonly = 1; rdma_bind_addr()
3003 struct net *net = id_priv->id.route.addr.dev_addr.net; rdma_bind_addr()
3005 id_priv->afonly = net->ipv6.sysctl.bindv6only; rdma_bind_addr()
3009 ret = cma_get_port(id_priv); rdma_bind_addr()
3015 if (id_priv->cma_dev) rdma_bind_addr()
3016 cma_release_dev(id_priv); rdma_bind_addr()
3018 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); rdma_bind_addr()
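
rdma_listen (line 2925 above) registers the id with the IB or iWARP CM and, for a wildcard address, replicates the listener across all devices via cma_listen_on_all; rdma_bind_addr claims the port in the chosen port space first. Server-side setup from userspace is the familiar bind/listen pair; a hedged sketch (start_server is an illustrative name):

    #include <rdma/rdma_cma.h>
    #include <netinet/in.h>
    #include <string.h>

    static int start_server(struct rdma_cm_id *id, uint16_t port)
    {
            struct sockaddr_in addr;

            memset(&addr, 0, sizeof(addr));
            addr.sin_family = AF_INET;
            addr.sin_port = htons(port);    /* INADDR_ANY: all devices */

            if (rdma_bind_addr(id, (struct sockaddr *)&addr))
                    return -1;
            return rdma_listen(id, 16);     /* backlog of 16 */
    }
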
3023 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) cma_format_hdr() argument
3029 if (cma_family(id_priv) == AF_INET) { cma_format_hdr()
3032 src4 = (struct sockaddr_in *) cma_src_addr(id_priv); cma_format_hdr()
3033 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); cma_format_hdr()
3039 } else if (cma_family(id_priv) == AF_INET6) { cma_format_hdr()
3042 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); cma_format_hdr()
3043 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); cma_format_hdr()
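
For reference, cma_format_hdr fills the private-data header that rdma_cm prepends to connection requests in the IP port spaces; in the 4.4 tree the layout (defined near the top of cma.c, not shown by this search) is:

    struct cma_hdr {
            u8 cma_version;
            u8 ip_version;          /* IP version: 7:4 */
            __be16 port;
            union cma_ip_addr src_addr;
            union cma_ip_addr dst_addr;
    };

This is also why cma_user_data_offset (line 1354 above) returns sizeof(struct cma_hdr) for everything except AF_IB, which carries no such header.
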
3056 struct rdma_id_private *id_priv = cm_id->context; cma_sidr_rep_handler() local
3061 if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) cma_sidr_rep_handler()
3078 ret = cma_set_qkey(id_priv, rep->qkey); cma_sidr_rep_handler()
3084 ib_init_ah_from_path(id_priv->id.device, id_priv->id.port_num, cma_sidr_rep_handler()
3085 id_priv->id.route.path_rec, cma_sidr_rep_handler()
3098 ret = id_priv->id.event_handler(&id_priv->id, &event); cma_sidr_rep_handler()
3101 id_priv->cm_id.ib = NULL; cma_sidr_rep_handler()
3102 cma_exch(id_priv, RDMA_CM_DESTROYING); cma_sidr_rep_handler()
3103 mutex_unlock(&id_priv->handler_mutex); cma_sidr_rep_handler()
3104 rdma_destroy_id(&id_priv->id); cma_sidr_rep_handler()
3108 mutex_unlock(&id_priv->handler_mutex); cma_sidr_rep_handler()
3112 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, cma_resolve_ib_udp() argument
3121 offset = cma_user_data_offset(id_priv); cma_resolve_ib_udp()
3139 ret = cma_format_hdr(private_data, id_priv); cma_resolve_ib_udp()
3145 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, cma_resolve_ib_udp()
3146 id_priv); cma_resolve_ib_udp()
3151 id_priv->cm_id.ib = id; cma_resolve_ib_udp()
3153 req.path = id_priv->id.route.path_rec; cma_resolve_ib_udp()
3154 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); cma_resolve_ib_udp()
3158 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); cma_resolve_ib_udp()
3160 ib_destroy_cm_id(id_priv->cm_id.ib); cma_resolve_ib_udp()
3161 id_priv->cm_id.ib = NULL; cma_resolve_ib_udp()
3168 static int cma_connect_ib(struct rdma_id_private *id_priv, cma_connect_ib() argument
3178 offset = cma_user_data_offset(id_priv); cma_connect_ib()
3195 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); cma_connect_ib()
3200 id_priv->cm_id.ib = id; cma_connect_ib()
3202 route = &id_priv->id.route; cma_connect_ib()
3204 ret = cma_format_hdr(private_data, id_priv); cma_connect_ib()
3214 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); cma_connect_ib()
3215 req.qp_num = id_priv->qp_num; cma_connect_ib()
3216 req.qp_type = id_priv->id.qp_type; cma_connect_ib()
3217 req.starting_psn = id_priv->seq_num; cma_connect_ib()
3226 req.srq = id_priv->srq ? 1 : 0; cma_connect_ib()
3228 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); cma_connect_ib()
3232 id_priv->cm_id.ib = NULL; cma_connect_ib()
3239 static int cma_connect_iw(struct rdma_id_private *id_priv, cma_connect_iw() argument
3246 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); cma_connect_iw()
3250 cm_id->tos = id_priv->tos; cma_connect_iw()
3251 id_priv->cm_id.iw = cm_id; cma_connect_iw()
3253 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), cma_connect_iw()
3254 rdma_addr_size(cma_src_addr(id_priv))); cma_connect_iw()
3255 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), cma_connect_iw()
3256 rdma_addr_size(cma_dst_addr(id_priv))); cma_connect_iw()
3258 ret = cma_modify_qp_rtr(id_priv, conn_param); cma_connect_iw()
3267 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; cma_connect_iw()
3270 iw_param.qpn = id_priv->qp_num; cma_connect_iw()
3276 id_priv->cm_id.iw = NULL; cma_connect_iw()
3283 struct rdma_id_private *id_priv; rdma_connect() local
3286 id_priv = container_of(id, struct rdma_id_private, id); rdma_connect()
3287 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) rdma_connect()
3291 id_priv->qp_num = conn_param->qp_num; rdma_connect()
3292 id_priv->srq = conn_param->srq; rdma_connect()
3297 ret = cma_resolve_ib_udp(id_priv, conn_param); rdma_connect()
3299 ret = cma_connect_ib(id_priv, conn_param); rdma_connect()
3301 ret = cma_connect_iw(id_priv, conn_param); rdma_connect()
3309 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); rdma_connect()
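
rdma_connect requires ROUTE_RESOLVED and then branches: a SIDR exchange for UD, a full CM REQ via cma_connect_ib for connected IB QPs, or cma_connect_iw for iWARP. On the client side in userspace, after resolving and creating a QP with rdma_create_qp, the call looks like this hedged sketch (connect_rc is an illustrative name):

    #include <rdma/rdma_cma.h>

    static int connect_rc(struct rdma_cm_id *id)
    {
            struct rdma_conn_param param = {
                    .responder_resources = 1,
                    .initiator_depth     = 1,
                    .retry_count         = 7,
            };
            struct rdma_cm_event *ev;

            if (rdma_connect(id, &param))
                    return -1;
            if (rdma_get_cm_event(id->channel, &ev))
                    return -1;
            if (ev->event != RDMA_CM_EVENT_ESTABLISHED) {
                    rdma_ack_cm_event(ev);
                    return -1;
            }
            rdma_ack_cm_event(ev);
            return 0;
    }
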
3314 static int cma_accept_ib(struct rdma_id_private *id_priv, cma_accept_ib() argument
3320 ret = cma_modify_qp_rtr(id_priv, conn_param); cma_accept_ib()
3324 ret = cma_modify_qp_rts(id_priv, conn_param); cma_accept_ib()
3329 rep.qp_num = id_priv->qp_num; cma_accept_ib()
3330 rep.starting_psn = id_priv->seq_num; cma_accept_ib()
3338 rep.srq = id_priv->srq ? 1 : 0; cma_accept_ib()
3340 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); cma_accept_ib()
3345 static int cma_accept_iw(struct rdma_id_private *id_priv, cma_accept_iw() argument
3351 ret = cma_modify_qp_rtr(id_priv, conn_param); cma_accept_iw()
3359 if (id_priv->id.qp) { cma_accept_iw()
3360 iw_param.qpn = id_priv->qp_num; cma_accept_iw()
3364 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); cma_accept_iw()
3367 static int cma_send_sidr_rep(struct rdma_id_private *id_priv, cma_send_sidr_rep() argument
3377 ret = cma_set_qkey(id_priv, qkey); cma_send_sidr_rep()
3380 rep.qp_num = id_priv->qp_num; cma_send_sidr_rep()
3381 rep.qkey = id_priv->qkey; cma_send_sidr_rep()
3386 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); cma_send_sidr_rep()
3391 struct rdma_id_private *id_priv; rdma_accept() local
3394 id_priv = container_of(id, struct rdma_id_private, id); rdma_accept()
3396 id_priv->owner = task_pid_nr(current); rdma_accept()
3398 if (!cma_comp(id_priv, RDMA_CM_CONNECT)) rdma_accept()
3402 id_priv->qp_num = conn_param->qp_num; rdma_accept()
3403 id_priv->srq = conn_param->srq; rdma_accept()
3409 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, rdma_accept()
3414 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, rdma_accept()
3418 ret = cma_accept_ib(id_priv, conn_param); rdma_accept()
3420 ret = cma_rep_recv(id_priv); rdma_accept()
3423 ret = cma_accept_iw(id_priv, conn_param); rdma_accept()
3432 cma_modify_qp_err(id_priv); rdma_accept()
3440 struct rdma_id_private *id_priv; rdma_notify() local
3443 id_priv = container_of(id, struct rdma_id_private, id); rdma_notify()
3444 if (!id_priv->cm_id.ib) rdma_notify()
3449 ret = ib_cm_notify(id_priv->cm_id.ib, event); rdma_notify()
3462 struct rdma_id_private *id_priv; rdma_reject() local
3465 id_priv = container_of(id, struct rdma_id_private, id); rdma_reject()
3466 if (!id_priv->cm_id.ib) rdma_reject()
3471 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, rdma_reject()
3474 ret = ib_send_cm_rej(id_priv->cm_id.ib, rdma_reject()
3478 ret = iw_cm_reject(id_priv->cm_id.iw, rdma_reject()
3489 struct rdma_id_private *id_priv; rdma_disconnect() local
3492 id_priv = container_of(id, struct rdma_id_private, id); rdma_disconnect()
3493 if (!id_priv->cm_id.ib) rdma_disconnect()
3497 ret = cma_modify_qp_err(id_priv); rdma_disconnect()
3501 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) rdma_disconnect()
3502 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); rdma_disconnect()
3504 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); rdma_disconnect()
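
rdma_disconnect first forces the QP into the error state so outstanding work requests flush, then sends a DREQ (or DREP) on IB or an iWARP disconnect. A hedged sketch of the matching userspace teardown order (waiting for RDMA_CM_EVENT_DISCONNECTED is omitted for brevity):

    static void teardown(struct rdma_cm_id *id)
    {
            rdma_disconnect(id);    /* flush the QP, notify the peer */
            rdma_destroy_qp(id);    /* the QP must go before the id */
            rdma_destroy_id(id);
    }
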
3515 struct rdma_id_private *id_priv; cma_ib_mc_handler() local
3520 id_priv = mc->id_priv; cma_ib_mc_handler()
3521 if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && cma_ib_mc_handler()
3522 cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) cma_ib_mc_handler()
3526 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); cma_ib_mc_handler()
3527 mutex_lock(&id_priv->qp_mutex); cma_ib_mc_handler()
3528 if (!status && id_priv->id.qp) cma_ib_mc_handler()
3529 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, cma_ib_mc_handler()
3531 mutex_unlock(&id_priv->qp_mutex); cma_ib_mc_handler()
3538 ib_init_ah_from_mcmember(id_priv->id.device, cma_ib_mc_handler()
3539 id_priv->id.port_num, &multicast->rec, cma_ib_mc_handler()
3546 ret = id_priv->id.event_handler(&id_priv->id, &event); cma_ib_mc_handler()
3548 cma_exch(id_priv, RDMA_CM_DESTROYING); cma_ib_mc_handler()
3549 mutex_unlock(&id_priv->handler_mutex); cma_ib_mc_handler()
3550 rdma_destroy_id(&id_priv->id); cma_ib_mc_handler()
3554 mutex_unlock(&id_priv->handler_mutex); cma_ib_mc_handler()
3558 static void cma_set_mgid(struct rdma_id_private *id_priv, cma_set_mgid() argument
3562 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; cma_set_mgid()
3577 if (id_priv->id.ps == RDMA_PS_UDP) cma_set_mgid()
3582 if (id_priv->id.ps == RDMA_PS_UDP) cma_set_mgid()
3588 static int cma_join_ib_multicast(struct rdma_id_private *id_priv, cma_join_ib_multicast() argument
3592 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; cma_join_ib_multicast()
3597 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, cma_join_ib_multicast()
3602 ret = cma_set_qkey(id_priv, 0); cma_join_ib_multicast()
3606 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); cma_join_ib_multicast()
3607 rec.qkey = cpu_to_be32(id_priv->qkey); cma_join_ib_multicast()
3618 if (id_priv->id.ps == RDMA_PS_IPOIB) cma_join_ib_multicast()
3625 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, cma_join_ib_multicast()
3626 id_priv->id.port_num, &rec, cma_join_ib_multicast()
3670 static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, cma_iboe_join_multicast() argument
3674 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; cma_iboe_join_multicast()
3695 if (id_priv->id.ps == RDMA_PS_UDP) cma_iboe_join_multicast()
3712 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, cma_iboe_join_multicast()
3714 work->id = id_priv; cma_iboe_join_multicast()
3732 struct rdma_id_private *id_priv; rdma_join_multicast() local
3736 id_priv = container_of(id, struct rdma_id_private, id); rdma_join_multicast()
3737 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && rdma_join_multicast()
3738 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) rdma_join_multicast()
3747 mc->id_priv = id_priv; rdma_join_multicast()
3749 spin_lock(&id_priv->lock); rdma_join_multicast()
3750 list_add(&mc->list, &id_priv->mc_list); rdma_join_multicast()
3751 spin_unlock(&id_priv->lock); rdma_join_multicast()
3755 ret = cma_iboe_join_multicast(id_priv, mc); rdma_join_multicast()
3757 ret = cma_join_ib_multicast(id_priv, mc); rdma_join_multicast()
3762 spin_lock_irq(&id_priv->lock); rdma_join_multicast()
3764 spin_unlock_irq(&id_priv->lock); rdma_join_multicast()
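
rdma_join_multicast links the group onto id_priv->mc_list under the id's lock, then joins through the SA (cma_join_ib_multicast) or builds the MGID directly for RoCE (cma_iboe_join_multicast). The userspace call takes the group address plus an opaque context returned in the join event; a hedged sketch (join_group is an illustrative name):

    static int join_group(struct rdma_cm_id *id, struct sockaddr *mcast)
    {
            struct rdma_cm_event *ev;

            if (rdma_join_multicast(id, mcast, NULL))
                    return -1;
            if (rdma_get_cm_event(id->channel, &ev))
                    return -1;
            if (ev->event != RDMA_CM_EVENT_MULTICAST_JOIN) {
                    rdma_ack_cm_event(ev);
                    return -1;
            }
            rdma_ack_cm_event(ev);
            return 0;
    }
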
3773 struct rdma_id_private *id_priv; rdma_leave_multicast() local
3776 id_priv = container_of(id, struct rdma_id_private, id); rdma_leave_multicast()
3777 spin_lock_irq(&id_priv->lock); rdma_leave_multicast()
3778 list_for_each_entry(mc, &id_priv->mc_list, list) { rdma_leave_multicast()
3781 spin_unlock_irq(&id_priv->lock); rdma_leave_multicast()
3788 BUG_ON(id_priv->cma_dev->device != id->device); rdma_leave_multicast()
3799 spin_unlock_irq(&id_priv->lock); rdma_leave_multicast()
3803 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) cma_netdev_change() argument
3808 dev_addr = &id_priv->id.route.addr.dev_addr; cma_netdev_change()
3814 ndev->name, &id_priv->id); cma_netdev_change()
3820 work->id = id_priv; cma_netdev_change()
3822 atomic_inc(&id_priv->refcount); cma_netdev_change()
3834 struct rdma_id_private *id_priv; cma_netdev_callback() local
3845 list_for_each_entry(id_priv, &cma_dev->id_list, list) { cma_netdev_callback()
3846 ret = cma_netdev_change(ndev, id_priv); cma_netdev_callback()
3863 struct rdma_id_private *id_priv; cma_add_one() local
3878 list_for_each_entry(id_priv, &listen_any_list, list) cma_add_one()
3879 cma_listen_on_dev(id_priv, cma_dev); cma_add_one()
3883 static int cma_remove_id_dev(struct rdma_id_private *id_priv) cma_remove_id_dev() argument
3890 state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL); cma_remove_id_dev()
3894 cma_cancel_operation(id_priv, state); cma_remove_id_dev()
3895 mutex_lock(&id_priv->handler_mutex); cma_remove_id_dev()
3898 if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL)) cma_remove_id_dev()
3903 ret = id_priv->id.event_handler(&id_priv->id, &event); cma_remove_id_dev()
3905 mutex_unlock(&id_priv->handler_mutex); cma_remove_id_dev()
3911 struct rdma_id_private *id_priv; cma_process_remove() local
3916 id_priv = list_entry(cma_dev->id_list.next, cma_process_remove()
3919 list_del(&id_priv->listen_list); cma_process_remove()
3920 list_del_init(&id_priv->list); cma_process_remove()
3921 atomic_inc(&id_priv->refcount); cma_process_remove()
3924 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv); cma_process_remove()
3925 cma_deref_id(id_priv); cma_process_remove()
3927 rdma_destroy_id(&id_priv->id); cma_process_remove()
3956 struct rdma_id_private *id_priv; cma_get_id_stats() local
3974 list_for_each_entry(id_priv, &cma_dev->id_list, list) { cma_get_id_stats()
3988 id = &id_priv->id; cma_get_id_stats()
3995 rdma_addr_size(cma_src_addr(id_priv)), cma_get_id_stats()
3996 cma_src_addr(id_priv), cma_get_id_stats()
4000 rdma_addr_size(cma_src_addr(id_priv)), cma_get_id_stats()
4001 cma_dst_addr(id_priv), cma_get_id_stats()
4005 id_stats->pid = id_priv->owner; cma_get_id_stats()
4007 id_stats->cm_state = id_priv->state; cma_get_id_stats()
4008 id_stats->qp_num = id_priv->qp_num; cma_get_id_stats()
