Lines matching refs: cma_dev

119 	struct cma_device	*cma_dev;  member
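
From the fields this listing touches (list, device, comp, refcount, id_list), the per-device bookkeeping structure is roughly the sketch below; field order and any members not referenced here are assumptions, not taken from the source:

	struct cma_device {
		struct list_head	list;		/* link in the global dev_list */
		struct ib_device	*device;	/* underlying verbs device */
		struct completion	comp;		/* completed when refcount reaches zero */
		atomic_t		refcount;	/* one ref per attached id plus a base ref */
		struct list_head	id_list;	/* rdma_id_private entries bound to this device */
	};
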
248 struct cma_device *cma_dev) in cma_attach_to_dev() argument
250 atomic_inc(&cma_dev->refcount); in cma_attach_to_dev()
251 id_priv->cma_dev = cma_dev; in cma_attach_to_dev()
252 id_priv->id.device = cma_dev->device; in cma_attach_to_dev()
254 rdma_node_get_transport(cma_dev->device->node_type); in cma_attach_to_dev()
255 list_add_tail(&id_priv->list, &cma_dev->id_list); in cma_attach_to_dev()
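
Reconstructed from the fragments above, cma_attach_to_dev() amounts to the sketch below; the assignment target for rdma_node_get_transport() (the dev_addr transport field) is inferred from surrounding code and is not shown in this listing:

	static void cma_attach_to_dev(struct rdma_id_private *id_priv,
				      struct cma_device *cma_dev)
	{
		atomic_inc(&cma_dev->refcount);		/* each attached id pins the device */
		id_priv->cma_dev = cma_dev;
		id_priv->id.device = cma_dev->device;
		id_priv->id.route.addr.dev_addr.transport =	/* assumed assignment target */
			rdma_node_get_transport(cma_dev->device->node_type);
		list_add_tail(&id_priv->list, &cma_dev->id_list);
	}
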
258 static inline void cma_deref_dev(struct cma_device *cma_dev) in cma_deref_dev() argument
260 if (atomic_dec_and_test(&cma_dev->refcount)) in cma_deref_dev()
261 complete(&cma_dev->comp); in cma_deref_dev()
276 cma_deref_dev(id_priv->cma_dev); in cma_release_dev()
277 id_priv->cma_dev = NULL; in cma_release_dev()
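
The release side pairs atomic_dec_and_test() with the completion that device removal waits on. A sketch; the global mutex and the list_del() in cma_release_dev() are assumptions, since those lines do not reference cma_dev and so do not appear above:

	static inline void cma_deref_dev(struct cma_device *cma_dev)
	{
		if (atomic_dec_and_test(&cma_dev->refcount))
			complete(&cma_dev->comp);	/* wakes cma_process_remove() */
	}

	static void cma_release_dev(struct rdma_id_private *id_priv)
	{
		mutex_lock(&lock);			/* assumed: global cma mutex */
		list_del(&id_priv->list);		/* assumed: unlink from cma_dev->id_list */
		cma_deref_dev(id_priv->cma_dev);
		id_priv->cma_dev = NULL;
		mutex_unlock(&lock);
	}
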
356 struct cma_device *cma_dev; in cma_acquire_dev() local
376 cma_dev = listen_id_priv->cma_dev; in cma_acquire_dev()
378 if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB && in cma_acquire_dev()
379 rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET) in cma_acquire_dev()
380 ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, in cma_acquire_dev()
383 ret = ib_find_cached_gid(cma_dev->device, &gid, in cma_acquire_dev()
391 list_for_each_entry(cma_dev, &dev_list, list) { in cma_acquire_dev()
392 for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) { in cma_acquire_dev()
394 listen_id_priv->cma_dev == cma_dev && in cma_acquire_dev()
397 if (rdma_port_get_link_layer(cma_dev->device, port) == dev_ll) { in cma_acquire_dev()
398 if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB && in cma_acquire_dev()
399 rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET) in cma_acquire_dev()
400 ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL); in cma_acquire_dev()
402 ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL); in cma_acquire_dev()
414 cma_attach_to_dev(id_priv, cma_dev); in cma_acquire_dev()
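
The cma_acquire_dev() fragments correspond to a scan over every registered device and port, preferring the IBoE (RoCE) GID on Ethernet link layers and the plain IB GID otherwise. A condensed sketch of that fallback loop; declarations, error handling, and the listen-id fast path are omitted, and the seeding of ret and the port_num assignment are assumptions:

	ret = -ENODEV;		/* assumed; the real code seeds ret in the fast path above */
	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			if (listen_id_priv &&
			    listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)
				continue;	/* already tried via the listener's device */
			if (rdma_port_get_link_layer(cma_dev->device, port) != dev_ll)
				continue;
			if (rdma_node_get_transport(cma_dev->device->node_type) == RDMA_TRANSPORT_IB &&
			    rdma_port_get_link_layer(cma_dev->device, port) == IB_LINK_LAYER_ETHERNET)
				ret = ib_find_cached_gid(cma_dev->device, &iboe_gid, &found_port, NULL);
			else
				ret = ib_find_cached_gid(cma_dev->device, &gid, &found_port, NULL);
			if (!ret && port == found_port) {
				id_priv->id.port_num = found_port;	/* assumed */
				goto out;
			}
		}
	}
	out:
	if (!ret)
		cma_attach_to_dev(id_priv, cma_dev);
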
425 struct cma_device *cma_dev, *cur_dev; in cma_resolve_ib_dev() local
432 cma_dev = NULL; in cma_resolve_ib_dev()
447 cma_dev = cur_dev; in cma_resolve_ib_dev()
453 if (!cma_dev && (gid.global.subnet_prefix == in cma_resolve_ib_dev()
455 cma_dev = cur_dev; in cma_resolve_ib_dev()
463 if (!cma_dev) in cma_resolve_ib_dev()
467 cma_attach_to_dev(id_priv, cma_dev); in cma_resolve_ib_dev()
636 if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) in cma_modify_qp_rtr()
994 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) in cma_cancel_operation()
1026 switch (rdma_port_get_link_layer(id_priv->cma_dev->device, id_priv->id.port_num)) { in cma_leave_mc_groups()
1056 if (id_priv->cma_dev) { in rdma_destroy_id()
1639 struct cma_device *cma_dev) in cma_listen_on_dev() argument
1646 rdma_node_get_transport(cma_dev->device->node_type) != RDMA_TRANSPORT_IB) in cma_listen_on_dev()
1660 cma_attach_to_dev(dev_id_priv, cma_dev); in cma_listen_on_dev()
1669 "listening on device %s\n", ret, cma_dev->device->name); in cma_listen_on_dev()
1674 struct cma_device *cma_dev; in cma_listen_on_all() local
1678 list_for_each_entry(cma_dev, &dev_list, list) in cma_listen_on_all()
1679 cma_listen_on_dev(id_priv, cma_dev); in cma_listen_on_all()
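
cma_listen_on_all() replays a wildcard listen on every registered device; the mutex and the listen_any_list bookkeeping are assumptions, since those lines do not reference cma_dev:

	static void cma_listen_on_all(struct rdma_id_private *id_priv)
	{
		struct cma_device *cma_dev;

		mutex_lock(&lock);				/* assumed */
		list_add_tail(&id_priv->list, &listen_any_list);/* assumed: wildcard listeners */
		list_for_each_entry(cma_dev, &dev_list, list)
			cma_listen_on_dev(id_priv, cma_dev);
		mutex_unlock(&lock);
	}
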
2037 struct cma_device *cma_dev, *cur_dev; in cma_bind_loopback() local
2044 cma_dev = NULL; in cma_bind_loopback()
2051 if (!cma_dev) in cma_bind_loopback()
2052 cma_dev = cur_dev; in cma_bind_loopback()
2057 cma_dev = cur_dev; in cma_bind_loopback()
2063 if (!cma_dev) { in cma_bind_loopback()
2071 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid); in cma_bind_loopback()
2075 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); in cma_bind_loopback()
2080 (rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ? in cma_bind_loopback()
2086 cma_attach_to_dev(id_priv, cma_dev); in cma_bind_loopback()
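
cma_bind_loopback() picks a device for a wildcard or loopback bind: it prefers the first device with an active port and falls back to the first device seen. A condensed sketch with declarations omitted; the ib_query_port() active-port test, the locking, and the ARPHRD dev_type values are assumptions drawn from context rather than from this listing:

	cma_dev = NULL;
	mutex_lock(&lock);				/* assumed */
	list_for_each_entry(cur_dev, &dev_list, list) {
		if (!cma_dev)
			cma_dev = cur_dev;		/* fallback: first device seen */
		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!ib_query_port(cur_dev->device, p, &port_attr) &&
			    port_attr.state == IB_PORT_ACTIVE) {	/* assumed criterion */
				cma_dev = cur_dev;
				goto port_found;
			}
		}
	}
	if (!cma_dev) {
		ret = -ENODEV;
		goto out;
	}
	p = 1;
	port_found:
	ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid);
	if (ret)
		goto out;
	ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
	if (ret)
		goto out;
	id_priv->id.route.addr.dev_addr.dev_type =
		(rdma_port_get_link_layer(cma_dev->device, p) == IB_LINK_LAYER_INFINIBAND) ?
		ARPHRD_INFINIBAND : ARPHRD_ETHER;	/* assumed values */
	id_priv->id.port_num = p;
	cma_attach_to_dev(id_priv, cma_dev);
	out:
	mutex_unlock(&lock);
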
2106 if (!status && !id_priv->cma_dev) in addr_handler()
2140 if (!id_priv->cma_dev) { in cma_resolve_loopback()
2170 if (!id_priv->cma_dev) { in cma_resolve_ib_addr()
2625 if (id_priv->cma_dev) in rdma_bind_addr()
3425 if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) { in rdma_leave_multicast()
3474 struct cma_device *cma_dev; in cma_netdev_callback() local
3488 list_for_each_entry(cma_dev, &dev_list, list) in cma_netdev_callback()
3489 list_for_each_entry(id_priv, &cma_dev->id_list, list) { in cma_netdev_callback()
3506 struct cma_device *cma_dev; in cma_add_one() local
3509 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL); in cma_add_one()
3510 if (!cma_dev) in cma_add_one()
3513 cma_dev->device = device; in cma_add_one()
3515 init_completion(&cma_dev->comp); in cma_add_one()
3516 atomic_set(&cma_dev->refcount, 1); in cma_add_one()
3517 INIT_LIST_HEAD(&cma_dev->id_list); in cma_add_one()
3518 ib_set_client_data(device, &cma_client, cma_dev); in cma_add_one()
3521 list_add_tail(&cma_dev->list, &dev_list); in cma_add_one()
3523 cma_listen_on_dev(id_priv, cma_dev); in cma_add_one()
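
cma_add_one() is the ib_client add callback: it allocates the per-device state, initialises the refcount and completion used by the teardown path, publishes the pointer via ib_set_client_data(), and replays existing wildcard listens on the new device. A sketch; the mutex and the listen_any_list walk are assumptions, since those lines do not reference cma_dev:

	static void cma_add_one(struct ib_device *device)
	{
		struct cma_device *cma_dev;
		struct rdma_id_private *id_priv;

		cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
		if (!cma_dev)
			return;

		cma_dev->device = device;

		init_completion(&cma_dev->comp);
		atomic_set(&cma_dev->refcount, 1);	/* base ref, dropped in cma_process_remove() */
		INIT_LIST_HEAD(&cma_dev->id_list);
		ib_set_client_data(device, &cma_client, cma_dev);

		mutex_lock(&lock);					/* assumed */
		list_add_tail(&cma_dev->list, &dev_list);
		list_for_each_entry(id_priv, &listen_any_list, list)	/* assumed: wildcard listeners */
			cma_listen_on_dev(id_priv, cma_dev);
		mutex_unlock(&lock);
	}
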
3553 static void cma_process_remove(struct cma_device *cma_dev) in cma_process_remove() argument
3559 while (!list_empty(&cma_dev->id_list)) { in cma_process_remove()
3560 id_priv = list_entry(cma_dev->id_list.next, in cma_process_remove()
3577 cma_deref_dev(cma_dev); in cma_process_remove()
3578 wait_for_completion(&cma_dev->comp); in cma_process_remove()
3583 struct cma_device *cma_dev; in cma_remove_one() local
3585 cma_dev = ib_get_client_data(device, &cma_client); in cma_remove_one()
3586 if (!cma_dev) in cma_remove_one()
3590 list_del(&cma_dev->list); in cma_remove_one()
3593 cma_process_remove(cma_dev); in cma_remove_one()
3594 kfree(cma_dev); in cma_remove_one()
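
Removal mirrors the attach path: cma_remove_one() looks up the client data and unhooks the device from dev_list, then cma_process_remove() destroys every attached id, drops the base reference taken in cma_add_one(), and blocks on the completion until cma_deref_dev() has run for every remaining id. A sketch; the per-id teardown inside the loop is condensed and partly assumed (the real code raises a device-removal event and refcounts the id first):

	static void cma_process_remove(struct cma_device *cma_dev)
	{
		struct rdma_id_private *id_priv;

		mutex_lock(&lock);				/* assumed */
		while (!list_empty(&cma_dev->id_list)) {
			id_priv = list_entry(cma_dev->id_list.next,
					     struct rdma_id_private, list);
			list_del_init(&id_priv->list);		/* assumed: detach before destroying */
			mutex_unlock(&lock);
			rdma_destroy_id(&id_priv->id);		/* condensed teardown */
			mutex_lock(&lock);
		}
		mutex_unlock(&lock);

		cma_deref_dev(cma_dev);				/* drop the base reference */
		wait_for_completion(&cma_dev->comp);		/* wait until every id has detached */
	}

	static void cma_remove_one(struct ib_device *device)
	{
		struct cma_device *cma_dev;

		cma_dev = ib_get_client_data(device, &cma_client);
		if (!cma_dev)
			return;

		mutex_lock(&lock);				/* assumed */
		list_del(&cma_dev->list);
		mutex_unlock(&lock);

		cma_process_remove(cma_dev);
		kfree(cma_dev);
	}
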
3603 struct cma_device *cma_dev; in cma_get_id_stats() local
3612 list_for_each_entry(cma_dev, &dev_list, list) { in cma_get_id_stats()
3619 list_for_each_entry(id_priv, &cma_dev->id_list, list) { in cma_get_id_stats()