drivers/infiniband/core/cache.c:124: static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
drivers/infiniband/core/cache.c:128: event.device = ib_dev;
drivers/infiniband/core/cache.c:378: static void del_gid(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/cache.c:386: dev_dbg(&ib_dev->dev, "%s port=%d index=%d gid %pI6\n", __func__, port,
drivers/infiniband/core/cache.c:395: if (!rdma_protocol_roce(ib_dev, port))
drivers/infiniband/core/cache.c:406: if (rdma_cap_roce_gid_table(ib_dev, port))
drivers/infiniband/core/cache.c:407: ib_dev->ops.del_gid(&entry->attr, &entry->context);
drivers/infiniband/core/cache.c:542: static int __ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/cache.c:558: table = rdma_gid_table(ib_dev, port);
drivers/infiniband/core/cache.c:570: attr->device = ib_dev;
drivers/infiniband/core/cache.c:576: dispatch_gid_change_event(ib_dev, port);
drivers/infiniband/core/cache.c:586: int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/cache.c:593: return __ib_cache_gid_add(ib_dev, port, gid, attr, mask, false);
drivers/infiniband/core/cache.c:597: _ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/cache.c:605: table = rdma_gid_table(ib_dev, port);
drivers/infiniband/core/cache.c:615: del_gid(ib_dev, port, table, ix);
drivers/infiniband/core/cache.c:616: dispatch_gid_change_event(ib_dev, port);
drivers/infiniband/core/cache.c:626: int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/cache.c:634: return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
drivers/infiniband/core/cache.c:637: int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/cache.c:644: table = rdma_gid_table(ib_dev, port);
drivers/infiniband/core/cache.c:651: del_gid(ib_dev, port, table, ix);
drivers/infiniband/core/cache.c:659: dispatch_gid_change_event(ib_dev, port);
drivers/infiniband/core/cache.c:680: rdma_find_gid_by_port(struct ib_device *ib_dev,
drivers/infiniband/core/cache.c:693: if (!rdma_is_port_valid(ib_dev, port))
drivers/infiniband/core/cache.c:696: table = rdma_gid_table(ib_dev, port);
drivers/infiniband/core/cache.c:733: struct ib_device *ib_dev, const union ib_gid *gid, u8 port,
drivers/infiniband/core/cache.c:743: if (!rdma_is_port_valid(ib_dev, port))
drivers/infiniband/core/cache.c:746: table = rdma_gid_table(ib_dev, port);
drivers/infiniband/core/cache.c:817: static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/cache.c:829: del_gid(ib_dev, port, table, i);
drivers/infiniband/core/cache.c:836: dispatch_gid_change_event(ib_dev, port);
drivers/infiniband/core/cache.c:839: void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/cache.c:863: __ib_cache_gid_add(ib_dev, port, &gid,
drivers/infiniband/core/cache.c:866: _ib_cache_gid_del(ib_dev, port, &gid,
drivers/infiniband/core/cache.c:872: static void gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/cache.c:879: roce_gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
drivers/infiniband/core/cache.c:887: static void gid_table_release_one(struct ib_device *ib_dev)
drivers/infiniband/core/cache.c:891: rdma_for_each_port (ib_dev, p) {
drivers/infiniband/core/cache.c:892: release_gid_table(ib_dev, ib_dev->port_data[p].cache.gid);
drivers/infiniband/core/cache.c:893: ib_dev->port_data[p].cache.gid = NULL;
drivers/infiniband/core/cache.c:897: static int _gid_table_setup_one(struct ib_device *ib_dev)
drivers/infiniband/core/cache.c:902: rdma_for_each_port (ib_dev, rdma_port) {
drivers/infiniband/core/cache.c:904: ib_dev->port_data[rdma_port].immutable.gid_tbl_len);
drivers/infiniband/core/cache.c:908: gid_table_reserve_default(ib_dev, rdma_port, table);
drivers/infiniband/core/cache.c:909: ib_dev->port_data[rdma_port].cache.gid = table;
drivers/infiniband/core/cache.c:914: gid_table_release_one(ib_dev);
drivers/infiniband/core/cache.c:918: static void gid_table_cleanup_one(struct ib_device *ib_dev)
drivers/infiniband/core/cache.c:922: rdma_for_each_port (ib_dev, p)
drivers/infiniband/core/cache.c:923: cleanup_gid_table_port(ib_dev, p,
drivers/infiniband/core/cache.c:924: ib_dev->port_data[p].cache.gid);
drivers/infiniband/core/cache.c:927: static int gid_table_setup_one(struct ib_device *ib_dev)
drivers/infiniband/core/cache.c:931: err = _gid_table_setup_one(ib_dev);
drivers/infiniband/core/cache.c:936: rdma_roce_rescan_device(ib_dev);
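The cache.c references above all revolve around one pattern: every GID table mutation ends in dispatch_gid_change_event(), which raises IB_EVENT_GID_CHANGE toward registered event handlers. A sketch of that helper, reconstructed from the 124/128 hits above (not a verbatim copy of cache.c):

static void dispatch_gid_change_event(struct ib_device *ib_dev, u8 port)
{
	struct ib_event event;

	/* fill the generic event and let the core fan it out */
	event.device		= ib_dev;
	event.element.port_num	= port;
	event.event		= IB_EVENT_GID_CHANGE;

	ib_dispatch_event(&event);
}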
drivers/infiniband/core/cm.c:1632: struct ib_device *ib_dev = work->port->cm_dev->ib_device;
drivers/infiniband/core/cm.c:1638: ret = ib_get_cached_pkey(ib_dev, port_num, pkey_index, &pkey);
drivers/infiniband/core/cm.c:1640: dev_warn_ratelimited(&ib_dev->dev, "ib_cm: Couldn't retrieve pkey for incoming request (port %d, pkey index %d). %d\n",
drivers/infiniband/core/cma_configfs.c:69: static bool filter_by_name(struct ib_device *ib_dev, void *cookie)
drivers/infiniband/core/cma_configfs.c:71: return !strcmp(dev_name(&ib_dev->dev), cookie);
drivers/infiniband/core/core_priv.h:91: struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
drivers/infiniband/core/core_priv.h:94: void ib_enum_roce_netdev(struct ib_device *ib_dev,
drivers/infiniband/core/core_priv.h:130: void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/core_priv.h:135: int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/core_priv.h:138: int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/core_priv.h:141: int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/core_priv.h:147: unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port);
drivers/infiniband/core/device.c:188: static void free_netdevs(struct ib_device *ib_dev);
drivers/infiniband/core/device.c:768: pdata->ib_dev = device;
drivers/infiniband/core/device.c:1425: static void __ib_unregister_device(struct ib_device *ib_dev)
drivers/infiniband/core/device.c:1434: mutex_lock(&ib_dev->unregistration_lock);
drivers/infiniband/core/device.c:1435: if (!refcount_read(&ib_dev->refcount))
drivers/infiniband/core/device.c:1438: disable_device(ib_dev);
drivers/infiniband/core/device.c:1441: free_netdevs(ib_dev);
drivers/infiniband/core/device.c:1443: ib_device_unregister_sysfs(ib_dev);
drivers/infiniband/core/device.c:1444: device_del(&ib_dev->dev);
drivers/infiniband/core/device.c:1445: ib_device_unregister_rdmacg(ib_dev);
drivers/infiniband/core/device.c:1446: ib_cache_cleanup_one(ib_dev);
drivers/infiniband/core/device.c:1452: if (ib_dev->ops.dealloc_driver) {
drivers/infiniband/core/device.c:1453: WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
drivers/infiniband/core/device.c:1454: ib_dealloc_device(ib_dev);
drivers/infiniband/core/device.c:1457: mutex_unlock(&ib_dev->unregistration_lock);
drivers/infiniband/core/device.c:1474: void ib_unregister_device(struct ib_device *ib_dev)
drivers/infiniband/core/device.c:1476: get_device(&ib_dev->dev);
drivers/infiniband/core/device.c:1477: __ib_unregister_device(ib_dev);
drivers/infiniband/core/device.c:1478: put_device(&ib_dev->dev);
drivers/infiniband/core/device.c:1496: void ib_unregister_device_and_put(struct ib_device *ib_dev)
drivers/infiniband/core/device.c:1498: WARN_ON(!ib_dev->ops.dealloc_driver);
drivers/infiniband/core/device.c:1499: get_device(&ib_dev->dev);
drivers/infiniband/core/device.c:1500: ib_device_put(ib_dev);
drivers/infiniband/core/device.c:1501: __ib_unregister_device(ib_dev);
drivers/infiniband/core/device.c:1502: put_device(&ib_dev->dev);
drivers/infiniband/core/device.c:1522: struct ib_device *ib_dev;
drivers/infiniband/core/device.c:1526: xa_for_each (&devices, index, ib_dev) {
drivers/infiniband/core/device.c:1527: if (ib_dev->ops.driver_id != driver_id)
drivers/infiniband/core/device.c:1530: get_device(&ib_dev->dev);
drivers/infiniband/core/device.c:1533: WARN_ON(!ib_dev->ops.dealloc_driver);
drivers/infiniband/core/device.c:1534: __ib_unregister_device(ib_dev);
drivers/infiniband/core/device.c:1536: put_device(&ib_dev->dev);
drivers/infiniband/core/device.c:1545: struct ib_device *ib_dev =
drivers/infiniband/core/device.c:1548: __ib_unregister_device(ib_dev);
drivers/infiniband/core/device.c:1549: put_device(&ib_dev->dev);
drivers/infiniband/core/device.c:1563: void ib_unregister_device_queued(struct ib_device *ib_dev)
drivers/infiniband/core/device.c:1565: WARN_ON(!refcount_read(&ib_dev->refcount));
drivers/infiniband/core/device.c:1566: WARN_ON(!ib_dev->ops.dealloc_driver);
drivers/infiniband/core/device.c:1567: get_device(&ib_dev->dev);
drivers/infiniband/core/device.c:1568: if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work))
drivers/infiniband/core/device.c:1569: put_device(&ib_dev->dev);
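The __ib_unregister_device() hits above show the teardown order (disable_device, free_netdevs, sysfs/cgroup/cache cleanup) that drivers trigger from their remove paths. A hedged sketch of the usual pairing, where foo_dev/foo_remove are hypothetical names, not a real driver:

#include <rdma/ib_verbs.h>

struct foo_dev {
	struct ib_device ib_dev;	/* embedded, as in the hw drivers below */
};

static void foo_remove(struct foo_dev *fdev)
{
	ib_unregister_device(&fdev->ib_dev);
	/* Only needed when ops.dealloc_driver is not set; otherwise
	 * __ib_unregister_device() calls ib_dealloc_device() itself,
	 * per device.c:1452-1454 above. */
	ib_dealloc_device(&fdev->ib_dev);
}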
drivers/infiniband/core/device.c:2107: int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
drivers/infiniband/core/device.c:2119: ret = alloc_port_data(ib_dev);
drivers/infiniband/core/device.c:2123: if (!rdma_is_port_valid(ib_dev, port))
drivers/infiniband/core/device.c:2126: pdata = &ib_dev->port_data[port];
drivers/infiniband/core/device.c:2148: static void free_netdevs(struct ib_device *ib_dev)
drivers/infiniband/core/device.c:2153: if (!ib_dev->port_data)
drivers/infiniband/core/device.c:2156: rdma_for_each_port (ib_dev, port) {
drivers/infiniband/core/device.c:2157: struct ib_port_data *pdata = &ib_dev->port_data[port];
drivers/infiniband/core/device.c:2181: struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
drivers/infiniband/core/device.c:2187: if (!rdma_is_port_valid(ib_dev, port))
drivers/infiniband/core/device.c:2190: pdata = &ib_dev->port_data[port];
drivers/infiniband/core/device.c:2196: if (ib_dev->ops.get_netdev)
drivers/infiniband/core/device.c:2197: res = ib_dev->ops.get_netdev(ib_dev, port);
drivers/infiniband/core/device.c:2239: cur->ib_dev->ops.driver_id == driver_id) &&
drivers/infiniband/core/device.c:2240: ib_device_try_get(cur->ib_dev)) {
drivers/infiniband/core/device.c:2241: res = cur->ib_dev;
drivers/infiniband/core/device.c:2263: void ib_enum_roce_netdev(struct ib_device *ib_dev,
drivers/infiniband/core/device.c:2271: rdma_for_each_port (ib_dev, port)
drivers/infiniband/core/device.c:2272: if (rdma_protocol_roce(ib_dev, port)) {
drivers/infiniband/core/device.c:2274: ib_device_get_netdev(ib_dev, port);
drivers/infiniband/core/device.c:2276: if (filter(ib_dev, port, idev, filter_cookie))
drivers/infiniband/core/device.c:2277: cb(ib_dev, port, idev, cookie);
drivers/infiniband/core/rdma_core.c:412: !srcu_dereference(ufile->device->ib_dev,
drivers/infiniband/core/rdma_core.c:807: struct ib_device *ib_dev = ucontext->device;
drivers/infiniband/core/rdma_core.c:816: if (ib_dev->ops.disassociate_ucontext)
drivers/infiniband/core/rdma_core.c:817: ib_dev->ops.disassociate_ucontext(ucontext);
drivers/infiniband/core/rdma_core.c:820: ib_rdmacg_uncharge(&ucontext->cg_obj, ib_dev,
drivers/infiniband/core/rdma_core.c:825: ib_dev->ops.dealloc_ucontext(ucontext);
drivers/infiniband/core/roce_gid_mgmt.c:82: unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
drivers/infiniband/core/roce_gid_mgmt.c:87: if (!rdma_protocol_roce(ib_dev, port))
drivers/infiniband/core/roce_gid_mgmt.c:91: if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
drivers/infiniband/core/roce_gid_mgmt.c:98: static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
drivers/infiniband/core/roce_gid_mgmt.c:103: unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
drivers/infiniband/core/roce_gid_mgmt.c:110: ib_cache_gid_add(ib_dev, port,
drivers/infiniband/core/roce_gid_mgmt.c:114: ib_cache_gid_del(ib_dev, port,
drivers/infiniband/core/roce_gid_mgmt.c:147: is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:171: is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:200: is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:226: static bool pass_all_filter(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:232: static bool upper_device_filter(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:263: is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:282: struct ib_device *ib_dev,
drivers/infiniband/core/roce_gid_mgmt.c:293: update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
drivers/infiniband/core/roce_gid_mgmt.c:296: static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
drivers/infiniband/core/roce_gid_mgmt.c:323: gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
drivers/infiniband/core/roce_gid_mgmt.c:325: ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
drivers/infiniband/core/roce_gid_mgmt.c:330: static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
drivers/infiniband/core/roce_gid_mgmt.c:367: update_gid_ip(GID_ADD, ib_dev, port, ndev,
drivers/infiniband/core/roce_gid_mgmt.c:374: static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
drivers/infiniband/core/roce_gid_mgmt.c:414: update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
drivers/infiniband/core/roce_gid_mgmt.c:420: static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:423: enum_netdev_ipv4_ips(ib_dev, port, ndev);
drivers/infiniband/core/roce_gid_mgmt.c:425: enum_netdev_ipv6_ips(ib_dev, port, ndev);
drivers/infiniband/core/roce_gid_mgmt.c:428: static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:431: _add_netdev_ips(ib_dev, port, cookie);
drivers/infiniband/core/roce_gid_mgmt.c:434: static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:437: ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
drivers/infiniband/core/roce_gid_mgmt.c:449: static void del_default_gids(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:455: gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
drivers/infiniband/core/roce_gid_mgmt.c:457: ib_cache_gid_set_default_gid(ib_dev, port, cookie_ndev, gid_type_mask,
drivers/infiniband/core/roce_gid_mgmt.c:461: static void add_default_gids(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:467: gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
drivers/infiniband/core/roce_gid_mgmt.c:468: ib_cache_gid_set_default_gid(ib_dev, port, event_ndev, gid_type_mask,
drivers/infiniband/core/roce_gid_mgmt.c:472: static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
drivers/infiniband/core/roce_gid_mgmt.c:492: if (is_ndev_for_default_gid_filter(ib_dev, port,
drivers/infiniband/core/roce_gid_mgmt.c:494: add_default_gids(ib_dev, port, rdma_ndev, ndev);
drivers/infiniband/core/roce_gid_mgmt.c:496: if (is_eth_port_of_netdev_filter(ib_dev, port,
drivers/infiniband/core/roce_gid_mgmt.c:498: _add_netdev_ips(ib_dev, port, ndev);
drivers/infiniband/core/roce_gid_mgmt.c:510: void rdma_roce_rescan_device(struct ib_device *ib_dev)
drivers/infiniband/core/roce_gid_mgmt.c:512: ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
drivers/infiniband/core/roce_gid_mgmt.c:549: static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:551: void (*handle_netdev)(struct ib_device *ib_dev,
drivers/infiniband/core/roce_gid_mgmt.c:564: handle_netdev(ib_dev, port, ndev);
drivers/infiniband/core/roce_gid_mgmt.c:567: handle_netdev(ib_dev, port, upper_iter->upper);
drivers/infiniband/core/roce_gid_mgmt.c:574: static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:577: ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
drivers/infiniband/core/roce_gid_mgmt.c:580: static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:583: handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
drivers/infiniband/core/roce_gid_mgmt.c:586: static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:589: handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
drivers/infiniband/core/roce_gid_mgmt.c:592: static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
drivers/infiniband/core/roce_gid_mgmt.c:605: bond_delete_netdev_default_gids(ib_dev, port, rdma_ndev,
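roce_gid_mgmt.c builds everything on the ib_enum_roce_netdev() iterator declared in core_priv.h above: a filter callback decides per (device, port, netdev) tuple, and an action callback runs on the matches. A hedged sketch of such a pair, matching the signatures shown above (is_eth_port_of_netdev_filter() and friends); the bar_* names are hypothetical:

static bool bar_filter(struct ib_device *ib_dev, u8 port,
		       struct net_device *rdma_ndev, void *cookie)
{
	/* e.g. match only ports whose lower netdev is the one in cookie */
	return rdma_ndev == cookie;
}

static void bar_cb(struct ib_device *ib_dev, u8 port,
		   struct net_device *rdma_ndev, void *cookie)
{
	_add_netdev_ips(ib_dev, port, cookie);
}

/* ib_enum_roce_netdev(ib_dev, bar_filter, ndev, bar_cb, ndev) then visits
 * every RoCE-capable port, the same way rdma_roce_rescan_device() above
 * does with pass_all_filter. */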
drivers/infiniband/core/user_mad.c:101: struct ib_device *ib_dev;
drivers/infiniband/core/user_mad.c:676: if (!file->port->ib_dev) {
drivers/infiniband/core/user_mad.c:725: agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
drivers/infiniband/core/user_mad.c:778: if (!file->port->ib_dev) {
drivers/infiniband/core/user_mad.c:841: agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
drivers/infiniband/core/user_mad.c:983: if (!port->ib_dev) {
drivers/infiniband/core/user_mad.c:988: if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
drivers/infiniband/core/user_mad.c:1086: if (!rdma_dev_access_netns(port->ib_dev, current->nsproxy->net_ns)) {
drivers/infiniband/core/user_mad.c:1091: ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
drivers/infiniband/core/user_mad.c:1116: if (port->ib_dev)
drivers/infiniband/core/user_mad.c:1117: ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
drivers/infiniband/core/user_mad.c:1194: return sprintf(buf, "%s\n", dev_name(&port->ib_dev->dev));
drivers/infiniband/core/user_mad.c:1283: port->ib_dev = device;
drivers/infiniband/core/user_mad.c:1334: port->ib_dev = NULL;
drivers/infiniband/core/uverbs.h:106: struct ib_device __rcu *ib_dev;
drivers/infiniband/core/uverbs.h:223: struct ib_device *ib_dev);
drivers/infiniband/core/uverbs.h:320: struct ib_device *ib_dev, u8 port_num);
drivers/infiniband/core/uverbs_cmd.c:214: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:222: ib_dev = srcu_dereference(file->device->ib_dev,
drivers/infiniband/core/uverbs_cmd.c:224: if (!ib_dev) {
drivers/infiniband/core/uverbs_cmd.c:234: ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
drivers/infiniband/core/uverbs_cmd.c:238: ucontext = rdma_zalloc_drv_obj(ib_dev, ib_ucontext);
drivers/infiniband/core/uverbs_cmd.c:247: ucontext->device = ib_dev;
drivers/infiniband/core/uverbs_cmd.c:260: filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
drivers/infiniband/core/uverbs_cmd.c:272: ret = ib_dev->ops.alloc_ucontext(ucontext, &attrs->driver_udata);
drivers/infiniband/core/uverbs_cmd.c:301: ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
drivers/infiniband/core/uverbs_cmd.c:312: struct ib_device *ib_dev = ucontext->device;
drivers/infiniband/core/uverbs_cmd.c:315: resp->node_guid = ib_dev->node_guid;
drivers/infiniband/core/uverbs_cmd.c:353: resp->phys_port_cnt = ib_dev->phys_port_cnt;
drivers/infiniband/core/uverbs_cmd.c:384: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:389: ib_dev = ucontext->device;
drivers/infiniband/core/uverbs_cmd.c:395: ret = ib_query_port(ib_dev, cmd.port_num, &attr);
drivers/infiniband/core/uverbs_cmd.c:400: copy_port_attr_to_resp(&attr, &resp, ib_dev, cmd.port_num);
drivers/infiniband/core/uverbs_cmd.c:412: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:418: uobj = uobj_alloc(UVERBS_OBJECT_PD, attrs, &ib_dev);
drivers/infiniband/core/uverbs_cmd.c:422: pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);
drivers/infiniband/core/uverbs_cmd.c:428: pd->device = ib_dev;
drivers/infiniband/core/uverbs_cmd.c:434: ret = ib_dev->ops.alloc_pd(pd, &attrs->driver_udata);
drivers/infiniband/core/uverbs_cmd.c:567: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:598: &ib_dev);
drivers/infiniband/core/uverbs_cmd.c:605: xrcd = ib_dev->ops.alloc_xrcd(ib_dev, &attrs->driver_udata);
drivers/infiniband/core/uverbs_cmd.c:612: xrcd->device = ib_dev;
drivers/infiniband/core/uverbs_cmd.c:712: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:725: uobj = uobj_alloc(UVERBS_OBJECT_MR, attrs, &ib_dev);
drivers/infiniband/core/uverbs_cmd.c:886: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:892: uobj = uobj_alloc(UVERBS_OBJECT_MW, attrs, &ib_dev);
drivers/infiniband/core/uverbs_cmd.c:958: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:965: uobj = uobj_alloc(UVERBS_OBJECT_COMP_CHANNEL, attrs, &ib_dev);
drivers/infiniband/core/uverbs_cmd.c:993: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:999: &ib_dev);
drivers/infiniband/core/uverbs_cmd.c:1021: cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
drivers/infiniband/core/uverbs_cmd.c:1026: cq->device = ib_dev;
drivers/infiniband/core/uverbs_cmd.c:1033: ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);
drivers/infiniband/core/uverbs_cmd.c:1139: static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
drivers/infiniband/core/uverbs_cmd.c:1154: if (rdma_cap_opa_ah(ib_dev, wc->port_num))
drivers/infiniband/core/uverbs_cmd.c:1285: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:1291: &ib_dev);
drivers/infiniband/core/uverbs_cmd.c:1560: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:1567: &ib_dev);
drivers/infiniband/core/uverbs_cmd.c:2400: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:2406: uobj = uobj_alloc(UVERBS_OBJECT_AH, attrs, &ib_dev);
drivers/infiniband/core/uverbs_cmd.c:2410: if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num)) {
drivers/infiniband/core/uverbs_cmd.c:2421: attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
drivers/infiniband/core/uverbs_cmd.c:2900: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:2910: &ib_dev);
drivers/infiniband/core/uverbs_cmd.c:3056: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:3101: uobj = uobj_alloc(UVERBS_OBJECT_RWQ_IND_TBL, attrs, &ib_dev);
drivers/infiniband/core/uverbs_cmd.c:3110: rwq_ind_tbl = ib_dev->ops.create_rwq_ind_table(ib_dev, &init_attr,
drivers/infiniband/core/uverbs_cmd.c:3122: rwq_ind_tbl->device = ib_dev;
drivers/infiniband/core/uverbs_cmd.c:3187: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:3237: uobj = uobj_alloc(UVERBS_OBJECT_FLOW, attrs, &ib_dev);
drivers/infiniband/core/uverbs_cmd.c:3363: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:3366: &ib_dev);
drivers/infiniband/core/uverbs_cmd.c:3416: srq = rdma_zalloc_drv_obj(ib_dev, ib_srq);
drivers/infiniband/core/uverbs_cmd.c:3616: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_cmd.c:3622: ib_dev = ucontext->device;
drivers/infiniband/core/uverbs_cmd.c:3634: err = ib_dev->ops.query_device(ib_dev, &attr, &attrs->driver_udata);
drivers/infiniband/core/uverbs_main.c:94: if (!srcu_dereference(ufile->device->ib_dev,
drivers/infiniband/core/uverbs_main.c:195: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_main.c:201: ib_dev = srcu_dereference(file->device->ib_dev,
drivers/infiniband/core/uverbs_main.c:203: if (ib_dev && !ib_dev->ops.disassociate_ucontext)
drivers/infiniband/core/uverbs_main.c:204: module_put(ib_dev->ops.owner);
drivers/infiniband/core/uverbs_main.c:560: struct ib_device *ib_dev)
drivers/infiniband/core/uverbs_main.c:587: ib_dev,
drivers/infiniband/core/uverbs_main.c:1041: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_main.c:1053: ib_dev = srcu_dereference(dev->ib_dev,
drivers/infiniband/core/uverbs_main.c:1055: if (!ib_dev) {
drivers/infiniband/core/uverbs_main.c:1060: if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) {
drivers/infiniband/core/uverbs_main.c:1068: module_dependent = !(ib_dev->ops.disassociate_ucontext);
drivers/infiniband/core/uverbs_main.c:1071: if (!try_module_get(ib_dev->ops.owner)) {
drivers/infiniband/core/uverbs_main.c:1106: module_put(ib_dev->ops.owner);
drivers/infiniband/core/uverbs_main.c:1197: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_main.c:1200: ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
drivers/infiniband/core/uverbs_main.c:1201: if (ib_dev)
drivers/infiniband/core/uverbs_main.c:1202: ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
drivers/infiniband/core/uverbs_main.c:1216: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_main.c:1219: ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
drivers/infiniband/core/uverbs_main.c:1220: if (ib_dev)
drivers/infiniband/core/uverbs_main.c:1221: ret = sprintf(buf, "%u\n", ib_dev->ops.uverbs_abi_ver);
drivers/infiniband/core/uverbs_main.c:1287: rcu_assign_pointer(uverbs_dev->ib_dev, device);
drivers/infiniband/core/uverbs_main.c:1328: struct ib_device *ib_dev)
drivers/infiniband/core/uverbs_main.c:1338: event.device = ib_dev;
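The uverbs_main.c hits show the hot-unplug design: ib_uverbs_device holds ib_dev as an __rcu pointer (uverbs.h:106 above), published with rcu_assign_pointer() and only read inside an SRCU section, because disassociation can clear it at any time. A hedged sketch of the read side, modeled on the 1053/1200/1219 references:

static ssize_t show_ibdev_name(struct ib_uverbs_device *dev, char *buf)
{
	struct ib_device *ib_dev;
	ssize_t ret = -ENODEV;
	int srcu_key;

	srcu_key = srcu_read_lock(&dev->disassociate_srcu);
	ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
	if (ib_dev)	/* may be NULL after disassociation */
		ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
	srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

	return ret;
}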
drivers/infiniband/core/uverbs_std_types_counters.c:57: struct ib_device *ib_dev = attrs->context->device;
drivers/infiniband/core/uverbs_std_types_counters.c:66: if (!ib_dev->ops.create_counters)
drivers/infiniband/core/uverbs_std_types_counters.c:69: counters = ib_dev->ops.create_counters(ib_dev, attrs);
drivers/infiniband/core/uverbs_std_types_counters.c:75: counters->device = ib_dev;
drivers/infiniband/core/uverbs_std_types_cq.c:67: struct ib_device *ib_dev = attrs->context->device;
drivers/infiniband/core/uverbs_std_types_cq.c:75: if (!ib_dev->ops.create_cq || !ib_dev->ops.destroy_cq)
drivers/infiniband/core/uverbs_std_types_cq.c:114: cq = rdma_zalloc_drv_obj(ib_dev, ib_cq);
drivers/infiniband/core/uverbs_std_types_cq.c:120: cq->device = ib_dev;
drivers/infiniband/core/uverbs_std_types_cq.c:128: ret = ib_dev->ops.create_cq(cq, &attr, &attrs->driver_udata);
drivers/infiniband/core/uverbs_std_types_device.c:134: struct ib_device *ib_dev, u8 port_num)
drivers/infiniband/core/uverbs_std_types_device.c:146: if (rdma_is_grh_required(ib_dev, port_num))
drivers/infiniband/core/uverbs_std_types_device.c:149: if (rdma_cap_opa_ah(ib_dev, port_num)) {
drivers/infiniband/core/uverbs_std_types_device.c:165: resp->link_layer = rdma_port_get_link_layer(ib_dev, port_num);
drivers/infiniband/core/uverbs_std_types_device.c:171: struct ib_device *ib_dev;
drivers/infiniband/core/uverbs_std_types_device.c:181: ib_dev = ucontext->device;
drivers/infiniband/core/uverbs_std_types_device.c:184: if (!ib_dev->ops.query_port)
drivers/infiniband/core/uverbs_std_types_device.c:192: ret = ib_query_port(ib_dev, port_num, &attr);
drivers/infiniband/core/uverbs_std_types_device.c:196: copy_port_attr_to_resp(&attr, &resp.legacy_resp, ib_dev, port_num);
drivers/infiniband/core/uverbs_std_types_dm.c:58: struct ib_device *ib_dev = attrs->context->device;
drivers/infiniband/core/uverbs_std_types_dm.c:62: if (!ib_dev->ops.alloc_dm)
drivers/infiniband/core/uverbs_std_types_dm.c:75: dm = ib_dev->ops.alloc_dm(ib_dev, attrs->context, &attr, attrs);
drivers/infiniband/core/uverbs_std_types_dm.c:79: dm->device = ib_dev;
drivers/infiniband/core/uverbs_std_types_flow_action.c:227: static int parse_flow_action_esp(struct ib_device *ib_dev,
drivers/infiniband/core/uverbs_std_types_flow_action.c:313: struct ib_device *ib_dev = attrs->context->device;
drivers/infiniband/core/uverbs_std_types_flow_action.c:318: if (!ib_dev->ops.create_flow_action_esp)
drivers/infiniband/core/uverbs_std_types_flow_action.c:321: ret = parse_flow_action_esp(ib_dev, attrs, &esp_attr, false);
drivers/infiniband/core/uverbs_std_types_flow_action.c:326: action = ib_dev->ops.create_flow_action_esp(ib_dev, &esp_attr.hdr,
drivers/infiniband/core/uverbs_std_types_flow_action.c:331: uverbs_flow_action_fill_action(action, uobj, ib_dev,
drivers/infiniband/core/uverbs_std_types_mr.c:51: struct ib_device *ib_dev = pd->device;
drivers/infiniband/core/uverbs_std_types_mr.c:58: if (!ib_dev->ops.advise_mr)
drivers/infiniband/core/uverbs_std_types_mr.c:77: return ib_dev->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
drivers/infiniband/core/uverbs_std_types_mr.c:91: struct ib_device *ib_dev = pd->device;
drivers/infiniband/core/uverbs_std_types_mr.c:96: if (!ib_dev->ops.reg_dm_mr)
drivers/infiniband/core/uverbs_uapi.c:686: rcu_assign_pointer(uverbs_dev->ib_dev, NULL);
drivers/infiniband/hw/cxgb3/iwch_provider.c:1251: static int set_netdevs(struct ib_device *ib_dev, struct cxio_rdev *rdev)
drivers/infiniband/hw/cxgb3/iwch_provider.c:1257: ret = ib_device_set_netdev(ib_dev, rdev->port_info.lldevs[i],
drivers/infiniband/hw/cxgb4/provider.c:521: static int set_netdevs(struct ib_device *ib_dev, struct c4iw_rdev *rdev)
drivers/infiniband/hw/cxgb4/provider.c:527: ret = ib_device_set_netdev(ib_dev, rdev->lldi.ports[i],
drivers/infiniband/hw/hns/hns_roce_device.h:997: struct ib_device ib_dev;
drivers/infiniband/hw/hns/hns_roce_device.h:1048: static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
drivers/infiniband/hw/hns/hns_roce_device.h:1050: return container_of(ib_dev, struct hns_roce_dev, ib_dev);
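hns_roce_device.h:997-1050 above is the embedding idiom every hardware driver in this index repeats (see also mlx4_ib.h, mlx5_ib.h, mthca_dev.h, usnic_ib.h below): the driver-private struct embeds struct ib_device by value, and an accessor recovers the outer struct with container_of(). A standalone model of the idiom that compiles and runs in userspace; the struct bodies are stand-ins, not the kernel definitions:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_device { char name[16]; };

struct hns_roce_dev {
	int		 caps;
	struct ib_device ib_dev;	/* embedded, as in hns_roce_device.h:997 */
};

static struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
{
	return container_of(ib_dev, struct hns_roce_dev, ib_dev);
}

int main(void)
{
	struct hns_roce_dev hr = { .caps = 64, .ib_dev = { "hns_0" } };
	struct ib_device *ibdev = &hr.ib_dev;	/* what the core hands back */

	/* subtract offsetof(struct hns_roce_dev, ib_dev) to get hr back */
	printf("%s caps=%d\n", ibdev->name, to_hr_dev(ibdev)->caps);
	return 0;
}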
drivers/infiniband/hw/hns/hns_roce_hw_v1.c:727: ibdev = &hr_dev->ib_dev;
drivers/infiniband/hw/hns/hns_roce_hw_v1.c:738: free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
drivers/infiniband/hw/hns/hns_roce_hw_v1.c:757: free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
drivers/infiniband/hw/hns/hns_roce_hw_v1.c:807: hr_qp->ibqp.device = &hr_dev->ib_dev;
drivers/infiniband/hw/hns/hns_roce_hw_v1.c:947: hr_dev = to_hr_dev(lp_qp_work->ib_dev);
drivers/infiniband/hw/hns/hns_roce_hw_v1.c:979: lp_qp_work->ib_dev = &(hr_dev->ib_dev);
drivers/infiniband/hw/hns/hns_roce_hw_v1.c:1046: hr_dev = to_hr_dev(mr_work->ib_dev);
drivers/infiniband/hw/hns/hns_roce_hw_v1.c:1127: mr_work->ib_dev = &(hr_dev->ib_dev);
drivers/infiniband/hw/hns/hns_roce_hw_v1.c:4553: (u8 *)&hr_dev->ib_dev.node_guid,
drivers/infiniband/hw/hns/hns_roce_hw_v1.c:4641: hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
drivers/infiniband/hw/hns/hns_roce_hw_v1.c:4680: ib_dealloc_device(&hr_dev->ib_dev);
drivers/infiniband/hw/hns/hns_roce_hw_v1.c:4695: ib_dealloc_device(&hr_dev->ib_dev);
drivers/infiniband/hw/hns/hns_roce_hw_v1.h:1057: struct ib_device *ib_dev;
drivers/infiniband/hw/hns/hns_roce_hw_v1.h:1067: struct ib_device *ib_dev;
drivers/infiniband/hw/hns/hns_roce_hw_v1.h:1075: struct ib_device *ib_dev;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c:1642: hr_dev->sys_image_guid = be64_to_cpu(hr_dev->ib_dev.node_guid);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c:4075: is_roce_protocol = rdma_cap_eth_ah(&hr_dev->ib_dev, ib_port) &&
drivers/infiniband/hw/hns/hns_roce_hw_v2.c:4654: struct ib_device *ibdev = &hr_dev->ib_dev;
drivers/infiniband/hw/hns/hns_roce_hw_v2.c:4729: ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
drivers/infiniband/hw/hns/hns_roce_hw_v2.c:6398: addrconf_addr_eui48((u8 *)&hr_dev->ib_dev.node_guid,
drivers/infiniband/hw/hns/hns_roce_hw_v2.c:6420: hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c:6453: ib_dealloc_device(&hr_dev->ib_dev);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c:6469: ib_dealloc_device(&hr_dev->ib_dev);
drivers/infiniband/hw/hns/hns_roce_hw_v2.c:6547: event.device = &hr_dev->ib_dev;
drivers/infiniband/hw/hns/hns_roce_main.c:180: static int hns_roce_query_device(struct ib_device *ib_dev,
drivers/infiniband/hw/hns/hns_roce_main.c:184: struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
drivers/infiniband/hw/hns/hns_roce_main.c:226: static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
drivers/infiniband/hw/hns/hns_roce_main.c:229: struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
drivers/infiniband/hw/hns/hns_roce_main.c:279: static int hns_roce_query_pkey(struct ib_device *ib_dev, u8 port, u16 index,
drivers/infiniband/hw/hns/hns_roce_main.c:287: static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
drivers/infiniband/hw/hns/hns_roce_main.c:296: spin_lock_irqsave(&to_hr_dev(ib_dev)->sm_lock, flags);
drivers/infiniband/hw/hns/hns_roce_main.c:297: memcpy(ib_dev->node_desc, props->node_desc, NODE_DESC_SIZE);
drivers/infiniband/hw/hns/hns_roce_main.c:298: spin_unlock_irqrestore(&to_hr_dev(ib_dev)->sm_lock, flags);
drivers/infiniband/hw/hns/hns_roce_main.c:304: static int hns_roce_modify_port(struct ib_device *ib_dev, u8 port_num, int mask,
drivers/infiniband/hw/hns/hns_roce_main.c:382: static int hns_roce_port_immutable(struct ib_device *ib_dev, u8 port_num,
drivers/infiniband/hw/hns/hns_roce_main.c:388: ret = ib_query_port(ib_dev, port_num, &attr);
drivers/infiniband/hw/hns/hns_roce_main.c:397: if (to_hr_dev(ib_dev)->caps.flags & HNS_ROCE_CAP_FLAG_ROCE_V1_V2)
drivers/infiniband/hw/hns/hns_roce_main.c:413: ib_unregister_device(&hr_dev->ib_dev);
drivers/infiniband/hw/hns/hns_roce_main.c:480: struct ib_device *ib_dev = NULL;
drivers/infiniband/hw/hns/hns_roce_main.c:487: ib_dev = &hr_dev->ib_dev;
drivers/infiniband/hw/hns/hns_roce_main.c:489: ib_dev->node_type = RDMA_NODE_IB_CA;
drivers/infiniband/hw/hns/hns_roce_main.c:490: ib_dev->dev.parent = dev;
drivers/infiniband/hw/hns/hns_roce_main.c:492: ib_dev->phys_port_cnt = hr_dev->caps.num_ports;
drivers/infiniband/hw/hns/hns_roce_main.c:493: ib_dev->local_dma_lkey = hr_dev->caps.reserved_lkey;
drivers/infiniband/hw/hns/hns_roce_main.c:494: ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;
drivers/infiniband/hw/hns/hns_roce_main.c:495: ib_dev->uverbs_cmd_mask =
drivers/infiniband/hw/hns/hns_roce_main.c:511: ib_dev->uverbs_ex_cmd_mask |=
drivers/infiniband/hw/hns/hns_roce_main.c:515: ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
drivers/infiniband/hw/hns/hns_roce_main.c:516: ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
drivers/infiniband/hw/hns/hns_roce_main.c:521: ib_dev->uverbs_cmd_mask |=
drivers/infiniband/hw/hns/hns_roce_main.c:524: ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
drivers/infiniband/hw/hns/hns_roce_main.c:529: ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);
drivers/infiniband/hw/hns/hns_roce_main.c:533: ib_dev->uverbs_cmd_mask |=
drivers/infiniband/hw/hns/hns_roce_main.c:539: ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
drivers/infiniband/hw/hns/hns_roce_main.c:540: ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
drivers/infiniband/hw/hns/hns_roce_main.c:543: ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
drivers/infiniband/hw/hns/hns_roce_main.c:544: ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
drivers/infiniband/hw/hns/hns_roce_main.c:549: ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
drivers/infiniband/hw/hns/hns_roce_main.c:554: ret = ib_register_device(ib_dev, "hns_%d");
drivers/infiniband/hw/hns/hns_roce_main.c:577: ib_unregister_device(ib_dev);
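hns_roce_main.c:480-577 reads like a registration checklist: fill the generic ib_device fields, layer the ib_device_ops tables, bind the netdevs, then register. A condensed, hedged sketch of that sequence (error handling trimmed; not the literal driver code):

static int hns_roce_register_device_sketch(struct hns_roce_dev *hr_dev)
{
	struct ib_device *ib_dev = &hr_dev->ib_dev;
	int ret;

	ib_dev->node_type	 = RDMA_NODE_IB_CA;
	ib_dev->phys_port_cnt	 = hr_dev->caps.num_ports;
	ib_dev->local_dma_lkey	 = hr_dev->caps.reserved_lkey;
	ib_dev->num_comp_vectors = hr_dev->caps.num_comp_vectors;

	/* ib_set_device_ops() does not overwrite ops already set, so
	 * several tables (hw-specific and generic) can be layered. */
	ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
	ib_set_device_ops(ib_dev, &hns_roce_dev_ops);

	ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[0], 1);
	if (ret)
		return ret;

	return ib_register_device(ib_dev, "hns_%d");
}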
drivers/infiniband/hw/hns/hns_roce_pd.c:62: struct ib_device *ib_dev = ibpd->device;
drivers/infiniband/hw/hns/hns_roce_pd.c:63: struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
drivers/infiniband/hw/hns/hns_roce_pd.c:68: ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
drivers/infiniband/hw/hns/hns_roce_pd.c:78: hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
drivers/infiniband/hw/hns/hns_roce_qp.c:337: ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n");
drivers/infiniband/hw/hns/hns_roce_qp.c:342: ibdev_err(&hr_dev->ib_dev, "SQ sge error! max_send_sge=%d\n",
drivers/infiniband/hw/hns/hns_roce_qp.c:366: ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n");
drivers/infiniband/hw/hns/hns_roce_qp.c:1021: struct ib_device *ibdev = &hr_dev->ib_dev;
drivers/infiniband/hw/hns/hns_roce_qp.c:1118: ibdev_err(&hr_dev->ib_dev,
drivers/infiniband/hw/hns/hns_roce_qp.c:1136: ibdev_err(&hr_dev->ib_dev,
drivers/infiniband/hw/hns/hns_roce_qp.c:1145: ibdev_err(&hr_dev->ib_dev,
drivers/infiniband/hw/hns/hns_roce_qp.c:1154: ibdev_err(&hr_dev->ib_dev,
drivers/infiniband/hw/hns/hns_roce_qp.c:1162: ibdev_err(&hr_dev->ib_dev,
drivers/infiniband/hw/hns/hns_roce_qp.c:1196: ibdev_warn(&hr_dev->ib_dev,
drivers/infiniband/hw/hns/hns_roce_qp.c:1204: ibdev_err(&hr_dev->ib_dev, "ib_modify_qp_is_ok failed\n");
drivers/infiniband/hw/hns/hns_roce_qp.c:1215: ibdev_err(&hr_dev->ib_dev,
drivers/infiniband/hw/mlx4/alias_GUID.c:357: mlx4_ib_warn(&dev->ib_dev,
drivers/infiniband/hw/mlx4/alias_GUID.c:371: mlx4_ib_warn(&dev->ib_dev, "%s: Failed to set"
drivers/infiniband/hw/mlx4/alias_GUID.c:769: set_guid_rec(&dev->ib_dev, rec);
drivers/infiniband/hw/mlx4/alias_GUID.c:852: if (dev->ib_dev.ops.query_gid(&dev->ib_dev, i, 0, &gid)) {
drivers/infiniband/hw/mlx4/cm.c:179: found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
drivers/infiniband/hw/mlx4/mad.c:199: ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num);
drivers/infiniband/hw/mlx4/mad.c:453: return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);
drivers/infiniband/hw/mlx4/mad.c:463: ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
drivers/infiniband/hw/mlx4/mad.c:540: ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
drivers/infiniband/hw/mlx4/mad.c:559: attr.type = rdma_ah_find_type(&dev->ib_dev, port);
drivers/infiniband/hw/mlx4/mad.c:589: ib_dma_sync_single_for_cpu(&dev->ib_dev,
drivers/infiniband/hw/mlx4/mad.c:631: ib_dma_sync_single_for_device(&dev->ib_dev,
drivers/infiniband/hw/mlx4/mad.c:1039: ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
drivers/infiniband/hw/mlx4/mad.c:1042: agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
drivers/infiniband/hw/mlx4/mad.c:1167: mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
drivers/infiniband/hw/mlx4/mad.c:1188: struct mlx4_ib_dev *dev = ew->ib_dev;
drivers/infiniband/hw/mlx4/mad.c:1218: err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
drivers/infiniband/hw/mlx4/mad.c:1292: event.device = &dev->ib_dev;
drivers/infiniband/hw/mlx4/mad.c:1303: struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
drivers/infiniband/hw/mlx4/mad.c:1331: ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
drivers/infiniband/hw/mlx4/mad.c:1423: ib_dma_sync_single_for_cpu(&dev->ib_dev,
drivers/infiniband/hw/mlx4/mad.c:1430: ib_dma_sync_single_for_device(&dev->ib_dev,
drivers/infiniband/hw/mlx4/mad.c:1466: if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
drivers/infiniband/hw/mlx4/mad.c:1475: if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
drivers/infiniband/hw/mlx4/mad.c:1483: struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
drivers/infiniband/hw/mlx4/mad.c:1501: mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
drivers/infiniband/hw/mlx4/mad.c:1506: mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: "
drivers/infiniband/hw/mlx4/mad.c:1512: ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
drivers/infiniband/hw/mlx4/mad.c:1525: mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d "
drivers/infiniband/hw/mlx4/mad.c:1544: if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
drivers/infiniband/hw/mlx4/mad.c:1549: if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
drivers/infiniband/hw/mlx4/mad.c:1561: mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d "
drivers/infiniband/hw/mlx4/mad.c:1570: ah.ibah.device = ctx->ib_dev;
drivers/infiniband/hw/mlx4/mad.c:1577: ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);
drivers/infiniband/hw/mlx4/mad.c:1639: tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
drivers/infiniband/hw/mlx4/mad.c:1643: if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
drivers/infiniband/hw/mlx4/mad.c:1655: ib_dma_map_single(ctx->ib_dev,
drivers/infiniband/hw/mlx4/mad.c:1659: if (ib_dma_mapping_error(ctx->ib_dev,
drivers/infiniband/hw/mlx4/mad.c:1676: ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
drivers/infiniband/hw/mlx4/mad.c:1684: ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
drivers/infiniband/hw/mlx4/mad.c:1716: ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
drivers/infiniband/hw/mlx4/mad.c:1722: ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
drivers/infiniband/hw/mlx4/mad.c:1847: ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
drivers/infiniband/hw/mlx4/mad.c:1852: to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
drivers/infiniband/hw/mlx4/mad.c:1927: mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
drivers/infiniband/hw/mlx4/mad.c:1963: ctx->ib_dev = &dev->ib_dev;
drivers/infiniband/hw/mlx4/mad.c:2012: ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
drivers/infiniband/hw/mlx4/mad.c:2020: ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
drivers/infiniband/hw/mlx4/mad.c:2131: ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
drivers/infiniband/hw/mlx4/mad.c:2136: ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
drivers/infiniband/hw/mlx4/mad.c:2167: ctx->ib_dev = &dev->ib_dev;
drivers/infiniband/hw/mlx4/mad.c:2249: struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
drivers/infiniband/hw/mlx4/mad.c:2292: mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");
drivers/infiniband/hw/mlx4/mad.c:2295: mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
drivers/infiniband/hw/mlx4/mad.c:2301: mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
drivers/infiniband/hw/mlx4/mad.c:2308: mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
drivers/infiniband/hw/mlx4/mad.c:2313: mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
drivers/infiniband/hw/mlx4/mad.c:2317: mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
drivers/infiniband/hw/mlx4/mad.c:2321: err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
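The mlx4/mad.c tunnel-QP references trace the full ib_dma_* lifecycle for each ring buffer: map once at setup, sync around every CPU access, unmap at teardown. A hedged sketch of that lifecycle using only the helpers that appear above (the *_ring_buf names are hypothetical):

static int map_ring_buf(struct ib_device *ib_dev, void *buf, size_t len,
			u64 *map)
{
	*map = ib_dma_map_single(ib_dev, buf, len, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ib_dev, *map))
		return -ENOMEM;
	return 0;
}

static void recv_ring_buf(struct ib_device *ib_dev, u64 map, size_t len)
{
	/* give the CPU a coherent view before reading the MAD ... */
	ib_dma_sync_single_for_cpu(ib_dev, map, len, DMA_FROM_DEVICE);
	/* ... parse/copy the payload here ... */
	/* ... then hand the buffer back to the device */
	ib_dma_sync_single_for_device(ib_dev, map, len, DMA_FROM_DEVICE);
}

static void unmap_ring_buf(struct ib_device *ib_dev, u64 map, size_t len)
{
	ib_dma_unmap_single(ib_dev, map, len, DMA_FROM_DEVICE);
}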
drivers/infiniband/hw/mlx4/main.c:411: if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
drivers/infiniband/hw/mlx4/main.c:948: err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
drivers/infiniband/hw/mlx4/main.c:2018: memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
drivers/infiniband/hw/mlx4/main.c:2027: memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
drivers/infiniband/hw/mlx4/main.c:2039: rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
drivers/infiniband/hw/mlx4/main.c:2048: rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
drivers/infiniband/hw/mlx4/main.c:2057: rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);
drivers/infiniband/hw/mlx4/main.c:2248: ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
drivers/infiniband/hw/mlx4/main.c:2358: if (ib_get_cached_port_state(&ibdev->ib_dev, port,
drivers/infiniband/hw/mlx4/main.c:2372: ibev.device = &ibdev->ib_dev;
drivers/infiniband/hw/mlx4/main.c:2462: ibdev->ib_dev.num_comp_vectors = eq;
drivers/infiniband/hw/mlx4/main.c:2468: int total_eqs = ibdev->ib_dev.num_comp_vectors;
drivers/infiniband/hw/mlx4/main.c:2475: ibdev->ib_dev.num_comp_vectors = 0;
drivers/infiniband/hw/mlx4/main.c:2519: container_of(device, struct mlx4_ib_dev, ib_dev);
drivers/infiniband/hw/mlx4/main.c:2641: ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev);
drivers/infiniband/hw/mlx4/main.c:2665: ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
drivers/infiniband/hw/mlx4/main.c:2666: ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
drivers/infiniband/hw/mlx4/main.c:2668: ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ?
drivers/infiniband/hw/mlx4/main.c:2670: ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
drivers/infiniband/hw/mlx4/main.c:2671: ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev;
drivers/infiniband/hw/mlx4/main.c:2673: ibdev->ib_dev.uverbs_cmd_mask =
drivers/infiniband/hw/mlx4/main.c:2699: ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
drivers/infiniband/hw/mlx4/main.c:2700: ibdev->ib_dev.uverbs_ex_cmd_mask |=
drivers/infiniband/hw/mlx4/main.c:2707: ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
drivers/infiniband/hw/mlx4/main.c:2709: (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
drivers/infiniband/hw/mlx4/main.c:2711: ibdev->ib_dev.uverbs_ex_cmd_mask |=
drivers/infiniband/hw/mlx4/main.c:2717: ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
drivers/infiniband/hw/mlx4/main.c:2721: ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fmr_ops);
drivers/infiniband/hw/mlx4/main.c:2725: ibdev->ib_dev.uverbs_cmd_mask |=
drivers/infiniband/hw/mlx4/main.c:2728: ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
drivers/infiniband/hw/mlx4/main.c:2732: ibdev->ib_dev.uverbs_cmd_mask |=
drivers/infiniband/hw/mlx4/main.c:2735: ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
drivers/infiniband/hw/mlx4/main.c:2740: ibdev->ib_dev.uverbs_ex_cmd_mask |=
drivers/infiniband/hw/mlx4/main.c:2743: ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
drivers/infiniband/hw/mlx4/main.c:2747: ibdev->ib_dev.ops.uverbs_abi_ver =
drivers/infiniband/hw/mlx4/main.c:2768: if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
drivers/infiniband/hw/mlx4/main.c:2859: rdma_set_device_sysfs_group(&ibdev->ib_dev, &mlx4_attr_group);
drivers/infiniband/hw/mlx4/main.c:2860: if (ib_register_device(&ibdev->ib_dev, "mlx4_%d"))
drivers/infiniband/hw/mlx4/main.c:2886: &ibdev->ib_dev);
drivers/infiniband/hw/mlx4/main.c:2916: ib_unregister_device(&ibdev->ib_dev);
drivers/infiniband/hw/mlx4/main.c:2942: ib_dealloc_device(&ibdev->ib_dev);
drivers/infiniband/hw/mlx4/main.c:3032: ib_unregister_device(&ibdev->ib_dev);
drivers/infiniband/hw/mlx4/main.c:3050: ib_dealloc_device(&ibdev->ib_dev);
drivers/infiniband/hw/mlx4/main.c:3168: struct mlx4_ib_dev *ibdev = ew->ib_dev;
drivers/infiniband/hw/mlx4/main.c:3192: ibev.device = &ibdev->ib_dev;
drivers/infiniband/hw/mlx4/main.c:3205: err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
drivers/infiniband/hw/mlx4/main.c:3217: struct mlx4_ib_dev *mdev = ew->ib_dev;
drivers/infiniband/hw/mlx4/main.c:3234: ew->ib_dev = ibdev;
drivers/infiniband/hw/mlx4/main.c:3255: ew->ib_dev = ibdev;
drivers/infiniband/hw/mlx4/main.c:3270: rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
drivers/infiniband/hw/mlx4/main.c:3300: ew->ib_dev = ibdev;
drivers/infiniband/hw/mlx4/main.c:3315: if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
drivers/infiniband/hw/mlx4/main.c:3329: if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
drivers/infiniband/hw/mlx4/mcg.c:242: if (ib_find_cached_pkey(&dev->ib_dev, ctx->port, IB_DEFAULT_PKEY_FULL, &wc.pkey_index))
drivers/infiniband/hw/mlx4/mlx4_ib.h:457: struct ib_device *ib_dev;
drivers/infiniband/hw/mlx4/mlx4_ib.h:466: struct ib_device *ib_dev;
drivers/infiniband/hw/mlx4/mlx4_ib.h:583: struct ib_device ib_dev;
drivers/infiniband/hw/mlx4/mlx4_ib.h:624: struct mlx4_ib_dev *ib_dev;
drivers/infiniband/hw/mlx4/mlx4_ib.h:643: return container_of(ibdev, struct mlx4_ib_dev, ib_dev);
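The rdma_device_to_drv_device() hits in mlx4/main.c:2039-2057 (and mthca_provider.c below) all sit in sysfs show callbacks: the attribute receives a struct device and resolves it back to the driver-private struct through the embedded ib_dev member, then the attributes are published via rdma_set_device_sysfs_group() as at main.c:2859. A hedged sketch of the shape of such a callback; the attribute body is illustrative, not the literal mlx4 code:

static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	struct mlx4_ib_dev *dev =
		rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev);

	/* format some driver-private detail for sysfs */
	return sprintf(buf, "MT%d\n", dev->dev->persist->pdev->device);
}
static DEVICE_ATTR_RO(hca_type);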
drivers/infiniband/hw/mlx4/qp.c:1505: free_proxy_bufs(&dev->ib_dev, qp);
drivers/infiniband/hw/mlx4/qp.c:1658: int is_eth = rdma_cap_eth_ah(&dev->ib_dev, init_attr->port_num);
drivers/infiniband/hw/mlx4/qp.c:1979: if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) !=
drivers/infiniband/hw/mlx4/qp.c:2178: rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
drivers/infiniband/hw/mlx4/qp.c:2320: rdma_cap_eth_ah(&dev->ib_dev, port_num) &&
drivers/infiniband/hw/mlx4/qp.c:2479: if (rdma_port_get_link_layer(&dev->ib_dev, qp->port) ==
drivers/infiniband/hw/mlx4/qp.c:2511: &dev->ib_dev, qp->port) ==
drivers/infiniband/hw/mlx4/qp.c:2787: (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
drivers/infiniband/hw/mlx4/qp.c:2885: struct ib_device *ib_dev = &mdev->ib_dev;
drivers/infiniband/hw/mlx4/qp.c:2929: err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
drivers/infiniband/hw/mlx4/qp.c:3035: struct ib_device *ib_dev = sqp->qp.ibqp.device;
drivers/infiniband/hw/mlx4/qp.c:3036: struct mlx4_ib_dev *ibdev = to_mdev(ib_dev);
drivers/infiniband/hw/mlx4/qp.c:3063: if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
drivers/infiniband/hw/mlx4/qp.c:3067: err = mlx4_get_roce_gid_from_slave(to_mdev(ib_dev)->dev,
drivers/infiniband/hw/mlx4/qp.c:3115: if (mlx4_is_mfunc(to_mdev(ib_dev)->dev)) {
drivers/infiniband/hw/mlx4/qp.c:3121: cpu_to_be64(atomic64_read(&(to_mdev(ib_dev)->sriov.
drivers/infiniband/hw/mlx4/qp.c:3125: to_mdev(ib_dev)->sriov.demux[sqp->qp.port - 1].
drivers/infiniband/hw/mlx4/qp.c:3208: sl_to_vl(to_mdev(ib_dev),
drivers/infiniband/hw/mlx4/qp.c:3218: err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
drivers/infiniband/hw/mlx4/qp.c:3221: err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
drivers/infiniband/hw/mlx4/qp.c:3998: ah_attr->type = rdma_ah_find_type(&ibdev->ib_dev, port_num);
drivers/infiniband/hw/mlx4/sysfs.c:122: ret = __mlx4_ib_query_gid(&mdev->ib_dev, port->num,
drivers/infiniband/hw/mlx4/sysfs.c:149: ret = __mlx4_ib_query_pkey(&mdev->ib_dev, port->num,
drivers/infiniband/hw/mlx4/sysfs.c:231: ret = __mlx4_ib_query_port(&device->ib_dev, port_num, &attr, 1);
drivers/infiniband/hw/mlx4/sysfs.c:590: int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) ==
drivers/infiniband/hw/mlx4/sysfs.c:625: int is_eth = rdma_port_get_link_layer(&p->dev->ib_dev, p->port_num) ==
drivers/infiniband/hw/mlx4/sysfs.c:640: int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) ==
drivers/infiniband/hw/mlx4/sysfs.c:817: dev->iov_parent = kobject_create_and_add("iov", &dev->ib_dev.dev.kobj);
drivers/infiniband/hw/mlx4/sysfs.c:830: for (i = 1; i <= dev->ib_dev.phys_port_cnt; ++i) {
drivers/infiniband/hw/mlx5/cq.c:61: event.device = &dev->ib_dev;
drivers/infiniband/hw/mlx5/cq.c:231: ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
drivers/infiniband/hw/mlx5/devx.c:94: struct mlx5_ib_dev *ib_dev;
drivers/infiniband/hw/mlx5/devx.c:1348: xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
drivers/infiniband/hw/mlx5/devx.c:1354: ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
drivers/infiniband/hw/mlx5/devx.c:1356: ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
drivers/infiniband/hw/mlx5/devx.c:1358: ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
drivers/infiniband/hw/mlx5/devx.c:1382: table = &obj->ib_dev->devx_event_table;
drivers/infiniband/hw/mlx5/devx.c:1468: obj->ib_dev = dev;
drivers/infiniband/hw/mlx5/devx.c:1490: mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
drivers/infiniband/hw/mlx5/devx.c:1492: mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
drivers/infiniband/hw/mlx5/devx.c:1494: mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
drivers/infiniband/hw/mlx5/devx.c:1656: get_device(&dev->ib_dev.dev);
drivers/infiniband/hw/mlx5/devx.c:2675: put_device(&dev->ib_dev.dev);
drivers/infiniband/hw/mlx5/flow.c:206: ib_set_flow(uobj, &flow_handler->ibflow, qp, &dev->ib_dev, uflow_res);
drivers/infiniband/hw/mlx5/flow.c:418: uverbs_flow_action_fill_action(action, uobj, &mdev->ib_dev,
drivers/infiniband/hw/mlx5/flow.c:560: uverbs_flow_action_fill_action(&maction->ib_action, uobj, &mdev->ib_dev,
drivers/infiniband/hw/mlx5/ib_rep.c:42: ibdev = ib_alloc_device(mlx5_ib_dev, ib_dev);
drivers/infiniband/hw/mlx5/ib_rep.c:49: ib_dealloc_device(&ibdev->ib_dev);
drivers/infiniband/hw/mlx5/main.c:241: if (get_port_state(&ibdev->ib_dev, port_num,
drivers/infiniband/hw/mlx5/main.c:249: ibev.device = &ibdev->ib_dev;
drivers/infiniband/hw/mlx5/main.c:303: enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
drivers/infiniband/hw/mlx5/main.c:340: enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
drivers/infiniband/hw/mlx5/main.c:788: switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
drivers/infiniband/hw/mlx5/main.c:4383: err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
drivers/infiniband/hw/mlx5/main.c:4389: return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
drivers/infiniband/hw/mlx5/main.c:4396: rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
drivers/infiniband/hw/mlx5/main.c:4406: rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
drivers/infiniband/hw/mlx5/main.c:4416: rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
drivers/infiniband/hw/mlx5/main.c:4426: rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
drivers/infiniband/hw/mlx5/main.c:4436: rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev);
drivers/infiniband/hw/mlx5/main.c:4553: if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
drivers/infiniband/hw/mlx5/main.c:4576: if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) ==
drivers/infiniband/hw/mlx5/main.c:4641: ibev.device = &ibdev->ib_dev;
drivers/infiniband/hw/mlx5/main.c:4643: if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) {
drivers/infiniband/hw/mlx5/main.c:4747: err = mlx5_ib_query_device(&dev->ib_dev, dprops, NULL);
drivers/infiniband/hw/mlx5/main.c:4753: err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
drivers/infiniband/hw/mlx5/main.c:4820: pd = ib_alloc_pd(&dev->ib_dev, 0);
drivers/infiniband/hw/mlx5/main.c:4827: cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
drivers/infiniband/hw/mlx5/main.c:4847: qp->device = &dev->ib_dev;
drivers/infiniband/hw/mlx5/main.c:4937: ibdev = &dev->ib_dev;
drivers/infiniband/hw/mlx5/main.c:4959: devr->c0->device = &dev->ib_dev;
drivers/infiniband/hw/mlx5/main.c:4966: devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
drivers/infiniband/hw/mlx5/main.c:4971: devr->x0->device = &dev->ib_dev;
drivers/infiniband/hw/mlx5/main.c:4977: devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL);
drivers/infiniband/hw/mlx5/main.c:4982: devr->x1->device = &dev->ib_dev;
drivers/infiniband/hw/mlx5/main.c:5001: devr->s0->device = &dev->ib_dev;
drivers/infiniband/hw/mlx5/main.c:5025: devr->s1->device = &dev->ib_dev;
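The main.c:4820-4847 block just listed allocates verbs resources from inside the kernel, against the driver's own ib_dev and with no uverbs context. A hedged sketch of that consumer pattern, with matching cleanup calls; the alloc_test_resources name is hypothetical:

static int alloc_test_resources(struct mlx5_ib_dev *dev)
{
	struct ib_pd *pd;
	struct ib_cq *cq;

	pd = ib_alloc_pd(&dev->ib_dev, 0);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	cq = ib_alloc_cq(&dev->ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
	if (IS_ERR(cq)) {
		ib_dealloc_pd(pd);
		return PTR_ERR(cq);
	}

	/* ... use pd/cq, e.g. to back a test QP as main.c:4847 does ... */

	ib_free_cq(cq);
	ib_dealloc_pd(pd);
	return 0;
}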
ib_dev 94 drivers/infiniband/hw/mthca/mthca_catas.c event.device = &dev->ib_dev;
ib_dev 252 drivers/infiniband/hw/mthca/mthca_cq.c event.device = &dev->ib_dev;
ib_dev 290 drivers/infiniband/hw/mthca/mthca_dev.h struct ib_device ib_dev;
ib_dev 592 drivers/infiniband/hw/mthca/mthca_dev.h return container_of(ibdev, struct mthca_dev, ib_dev);
ib_dev 253 drivers/infiniband/hw/mthca/mthca_eq.c record.device = &dev->ib_dev;
ib_dev 59 drivers/infiniband/hw/mthca/mthca_mad.c ret = ib_query_port(&dev->ib_dev, port_num, tprops);
ib_dev 61 drivers/infiniband/hw/mthca/mthca_mad.c dev_warn(&dev->ib_dev.dev,
ib_dev 86 drivers/infiniband/hw/mthca/mthca_mad.c ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num);
ib_dev 305 drivers/infiniband/hw/mthca/mthca_mad.c agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
ib_dev 964 drivers/infiniband/hw/mthca/mthca_main.c mdev = ib_alloc_device(mthca_dev, ib_dev);
ib_dev 1070 drivers/infiniband/hw/mthca/mthca_main.c ib_dealloc_device(&mdev->ib_dev);
ib_dev 1115 drivers/infiniband/hw/mthca/mthca_main.c ib_dealloc_device(&mdev->ib_dev);
ib_dev 1029 drivers/infiniband/hw/mthca/mthca_provider.c rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
ib_dev 1039 drivers/infiniband/hw/mthca/mthca_provider.c rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
ib_dev 1061 drivers/infiniband/hw/mthca/mthca_provider.c rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);
ib_dev 1097 drivers/infiniband/hw/mthca/mthca_provider.c memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
ib_dev 1108 drivers/infiniband/hw/mthca/mthca_provider.c memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
ib_dev 1138 drivers/infiniband/hw/mthca/mthca_provider.c container_of(device, struct mthca_dev, ib_dev);
ib_dev 1242 drivers/infiniband/hw/mthca/mthca_provider.c dev->ib_dev.uverbs_cmd_mask =
ib_dev 1260 drivers/infiniband/hw/mthca/mthca_provider.c dev->ib_dev.node_type = RDMA_NODE_IB_CA;
ib_dev 1261 drivers/infiniband/hw/mthca/mthca_provider.c dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
ib_dev 1262 drivers/infiniband/hw/mthca/mthca_provider.c dev->ib_dev.num_comp_vectors = 1;
ib_dev 1263 drivers/infiniband/hw/mthca/mthca_provider.c dev->ib_dev.dev.parent = &dev->pdev->dev;
ib_dev 1266 drivers/infiniband/hw/mthca/mthca_provider.c dev->ib_dev.uverbs_cmd_mask |=
ib_dev 1273 drivers/infiniband/hw/mthca/mthca_provider.c ib_set_device_ops(&dev->ib_dev,
ib_dev 1276 drivers/infiniband/hw/mthca/mthca_provider.c ib_set_device_ops(&dev->ib_dev,
ib_dev 1282 drivers/infiniband/hw/mthca/mthca_provider.c ib_set_device_ops(&dev->ib_dev,
ib_dev 1285 drivers/infiniband/hw/mthca/mthca_provider.c ib_set_device_ops(&dev->ib_dev,
ib_dev 1289 drivers/infiniband/hw/mthca/mthca_provider.c ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);
ib_dev 1292 drivers/infiniband/hw/mthca/mthca_provider.c ib_set_device_ops(&dev->ib_dev, &mthca_dev_arbel_ops);
ib_dev 1294 drivers/infiniband/hw/mthca/mthca_provider.c ib_set_device_ops(&dev->ib_dev, &mthca_dev_tavor_ops);
ib_dev 1298 drivers/infiniband/hw/mthca/mthca_provider.c rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
ib_dev 1299 drivers/infiniband/hw/mthca/mthca_provider.c ret = ib_register_device(&dev->ib_dev, "mthca%d");
ib_dev 1311 drivers/infiniband/hw/mthca/mthca_provider.c ib_unregister_device(&dev->ib_dev);
ib_dev 259 drivers/infiniband/hw/mthca/mthca_qp.c event.device = &dev->ib_dev;
ib_dev 407 drivers/infiniband/hw/mthca/mthca_qp.c ah_attr->type = rdma_ah_find_type(&dev->ib_dev, port_num);
ib_dev 1542 drivers/infiniband/hw/mthca/mthca_qp.c ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
ib_dev 1545 drivers/infiniband/hw/mthca/mthca_qp.c ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
ib_dev 450 drivers/infiniband/hw/mthca/mthca_srq.c event.device = &dev->ib_dev;
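mthca_dev.h line 592 above is the container_of idiom every hardware driver repeats: the core hands back the embedded struct ib_device, and the driver recovers its own structure from it. Sketch, again with a hypothetical foo_dev:

	static inline struct foo_dev *to_fdev(struct ib_device *ibdev)
	{
		return container_of(ibdev, struct foo_dev, ib_dev);
	}

rdma_device_to_drv_device(), seen in the sysfs callbacks throughout this listing, does the same thing starting from the struct device embedded inside the ib_device.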
ib_dev 74 drivers/infiniband/hw/usnic/usnic_ib.h struct ib_device ib_dev;
ib_dev 103 drivers/infiniband/hw/usnic/usnic_ib.h return container_of(ibdev, struct usnic_ib_dev, ib_dev);
ib_dev 79 drivers/infiniband/hw/usnic/usnic_ib_main.c return scnprintf(buf, buf_sz, "PF: %s ", dev_name(&vf->pf->ib_dev.dev));
ib_dev 147 drivers/infiniband/hw/usnic/usnic_ib_main.c usnic_info("PF Reset on %s\n", dev_name(&us_ibdev->ib_dev.dev));
ib_dev 150 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_event.device = &us_ibdev->ib_dev;
ib_dev 161 drivers/infiniband/hw/usnic/usnic_ib_main.c dev_name(&us_ibdev->ib_dev.dev));
ib_dev 163 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_event.device = &us_ibdev->ib_dev;
ib_dev 170 drivers/infiniband/hw/usnic/usnic_ib_main.c dev_name(&us_ibdev->ib_dev.dev));
ib_dev 173 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_event.device = &us_ibdev->ib_dev;
ib_dev 179 drivers/infiniband/hw/usnic/usnic_ib_main.c dev_name(&us_ibdev->ib_dev.dev));
ib_dev 186 drivers/infiniband/hw/usnic/usnic_ib_main.c dev_name(&us_ibdev->ib_dev.dev));
ib_dev 189 drivers/infiniband/hw/usnic/usnic_ib_main.c dev_name(&us_ibdev->ib_dev.dev),
ib_dev 195 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_event.device = &us_ibdev->ib_dev;
ib_dev 204 drivers/infiniband/hw/usnic/usnic_ib_main.c dev_name(&us_ibdev->ib_dev.dev),
ib_dev 210 drivers/infiniband/hw/usnic/usnic_ib_main.c dev_name(&us_ibdev->ib_dev.dev));
ib_dev 216 drivers/infiniband/hw/usnic/usnic_ib_main.c dev_name(&us_ibdev->ib_dev.dev));
ib_dev 233 drivers/infiniband/hw/usnic/usnic_ib_main.c us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
ib_dev 260 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_event.device = &us_ibdev->ib_dev;
ib_dev 270 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_event.device = &us_ibdev->ib_dev;
ib_dev 277 drivers/infiniband/hw/usnic/usnic_ib_main.c dev_name(&us_ibdev->ib_dev.dev));
ib_dev 296 drivers/infiniband/hw/usnic/usnic_ib_main.c us_ibdev = container_of(ibdev, struct usnic_ib_dev, ib_dev);
ib_dev 327 drivers/infiniband/hw/usnic/usnic_ib_main.c container_of(device, struct usnic_ib_dev, ib_dev);
ib_dev 379 drivers/infiniband/hw/usnic/usnic_ib_main.c us_ibdev = ib_alloc_device(usnic_ib_dev, ib_dev);
ib_dev 398 drivers/infiniband/hw/usnic/usnic_ib_main.c us_ibdev->ib_dev.node_type = RDMA_NODE_USNIC_UDP;
ib_dev 399 drivers/infiniband/hw/usnic/usnic_ib_main.c us_ibdev->ib_dev.phys_port_cnt = USNIC_IB_PORT_CNT;
ib_dev 400 drivers/infiniband/hw/usnic/usnic_ib_main.c us_ibdev->ib_dev.num_comp_vectors = USNIC_IB_NUM_COMP_VECTORS;
ib_dev 401 drivers/infiniband/hw/usnic/usnic_ib_main.c us_ibdev->ib_dev.dev.parent = &dev->dev;
ib_dev 403 drivers/infiniband/hw/usnic/usnic_ib_main.c us_ibdev->ib_dev.uverbs_cmd_mask =
ib_dev 422 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_set_device_ops(&us_ibdev->ib_dev, &usnic_dev_ops);
ib_dev 424 drivers/infiniband/hw/usnic/usnic_ib_main.c rdma_set_device_sysfs_group(&us_ibdev->ib_dev, &usnic_attr_group);
ib_dev 426 drivers/infiniband/hw/usnic/usnic_ib_main.c ret = ib_device_set_netdev(&us_ibdev->ib_dev, us_ibdev->netdev, 1);
ib_dev 430 drivers/infiniband/hw/usnic/usnic_ib_main.c if (ib_register_device(&us_ibdev->ib_dev, "usnic_%d"))
ib_dev 451 drivers/infiniband/hw/usnic/usnic_ib_main.c memcpy(&us_ibdev->ib_dev.node_guid, &gid.global.interface_id,
ib_dev 456 drivers/infiniband/hw/usnic/usnic_ib_main.c dev_name(&us_ibdev->ib_dev.dev),
ib_dev 465 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_dealloc_device(&us_ibdev->ib_dev);
ib_dev 471 drivers/infiniband/hw/usnic/usnic_ib_main.c usnic_info("Unregistering %s\n", dev_name(&us_ibdev->ib_dev.dev));
ib_dev 474 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_unregister_device(&us_ibdev->ib_dev);
ib_dev 475 drivers/infiniband/hw/usnic/usnic_ib_main.c ib_dealloc_device(&us_ibdev->ib_dev);
ib_dev 612 drivers/infiniband/hw/usnic/usnic_ib_main.c dev_name(&pf->ib_dev.dev));
ib_dev 486 drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c ib_event.device = &qp_grp->vf->pf->ib_dev;
ib_dev 53 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
ib_dev 71 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
ib_dev 95 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c dev_name(&us_ibdev->ib_dev.dev),
ib_dev 120 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c dev_name(&us_ibdev->ib_dev.dev));
ib_dev 133 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
ib_dev 144 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
ib_dev 155 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
ib_dev 170 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c rdma_device_to_drv_device(device, struct usnic_ib_dev, ib_dev);
ib_dev 276 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c kobject_get(&us_ibdev->ib_dev.dev.kobj);
ib_dev 278 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c &us_ibdev->ib_dev.dev.kobj);
ib_dev 280 drivers/infiniband/hw/usnic/usnic_ib_sysfs.c kobject_put(&us_ibdev->ib_dev.dev.kobj);
ib_dev 164 drivers/infiniband/hw/usnic/usnic_ib_verbs.c dev_name(&us_ibdev->ib_dev.dev));
ib_dev 202 drivers/infiniband/hw/usnic/usnic_ib_verbs.c dev_name(&us_ibdev->ib_dev.dev),
ib_dev 236 drivers/infiniband/hw/usnic/usnic_ib_verbs.c dev_name(&us_ibdev->ib_dev.dev));
ib_dev 493 drivers/infiniband/hw/usnic/usnic_ib_verbs.c dev_name(&us_ibdev->ib_dev.dev));
ib_dev 500 drivers/infiniband/hw/usnic/usnic_ib_verbs.c dev_name(&us_ibdev->ib_dev.dev));
ib_dev 506 drivers/infiniband/hw/usnic/usnic_ib_verbs.c dev_name(&us_ibdev->ib_dev.dev), init_attr->qp_type);
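The usnic_ib_main.c entries show how link and PF state changes are propagated to consumers: the driver fills a struct ib_event with its embedded device and calls ib_dispatch_event(). Roughly, with hypothetical naming:

	static void foo_report_port_state(struct ib_device *ibdev, u8 port_num,
					  bool up)
	{
		struct ib_event ib_event = {
			.device = ibdev,
			.element.port_num = port_num,
			.event = up ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR,
		};

		ib_dispatch_event(&ib_event);
	}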
ib_dev 205 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h struct ib_device ib_dev;
ib_dev 260 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h return container_of(ibdev, struct pvrdma_dev, ib_dev);
ib_dev 103 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c container_of(device, struct pvrdma_dev, ib_dev);
ib_dev 203 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
ib_dev 206 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c dev->ib_dev.num_comp_vectors = 1;
ib_dev 207 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c dev->ib_dev.dev.parent = &dev->pdev->dev;
ib_dev 208 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c dev->ib_dev.uverbs_cmd_mask =
ib_dev 230 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c dev->ib_dev.node_type = RDMA_NODE_IB_CA;
ib_dev 231 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;
ib_dev 233 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_ops);
ib_dev 252 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c dev->ib_dev.uverbs_cmd_mask |=
ib_dev 259 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_srq_ops);
ib_dev 267 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1);
ib_dev 271 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
ib_dev 273 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d");
ib_dev 398 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c ib_event.device = &dev->ib_dev;
ib_dev 713 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c ib_device_set_netdev(&dev->ib_dev, NULL, 1);
ib_dev 725 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c ib_device_set_netdev(&dev->ib_dev, ndev, 1);
ib_dev 734 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c event, dev_name(&dev->ib_dev.dev));
ib_dev 792 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c dev = ib_alloc_device(pvrdma_dev, ib_dev);
ib_dev 1052 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c ib_unregister_device(&dev->ib_dev);
ib_dev 1087 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c ib_dealloc_device(&dev->ib_dev);
ib_dev 1111 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c ib_unregister_device(&dev->ib_dev);
ib_dev 1137 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c ib_dealloc_device(&dev->ib_dev);
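pvrdma (and usnic above) pair the RDMA device with its underlying netdev through ib_device_set_netdev() before registering, and drop the association on teardown by passing NULL for the same port. A sketch of the two paths, wrapper names hypothetical:

	static int foo_bind_netdev(struct foo_dev *dev, struct net_device *ndev)
	{
		/* associate ndev with port 1; the core then handles netdev
		 * queries and GID management for that port */
		return ib_device_set_netdev(&dev->ib_dev, ndev, 1);
	}

	static void foo_unbind_netdev(struct foo_dev *dev)
	{
		ib_device_set_netdev(&dev->ib_dev, NULL, 1);
	}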
ib_dev 54 drivers/infiniband/sw/rxe/rxe.c void rxe_dealloc(struct ib_device *ib_dev)
ib_dev 56 drivers/infiniband/sw/rxe/rxe.c struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
ib_dev 325 drivers/infiniband/sw/rxe/rxe.c ib_device_put(&exists->ib_dev);
ib_dev 105 drivers/infiniband/sw/rxe/rxe.h return container_of(ibdev, struct rxe_dev, ib_dev);
ib_dev 233 drivers/infiniband/sw/rxe/rxe_loc.h void rxe_dealloc(struct ib_device *ib_dev);
ib_dev 209 drivers/infiniband/sw/rxe/rxe_net.c ib_device_put(&rxe->ib_dev);
ib_dev 226 drivers/infiniband/sw/rxe/rxe_net.c ib_device_put(&rxe->ib_dev);
ib_dev 468 drivers/infiniband/sw/rxe/rxe_net.c attr = rdma_get_gid_attr(&rxe->ib_dev, port_num, av->grh.sgid_index);
ib_dev 533 drivers/infiniband/sw/rxe/rxe_net.c rxe = ib_alloc_device(rxe_dev, ib_dev);
ib_dev 541 drivers/infiniband/sw/rxe/rxe_net.c ib_dealloc_device(&rxe->ib_dev);
ib_dev 553 drivers/infiniband/sw/rxe/rxe_net.c ev.device = &rxe->ib_dev;
ib_dev 569 drivers/infiniband/sw/rxe/rxe_net.c dev_info(&rxe->ib_dev.dev, "set active\n");
ib_dev 582 drivers/infiniband/sw/rxe/rxe_net.c dev_info(&rxe->ib_dev.dev, "set down\n");
ib_dev 605 drivers/infiniband/sw/rxe/rxe_net.c ib_unregister_device_queued(&rxe->ib_dev);
ib_dev 631 drivers/infiniband/sw/rxe/rxe_net.c ib_device_put(&rxe->ib_dev);
ib_dev 403 drivers/infiniband/sw/rxe/rxe_pool.c if (!ib_device_try_get(&pool->rxe->ib_dev))
ib_dev 422 drivers/infiniband/sw/rxe/rxe_pool.c ib_device_put(&pool->rxe->ib_dev);
ib_dev 442 drivers/infiniband/sw/rxe/rxe_pool.c if (!ib_device_try_get(&pool->rxe->ib_dev))
ib_dev 455 drivers/infiniband/sw/rxe/rxe_pool.c ib_device_put(&pool->rxe->ib_dev);
ib_dev 473 drivers/infiniband/sw/rxe/rxe_pool.c ib_device_put(&pool->rxe->ib_dev);
ib_dev 101 drivers/infiniband/sw/rxe/rxe_qp.c if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
ib_dev 436 drivers/infiniband/sw/rxe/rxe_qp.c if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
ib_dev 451 drivers/infiniband/sw/rxe/rxe_qp.c if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
ib_dev 345 drivers/infiniband/sw/rxe/rxe_recv.c gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
ib_dev 78 drivers/infiniband/sw/rxe/rxe_sysfs.c ib_device_put(&exists->ib_dev);
ib_dev 99 drivers/infiniband/sw/rxe/rxe_sysfs.c struct ib_device *ib_dev;
ib_dev 113 drivers/infiniband/sw/rxe/rxe_sysfs.c ib_dev = ib_device_get_by_name(intf, RDMA_DRIVER_RXE);
ib_dev 114 drivers/infiniband/sw/rxe/rxe_sysfs.c if (!ib_dev) {
ib_dev 119 drivers/infiniband/sw/rxe/rxe_sysfs.c ib_unregister_device_and_put(ib_dev);
ib_dev 113 drivers/infiniband/sw/rxe/rxe_verbs.c memcpy(rxe->ib_dev.node_desc,
ib_dev 114 drivers/infiniband/sw/rxe/rxe_verbs.c attr->node_desc, sizeof(rxe->ib_dev.node_desc));
ib_dev 1076 drivers/infiniband/sw/rxe/rxe_verbs.c rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);
ib_dev 1092 drivers/infiniband/sw/rxe/rxe_verbs.c static int rxe_enable_driver(struct ib_device *ib_dev)
ib_dev 1094 drivers/infiniband/sw/rxe/rxe_verbs.c struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);
ib_dev 1097 drivers/infiniband/sw/rxe/rxe_verbs.c dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
ib_dev 1161 drivers/infiniband/sw/rxe/rxe_verbs.c struct ib_device *dev = &rxe->ib_dev;
ib_dev 1211 drivers/infiniband/sw/rxe/rxe_verbs.c err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
ib_dev 385 drivers/infiniband/sw/rxe/rxe_verbs.h struct ib_device ib_dev;
ib_dev 425 drivers/infiniband/sw/rxe/rxe_verbs.h return dev ? container_of(dev, struct rxe_dev, ib_dev) : NULL;
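The rxe_pool.c entries pin the owning device for as long as a pooled object exists: ib_device_try_get() fails once unregistration has started, and every successful get is balanced by an ib_device_put(). A sketch of that shape (helper names hypothetical, not rxe's actual pool code):

	#include <linux/slab.h>
	#include <rdma/ib_verbs.h>

	static void *foo_pool_alloc(struct foo_dev *dev, size_t size)
	{
		void *obj;

		if (!ib_device_try_get(&dev->ib_dev))	/* device going away */
			return NULL;

		obj = kzalloc(size, GFP_KERNEL);
		if (!obj)
			ib_device_put(&dev->ib_dev);	/* balance the failed path */
		return obj;
	}

	static void foo_pool_free(struct foo_dev *dev, void *obj)
	{
		kfree(obj);
		ib_device_put(&dev->ib_dev);
	}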
ib_dev 614 drivers/infiniband/ulp/iser/iscsi_iser.c struct ib_device *ib_dev;
ib_dev 645 drivers/infiniband/ulp/iser/iscsi_iser.c ib_dev = ib_conn->device->ib_device;
ib_dev 647 drivers/infiniband/ulp/iser/iscsi_iser.c u32 sig_caps = ib_dev->attrs.sig_prot_cap;
ib_dev 655 drivers/infiniband/ulp/iser/iscsi_iser.c if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG))
ib_dev 658 drivers/infiniband/ulp/iser/iscsi_iser.c if (iscsi_host_add(shost, ib_dev->dev.parent)) {
ib_dev 77 drivers/infiniband/ulp/iser/iser_memory.c struct ib_device *ib_dev = device->ib_device;
ib_dev 80 drivers/infiniband/ulp/iser/iser_memory.c if (ib_dev->ops.alloc_fmr && ib_dev->ops.dealloc_fmr &&
ib_dev 81 drivers/infiniband/ulp/iser/iser_memory.c ib_dev->ops.map_phys_fmr && ib_dev->ops.unmap_fmr) {
ib_dev 84 drivers/infiniband/ulp/iser/iser_memory.c } else if (ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
ib_dev 70 drivers/infiniband/ulp/iser/iser_verbs.c struct ib_device *ib_dev = device->ib_device;
ib_dev 78 drivers/infiniband/ulp/iser/iser_verbs.c ib_dev->num_comp_vectors);
ib_dev 85 drivers/infiniband/ulp/iser/iser_verbs.c max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe);
ib_dev 88 drivers/infiniband/ulp/iser/iser_verbs.c device->comps_used, dev_name(&ib_dev->dev),
ib_dev 89 drivers/infiniband/ulp/iser/iser_verbs.c ib_dev->num_comp_vectors, max_cqe);
ib_dev 91 drivers/infiniband/ulp/iser/iser_verbs.c device->pd = ib_alloc_pd(ib_dev,
ib_dev 99 drivers/infiniband/ulp/iser/iser_verbs.c comp->cq = ib_alloc_cq(ib_dev, comp, max_cqe, i,
ib_dev 107 drivers/infiniband/ulp/iser/iser_verbs.c INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
ib_dev 243 drivers/infiniband/ulp/iser/iser_verbs.c struct ib_device *ib_dev = device->ib_device;
ib_dev 251 drivers/infiniband/ulp/iser/iser_verbs.c if (ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
ib_dev 367 drivers/infiniband/ulp/iser/iser_verbs.c struct ib_device *ib_dev;
ib_dev 375 drivers/infiniband/ulp/iser/iser_verbs.c ib_dev = device->ib_device;
ib_dev 406 drivers/infiniband/ulp/iser/iser_verbs.c if (ib_dev->attrs.max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
ib_dev 411 drivers/infiniband/ulp/iser/iser_verbs.c init_attr.cap.max_send_wr = ib_dev->attrs.max_qp_wr;
ib_dev 413 drivers/infiniband/ulp/iser/iser_verbs.c ISER_GET_MAX_XMIT_CMDS(ib_dev->attrs.max_qp_wr);
ib_dev 416 drivers/infiniband/ulp/iser/iser_verbs.c ib_dev->attrs.max_qp_wr);
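iser_verbs.c allocates one PD per device and a CQ per completion vector, capping the CQ size against the device attributes. Condensed to one PD and one CQ, as a sketch with a hypothetical helper:

	#include <rdma/ib_verbs.h>

	static int foo_create_resources(struct ib_device *ib_dev,
					struct ib_pd **pd, struct ib_cq **cq)
	{
		*pd = ib_alloc_pd(ib_dev, 0);
		if (IS_ERR(*pd))
			return PTR_ERR(*pd);

		/* 128 CQEs on completion vector 0, polled from softirq context */
		*cq = ib_alloc_cq(ib_dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
		if (IS_ERR(*cq)) {
			ib_dealloc_pd(*pd);
			return PTR_ERR(*cq);
		}
		return 0;
	}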
ib_dev 170 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = device->ib_device;
ib_dev 185 drivers/infiniband/ulp/isert/ib_isert.c dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
ib_dev 187 drivers/infiniband/ulp/isert/ib_isert.c if (ib_dma_mapping_error(ib_dev, dma_addr))
ib_dev 204 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
ib_dev 216 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = isert_conn->device->ib_device;
ib_dev 225 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
ib_dev 291 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = device->ib_device;
ib_dev 295 drivers/infiniband/ulp/isert/ib_isert.c ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);
ib_dev 296 drivers/infiniband/ulp/isert/ib_isert.c isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
ib_dev 302 drivers/infiniband/ulp/isert/ib_isert.c device->pd = ib_alloc_pd(ib_dev, 0);
ib_dev 311 drivers/infiniband/ulp/isert/ib_isert.c device->pi_capable = ib_dev->attrs.device_cap_flags &
ib_dev 405 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = isert_conn->device->ib_device;
ib_dev 407 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
ib_dev 411 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
ib_dev 419 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev)
ib_dev 428 drivers/infiniband/ulp/isert/ib_isert.c isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
ib_dev 431 drivers/infiniband/ulp/isert/ib_isert.c ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
ib_dev 444 drivers/infiniband/ulp/isert/ib_isert.c isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
ib_dev 447 drivers/infiniband/ulp/isert/ib_isert.c ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
ib_dev 459 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
ib_dev 858 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = isert_conn->cm_id->device;
ib_dev 862 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
ib_dev 903 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = device->ib_device;
ib_dev 905 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
ib_dev 916 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = device->ib_device;
ib_dev 919 drivers/infiniband/ulp/isert/ib_isert.c dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
ib_dev 921 drivers/infiniband/ulp/isert/ib_isert.c if (ib_dma_mapping_error(ib_dev, dma_addr)) {
ib_dev 994 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = device->ib_device;
ib_dev 1008 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
ib_dev 1013 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
ib_dev 1399 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = isert_conn->cm_id->device;
ib_dev 1415 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
ib_dev 1450 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
ib_dev 1458 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = isert_conn->device->ib_device;
ib_dev 1465 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
ib_dev 1481 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
ib_dev 1583 drivers/infiniband/ulp/isert/ib_isert.c isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
ib_dev 1587 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
ib_dev 1595 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev, bool comp_err)
ib_dev 1599 drivers/infiniband/ulp/isert/ib_isert.c ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
ib_dev 1604 drivers/infiniband/ulp/isert/ib_isert.c isert_unmap_tx_desc(tx_desc, ib_dev);
ib_dev 1745 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = isert_conn->cm_id->device;
ib_dev 1758 drivers/infiniband/ulp/isert/ib_isert.c ib_dev, false);
ib_dev 1774 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = isert_conn->cm_id->device;
ib_dev 1783 drivers/infiniband/ulp/isert/ib_isert.c isert_unmap_tx_desc(tx_desc, ib_dev);
ib_dev 1790 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = isert_conn->cm_id->device;
ib_dev 1798 drivers/infiniband/ulp/isert/ib_isert.c isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
ib_dev 1809 drivers/infiniband/ulp/isert/ib_isert.c isert_unmap_tx_desc(tx_desc, ib_dev);
ib_dev 1816 drivers/infiniband/ulp/isert/ib_isert.c isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
ib_dev 1859 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = device->ib_device;
ib_dev 1871 drivers/infiniband/ulp/isert/ib_isert.c isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
ib_dev 1874 drivers/infiniband/ulp/isert/ib_isert.c if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
ib_dev 1990 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = device->ib_device;
ib_dev 2000 drivers/infiniband/ulp/isert/ib_isert.c isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
ib_dev 2003 drivers/infiniband/ulp/isert/ib_isert.c if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
ib_dev 2039 drivers/infiniband/ulp/isert/ib_isert.c struct ib_device *ib_dev = device->ib_device;
ib_dev 2043 drivers/infiniband/ulp/isert/ib_isert.c isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
ib_dev 2045 drivers/infiniband/ulp/isert/ib_isert.c if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
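The ib_isert.c entries repeat one DMA cycle for every descriptor: map, check with ib_dma_mapping_error(), sync around CPU access, unmap. The core of it, as a sketch with hypothetical helper names:

	#include <rdma/ib_verbs.h>

	static int foo_map_rx_desc(struct ib_device *ib_dev, void *desc,
				   size_t len, u64 *dma_addr)
	{
		*dma_addr = ib_dma_map_single(ib_dev, desc, len, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, *dma_addr))
			return -ENOMEM;
		return 0;
	}

	static void foo_unmap_rx_desc(struct ib_device *ib_dev, u64 dma_addr,
				      size_t len)
	{
		ib_dma_unmap_single(ib_dev, dma_addr, len, DMA_FROM_DEVICE);
	}

Between map and unmap, ib_dma_sync_single_for_cpu() and ib_dma_sync_single_for_device() bracket any CPU reads or writes of the buffer, as the sync calls above show.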
ib_dev 56 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = NULL;
ib_dev 58 drivers/target/target_core_iblock.c ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
ib_dev 59 drivers/target/target_core_iblock.c if (!ib_dev) {
ib_dev 66 drivers/target/target_core_iblock.c return &ib_dev->dev;
ib_dev 71 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
ib_dev 79 drivers/target/target_core_iblock.c if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
ib_dev 84 drivers/target/target_core_iblock.c ret = bioset_init(&ib_dev->ibd_bio_set, IBLOCK_BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
ib_dev 91 drivers/target/target_core_iblock.c ib_dev->ibd_udev_path);
ib_dev 94 drivers/target/target_core_iblock.c if (!ib_dev->ibd_readonly)
ib_dev 99 drivers/target/target_core_iblock.c bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
ib_dev 104 drivers/target/target_core_iblock.c ib_dev->ibd_bd = bd;
ib_dev 131 drivers/target/target_core_iblock.c struct bio_set *bs = &ib_dev->ibd_bio_set;
ib_dev 162 drivers/target/target_core_iblock.c blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
ib_dev 164 drivers/target/target_core_iblock.c bioset_exit(&ib_dev->ibd_bio_set);
ib_dev 172 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
ib_dev 174 drivers/target/target_core_iblock.c kfree(ib_dev);
ib_dev 184 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
ib_dev 186 drivers/target/target_core_iblock.c if (ib_dev->ibd_bd != NULL)
ib_dev 187 drivers/target/target_core_iblock.c blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
ib_dev 188 drivers/target/target_core_iblock.c bioset_exit(&ib_dev->ibd_bio_set);
ib_dev 310 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
ib_dev 320 drivers/target/target_core_iblock.c bio = bio_alloc_bioset(GFP_NOIO, sg_num, &ib_dev->ibd_bio_set);
ib_dev 326 drivers/target/target_core_iblock.c bio_set_dev(bio, ib_dev->ibd_bd);
ib_dev 370 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
ib_dev 383 drivers/target/target_core_iblock.c bio_set_dev(bio, ib_dev->ibd_bd);
ib_dev 534 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
ib_dev 553 drivers/target/target_core_iblock.c if (ib_dev->ibd_bd) {
ib_dev 559 drivers/target/target_core_iblock.c if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
ib_dev 565 drivers/target/target_core_iblock.c ib_dev->ibd_udev_path);
ib_dev 566 drivers/target/target_core_iblock.c ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
ib_dev 581 drivers/target/target_core_iblock.c ib_dev->ibd_readonly = tmp_readonly;
ib_dev 582 drivers/target/target_core_iblock.c pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
ib_dev 598 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
ib_dev 599 drivers/target/target_core_iblock.c struct block_device *bd = ib_dev->ibd_bd;
ib_dev 606 drivers/target/target_core_iblock.c if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
ib_dev 608 drivers/target/target_core_iblock.c ib_dev->ibd_udev_path);
ib_dev 609 drivers/target/target_core_iblock.c bl += sprintf(b + bl, " readonly: %d\n", ib_dev->ibd_readonly);
ib_dev 615 drivers/target/target_core_iblock.c "" : (bd->bd_holder == ib_dev) ?
ib_dev 631 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
ib_dev 635 drivers/target/target_core_iblock.c bi = bdev_get_integrity(ib_dev->ibd_bd);
ib_dev 696 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
ib_dev 697 drivers/target/target_core_iblock.c struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
ib_dev 794 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
ib_dev 795 drivers/target/target_core_iblock.c struct block_device *bd = ib_dev->ibd_bd;
ib_dev 803 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
ib_dev 804 drivers/target/target_core_iblock.c struct block_device *bd = ib_dev->ibd_bd;
ib_dev 817 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
ib_dev 818 drivers/target/target_core_iblock.c struct block_device *bd = ib_dev->ibd_bd;
ib_dev 826 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
ib_dev 827 drivers/target/target_core_iblock.c struct block_device *bd = ib_dev->ibd_bd;
ib_dev 834 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
ib_dev 835 drivers/target/target_core_iblock.c struct block_device *bd = ib_dev->ibd_bd;
ib_dev 855 drivers/target/target_core_iblock.c struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
ib_dev 856 drivers/target/target_core_iblock.c struct block_device *bd = ib_dev->ibd_bd;
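Note the false friend: in drivers/target/target_core_iblock.c, ib_dev is the SCSI target IBLOCK (block-device) backstore, unrelated to InfiniBand. The container idiom is the same shape, roughly:

	struct iblock_dev {
		struct se_device dev;		/* embedded target-core device */
		struct block_device *ibd_bd;
		/* ... */
	};

	#define IBLOCK_DEV(d)	container_of(d, struct iblock_dev, dev)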
ib_dev 46 include/rdma/ib_cache.h const struct ib_gid_attr *rdma_find_gid_by_port(struct ib_device *ib_dev,
ib_dev 2159 include/rdma/ib_verbs.h struct ib_device *ib_dev;
ib_dev 2245 include/rdma/ib_verbs.h #define rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, gfp) \
ib_dev 2246 include/rdma/ib_verbs.h ((struct ib_type *)kzalloc(ib_dev->ops.size_##ib_type, gfp))
ib_dev 2248 include/rdma/ib_verbs.h #define rdma_zalloc_drv_obj(ib_dev, ib_type) \
ib_dev 2249 include/rdma/ib_verbs.h rdma_zalloc_drv_obj_gfp(ib_dev, ib_type, GFP_KERNEL)
ib_dev 2738 include/rdma/ib_verbs.h void ib_unregister_device_queued(struct ib_device *ib_dev);
ib_dev 4319 include/rdma/ib_verbs.h int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
ib_dev 125 include/rdma/uverbs_std_types.h struct uverbs_attr_bundle *attrs, struct ib_device **ib_dev)
ib_dev 131 include/rdma/uverbs_std_types.h *ib_dev = attrs->context->device;
ib_dev 140 include/rdma/uverbs_std_types.h struct ib_device *ib_dev,
ib_dev 144 include/rdma/uverbs_std_types.h action->device = ib_dev;
ib_dev 53 include/trace/events/ib_umad.h __entry->dev_index = file->port->ib_dev->index;
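The ib_verbs.h macros at 2245-2249 above let the core allocate driver-sized uobjects: the driver registers size_##ib_type in its ops table, and rdma_zalloc_drv_obj() kzallocs that many bytes typed as the core object. A usage sketch (the wrapper function is hypothetical; the macro itself is quoted above):

	static struct ib_pd *foo_alloc_pd_obj(struct ib_device *ib_dev)
	{
		/* expands to kzalloc(ib_dev->ops.size_ib_pd, GFP_KERNEL) */
		struct ib_pd *pd = rdma_zalloc_drv_obj(ib_dev, ib_pd);

		if (!pd)
			return ERR_PTR(-ENOMEM);
		pd->device = ib_dev;
		return pd;
	}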
ib_dev 534 net/smc/af_smc.c if (!ini->ib_dev)
ib_dev 424 net/smc/smc_clc.c memcpy(&pclc.lcl.mac, &ini->ib_dev->mac[ini->ib_port - 1],
ib_dev 239 net/smc/smc_core.c get_device(&ini->ib_dev->ibdev->dev);
ib_dev 248 net/smc/smc_core.c lnk->smcibdev = ini->ib_dev;
ib_dev 251 net/smc/smc_core.c ini->ib_dev->pattr[ini->ib_port - 1].active_mtu;
ib_dev 252 net/smc/smc_core.c if (!ini->ib_dev->initialized)
ib_dev 253 net/smc/smc_core.c smc_ib_setup_per_ibdev(ini->ib_dev);
ib_dev 241 net/smc/smc_core.h struct smc_ib_device *ib_dev;
ib_dev 201 net/smc/smc_pnet.c struct smc_ib_device *ib_dev = new_pnetelem->smcibdev;
ib_dev 205 net/smc/smc_pnet.c if (smc_pnet_match(ib_dev->pnetid[ib_port - 1], pnet_null)) {
ib_dev 206 net/smc/smc_pnet.c memcpy(ib_dev->pnetid[ib_port - 1],
ib_dev 208 net/smc/smc_pnet.c ib_dev->pnetid_by_user[ib_port - 1] = true;
ib_dev 784 net/smc/smc_pnet.c ini->ib_dev = ibdev;
ib_dev 823 net/smc/smc_pnet.c ini->ib_dev = ibdev;
ib_dev 863 net/smc/smc_pnet.c ini->ib_dev = NULL;
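In net/smc the name is overloaded once more: ini->ib_dev is a struct smc_ib_device *, SMC's wrapper that carries the core ib_device (its ->ibdev member, visible in the smc_core.c entries) plus per-port MAC, pnetid and MTU state. The init-info fields involved are roughly:

	struct smc_init_info {
		/* ... */
		struct smc_ib_device *ib_dev;	/* chosen RoCE device, or NULL */
		u8 ib_port;			/* 1-based port on that device */
	};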