Lines Matching refs:device

54 	struct ib_device  *device;  member
170 event.device = ib_dev; in write_gid()
725 int ib_get_cached_gid(struct ib_device *device, in ib_get_cached_gid() argument
731 if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) in ib_get_cached_gid()
734 return __ib_cache_gid_get(device, port_num, index, gid, gid_attr); in ib_get_cached_gid()
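
The ib_get_cached_gid() fragments above show the common pattern for these accessors: validate port_num against rdma_start_port()/rdma_end_port(), then read from the per-port table. A minimal caller sketch, assuming the (device, port_num, index, gid, gid_attr) ordering implied by the __ib_cache_gid_get() call; the helper name and the netdev-reference handling are illustrative, not taken from the listing:

#include <linux/netdevice.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* Illustrative helper: read GID index 0 of a port from the cache. */
static int example_read_port_gid0(struct ib_device *device, u8 port_num,
				  union ib_gid *gid)
{
	struct ib_gid_attr attr;
	int ret;

	ret = ib_get_cached_gid(device, port_num, 0, gid, &attr);
	if (ret)
		return ret;		/* e.g. -EINVAL for an out-of-range port */

	/* For RoCE GID table entries the cache is expected to hand back a
	 * referenced netdev in the attribute; drop it once we are done. */
	if (attr.ndev)
		dev_put(attr.ndev);
	return 0;
}
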
738 int ib_find_cached_gid(struct ib_device *device, in ib_find_cached_gid() argument
744 return ib_cache_gid_find(device, gid, ndev, port_num, index); in ib_find_cached_gid()
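
ib_find_cached_gid() goes the other way, resolving a GID to a (port, index) pair, optionally restricted to a netdev. A usage sketch assuming the (device, gid, ndev, port_num, index) ordering visible in the ib_cache_gid_find() call; the -ENOENT note is the conventional outcome, not shown in the listing:

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* Illustrative: map a destination GID back to the port/index holding it. */
static int example_resolve_gid(struct ib_device *device,
			       const union ib_gid *gid,
			       struct net_device *ndev)
{
	u8 port_num;
	u16 index;
	int ret;

	/* ndev may be NULL to match entries regardless of their netdev. */
	ret = ib_find_cached_gid(device, gid, ndev, &port_num, &index);
	if (ret)
		return ret;		/* typically -ENOENT when nothing matches */

	pr_info("GID found at port %u, index %u\n", port_num, index);
	return 0;
}
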
748 int ib_find_gid_by_filter(struct ib_device *device, in ib_find_gid_by_filter() argument
757 if (!rdma_cap_roce_gid_table(device, port_num) && filter) in ib_find_gid_by_filter()
760 return ib_cache_gid_find_by_filter(device, gid, in ib_find_gid_by_filter()
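
ib_find_gid_by_filter() adds a caller-supplied predicate, and the fragment shows it is only honoured on ports with a RoCE GID table (rdma_cap_roce_gid_table()). A sketch assuming the (device, gid, port_num, filter, context, index) parameter order and a (gid, gid_attr, context) filter signature, as in kernels of this vintage; the netdev filter itself is illustrative:

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* Illustrative filter: accept only GID entries bound to a given netdev. */
static bool example_gid_on_ndev(const union ib_gid *gid,
				const struct ib_gid_attr *attr,
				void *context)
{
	return attr->ndev == context;
}

static int example_find_gid_on_ndev(struct ib_device *device, u8 port_num,
				    const union ib_gid *gid,
				    struct net_device *ndev, u16 *index)
{
	/* Fails on ports without a RoCE GID table when a filter is given. */
	return ib_find_gid_by_filter(device, gid, port_num,
				     example_gid_on_ndev, ndev, index);
}
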
766 int ib_get_cached_pkey(struct ib_device *device, in ib_get_cached_pkey() argument
775 if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) in ib_get_cached_pkey()
778 read_lock_irqsave(&device->cache.lock, flags); in ib_get_cached_pkey()
780 cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; in ib_get_cached_pkey()
787 read_unlock_irqrestore(&device->cache.lock, flags); in ib_get_cached_pkey()
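
ib_get_cached_pkey() reads one P_Key table slot under read_lock_irqsave(&device->cache.lock, ...), so callers need no locking of their own. A minimal sketch, assuming the (device, port_num, index, pkey) ordering these fragments imply:

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* Illustrative: fetch the P_Key at index 0 (conventionally the default). */
static int example_default_pkey(struct ib_device *device, u8 port_num,
				u16 *pkey)
{
	/* The cache takes device->cache.lock for reading internally. */
	return ib_get_cached_pkey(device, port_num, 0, pkey);
}
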
793 int ib_find_cached_pkey(struct ib_device *device, in ib_find_cached_pkey() argument
804 if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) in ib_find_cached_pkey()
807 read_lock_irqsave(&device->cache.lock, flags); in ib_find_cached_pkey()
809 cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; in ib_find_cached_pkey()
828 read_unlock_irqrestore(&device->cache.lock, flags); in ib_find_cached_pkey()
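
ib_find_cached_pkey() does the reverse lookup, returning the table index for a P_Key value, which is what a consumer typically needs when programming a QP. A sketch assuming the (device, port_num, pkey, index) ordering; the QP-attribute use is illustrative:

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* Illustrative: resolve a P_Key value into the index a QP should use. */
static int example_set_qp_pkey(struct ib_device *device, u8 port_num,
			       u16 pkey, struct ib_qp_attr *qp_attr)
{
	u16 index;
	int ret;

	ret = ib_find_cached_pkey(device, port_num, pkey, &index);
	if (ret)
		return ret;		/* no matching table entry on this port */

	qp_attr->pkey_index = index;
	return 0;
}
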
834 int ib_find_exact_cached_pkey(struct ib_device *device, in ib_find_exact_cached_pkey() argument
844 if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) in ib_find_exact_cached_pkey()
847 read_lock_irqsave(&device->cache.lock, flags); in ib_find_exact_cached_pkey()
849 cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; in ib_find_exact_cached_pkey()
860 read_unlock_irqrestore(&device->cache.lock, flags); in ib_find_exact_cached_pkey()
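
ib_find_exact_cached_pkey() follows the same locking and indexing pattern but is generally understood to compare all 16 bits, membership bit included, whereas the non-exact variant ignores it. A brief sketch under that assumption:

#include <rdma/ib_cache.h>

/* Illustrative: look up the full-member default P_Key, and nothing else. */
static int example_find_full_default_pkey(struct ib_device *device,
					   u8 port_num, u16 *index)
{
	/* 0xffff must appear verbatim in the table; the limited-member
	 * form 0x7fff would not satisfy this exact lookup. */
	return ib_find_exact_cached_pkey(device, port_num, 0xffff, index);
}
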
866 int ib_get_cached_lmc(struct ib_device *device, in ib_get_cached_lmc() argument
873 if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) in ib_get_cached_lmc()
876 read_lock_irqsave(&device->cache.lock, flags); in ib_get_cached_lmc()
877 *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)]; in ib_get_cached_lmc()
878 read_unlock_irqrestore(&device->cache.lock, flags); in ib_get_cached_lmc()
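
ib_get_cached_lmc() returns the port's LID Mask Control value from the same cache. A small sketch, assuming the (device, port_num, lmc) ordering; the 2^LMC arithmetic is standard InfiniBand addressing rather than anything shown in the listing:

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

/* Illustrative: how many LIDs the port answers to, i.e. 2^LMC. */
static int example_port_lid_count(struct ib_device *device, u8 port_num,
				  unsigned int *nr_lids)
{
	u8 lmc;
	int ret;

	ret = ib_get_cached_lmc(device, port_num, &lmc);
	if (ret)
		return ret;

	*nr_lids = 1U << lmc;	/* the LMC low-order LID bits are wildcards */
	return 0;
}
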
884 static void ib_cache_update(struct ib_device *device, in ib_cache_update() argument
896 struct ib_gid_table **ports_table = device->cache.gid_cache; in ib_cache_update()
898 rdma_cap_roce_gid_table(device, port); in ib_cache_update()
900 if (port < rdma_start_port(device) || port > rdma_end_port(device)) in ib_cache_update()
903 table = ports_table[port - rdma_start_port(device)]; in ib_cache_update()
909 ret = ib_query_port(device, port, tprops); in ib_cache_update()
912 ret, device->name); in ib_cache_update()
933 ret = ib_query_pkey(device, port, i, pkey_cache->table + i); in ib_cache_update()
936 ret, device->name, i); in ib_cache_update()
943 ret = ib_query_gid(device, port, i, in ib_cache_update()
947 ret, device->name, i); in ib_cache_update()
953 write_lock_irq(&device->cache.lock); in ib_cache_update()
955 old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)]; in ib_cache_update()
957 device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache; in ib_cache_update()
960 modify_gid(device, port, table, i, gid_cache->table + i, in ib_cache_update()
965 device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc; in ib_cache_update()
967 write_unlock_irq(&device->cache.lock); in ib_cache_update()
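
Taken together, the ib_cache_update() fragments show a read-copy-update flow: ib_query_port()/ib_query_pkey()/ib_query_gid() fill freshly allocated tables without the cache lock held, and only the final swap of the pkey table, GID entries and LMC happens under write_lock_irq(&device->cache.lock). A condensed, generic sketch of that publish-under-write-lock pattern; the example_* types and names are illustrative, not the kernel's:

#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_table {
	int nentries;
	u16 entries[];
};

struct example_dev {
	rwlock_t cache_lock;		/* rwlock_init() at setup time */
	struct example_table *cache[2];	/* one table per port, illustrative */
};

/* Publish a freshly built table; readers use read_lock_irqsave(). */
static void example_cache_publish(struct example_dev *dev, unsigned int port,
				  struct example_table *new_tbl)
{
	struct example_table *old_tbl;

	write_lock_irq(&dev->cache_lock);
	old_tbl = dev->cache[port];
	dev->cache[port] = new_tbl;
	write_unlock_irq(&dev->cache_lock);

	kfree(old_tbl);			/* free the stale copy outside the lock */
}
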
985 ib_cache_update(work->device, work->port_num); in ib_cache_task()
1004 work->device = event->device; in ib_cache_event()
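
The ib_cache_event()/ib_cache_task() fragments suggest the usual deferred-update scheme: the event handler may run in atomic context, so it only records the device and port in a work item, and a workqueue later performs the (sleeping) ib_cache_update(). A generic sketch of that scheme; the example_* names, the GFP_ATOMIC choice and the use of system_wq are assumptions, since the listing only shows the device/port_num assignments:

#include <linux/slab.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

/* Illustrative work item carrying the device/port_num seen above. */
struct example_cache_work {
	struct work_struct work;
	struct ib_device *device;
	u8 port_num;
};

static void example_cache_task(struct work_struct *_work)
{
	struct example_cache_work *work =
		container_of(_work, struct example_cache_work, work);

	/* Process context: the real worker calls ib_cache_update()
	 * (a static helper in this file) for work->device/port_num. */
	kfree(work);
}

static void example_cache_event(struct ib_event_handler *handler,
				struct ib_event *event)
{
	struct example_cache_work *work;

	work = kmalloc(sizeof(*work), GFP_ATOMIC);	/* may be atomic context */
	if (!work)
		return;

	INIT_WORK(&work->work, example_cache_task);
	work->device = event->device;
	work->port_num = event->element.port_num;
	queue_work(system_wq, &work->work);
}
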
1011 int ib_cache_setup_one(struct ib_device *device) in ib_cache_setup_one() argument
1016 rwlock_init(&device->cache.lock); in ib_cache_setup_one()
1018 device->cache.pkey_cache = in ib_cache_setup_one()
1019 kzalloc(sizeof *device->cache.pkey_cache * in ib_cache_setup_one()
1020 (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL); in ib_cache_setup_one()
1021 device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache * in ib_cache_setup_one()
1022 (rdma_end_port(device) - in ib_cache_setup_one()
1023 rdma_start_port(device) + 1), in ib_cache_setup_one()
1025 if (!device->cache.pkey_cache || in ib_cache_setup_one()
1026 !device->cache.lmc_cache) { in ib_cache_setup_one()
1028 "for %s\n", device->name); in ib_cache_setup_one()
1032 err = gid_table_setup_one(device); in ib_cache_setup_one()
1037 for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) in ib_cache_setup_one()
1038 ib_cache_update(device, p + rdma_start_port(device)); in ib_cache_setup_one()
1040 INIT_IB_EVENT_HANDLER(&device->cache.event_handler, in ib_cache_setup_one()
1041 device, ib_cache_event); in ib_cache_setup_one()
1042 err = ib_register_event_handler(&device->cache.event_handler); in ib_cache_setup_one()
1049 gid_table_cleanup_one(device); in ib_cache_setup_one()
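
ib_cache_setup_one() sizes both per-port arrays as rdma_end_port(device) - rdma_start_port(device) + 1 entries and indexes them with port - rdma_start_port(device), the convention that lets switch port 0 and multi-port HCAs share one code path. A small sketch of the same sizing rule; the helper name is illustrative:

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

/* Illustrative: allocate one slot per port using the same sizing rule. */
static void **example_alloc_per_port(struct ib_device *device)
{
	u32 nports = rdma_end_port(device) - rdma_start_port(device) + 1;

	/* The slot for port p then lives at index p - rdma_start_port(). */
	return kcalloc(nports, sizeof(void *), GFP_KERNEL);
}
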
1053 void ib_cache_release_one(struct ib_device *device) in ib_cache_release_one() argument
1063 if (device->cache.pkey_cache) in ib_cache_release_one()
1065 p <= rdma_end_port(device) - rdma_start_port(device); ++p) in ib_cache_release_one()
1066 kfree(device->cache.pkey_cache[p]); in ib_cache_release_one()
1068 gid_table_release_one(device); in ib_cache_release_one()
1069 kfree(device->cache.pkey_cache); in ib_cache_release_one()
1070 kfree(device->cache.lmc_cache); in ib_cache_release_one()
1073 void ib_cache_cleanup_one(struct ib_device *device) in ib_cache_cleanup_one() argument
1082 ib_unregister_event_handler(&device->cache.event_handler); in ib_cache_cleanup_one()
1084 gid_table_cleanup_one(device); in ib_cache_cleanup_one()