Lines matching references to "device" in drivers/infiniband/core/device.c; the leading number is the line in that file, and the trailing "argument"/"local" notes how the identifier is used on that line.
85 static int ib_device_check_mandatory(struct ib_device *device) in ib_device_check_mandatory() argument
116 if (!*(void **) ((void *) device + mandatory_table[i].offset)) { in ib_device_check_mandatory()
118 device->name, mandatory_table[i].name); in ib_device_check_mandatory()
128 struct ib_device *device; in __ib_device_get_by_name() local
130 list_for_each_entry(device, &device_list, core_list) in __ib_device_get_by_name()
131 if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX)) in __ib_device_get_by_name()
132 return device; in __ib_device_get_by_name()
142 struct ib_device *device; in alloc_name() local
149 list_for_each_entry(device, &device_list, core_list) { in alloc_name()
150 if (!sscanf(device->name, name, &i)) in alloc_name()
155 if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX)) in alloc_name()
170 static void ib_device_release(struct device *device) in ib_device_release() argument
172 struct ib_device *dev = container_of(device, struct ib_device, dev); in ib_device_release()
179 static int ib_device_uevent(struct device *device, in ib_device_uevent() argument
182 struct ib_device *dev = container_of(device, struct ib_device, dev); in ib_device_uevent()
212 struct ib_device *device; in ib_alloc_device() local
217 device = kzalloc(size, GFP_KERNEL); in ib_alloc_device()
218 if (!device) in ib_alloc_device()
221 device->dev.class = &ib_class; in ib_alloc_device()
222 device_initialize(&device->dev); in ib_alloc_device()
224 dev_set_drvdata(&device->dev, device); in ib_alloc_device()
226 INIT_LIST_HEAD(&device->event_handler_list); in ib_alloc_device()
227 spin_lock_init(&device->event_handler_lock); in ib_alloc_device()
228 spin_lock_init(&device->client_data_lock); in ib_alloc_device()
229 INIT_LIST_HEAD(&device->client_data_list); in ib_alloc_device()
230 INIT_LIST_HEAD(&device->port_list); in ib_alloc_device()
232 return device; in ib_alloc_device()
242 void ib_dealloc_device(struct ib_device *device) in ib_dealloc_device() argument
244 WARN_ON(device->reg_state != IB_DEV_UNREGISTERED && in ib_dealloc_device()
245 device->reg_state != IB_DEV_UNINITIALIZED); in ib_dealloc_device()
246 kobject_put(&device->dev.kobj); in ib_dealloc_device()
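ib_alloc_device() and ib_dealloc_device() above are the allocation pair a low-level driver uses around registration. A hedged sketch of the usual pattern, assuming a hypothetical driver-private structure (struct my_hca and its fields are illustrative, not from the source):

#include <rdma/ib_verbs.h>

struct my_hca {
	struct ib_device ib_dev;	/* must be the first member: the driver
					 * casts the ib_device returned by
					 * ib_alloc_device() back to my_hca */
	void __iomem	*regs;		/* hypothetical driver state */
};

static struct my_hca *my_hca_alloc(void)
{
	struct my_hca *hca;

	/* ib_alloc_device() zeroes the whole allocation and initializes the
	 * embedded struct device, lists and locks seen at lines 221-230. */
	hca = (struct my_hca *)ib_alloc_device(sizeof(*hca));
	if (!hca)
		return NULL;

	return hca;
}

static void my_hca_free(struct my_hca *hca)
{
	/* Drops the kobject reference taken in ib_alloc_device(); only valid
	 * when the device is unregistered or was never registered. */
	ib_dealloc_device(&hca->ib_dev);
}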
250 static int add_client_context(struct ib_device *device, struct ib_client *client) in add_client_context() argument
258 device->name, client->name); in add_client_context()
267 spin_lock_irqsave(&device->client_data_lock, flags); in add_client_context()
268 list_add(&context->list, &device->client_data_list); in add_client_context()
269 spin_unlock_irqrestore(&device->client_data_lock, flags); in add_client_context()
281 static int read_port_immutable(struct ib_device *device) in read_port_immutable() argument
284 u8 start_port = rdma_start_port(device); in read_port_immutable()
285 u8 end_port = rdma_end_port(device); in read_port_immutable()
295 device->port_immutable = kzalloc(sizeof(*device->port_immutable) in read_port_immutable()
298 if (!device->port_immutable) in read_port_immutable()
302 ret = device->get_port_immutable(device, port, in read_port_immutable()
303 &device->port_immutable[port]); in read_port_immutable()
307 if (verify_immutable(device, port)) in read_port_immutable()
322 int ib_register_device(struct ib_device *device, in ib_register_device() argument
331 if (strchr(device->name, '%')) { in ib_register_device()
332 ret = alloc_name(device->name); in ib_register_device()
337 if (ib_device_check_mandatory(device)) { in ib_register_device()
342 ret = read_port_immutable(device); in ib_register_device()
345 device->name); in ib_register_device()
349 ret = ib_cache_setup_one(device); in ib_register_device()
355 ret = ib_device_register_sysfs(device, port_callback); in ib_register_device()
358 device->name); in ib_register_device()
359 ib_cache_cleanup_one(device); in ib_register_device()
363 device->reg_state = IB_DEV_REGISTERED; in ib_register_device()
366 if (client->add && !add_client_context(device, client)) in ib_register_device()
367 client->add(device); in ib_register_device()
370 list_add_tail(&device->core_list, &device_list); in ib_register_device()
384 void ib_unregister_device(struct ib_device *device) in ib_unregister_device() argument
392 list_del(&device->core_list); in ib_unregister_device()
393 spin_lock_irqsave(&device->client_data_lock, flags); in ib_unregister_device()
394 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) in ib_unregister_device()
396 spin_unlock_irqrestore(&device->client_data_lock, flags); in ib_unregister_device()
399 list_for_each_entry_safe(context, tmp, &device->client_data_list, in ib_unregister_device()
402 context->client->remove(device, context->data); in ib_unregister_device()
408 ib_device_unregister_sysfs(device); in ib_unregister_device()
409 ib_cache_cleanup_one(device); in ib_unregister_device()
412 spin_lock_irqsave(&device->client_data_lock, flags); in ib_unregister_device()
413 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) in ib_unregister_device()
415 spin_unlock_irqrestore(&device->client_data_lock, flags); in ib_unregister_device()
418 device->reg_state = IB_DEV_UNREGISTERED; in ib_unregister_device()
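ib_register_device() and ib_unregister_device() are driven from a driver's probe/remove path. A sketch of that lifecycle, continuing the hypothetical my_hca from the allocation sketch above (the NULL argument skips the optional per-port sysfs callback):

static int my_hca_probe(struct my_hca *hca)
{
	int ret;

	/* A '%d' in the name asks ib_register_device() to pick a free index
	 * via alloc_name(), as seen at lines 331-332 above. */
	strlcpy(hca->ib_dev.name, "myhca%d", IB_DEVICE_NAME_MAX);

	/* The driver must have filled in the verbs checked by
	 * ib_device_check_mandatory() before this call. */
	ret = ib_register_device(&hca->ib_dev, NULL);
	if (ret)
		ib_dealloc_device(&hca->ib_dev);
	return ret;
}

static void my_hca_remove(struct my_hca *hca)
{
	/* Runs every registered client's remove() callback, tears down sysfs
	 * and the caches, then marks the device IB_DEV_UNREGISTERED. */
	ib_unregister_device(&hca->ib_dev);
	ib_dealloc_device(&hca->ib_dev);
}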
437 struct ib_device *device; in ib_register_client() local
441 list_for_each_entry(device, &device_list, core_list) in ib_register_client()
442 if (client->add && !add_client_context(device, client)) in ib_register_client()
443 client->add(device); in ib_register_client()
466 struct ib_device *device; in ib_unregister_client() local
475 list_for_each_entry(device, &device_list, core_list) { in ib_unregister_client()
479 spin_lock_irqsave(&device->client_data_lock, flags); in ib_unregister_client()
480 list_for_each_entry_safe(context, tmp, &device->client_data_list, list) in ib_unregister_client()
486 spin_unlock_irqrestore(&device->client_data_lock, flags); in ib_unregister_client()
490 client->remove(device, found_context ? in ib_unregister_client()
495 device->name, client->name); in ib_unregister_client()
500 spin_lock_irqsave(&device->client_data_lock, flags); in ib_unregister_client()
503 spin_unlock_irqrestore(&device->client_data_lock, flags); in ib_unregister_client()
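ib_register_client()/ib_unregister_client() are consumed by upper-layer protocols. A minimal sketch of a hypothetical client (the my_client names are illustrative) whose add()/remove() callbacks fire for every registered device:

static void my_client_add(struct ib_device *device);
static void my_client_remove(struct ib_device *device, void *client_data);

static struct ib_client my_client = {
	.name	= "my_client",
	.add	= my_client_add,	/* invoked for current and future devices */
	.remove	= my_client_remove,	/* invoked on device or client unregistration */
};

static int my_client_start(void)
{
	/* Also replays add() for devices that registered before this client
	 * (lines 441-443 above). */
	return ib_register_client(&my_client);
}

static void my_client_stop(void)
{
	/* Calls remove() for every device the client is still attached to and
	 * frees the per-device contexts (lines 475-503 above). */
	ib_unregister_client(&my_client);
}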
519 void *ib_get_client_data(struct ib_device *device, struct ib_client *client) in ib_get_client_data() argument
525 spin_lock_irqsave(&device->client_data_lock, flags); in ib_get_client_data()
526 list_for_each_entry(context, &device->client_data_list, list) in ib_get_client_data()
531 spin_unlock_irqrestore(&device->client_data_lock, flags); in ib_get_client_data()
546 void ib_set_client_data(struct ib_device *device, struct ib_client *client, in ib_set_client_data() argument
552 spin_lock_irqsave(&device->client_data_lock, flags); in ib_set_client_data()
553 list_for_each_entry(context, &device->client_data_list, list) in ib_set_client_data()
560 device->name, client->name); in ib_set_client_data()
563 spin_unlock_irqrestore(&device->client_data_lock, flags); in ib_set_client_data()
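The per-device context created by add_client_context() is what ib_set_client_data()/ib_get_client_data() read and write. A sketch of the hypothetical callbacks declared in the previous example using them:

#include <linux/slab.h>

struct my_state {			/* hypothetical per-device state */
	struct ib_device *device;
};

static void my_client_add(struct ib_device *device)
{
	struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return;
	st->device = device;

	/* Stores st in the context entry that add_client_context() created
	 * for this (device, client) pair, under client_data_lock. */
	ib_set_client_data(device, &my_client, st);
}

static void my_client_remove(struct ib_device *device, void *client_data)
{
	/* client_data is the pointer stored above; it could equally be
	 * fetched with ib_get_client_data(device, &my_client). */
	kfree(client_data);
}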
580 spin_lock_irqsave(&event_handler->device->event_handler_lock, flags); in ib_register_event_handler()
582 &event_handler->device->event_handler_list); in ib_register_event_handler()
583 spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags); in ib_register_event_handler()
600 spin_lock_irqsave(&event_handler->device->event_handler_lock, flags); in ib_unregister_event_handler()
602 spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags); in ib_unregister_event_handler()
621 spin_lock_irqsave(&event->device->event_handler_lock, flags); in ib_dispatch_event()
623 list_for_each_entry(handler, &event->device->event_handler_list, list) in ib_dispatch_event()
626 spin_unlock_irqrestore(&event->device->event_handler_lock, flags); in ib_dispatch_event()
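ib_dispatch_event() fans an ib_event out to every handler on the device's event_handler_list. A sketch of registering one, assuming the INIT_IB_EVENT_HANDLER() initializer from <rdma/ib_verbs.h> (the my_* names are hypothetical):

static struct ib_event_handler my_event_handler;

static void my_event_cb(struct ib_event_handler *handler,
			struct ib_event *event)
{
	/* Called under event_handler_lock with interrupts disabled
	 * (lines 621-626 above), so the handler must not sleep. */
	if (event->event == IB_EVENT_PORT_ACTIVE)
		pr_info("%s: port %u became active\n",
			event->device->name, event->element.port_num);
}

static void my_setup_events(struct ib_device *device)
{
	INIT_IB_EVENT_HANDLER(&my_event_handler, device, my_event_cb);
	ib_register_event_handler(&my_event_handler);
}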
638 int ib_query_device(struct ib_device *device, in ib_query_device() argument
645 return device->query_device(device, device_attr, &uhw); in ib_query_device()
658 int ib_query_port(struct ib_device *device, in ib_query_port() argument
662 if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) in ib_query_port()
665 return device->query_port(device, port_num, port_attr); in ib_query_port()
680 int ib_query_gid(struct ib_device *device, in ib_query_gid() argument
684 if (rdma_cap_roce_gid_table(device, port_num)) in ib_query_gid()
685 return ib_get_cached_gid(device, port_num, index, gid, attr); in ib_query_gid()
690 return device->query_gid(device, port_num, index, gid); in ib_query_gid()
769 int ib_query_pkey(struct ib_device *device, in ib_query_pkey() argument
772 return device->query_pkey(device, port_num, index, pkey); in ib_query_pkey()
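A hedged sketch of a consumer using the query helpers above to read a port's attributes, its GID at index 0 and its first P_Key (the function name is hypothetical; the types come from <rdma/ib_verbs.h>):

static int my_dump_port(struct ib_device *device, u8 port_num)
{
	struct ib_port_attr port_attr;
	union ib_gid gid;
	u16 pkey;
	int ret;

	ret = ib_query_port(device, port_num, &port_attr);
	if (ret)
		return ret;

	if (port_attr.state != IB_PORT_ACTIVE)
		pr_info("%s: port %u not active\n", device->name, port_num);

	/* The ib_gid_attr argument may be NULL; it is only filled in for
	 * RoCE ports, which go through the GID cache (lines 684-685). */
	ret = ib_query_gid(device, port_num, 0, &gid, NULL);
	if (ret)
		return ret;

	return ib_query_pkey(device, port_num, 0, &pkey);
}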
785 int ib_modify_device(struct ib_device *device, in ib_modify_device() argument
789 if (!device->modify_device) in ib_modify_device()
792 return device->modify_device(device, device_modify_mask, in ib_modify_device()
808 int ib_modify_port(struct ib_device *device, in ib_modify_port() argument
812 if (!device->modify_port) in ib_modify_port()
815 if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) in ib_modify_port()
818 return device->modify_port(device, port_num, port_modify_mask, in ib_modify_port()
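ib_modify_port() is how consumers flip port capability bits. A sketch modeled on the common pattern of advertising a capability (the capability bit chosen here is illustrative; the function name is hypothetical):

static int my_advertise_cm(struct ib_device *device, u8 port_num)
{
	struct ib_port_modify port_modify = {
		/* Set the CM-supported capability bit; clr_port_cap_mask
		 * and init_type stay zero. */
		.set_port_cap_mask = IB_PORT_CM_SUP,
	};

	/* Fails if the driver provides no modify_port verb or the port
	 * number is out of range (lines 812-815 above). */
	return ib_modify_port(device, port_num, 0, &port_modify);
}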
833 int ib_find_gid(struct ib_device *device, union ib_gid *gid, in ib_find_gid() argument
839 for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) { in ib_find_gid()
840 if (rdma_cap_roce_gid_table(device, port)) { in ib_find_gid()
841 if (!ib_find_cached_gid_by_port(device, gid, port, in ib_find_gid()
848 for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) { in ib_find_gid()
849 ret = ib_query_gid(device, port, i, &tmp_gid, NULL); in ib_find_gid()
873 int ib_find_pkey(struct ib_device *device, in ib_find_pkey() argument
880 for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) { in ib_find_pkey()
881 ret = ib_query_pkey(device, port_num, i, &tmp_pkey); in ib_find_pkey()
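A sketch of using ib_find_pkey() to translate the default full-membership P_Key (0xffff in the IBA) into a table index, the way an upper-layer protocol typically consumes it (the function name is hypothetical):

static int my_default_pkey_index(struct ib_device *device, u8 port_num,
				 u16 *index)
{
	/* Scans the port's P_Key table (pkey_tbl_len entries, line 880)
	 * via ib_query_pkey() until the requested key is found. */
	return ib_find_pkey(device, port_num, 0xffff, index);
}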