Lines matching refs: group (cross-reference hits from drivers/vfio/vfio.c). Each entry gives the source line number, the matched line, and the enclosing function; 'member', 'argument', and 'local' mark how the identifier is used at that line.
92 struct vfio_group *group; member
149 static int vfio_alloc_group_minor(struct vfio_group *group) in vfio_alloc_group_minor() argument
151 return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL); in vfio_alloc_group_minor()
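The group minor numbers above come straight out of an IDR: idr_alloc() hands back the lowest free integer in [0, MINORMASK] and maps it to the group pointer for later lookup. A minimal sketch of that pattern, with hypothetical demo_* names standing in for vfio.group_idr and vfio.group_lock:

#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>

static DEFINE_IDR(demo_idr);
static DEFINE_MUTEX(demo_idr_lock);

static int demo_alloc_minor(void *obj)
{
	int minor;

	mutex_lock(&demo_idr_lock);
	/* Lowest free id in [0, MINORMASK], or a negative errno. */
	minor = idr_alloc(&demo_idr, obj, 0, MINORMASK + 1, GFP_KERNEL);
	mutex_unlock(&demo_idr_lock);

	return minor;
}

static void demo_free_minor(int minor)
{
	mutex_lock(&demo_idr_lock);
	idr_remove(&demo_idr, minor);
	mutex_unlock(&demo_idr_lock);
}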
161 static void vfio_group_get(struct vfio_group *group);
187 static void vfio_group_unlock_and_free(struct vfio_group *group) in vfio_group_unlock_and_free() argument
194 iommu_group_unregister_notifier(group->iommu_group, &group->nb); in vfio_group_unlock_and_free()
195 kfree(group); in vfio_group_unlock_and_free()
203 struct vfio_group *group, *tmp; in vfio_create_group() local
207 group = kzalloc(sizeof(*group), GFP_KERNEL); in vfio_create_group()
208 if (!group) in vfio_create_group()
211 kref_init(&group->kref); in vfio_create_group()
212 INIT_LIST_HEAD(&group->device_list); in vfio_create_group()
213 mutex_init(&group->device_lock); in vfio_create_group()
214 INIT_LIST_HEAD(&group->unbound_list); in vfio_create_group()
215 mutex_init(&group->unbound_lock); in vfio_create_group()
216 atomic_set(&group->container_users, 0); in vfio_create_group()
217 atomic_set(&group->opened, 0); in vfio_create_group()
218 group->iommu_group = iommu_group; in vfio_create_group()
220 group->nb.notifier_call = vfio_iommu_group_notifier; in vfio_create_group()
229 ret = iommu_group_register_notifier(iommu_group, &group->nb); in vfio_create_group()
231 kfree(group); in vfio_create_group()
241 vfio_group_unlock_and_free(group); in vfio_create_group()
246 minor = vfio_alloc_group_minor(group); in vfio_create_group()
248 vfio_group_unlock_and_free(group); in vfio_create_group()
254 group, "%d", iommu_group_id(iommu_group)); in vfio_create_group()
257 vfio_group_unlock_and_free(group); in vfio_create_group()
261 group->minor = minor; in vfio_create_group()
262 group->dev = dev; in vfio_create_group()
264 list_add(&group->vfio_next, &vfio.group_list); in vfio_create_group()
268 return group; in vfio_create_group()
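vfio_create_group() is the usual allocate, initialize, publish-under-lock sequence: kzalloc, kref_init, list and mutex init, then list_add under the global lock, unwinding each step on failure (the kernel version also registers an IOMMU-group notifier and re-checks for a racing duplicate before publishing). A skeleton of that ordering with illustrative types, not the kernel's:

#include <linux/err.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref kref;
	struct list_head node;
};

static LIST_HEAD(demo_list);
static DEFINE_MUTEX(demo_list_lock);

static struct demo_obj *demo_create(void)
{
	struct demo_obj *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	kref_init(&obj->kref);		/* caller owns the first reference */
	INIT_LIST_HEAD(&obj->node);

	mutex_lock(&demo_list_lock);
	list_add(&obj->node, &demo_list);	/* now visible to lookups */
	mutex_unlock(&demo_list_lock);

	return obj;
}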
274 struct vfio_group *group = container_of(kref, struct vfio_group, kref); in vfio_group_release() local
276 struct iommu_group *iommu_group = group->iommu_group; in vfio_group_release()
278 WARN_ON(!list_empty(&group->device_list)); in vfio_group_release()
281 &group->unbound_list, unbound_next) { in vfio_group_release()
286 device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor)); in vfio_group_release()
287 list_del(&group->vfio_next); in vfio_group_release()
288 vfio_free_group_minor(group->minor); in vfio_group_release()
289 vfio_group_unlock_and_free(group); in vfio_group_release()
293 static void vfio_group_put(struct vfio_group *group) in vfio_group_put() argument
295 kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); in vfio_group_put()
299 static void vfio_group_get(struct vfio_group *group) in vfio_group_get() argument
301 kref_get(&group->kref); in vfio_group_get()
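vfio_group_put() leans on kref_put_mutex(): the mutex is taken only when the count actually drops to zero, and the release callback then runs with the lock held, so it must unlock before freeing; that is exactly why vfio_group_unlock_and_free() exists. Continuing the demo_obj sketch:

/* Runs with demo_list_lock held when the last reference drops. */
static void demo_release(struct kref *kref)
{
	struct demo_obj *obj = container_of(kref, struct demo_obj, kref);

	list_del(&obj->node);
	mutex_unlock(&demo_list_lock);	/* inherited from kref_put_mutex() */
	kfree(obj);
}

static void demo_put(struct demo_obj *obj)
{
	kref_put_mutex(&obj->kref, demo_release, &demo_list_lock);
}

static void demo_get(struct demo_obj *obj)
{
	kref_get(&obj->kref);	/* caller must already hold a reference */
}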
308 static struct vfio_group *vfio_group_try_get(struct vfio_group *group) in vfio_group_try_get() argument
310 struct vfio_group *target = group; in vfio_group_try_get()
313 list_for_each_entry(group, &vfio.group_list, vfio_next) { in vfio_group_try_get()
314 if (group == target) { in vfio_group_try_get()
315 vfio_group_get(group); in vfio_group_try_get()
317 return group; in vfio_group_try_get()
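vfio_group_try_get() handles a pointer that may already be stale (such as one recovered from an embedded notifier_block): it takes a reference only after re-finding the object on the live list under the lock, so it cannot race with the final put. The same idiom on the demo types:

static struct demo_obj *demo_try_get(struct demo_obj *target)
{
	struct demo_obj *obj;

	mutex_lock(&demo_list_lock);
	list_for_each_entry(obj, &demo_list, node) {
		if (obj == target) {
			kref_get(&obj->kref);
			mutex_unlock(&demo_list_lock);
			return obj;
		}
	}
	mutex_unlock(&demo_list_lock);

	return NULL;	/* target was already torn down */
}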
328 struct vfio_group *group; in vfio_group_get_from_iommu() local
331 list_for_each_entry(group, &vfio.group_list, vfio_next) { in vfio_group_get_from_iommu()
332 if (group->iommu_group == iommu_group) { in vfio_group_get_from_iommu()
333 vfio_group_get(group); in vfio_group_get_from_iommu()
335 return group; in vfio_group_get_from_iommu()
345 struct vfio_group *group; in vfio_group_get_from_minor() local
348 group = idr_find(&vfio.group_idr, minor); in vfio_group_get_from_minor()
349 if (!group) { in vfio_group_get_from_minor()
353 vfio_group_get(group); in vfio_group_get_from_minor()
356 return group; in vfio_group_get_from_minor()
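The two lookup paths mirror each other: vfio_group_get_from_iommu() walks the group list, while vfio_group_get_from_minor() resolves a chardev minor through the IDR; both take a reference before dropping the lock. The IDR variant, sketched with the demo names:

static struct demo_obj *demo_get_from_minor(int minor)
{
	struct demo_obj *obj;

	mutex_lock(&demo_idr_lock);
	obj = idr_find(&demo_idr, minor);
	if (obj)
		kref_get(&obj->kref);	/* pin before unlocking */
	mutex_unlock(&demo_idr_lock);

	return obj;
}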
363 struct vfio_device *vfio_group_create_device(struct vfio_group *group, in vfio_group_create_device() argument
376 device->group = group; in vfio_group_create_device()
382 vfio_group_get(group); in vfio_group_create_device()
384 mutex_lock(&group->device_lock); in vfio_group_create_device()
385 list_add(&device->group_next, &group->device_list); in vfio_group_create_device()
386 mutex_unlock(&group->device_lock); in vfio_group_create_device()
395 struct vfio_group *group = device->group; in vfio_device_release() local
398 mutex_unlock(&group->device_lock); in vfio_device_release()
411 struct vfio_group *group = device->group; in vfio_device_put() local
412 kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock); in vfio_device_put()
413 vfio_group_put(group); in vfio_device_put()
419 vfio_group_get(device->group); in vfio_device_get()
423 static struct vfio_device *vfio_group_get_device(struct vfio_group *group, in vfio_group_get_device() argument
428 mutex_lock(&group->device_lock); in vfio_group_get_device()
429 list_for_each_entry(device, &group->device_list, group_next) { in vfio_group_get_device()
432 mutex_unlock(&group->device_lock); in vfio_group_get_device()
436 mutex_unlock(&group->device_lock); in vfio_group_get_device()
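Each vfio_device pins its group for as long as it exists (the vfio_group_get() in the create path above), and vfio_device_put() drops the device reference under group->device_lock via kref_put_mutex() before releasing the group reference. A sketch of that paired lifetime, assuming demo_obj grows a dev_lock mutex protecting a per-owner device list:

struct demo_dev {
	struct kref kref;
	struct demo_obj *owner;		/* pinned while the device exists */
	struct list_head node;
};

/* Runs with owner->dev_lock held. */
static void demo_dev_release(struct kref *kref)
{
	struct demo_dev *dev = container_of(kref, struct demo_dev, kref);

	list_del(&dev->node);
	mutex_unlock(&dev->owner->dev_lock);
	kfree(dev);
}

static void demo_dev_put(struct demo_dev *dev)
{
	struct demo_obj *owner = dev->owner;	/* dev is gone after the put */

	kref_put_mutex(&dev->kref, demo_dev_release, &owner->dev_lock);
	demo_put(owner);	/* the device no longer pins its owner */
}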
474 struct vfio_group *group = data; in vfio_dev_viable() local
480 mutex_lock(&group->unbound_lock); in vfio_dev_viable()
481 list_for_each_entry(unbound, &group->unbound_list, unbound_next) { in vfio_dev_viable()
487 mutex_unlock(&group->unbound_lock); in vfio_dev_viable()
492 device = vfio_group_get_device(group, dev); in vfio_dev_viable()
504 static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev) in vfio_group_nb_add_dev() argument
509 device = vfio_group_get_device(group, dev); in vfio_group_nb_add_dev()
516 if (!atomic_read(&group->container_users)) in vfio_group_nb_add_dev()
521 iommu_group_id(group->iommu_group)); in vfio_group_nb_add_dev()
526 static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev) in vfio_group_nb_verify() argument
529 if (!atomic_read(&group->container_users)) in vfio_group_nb_verify()
532 return vfio_dev_viable(dev, group); in vfio_group_nb_verify()
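vfio_dev_viable() is the per-device predicate behind group viability: per the fragments above, a device passes if it sits on the group's unbound list or is already known as a vfio_device, and the notifier helpers only apply the check while the group has container users. A toy version showing how such a predicate plugs into iommu_group_for_each_dev(); the policy here is illustrative, not vfio's:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>

/* Illustrative policy only: accept driverless devices. */
static int demo_dev_viable(struct device *dev, void *data)
{
	return dev->driver ? -EBUSY : 0;
}

static bool demo_group_viable(struct iommu_group *iommu_group, void *data)
{
	/* Iteration stops at the first callback returning non-zero. */
	return iommu_group_for_each_dev(iommu_group, data,
					demo_dev_viable) == 0;
}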
538 struct vfio_group *group = container_of(nb, struct vfio_group, nb); in vfio_iommu_group_notifier() local
546 group = vfio_group_try_get(group); in vfio_iommu_group_notifier()
547 if (!group) in vfio_iommu_group_notifier()
552 vfio_group_nb_add_dev(group, dev); in vfio_iommu_group_notifier()
566 iommu_group_id(group->iommu_group)); in vfio_iommu_group_notifier()
571 iommu_group_id(group->iommu_group), dev->driver->name); in vfio_iommu_group_notifier()
572 BUG_ON(vfio_group_nb_verify(group, dev)); in vfio_iommu_group_notifier()
577 iommu_group_id(group->iommu_group), dev->driver->name); in vfio_iommu_group_notifier()
582 iommu_group_id(group->iommu_group)); in vfio_iommu_group_notifier()
591 mutex_lock(&group->unbound_lock); in vfio_iommu_group_notifier()
593 &group->unbound_list, unbound_next) { in vfio_iommu_group_notifier()
600 mutex_unlock(&group->unbound_lock); in vfio_iommu_group_notifier()
604 vfio_group_put(group); in vfio_iommu_group_notifier()
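The notifier callback recovers its group with container_of() from the embedded notifier_block and immediately revalidates it with vfio_group_try_get(), since IOMMU-group events can arrive while the group is being torn down. (The iommu_group_register_notifier() API seen at line 229 above existed in kernels of this vintage and was removed later.) The shape of such a callback, assuming demo_obj carries a struct notifier_block nb:

#include <linux/notifier.h>

static int demo_notifier(struct notifier_block *nb,
			 unsigned long action, void *data)
{
	struct demo_obj *obj = container_of(nb, struct demo_obj, nb);

	/* The event may race with teardown: revalidate first. */
	obj = demo_try_get(obj);
	if (!obj)
		return NOTIFY_OK;

	/* act on `action` here */

	demo_put(obj);
	return NOTIFY_OK;
}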
615 struct vfio_group *group; in vfio_add_group_dev() local
622 group = vfio_group_get_from_iommu(iommu_group); in vfio_add_group_dev()
623 if (!group) { in vfio_add_group_dev()
624 group = vfio_create_group(iommu_group); in vfio_add_group_dev()
625 if (IS_ERR(group)) { in vfio_add_group_dev()
627 return PTR_ERR(group); in vfio_add_group_dev()
637 device = vfio_group_get_device(group, dev); in vfio_add_group_dev()
642 vfio_group_put(group); in vfio_add_group_dev()
646 device = vfio_group_create_device(group, dev, ops, device_data); in vfio_add_group_dev()
648 vfio_group_put(group); in vfio_add_group_dev()
657 vfio_group_put(group); in vfio_add_group_dev()
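vfio_add_group_dev() is a lookup-or-create: reuse an existing group for the device's iommu_group, create one only on a miss, and drop the local reference on every exit path; on success the new vfio_device holds its own group reference. A condensed sketch, where demo_get_from_iommu() and demo_dev_create() are hypothetical stand-ins for the lookup and the device constructor:

static int demo_add_dev(struct iommu_group *iommu_group, struct device *dev)
{
	struct demo_obj *obj;
	struct demo_dev *ddev;

	obj = demo_get_from_iommu(iommu_group);	/* hypothetical lookup */
	if (!obj) {
		obj = demo_create();
		if (IS_ERR(obj))
			return PTR_ERR(obj);
	}

	ddev = demo_dev_create(obj, dev);	/* takes its own obj ref */
	if (IS_ERR(ddev)) {
		demo_put(obj);
		return PTR_ERR(ddev);
	}

	demo_put(obj);	/* the device now pins obj on our behalf */
	return 0;
}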
691 static bool vfio_dev_present(struct vfio_group *group, struct device *dev) in vfio_dev_present() argument
695 device = vfio_group_get_device(group, dev); in vfio_dev_present()
709 struct vfio_group *group = device->group; in vfio_del_group_dev() local
720 vfio_group_get(group); in vfio_del_group_dev()
734 mutex_lock(&group->unbound_lock); in vfio_del_group_dev()
735 list_add(&unbound->unbound_next, &group->unbound_list); in vfio_del_group_dev()
736 mutex_unlock(&group->unbound_lock); in vfio_del_group_dev()
751 device = vfio_group_get_device(group, dev); in vfio_del_group_dev()
762 !vfio_dev_present(group, dev), HZ * 10); in vfio_del_group_dev()
765 !vfio_dev_present(group, dev), HZ * 10); in vfio_del_group_dev()
777 vfio_group_put(group); in vfio_del_group_dev()
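vfio_del_group_dev() first parks the device on the group's unbound list (so a racing notifier still judges the group viable), then blocks until every user has dropped the device, polling with a ten-second interruptible timeout as the two wait_event lines above show. The wait idiom, with demo_dev_present() as a hypothetical stand-in for vfio_dev_present():

#include <linux/jiffies.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_release_q);

static void demo_wait_for_removal(struct demo_obj *obj, struct device *dev)
{
	long ret;

	do {
		ret = wait_event_interruptible_timeout(demo_release_q,
				!demo_dev_present(obj, dev), HZ * 10);
	} while (ret <= 0);	/* retry after a timeout or a signal */
}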
835 struct vfio_group *group; in __vfio_container_attach_groups() local
838 list_for_each_entry(group, &container->group_list, container_next) { in __vfio_container_attach_groups()
839 ret = driver->ops->attach_group(data, group->iommu_group); in __vfio_container_attach_groups()
847 list_for_each_entry_continue_reverse(group, &container->group_list, in __vfio_container_attach_groups()
849 driver->ops->detach_group(data, group->iommu_group); in __vfio_container_attach_groups()
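__vfio_container_attach_groups() must attach every group or none: on failure it rolls back with list_for_each_entry_continue_reverse(), which detaches exactly the entries attached before the failing one. The idiom, with trivial stand-ins for the IOMMU driver's attach_group()/detach_group() callbacks:

static int demo_attach(struct demo_obj *obj) { return 0; }	/* stand-in */
static void demo_detach(struct demo_obj *obj) { }		/* stand-in */

static int demo_attach_all(struct list_head *head)
{
	struct demo_obj *obj;
	int ret;

	list_for_each_entry(obj, head, node) {
		ret = demo_attach(obj);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	/* Walks backwards from obj, skipping the entry that failed. */
	list_for_each_entry_continue_reverse(obj, head, node)
		demo_detach(obj);
	return ret;
}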
1072 static void __vfio_group_unset_container(struct vfio_group *group) in __vfio_group_unset_container() argument
1074 struct vfio_container *container = group->container; in __vfio_group_unset_container()
1082 group->iommu_group); in __vfio_group_unset_container()
1084 group->container = NULL; in __vfio_group_unset_container()
1085 list_del(&group->container_next); in __vfio_group_unset_container()
1106 static int vfio_group_unset_container(struct vfio_group *group) in vfio_group_unset_container() argument
1108 int users = atomic_cmpxchg(&group->container_users, 1, 0); in vfio_group_unset_container()
1115 __vfio_group_unset_container(group); in vfio_group_unset_container()
1126 static void vfio_group_try_dissolve_container(struct vfio_group *group) in vfio_group_try_dissolve_container() argument
1128 if (0 == atomic_dec_if_positive(&group->container_users)) in vfio_group_try_dissolve_container()
1129 __vfio_group_unset_container(group); in vfio_group_try_dissolve_container()
1132 static int vfio_group_set_container(struct vfio_group *group, int container_fd) in vfio_group_set_container() argument
1139 if (atomic_read(&group->container_users)) in vfio_group_set_container()
1160 group->iommu_group); in vfio_group_set_container()
1165 group->container = container; in vfio_group_set_container()
1166 list_add(&group->container_next, &container->group_list); in vfio_group_set_container()
1170 atomic_inc(&group->container_users); in vfio_group_set_container()
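container_users is the hinge of container membership: setting a container takes it from 0 to 1, VFIO_GROUP_UNSET_CONTAINER only succeeds for a sole user via atomic_cmpxchg(1, 0), and the dissolve path uses atomic_dec_if_positive() so teardown happens exactly once, on the final 1-to-0 transition. Sketched on a demo_obj assumed to carry an atomic_t container_users:

#include <linux/atomic.h>

static void __demo_unset_container(struct demo_obj *obj) { /* teardown */ }

static int demo_unset_container(struct demo_obj *obj)
{
	int users = atomic_cmpxchg(&obj->container_users, 1, 0);

	if (!users)
		return -EINVAL;	/* not in a container */
	if (users != 1)
		return -EBUSY;	/* still shared with device/external users */

	__demo_unset_container(obj);
	return 0;
}

static void demo_try_dissolve_container(struct demo_obj *obj)
{
	/* Never goes below zero; acts only on the 1 -> 0 transition. */
	if (atomic_dec_if_positive(&obj->container_users) == 0)
		__demo_unset_container(obj);
}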
1178 static bool vfio_group_viable(struct vfio_group *group) in vfio_group_viable() argument
1180 return (iommu_group_for_each_dev(group->iommu_group, in vfio_group_viable()
1181 group, vfio_dev_viable) == 0); in vfio_group_viable()
1186 static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) in vfio_group_get_device_fd() argument
1192 if (0 == atomic_read(&group->container_users) || in vfio_group_get_device_fd()
1193 !group->container->iommu_driver || !vfio_group_viable(group)) in vfio_group_get_device_fd()
1196 mutex_lock(&group->device_lock); in vfio_group_get_device_fd()
1197 list_for_each_entry(device, &group->device_list, group_next) { in vfio_group_get_device_fd()
1231 atomic_inc(&group->container_users); in vfio_group_get_device_fd()
1236 mutex_unlock(&group->device_lock); in vfio_group_get_device_fd()
1244 struct vfio_group *group = filep->private_data; in vfio_group_fops_unl_ioctl() local
1263 if (vfio_group_viable(group)) in vfio_group_fops_unl_ioctl()
1266 if (group->container) in vfio_group_fops_unl_ioctl()
1285 ret = vfio_group_set_container(group, fd); in vfio_group_fops_unl_ioctl()
1289 ret = vfio_group_unset_container(group); in vfio_group_fops_unl_ioctl()
1299 ret = vfio_group_get_device_fd(group, buf); in vfio_group_fops_unl_ioctl()
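These are the ioctls userspace drives, and lines 1192 and 1193 above show why ordering matters: VFIO_GROUP_GET_DEVICE_FD is refused until the group sits in a container with an IOMMU driver set and is viable. The canonical sequence from Documentation/vfio.txt, trimmed of error handling (the group number and device name are examples):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int demo_open_device(void)
{
	int container, group, device;
	struct vfio_group_status status = { .argsz = sizeof(status) };

	container = open("/dev/vfio/vfio", O_RDWR);
	group = open("/dev/vfio/26", O_RDWR);	/* example group */
	if (container < 0 || group < 0)
		return -1;

	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
		return -1;	/* some group device is not vfio-bound */

	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);

	/* Fails until container, IOMMU driver and viability line up. */
	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
	return device;
}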
1319 struct vfio_group *group; in vfio_group_fops_open() local
1322 group = vfio_group_get_from_minor(iminor(inode)); in vfio_group_fops_open()
1323 if (!group) in vfio_group_fops_open()
1327 opened = atomic_cmpxchg(&group->opened, 0, 1); in vfio_group_fops_open()
1329 vfio_group_put(group); in vfio_group_fops_open()
1334 if (group->container) { in vfio_group_fops_open()
1335 atomic_dec(&group->opened); in vfio_group_fops_open()
1336 vfio_group_put(group); in vfio_group_fops_open()
1340 filep->private_data = group; in vfio_group_fops_open()
1347 struct vfio_group *group = filep->private_data; in vfio_group_fops_release() local
1351 vfio_group_try_dissolve_container(group); in vfio_group_fops_release()
1353 atomic_dec(&group->opened); in vfio_group_fops_release()
1355 vfio_group_put(group); in vfio_group_fops_release()
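vfio_group_fops_open() allows exactly one open of the group chardev: atomic_cmpxchg(&group->opened, 0, 1) lets only the 0-to-1 transition through, the open is also refused while a container binding from a previous user lingers, and release decrements the counter after dissolving the caller's container reference. The exclusive-open gate in isolation:

#include <linux/atomic.h>
#include <linux/fs.h>

static atomic_t demo_opened = ATOMIC_INIT(0);

static int demo_open(struct inode *inode, struct file *filep)
{
	/* Only the 0 -> 1 transition wins; everyone else gets -EBUSY. */
	if (atomic_cmpxchg(&demo_opened, 0, 1))
		return -EBUSY;
	return 0;
}

static int demo_fops_release(struct inode *inode, struct file *filep)
{
	atomic_dec(&demo_opened);
	return 0;
}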
1379 vfio_group_try_dissolve_container(device->group); in vfio_device_fops_release()
1480 struct vfio_group *group = filep->private_data; in vfio_group_get_external_user() local
1485 if (!atomic_inc_not_zero(&group->container_users)) in vfio_group_get_external_user()
1488 if (!group->container->iommu_driver || in vfio_group_get_external_user()
1489 !vfio_group_viable(group)) { in vfio_group_get_external_user()
1490 atomic_dec(&group->container_users); in vfio_group_get_external_user()
1494 vfio_group_get(group); in vfio_group_get_external_user()
1496 return group; in vfio_group_get_external_user()
1500 void vfio_group_put_external_user(struct vfio_group *group) in vfio_group_put_external_user() argument
1502 vfio_group_put(group); in vfio_group_put_external_user()
1503 vfio_group_try_dissolve_container(group); in vfio_group_put_external_user()
1507 int vfio_external_user_iommu_id(struct vfio_group *group) in vfio_external_user_iommu_id() argument
1509 return iommu_group_id(group->iommu_group); in vfio_external_user_iommu_id()
1513 long vfio_external_check_extension(struct vfio_group *group, unsigned long arg) in vfio_external_check_extension() argument
1515 return vfio_ioctl_check_extension(group->container, arg); in vfio_external_check_extension()
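The external-user API (consumed by callers such as KVM) pins a group through its already-open file: atomic_inc_not_zero() on container_users succeeds only while the group is container-backed, so an external reference can never resurrect a dissolved binding; the kernel version additionally re-checks the IOMMU driver and group viability. The refcount choreography alone, on the demo types:

struct demo_obj *demo_get_external_user(struct demo_obj *obj)
{
	/* Refuse unless obj already has at least one container user. */
	if (!atomic_inc_not_zero(&obj->container_users))
		return ERR_PTR(-EINVAL);

	demo_get(obj);
	return obj;
}

void demo_put_external_user(struct demo_obj *obj)
{
	demo_put(obj);
	demo_try_dissolve_container(obj);
}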