Lines Matching refs:group

66 	ssize_t (*show)(struct iommu_group *group, char *buf);
67 ssize_t (*store)(struct iommu_group *group,
85 struct iommu_group *group);
87 struct iommu_group *group);
93 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_attr_show() local
97 ret = attr->show(group, buf); in iommu_group_attr_show()
106 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_attr_store() local
110 ret = attr->store(group, buf, count); in iommu_group_attr_store()
119 static int iommu_group_create_file(struct iommu_group *group, in iommu_group_create_file() argument
122 return sysfs_create_file(&group->kobj, &attr->attr); in iommu_group_create_file()
125 static void iommu_group_remove_file(struct iommu_group *group, in iommu_group_remove_file() argument
128 sysfs_remove_file(&group->kobj, &attr->attr); in iommu_group_remove_file()
131 static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) in iommu_group_show_name() argument
133 return sprintf(buf, "%s\n", group->name); in iommu_group_show_name()
140 struct iommu_group *group = to_iommu_group(kobj); in iommu_group_release() local
142 pr_debug("Releasing group %d\n", group->id); in iommu_group_release()
144 if (group->iommu_data_release) in iommu_group_release()
145 group->iommu_data_release(group->iommu_data); in iommu_group_release()
148 ida_remove(&iommu_group_ida, group->id); in iommu_group_release()
151 if (group->default_domain) in iommu_group_release()
152 iommu_domain_free(group->default_domain); in iommu_group_release()
154 kfree(group->name); in iommu_group_release()
155 kfree(group); in iommu_group_release()
177 struct iommu_group *group; in iommu_group_alloc() local
180 group = kzalloc(sizeof(*group), GFP_KERNEL); in iommu_group_alloc()
181 if (!group) in iommu_group_alloc()
184 group->kobj.kset = iommu_group_kset; in iommu_group_alloc()
185 mutex_init(&group->mutex); in iommu_group_alloc()
186 INIT_LIST_HEAD(&group->devices); in iommu_group_alloc()
187 BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); in iommu_group_alloc()
193 kfree(group); in iommu_group_alloc()
198 if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id)) in iommu_group_alloc()
203 ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype, in iommu_group_alloc()
204 NULL, "%d", group->id); in iommu_group_alloc()
207 ida_remove(&iommu_group_ida, group->id); in iommu_group_alloc()
209 kfree(group); in iommu_group_alloc()
213 group->devices_kobj = kobject_create_and_add("devices", &group->kobj); in iommu_group_alloc()
214 if (!group->devices_kobj) { in iommu_group_alloc()
215 kobject_put(&group->kobj); /* triggers .release & free */ in iommu_group_alloc()
224 kobject_put(&group->kobj); in iommu_group_alloc()
226 pr_debug("Allocated group %d\n", group->id); in iommu_group_alloc()
228 return group; in iommu_group_alloc()
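
The hits above cover iommu_group_alloc(), which returns a refcounted, sysfs-visible group or an ERR_PTR(), never NULL. A minimal sketch of the usual allocate-and-hand-off pattern, pairing it with iommu_group_add_device() and iommu_group_put() from further down the listing; my_bus_add_dev_to_new_group() is a made-up name, not something in this file:

#include <linux/err.h>
#include <linux/iommu.h>

/* Hypothetical helper: place @dev into a freshly allocated group. */
static int my_bus_add_dev_to_new_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_alloc();		/* ERR_PTR() on failure */
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = iommu_group_add_device(group, dev);

	/*
	 * iommu_group_add_device() takes its own reference on
	 * group->devices_kobj (line 426), so the allocation reference
	 * can be dropped here whether or not the add succeeded.
	 */
	iommu_group_put(group);

	return ret;
}
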
235 struct iommu_group *group; in iommu_group_get_by_id() local
251 group = container_of(group_kobj, struct iommu_group, kobj); in iommu_group_get_by_id()
252 BUG_ON(group->id != id); in iommu_group_get_by_id()
254 kobject_get(group->devices_kobj); in iommu_group_get_by_id()
255 kobject_put(&group->kobj); in iommu_group_get_by_id()
257 return group; in iommu_group_get_by_id()
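
iommu_group_get_by_id() looks a group up by its numeric id and, as the kobject_get()/kobject_put() pair at lines 254-255 shows, hands it back with an extra devices_kobj reference that the caller owns. A hedged sketch; my_lookup_group() is hypothetical:

#include <linux/iommu.h>
#include <linux/printk.h>

/* Hypothetical: act on the group identified by a user-supplied id. */
static int my_lookup_group(int id)
{
	struct iommu_group *group;

	group = iommu_group_get_by_id(id);	/* NULL if no such group */
	if (!group)
		return -ENODEV;

	pr_info("found iommu group %d\n", iommu_group_id(group));

	iommu_group_put(group);			/* balance the lookup's reference */
	return 0;
}
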
269 void *iommu_group_get_iommudata(struct iommu_group *group) in iommu_group_get_iommudata() argument
271 return group->iommu_data; in iommu_group_get_iommudata()
285 void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, in iommu_group_set_iommudata() argument
288 group->iommu_data = iommu_data; in iommu_group_set_iommudata()
289 group->iommu_data_release = release; in iommu_group_set_iommudata()
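
iommu_group_set_iommudata() stashes a driver-private pointer plus a release callback that iommu_group_release() invokes at line 145 once the last reference goes away. A sketch with a made-up my_group_data structure:

#include <linux/iommu.h>
#include <linux/slab.h>

struct my_group_data {			/* hypothetical per-group driver state */
	u32 stream_id;
};

static void my_group_data_release(void *iommu_data)
{
	kfree(iommu_data);		/* runs from iommu_group_release() */
}

static int my_attach_group_data(struct iommu_group *group, u32 stream_id)
{
	struct my_group_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->stream_id = stream_id;
	iommu_group_set_iommudata(group, data, my_group_data_release);
	return 0;
}

Later callers retrieve the pointer again with iommu_group_get_iommudata(group).
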
301 int iommu_group_set_name(struct iommu_group *group, const char *name) in iommu_group_set_name() argument
305 if (group->name) { in iommu_group_set_name()
306 iommu_group_remove_file(group, &iommu_group_attr_name); in iommu_group_set_name()
307 kfree(group->name); in iommu_group_set_name()
308 group->name = NULL; in iommu_group_set_name()
313 group->name = kstrdup(name, GFP_KERNEL); in iommu_group_set_name()
314 if (!group->name) in iommu_group_set_name()
317 ret = iommu_group_create_file(group, &iommu_group_attr_name); in iommu_group_set_name()
319 kfree(group->name); in iommu_group_set_name()
320 group->name = NULL; in iommu_group_set_name()
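
iommu_group_set_name() duplicates the string and exposes it through the group's sysfs name attribute created at line 317. A tiny hypothetical use:

#include <linux/iommu.h>

/* Hypothetical: label the group; the string becomes visible as
 * /sys/kernel/iommu_groups/<id>/name. */
static int my_label_group(struct iommu_group *group)
{
	return iommu_group_set_name(group, "my-peripherals");
}
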
328 static int iommu_group_create_direct_mappings(struct iommu_group *group, in iommu_group_create_direct_mappings() argument
331 struct iommu_domain *domain = group->default_domain; in iommu_group_create_direct_mappings()
382 int iommu_group_add_device(struct iommu_group *group, struct device *dev) in iommu_group_add_device() argument
393 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group"); in iommu_group_add_device()
407 ret = sysfs_create_link_nowarn(group->devices_kobj, in iommu_group_add_device()
426 kobject_get(group->devices_kobj); in iommu_group_add_device()
428 dev->iommu_group = group; in iommu_group_add_device()
430 iommu_group_create_direct_mappings(group, dev); in iommu_group_add_device()
432 mutex_lock(&group->mutex); in iommu_group_add_device()
433 list_add_tail(&device->list, &group->devices); in iommu_group_add_device()
434 if (group->domain) in iommu_group_add_device()
435 __iommu_attach_device(group->domain, dev); in iommu_group_add_device()
436 mutex_unlock(&group->mutex); in iommu_group_add_device()
439 blocking_notifier_call_chain(&group->notifier, in iommu_group_add_device()
442 trace_add_device_to_group(group->id, dev); in iommu_group_add_device()
444 pr_info("Adding device %s to group %d\n", dev_name(dev), group->id); in iommu_group_add_device()
459 struct iommu_group *group = dev->iommu_group; in iommu_group_remove_device() local
462 pr_info("Removing device %s from group %d\n", dev_name(dev), group->id); in iommu_group_remove_device()
465 blocking_notifier_call_chain(&group->notifier, in iommu_group_remove_device()
468 mutex_lock(&group->mutex); in iommu_group_remove_device()
469 list_for_each_entry(tmp_device, &group->devices, list) { in iommu_group_remove_device()
476 mutex_unlock(&group->mutex); in iommu_group_remove_device()
481 sysfs_remove_link(group->devices_kobj, device->name); in iommu_group_remove_device()
484 trace_remove_device_from_group(group->id, dev); in iommu_group_remove_device()
489 kobject_put(group->devices_kobj); in iommu_group_remove_device()
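
iommu_group_remove_device() needs only the device, because the group is reachable through dev->iommu_group (line 459); it is the teardown counterpart of the add in the earlier sketch. Hypothetical:

#include <linux/iommu.h>

/* Hypothetical teardown for my_bus_add_dev_to_new_group() above. */
static void my_bus_del_dev_from_group(struct device *dev)
{
	iommu_group_remove_device(dev);	/* drops the reference held via dev->iommu_group */
}
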
493 static int iommu_group_device_count(struct iommu_group *group) in iommu_group_device_count() argument
498 list_for_each_entry(entry, &group->devices, list) in iommu_group_device_count()
515 static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, in __iommu_group_for_each_dev() argument
521 list_for_each_entry(device, &group->devices, list) { in __iommu_group_for_each_dev()
530 int iommu_group_for_each_dev(struct iommu_group *group, void *data, in iommu_group_for_each_dev() argument
535 mutex_lock(&group->mutex); in iommu_group_for_each_dev()
536 ret = __iommu_group_for_each_dev(group, data, fn); in iommu_group_for_each_dev()
537 mutex_unlock(&group->mutex); in iommu_group_for_each_dev()
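
iommu_group_for_each_dev() takes group->mutex and calls the supplied function once per member device, stopping on the first non-zero return. A sketch with hypothetical names:

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical callback: log every device in the group. */
static int my_print_dev(struct device *dev, void *data)
{
	dev_info(dev, "member of iommu group %d\n", *(int *)data);
	return 0;			/* non-zero would stop the walk */
}

static void my_dump_group(struct iommu_group *group)
{
	int id = iommu_group_id(group);

	iommu_group_for_each_dev(group, &id, my_print_dev);
}
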
553 struct iommu_group *group = dev->iommu_group; in iommu_group_get() local
555 if (group) in iommu_group_get()
556 kobject_get(group->devices_kobj); in iommu_group_get()
558 return group; in iommu_group_get()
569 void iommu_group_put(struct iommu_group *group) in iommu_group_put() argument
571 if (group) in iommu_group_put()
572 kobject_put(group->devices_kobj); in iommu_group_put()
585 int iommu_group_register_notifier(struct iommu_group *group, in iommu_group_register_notifier() argument
588 return blocking_notifier_chain_register(&group->notifier, nb); in iommu_group_register_notifier()
599 int iommu_group_unregister_notifier(struct iommu_group *group, in iommu_group_unregister_notifier() argument
602 return blocking_notifier_chain_unregister(&group->notifier, nb); in iommu_group_unregister_notifier()
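
The notifier chain initialized at line 187 is a blocking notifier, called with the affected struct device * as the data argument (lines 439 and 465). A sketch of a listener; the my_* names are hypothetical:

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/notifier.h>

/* Hypothetical listener for device add/remove events on one group. */
static int my_group_event(struct notifier_block *nb, unsigned long action,
			  void *data)
{
	struct device *dev = data;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		dev_info(dev, "added to the watched group\n");
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		dev_info(dev, "removed from the watched group\n");
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_group_nb = {
	.notifier_call = my_group_event,
};

/* Pair with iommu_group_unregister_notifier(group, &my_group_nb) on exit. */
static int my_watch_group(struct iommu_group *group)
{
	return iommu_group_register_notifier(group, &my_group_nb);
}
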
612 int iommu_group_id(struct iommu_group *group) in iommu_group_id() argument
614 return group->id; in iommu_group_id()
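
iommu_group_get(), iommu_group_put() and iommu_group_id() are the everyday reference-counting trio for consumers such as VFIO. A small hypothetical user:

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical: report which group a device landed in. */
static int my_log_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);	/* NULL if the device has no group */
	if (!group)
		return -ENODEV;

	dev_info(dev, "in iommu group %d\n", iommu_group_id(group));

	iommu_group_put(group);		/* the group may now disappear */
	return 0;
}
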
641 struct iommu_group *group; in get_pci_function_alias_group() local
652 group = get_pci_alias_group(tmp, devfns); in get_pci_function_alias_group()
653 if (group) { in get_pci_function_alias_group()
655 return group; in get_pci_function_alias_group()
675 struct iommu_group *group; in get_pci_alias_group() local
680 group = iommu_group_get(&pdev->dev); in get_pci_alias_group()
681 if (group) in get_pci_alias_group()
682 return group; in get_pci_alias_group()
694 group = get_pci_alias_group(tmp, devfns); in get_pci_alias_group()
695 if (group) { in get_pci_alias_group()
697 return group; in get_pci_alias_group()
700 group = get_pci_function_alias_group(tmp, devfns); in get_pci_alias_group()
701 if (group) { in get_pci_alias_group()
703 return group; in get_pci_alias_group()
713 struct iommu_group *group; member
725 data->group = iommu_group_get(&pdev->dev); in get_pci_alias_or_group()
727 return data->group != NULL; in get_pci_alias_or_group()
736 struct iommu_group *group; in generic_device_group() local
738 group = iommu_group_alloc(); in generic_device_group()
739 if (IS_ERR(group)) in generic_device_group()
742 return group; in generic_device_group()
754 struct iommu_group *group = NULL; in pci_device_group() local
767 return data.group; in pci_device_group()
786 group = iommu_group_get(&pdev->dev); in pci_device_group()
787 if (group) in pci_device_group()
788 return group; in pci_device_group()
795 group = get_pci_alias_group(pdev, (unsigned long *)devfns); in pci_device_group()
796 if (group) in pci_device_group()
797 return group; in pci_device_group()
804 group = get_pci_function_alias_group(pdev, (unsigned long *)devfns); in pci_device_group()
805 if (group) in pci_device_group()
806 return group; in pci_device_group()
809 group = iommu_group_alloc(); in pci_device_group()
810 if (IS_ERR(group)) in pci_device_group()
813 return group; in pci_device_group()
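
generic_device_group() and pci_device_group() above are the stock implementations of the ->device_group() hook that iommu_group_get_for_dev() calls through ops->device_group at line 839. A hedged sketch of how an IOMMU driver might wire them up; my_iommu_ops and the other names are placeholders, not code from this file:

#include <linux/iommu.h>
#include <linux/pci.h>

/* Placeholder ->device_group(): PCI devices are grouped by DMA aliasing
 * and ACS isolation, everything else gets a group of its own. */
static struct iommu_group *my_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);

	return generic_device_group(dev);
}

static const struct iommu_ops my_iommu_ops = {
	/* .domain_alloc, .attach_dev, .map, .unmap, ... elided */
	.device_group	= my_device_group,
};
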
829 struct iommu_group *group; in iommu_group_get_for_dev() local
832 group = iommu_group_get(dev); in iommu_group_get_for_dev()
833 if (group) in iommu_group_get_for_dev()
834 return group; in iommu_group_get_for_dev()
836 group = ERR_PTR(-EINVAL); in iommu_group_get_for_dev()
839 group = ops->device_group(dev); in iommu_group_get_for_dev()
841 if (IS_ERR(group)) in iommu_group_get_for_dev()
842 return group; in iommu_group_get_for_dev()
848 if (!group->default_domain) { in iommu_group_get_for_dev()
849 group->default_domain = __iommu_domain_alloc(dev->bus, in iommu_group_get_for_dev()
851 if (!group->domain) in iommu_group_get_for_dev()
852 group->domain = group->default_domain; in iommu_group_get_for_dev()
855 ret = iommu_group_add_device(group, dev); in iommu_group_get_for_dev()
857 iommu_group_put(group); in iommu_group_get_for_dev()
861 return group; in iommu_group_get_for_dev()
864 struct iommu_domain *iommu_group_default_domain(struct iommu_group *group) in iommu_group_default_domain() argument
866 return group->default_domain; in iommu_group_default_domain()
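
iommu_group_get_for_dev() is what an IOMMU driver normally calls from its ->add_device() callback: it returns the existing group or asks ops->device_group() for one, allocates the default DMA domain at line 849, and adds the device. A sketch of the usual callback pair, with my_iommu_* as placeholder names:

#include <linux/err.h>
#include <linux/iommu.h>

static int my_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* dev->iommu_group now holds its own reference. */
	iommu_group_put(group);
	return 0;
}

static void my_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
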
909 struct iommu_group *group; in iommu_bus_notifier() local
930 group = iommu_group_get(dev); in iommu_bus_notifier()
931 if (!group) in iommu_bus_notifier()
950 blocking_notifier_call_chain(&group->notifier, in iommu_bus_notifier()
953 iommu_group_put(group); in iommu_bus_notifier()
1107 struct iommu_group *group; in iommu_attach_device() local
1110 group = iommu_group_get(dev); in iommu_attach_device()
1112 if (group == NULL) in iommu_attach_device()
1119 mutex_lock(&group->mutex); in iommu_attach_device()
1121 if (iommu_group_device_count(group) != 1) in iommu_attach_device()
1124 ret = __iommu_attach_group(domain, group); in iommu_attach_device()
1127 mutex_unlock(&group->mutex); in iommu_attach_device()
1128 iommu_group_put(group); in iommu_attach_device()
1146 struct iommu_group *group; in iommu_detach_device() local
1148 group = iommu_group_get(dev); in iommu_detach_device()
1150 if (group == NULL) in iommu_detach_device()
1153 mutex_lock(&group->mutex); in iommu_detach_device()
1154 if (iommu_group_device_count(group) != 1) { in iommu_detach_device()
1159 __iommu_detach_group(domain, group); in iommu_detach_device()
1162 mutex_unlock(&group->mutex); in iommu_detach_device()
1163 iommu_group_put(group); in iommu_detach_device()
1170 struct iommu_group *group; in iommu_get_domain_for_dev() local
1172 group = iommu_group_get(dev); in iommu_get_domain_for_dev()
1174 if (group == NULL) in iommu_get_domain_for_dev()
1177 domain = group->domain; in iommu_get_domain_for_dev()
1179 iommu_group_put(group); in iommu_get_domain_for_dev()
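
iommu_attach_device() and iommu_detach_device() deliberately work only on singleton groups, which is what the iommu_group_device_count(group) != 1 checks above enforce; multi-device groups have to use the group-level calls further down. A minimal per-device sketch; the caller, the IOVA and the page size are all made up:

#include <linux/iommu.h>
#include <linux/sizes.h>

/* Hypothetical: map one 4 KiB page at an arbitrary IOVA for @dev. */
static int my_map_one_page(struct device *dev, phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);	/* fails if the group has >1 device */
	if (ret)
		goto out_free;

	ret = iommu_map(domain, 0x100000, paddr, SZ_4K,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* iommu_get_domain_for_dev(dev) would now return @domain. */
	return 0;

out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}
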
1203 struct iommu_group *group) in __iommu_attach_group() argument
1207 if (group->default_domain && group->domain != group->default_domain) in __iommu_attach_group()
1210 ret = __iommu_group_for_each_dev(group, domain, in __iommu_attach_group()
1213 group->domain = domain; in __iommu_attach_group()
1218 int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_attach_group() argument
1222 mutex_lock(&group->mutex); in iommu_attach_group()
1223 ret = __iommu_attach_group(domain, group); in iommu_attach_group()
1224 mutex_unlock(&group->mutex); in iommu_attach_group()
1240 struct iommu_group *group) in __iommu_detach_group() argument
1244 if (!group->default_domain) { in __iommu_detach_group()
1245 __iommu_group_for_each_dev(group, domain, in __iommu_detach_group()
1247 group->domain = NULL; in __iommu_detach_group()
1251 if (group->domain == group->default_domain) in __iommu_detach_group()
1255 ret = __iommu_group_for_each_dev(group, group->default_domain, in __iommu_detach_group()
1260 group->domain = group->default_domain; in __iommu_detach_group()
1263 void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) in iommu_detach_group() argument
1265 mutex_lock(&group->mutex); in iommu_detach_group()
1266 __iommu_detach_group(domain, group); in iommu_detach_group()
1267 mutex_unlock(&group->mutex); in iommu_detach_group()
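
For groups with more than one device the whole group is switched at once. Note that __iommu_detach_group() does not leave the devices without translation: when a default domain exists it re-attaches it (lines 1255-1260). A VFIO-style sketch with hypothetical names:

#include <linux/iommu.h>

/* Hypothetical: move every device in @group into @domain and back. */
static int my_take_over_group(struct iommu_domain *domain,
			      struct iommu_group *group)
{
	int ret;

	ret = iommu_attach_group(domain, group);
	if (ret)
		return ret;	/* e.g. -EBUSY if already off its default domain */

	/* ... program mappings into @domain with iommu_map() ... */

	iommu_detach_group(domain, group);	/* falls back to the default domain */
	return 0;
}
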
1581 struct iommu_group *group; in iommu_request_dm_for_dev() local
1585 group = iommu_group_get_for_dev(dev); in iommu_request_dm_for_dev()
1586 if (IS_ERR(group)) in iommu_request_dm_for_dev()
1587 return PTR_ERR(group); in iommu_request_dm_for_dev()
1589 mutex_lock(&group->mutex); in iommu_request_dm_for_dev()
1593 if (group->default_domain && in iommu_request_dm_for_dev()
1594 group->default_domain->type == IOMMU_DOMAIN_IDENTITY) in iommu_request_dm_for_dev()
1599 if (iommu_group_device_count(group) != 1) in iommu_request_dm_for_dev()
1609 ret = __iommu_attach_group(dm_domain, group); in iommu_request_dm_for_dev()
1616 if (group->default_domain) in iommu_request_dm_for_dev()
1617 iommu_domain_free(group->default_domain); in iommu_request_dm_for_dev()
1618 group->default_domain = dm_domain; in iommu_request_dm_for_dev()
1624 mutex_unlock(&group->mutex); in iommu_request_dm_for_dev()
1625 iommu_group_put(group); in iommu_request_dm_for_dev()
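
iommu_request_dm_for_dev() swaps the group's default domain for an identity (direct-mapped) one, for devices that cannot live behind a translated DMA address space; it refuses groups with more than one device (line 1599). A hedged sketch of how a platform quirk might use it; the function name is invented:

#include <linux/device.h>
#include <linux/iommu.h>

/* Hypothetical quirk: this device needs 1:1 DMA, so request an
 * identity default domain instead of a translated one. */
static void my_quirk_direct_map(struct device *dev)
{
	if (iommu_request_dm_for_dev(dev))
		dev_warn(dev, "could not switch to direct mapping\n");
}
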