This source file includes the following definitions:
- iommu_set_cmd_line_dma_api
- iommu_cmd_line_dma_api
- iommu_domain_type_str
- iommu_subsys_init
- iommu_device_register
- iommu_device_unregister
- iommu_get_dev_param
- iommu_free_dev_param
- iommu_probe_device
- iommu_release_device
- iommu_set_def_domain_type
- iommu_dma_setup
- iommu_group_attr_show
- iommu_group_attr_store
- iommu_group_create_file
- iommu_group_remove_file
- iommu_group_show_name
- iommu_insert_resv_region
- iommu_insert_device_resv_regions
- iommu_get_group_resv_regions
- iommu_group_show_resv_regions
- iommu_group_show_type
- iommu_group_release
- iommu_group_alloc
- iommu_group_get_by_id
- iommu_group_get_iommudata
- iommu_group_set_iommudata
- iommu_group_set_name
- iommu_group_create_direct_mappings
- iommu_group_add_device
- iommu_group_remove_device
- iommu_group_device_count
- __iommu_group_for_each_dev
- iommu_group_for_each_dev
- iommu_group_get
- iommu_group_ref_get
- iommu_group_put
- iommu_group_register_notifier
- iommu_group_unregister_notifier
- iommu_register_device_fault_handler
- iommu_unregister_device_fault_handler
- iommu_report_device_fault
- iommu_page_response
- iommu_group_id
- get_pci_function_alias_group
- get_pci_alias_group
- get_pci_alias_or_group
- generic_device_group
- pci_device_group
- fsl_mc_device_group
- iommu_group_get_for_dev
- iommu_group_default_domain
- add_iommu_group
- remove_iommu_group
- iommu_bus_notifier
- iommu_bus_init
- bus_set_iommu
- iommu_present
- iommu_capable
- iommu_set_fault_handler
- __iommu_domain_alloc
- iommu_domain_alloc
- iommu_domain_free
- __iommu_attach_device
- iommu_attach_device
- __iommu_detach_device
- iommu_detach_device
- iommu_get_domain_for_dev
- iommu_get_dma_domain
- iommu_group_do_attach_device
- __iommu_attach_group
- iommu_attach_group
- iommu_group_do_detach_device
- __iommu_detach_group
- iommu_detach_group
- iommu_iova_to_phys
- iommu_pgsize
- iommu_map
- __iommu_unmap
- iommu_unmap
- iommu_unmap_fast
- iommu_map_sg
- iommu_domain_window_enable
- iommu_domain_window_disable
- report_iommu_fault
- iommu_init
- iommu_domain_get_attr
- iommu_domain_set_attr
- iommu_get_resv_regions
- iommu_put_resv_regions
- iommu_alloc_resv_region
- request_default_domain_for_dev
- iommu_request_dm_for_dev
- iommu_request_dma_domain_for_dev
- iommu_set_default_passthrough
- iommu_set_default_translated
- iommu_default_passthrough
- iommu_ops_from_fwnode
- iommu_fwspec_init
- iommu_fwspec_free
- iommu_fwspec_add_ids
- iommu_dev_has_feature
- iommu_dev_enable_feature
- iommu_dev_disable_feature
- iommu_dev_feature_enabled
- iommu_aux_attach_device
- iommu_aux_detach_device
- iommu_aux_get_pasid
- iommu_sva_bind_device
- iommu_sva_unbind_device
- iommu_sva_set_ops
- iommu_sva_get_pasid
#define pr_fmt(fmt) "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = true;
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
	struct kobject kobj;
	struct kobject *devices_kobj;
	struct list_head devices;
	struct mutex mutex;
	struct blocking_notifier_head notifier;
	void *iommu_data;
	void (*iommu_data_release)(void *iommu_data);
	char *name;
	int id;
	struct iommu_domain *default_domain;
	struct iommu_domain *domain;
};

struct group_device {
	struct list_head list;
	struct device *dev;
	char *name;
};

struct iommu_group_attribute {
	struct attribute attr;
	ssize_t (*show)(struct iommu_group *group, char *buf);
	ssize_t (*store)(struct iommu_group *group,
			 const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
	[IOMMU_RESV_DIRECT]		= "direct",
	[IOMMU_RESV_DIRECT_RELAXABLE]	= "direct-relaxable",
	[IOMMU_RESV_RESERVED]		= "reserved",
	[IOMMU_RESV_MSI]		= "msi",
	[IOMMU_RESV_SW_MSI]		= "msi",
};

#define IOMMU_CMD_LINE_DMA_API		BIT(0)

static void iommu_set_cmd_line_dma_api(void)
{
	iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
}

static bool iommu_cmd_line_dma_api(void)
{
	return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
}

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)		\
struct iommu_group_attribute iommu_group_attr_##_name =		\
	__ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)	\
	container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)		\
	container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

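/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */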
static const char *iommu_domain_type_str(unsigned int t)
{
	switch (t) {
	case IOMMU_DOMAIN_BLOCKED:
		return "Blocked";
	case IOMMU_DOMAIN_IDENTITY:
		return "Passthrough";
	case IOMMU_DOMAIN_UNMANAGED:
		return "Unmanaged";
	case IOMMU_DOMAIN_DMA:
		return "Translated";
	default:
		return "Unknown";
	}
}

static int __init iommu_subsys_init(void)
{
	bool cmd_line = iommu_cmd_line_dma_api();

	if (!cmd_line) {
		if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
			iommu_set_default_passthrough(false);
		else
			iommu_set_default_translated(false);

		if (iommu_default_passthrough() && mem_encrypt_active()) {
			pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
			iommu_set_default_translated(false);
		}
	}

	pr_info("Default domain type: %s %s\n",
		iommu_domain_type_str(iommu_def_domain_type),
		cmd_line ? "(set via kernel command line)" : "");

	return 0;
}
subsys_initcall(iommu_subsys_init);

int iommu_device_register(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_add_tail(&iommu->list, &iommu_device_list);
	spin_unlock(&iommu_device_lock);
	return 0;
}

void iommu_device_unregister(struct iommu_device *iommu)
{
	spin_lock(&iommu_device_lock);
	list_del(&iommu->list);
	spin_unlock(&iommu_device_lock);
}

static struct iommu_param *iommu_get_dev_param(struct device *dev)
{
	struct iommu_param *param = dev->iommu_param;

	if (param)
		return param;

	param = kzalloc(sizeof(*param), GFP_KERNEL);
	if (!param)
		return NULL;

	mutex_init(&param->lock);
	dev->iommu_param = param;
	return param;
}

static void iommu_free_dev_param(struct device *dev)
{
	kfree(dev->iommu_param);
	dev->iommu_param = NULL;
}

int iommu_probe_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	int ret;

	WARN_ON(dev->iommu_group);
	if (!ops)
		return -EINVAL;

	if (!iommu_get_dev_param(dev))
		return -ENOMEM;

	ret = ops->add_device(dev);
	if (ret)
		iommu_free_dev_param(dev);

	return ret;
}

void iommu_release_device(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (dev->iommu_group)
		ops->remove_device(dev);

	iommu_free_dev_param(dev);
}

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group);

static int __init iommu_set_def_domain_type(char *str)
{
	bool pt;
	int ret;

	ret = kstrtobool(str, &pt);
	if (ret)
		return ret;

	if (pt)
		iommu_set_default_passthrough(true);
	else
		iommu_set_default_translated(true);

	return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
	return kstrtobool(str, &iommu_dma_strict);
}
early_param("iommu.strict", iommu_dma_setup);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
				     struct attribute *__attr, char *buf)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->show)
		ret = attr->show(group, buf);
	return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
				      struct attribute *__attr,
				      const char *buf, size_t count)
{
	struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
	struct iommu_group *group = to_iommu_group(kobj);
	ssize_t ret = -EIO;

	if (attr->store)
		ret = attr->store(group, buf, count);
	return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
	.show = iommu_group_attr_show,
	.store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
				   struct iommu_group_attribute *attr)
{
	return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
				    struct iommu_group_attribute *attr)
{
	sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
	return sprintf(buf, "%s\n", group->name);
}

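/**
 * iommu_insert_resv_region - Insert a new region in the list of
 *			      reserved regions
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */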
int iommu_insert_resv_region(struct iommu_resv_region *new,
			     struct list_head *regions)
{
	struct iommu_resv_region *iter, *tmp, *nr, *top;
	LIST_HEAD(stack);

	nr = iommu_alloc_resv_region(new->start, new->length,
				     new->prot, new->type);
	if (!nr)
		return -ENOMEM;

	list_for_each_entry(iter, regions, list) {
		if (nr->start < iter->start ||
		    (nr->start == iter->start && nr->type <= iter->type))
			break;
	}
	list_add_tail(&nr->list, &iter->list);

	list_for_each_entry_safe(iter, tmp, regions, list) {
		phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

		if (iter->type != new->type) {
			list_move_tail(&iter->list, &stack);
			continue;
		}

		list_for_each_entry_reverse(top, &stack, list)
			if (top->type == iter->type)
				goto check_overlap;

		list_move_tail(&iter->list, &stack);
		continue;

check_overlap:
		top_end = top->start + top->length - 1;

		if (iter->start > top_end + 1) {
			list_move_tail(&iter->list, &stack);
		} else {
			top->length = max(top_end, iter_end) - top->start + 1;
			list_del(&iter->list);
			kfree(iter);
		}
	}
	list_splice(&stack, regions);
	return 0;
}

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
				 struct list_head *group_resv_regions)
{
	struct iommu_resv_region *entry;
	int ret = 0;

	list_for_each_entry(entry, dev_resv_regions, list) {
		ret = iommu_insert_resv_region(entry, group_resv_regions);
		if (ret)
			break;
	}
	return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
				 struct list_head *head)
{
	struct group_device *device;
	int ret = 0;

	mutex_lock(&group->mutex);
	list_for_each_entry(device, &group->devices, list) {
		struct list_head dev_resv_regions;

		INIT_LIST_HEAD(&dev_resv_regions);
		iommu_get_resv_regions(device->dev, &dev_resv_regions);
		ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
		iommu_put_resv_regions(device->dev, &dev_resv_regions);
		if (ret)
			break;
	}
	mutex_unlock(&group->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
					     char *buf)
{
	struct iommu_resv_region *region, *next;
	struct list_head group_resv_regions;
	char *str = buf;

	INIT_LIST_HEAD(&group_resv_regions);
	iommu_get_group_resv_regions(group, &group_resv_regions);

	list_for_each_entry_safe(region, next, &group_resv_regions, list) {
		str += sprintf(str, "0x%016llx 0x%016llx %s\n",
			       (long long int)region->start,
			       (long long int)(region->start +
					       region->length - 1),
			       iommu_group_resv_type_string[region->type]);
		kfree(region);
	}

	return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
				     char *buf)
{
	char *type = "unknown\n";

	if (group->default_domain) {
		switch (group->default_domain->type) {
		case IOMMU_DOMAIN_BLOCKED:
			type = "blocked\n";
			break;
		case IOMMU_DOMAIN_IDENTITY:
			type = "identity\n";
			break;
		case IOMMU_DOMAIN_UNMANAGED:
			type = "unmanaged\n";
			break;
		case IOMMU_DOMAIN_DMA:
			type = "DMA\n";
			break;
		}
	}
	strcpy(buf, type);

	return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
			iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);

static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	ida_simple_remove(&iommu_group_ida, group->id);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}

static struct kobj_type iommu_group_ktype = {
	.sysfs_ops = &iommu_group_sysfs_ops,
	.release = iommu_group_release,
};

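/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */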
struct iommu_group *iommu_group_alloc(void)
{
	struct iommu_group *group;
	int ret;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	group->kobj.kset = iommu_group_kset;
	mutex_init(&group->mutex);
	INIT_LIST_HEAD(&group->devices);
	BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
	if (ret < 0) {
		kfree(group);
		return ERR_PTR(ret);
	}
	group->id = ret;

	ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
				   NULL, "%d", group->id);
	if (ret) {
		ida_simple_remove(&iommu_group_ida, group->id);
		kobject_put(&group->kobj);
		return ERR_PTR(ret);
	}

	group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
	if (!group->devices_kobj) {
		kobject_put(&group->kobj);
		return ERR_PTR(-ENOMEM);
	}

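	/*
	 * The devices_kobj holds a reference on the group kobject, so
	 * as long as that exists so will the group.  We can therefore
	 * use the devices_kobj for reference counting.
	 */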
	kobject_put(&group->kobj);

	ret = iommu_group_create_file(group,
				      &iommu_group_attr_reserved_regions);
	if (ret)
		return ERR_PTR(ret);

	ret = iommu_group_create_file(group, &iommu_group_attr_type);
	if (ret)
		return ERR_PTR(ret);

	pr_debug("Allocated group %d\n", group->id);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);

struct iommu_group *iommu_group_get_by_id(int id)
{
	struct kobject *group_kobj;
	struct iommu_group *group;
	const char *name;

	if (!iommu_group_kset)
		return NULL;

	name = kasprintf(GFP_KERNEL, "%d", id);
	if (!name)
		return NULL;

	group_kobj = kset_find_obj(iommu_group_kset, name);
	kfree(name);

	if (!group_kobj)
		return NULL;

	group = container_of(group_kobj, struct iommu_group, kobj);
	BUG_ON(group->id != id);

	kobject_get(group->devices_kobj);
	kobject_put(&group->kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

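/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */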
void *iommu_group_get_iommudata(struct iommu_group *group)
{
	return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

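/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */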
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
			       void (*release)(void *iommu_data))
{
	group->iommu_data = iommu_data;
	group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

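/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow groups to be named.  Setting a name replaces any previous name
 * and is exposed through the group's "name" sysfs attribute.
 */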
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
	int ret;

	if (group->name) {
		iommu_group_remove_file(group, &iommu_group_attr_name);
		kfree(group->name);
		group->name = NULL;
		if (!name)
			return 0;
	}

	group->name = kstrdup(name, GFP_KERNEL);
	if (!group->name)
		return -ENOMEM;

	ret = iommu_group_create_file(group, &iommu_group_attr_name);
	if (ret) {
		kfree(group->name);
		group->name = NULL;
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
					      struct device *dev)
{
	struct iommu_domain *domain = group->default_domain;
	struct iommu_resv_region *entry;
	struct list_head mappings;
	unsigned long pg_size;
	int ret = 0;

	if (!domain || domain->type != IOMMU_DOMAIN_DMA)
		return 0;

	BUG_ON(!domain->pgsize_bitmap);

	pg_size = 1UL << __ffs(domain->pgsize_bitmap);
	INIT_LIST_HEAD(&mappings);

	iommu_get_resv_regions(dev, &mappings);

	list_for_each_entry(entry, &mappings, list) {
		dma_addr_t start, end, addr;

		if (domain->ops->apply_resv_region)
			domain->ops->apply_resv_region(dev, domain, entry);

		start = ALIGN(entry->start, pg_size);
		end   = ALIGN(entry->start + entry->length, pg_size);

		if (entry->type != IOMMU_RESV_DIRECT &&
		    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
			continue;

		for (addr = start; addr < end; addr += pg_size) {
			phys_addr_t phys_addr;

			phys_addr = iommu_iova_to_phys(domain, addr);
			if (phys_addr)
				continue;

			ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
			if (ret)
				goto out;
		}
	}

	iommu_flush_tlb_all(domain);

out:
	iommu_put_resv_regions(dev, &mappings);

	return ret;
}

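/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */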
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
	int ret, i = 0;
	struct group_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return -ENOMEM;

	device->dev = dev;

	ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
	if (ret)
		goto err_free_device;

	device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
	if (!device->name) {
		ret = -ENOMEM;
		goto err_remove_link;
	}

	ret = sysfs_create_link_nowarn(group->devices_kobj,
				       &dev->kobj, device->name);
	if (ret) {
		if (ret == -EEXIST && i >= 0) {
			kfree(device->name);
			device->name = kasprintf(GFP_KERNEL, "%s.%d",
						 kobject_name(&dev->kobj), i++);
			goto rename;
		}
		goto err_free_name;
	}

	kobject_get(group->devices_kobj);

	dev->iommu_group = group;

	iommu_group_create_direct_mappings(group, dev);

	mutex_lock(&group->mutex);
	list_add_tail(&device->list, &group->devices);
	if (group->domain)
		ret = __iommu_attach_device(group->domain, dev);
	mutex_unlock(&group->mutex);
	if (ret)
		goto err_put_group;

	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

	trace_add_device_to_group(group->id, dev);

	dev_info(dev, "Adding to iommu group %d\n", group->id);

	return 0;

err_put_group:
	mutex_lock(&group->mutex);
	list_del(&device->list);
	mutex_unlock(&group->mutex);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
	sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
	kfree(device->name);
err_remove_link:
	sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
	kfree(device);
	dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

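/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */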
void iommu_group_remove_device(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;
	struct group_device *tmp_device, *device = NULL;

	dev_info(dev, "Removing from iommu group %d\n", group->id);

	blocking_notifier_call_chain(&group->notifier,
				     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

	mutex_lock(&group->mutex);
	list_for_each_entry(tmp_device, &group->devices, list) {
		if (tmp_device->dev == dev) {
			device = tmp_device;
			list_del(&device->list);
			break;
		}
	}
	mutex_unlock(&group->mutex);

	if (!device)
		return;

	sysfs_remove_link(group->devices_kobj, device->name);
	sysfs_remove_link(&dev->kobj, "iommu_group");

	trace_remove_device_from_group(group->id, dev);

	kfree(device->name);
	kfree(device);
	dev->iommu_group = NULL;
	kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
	struct group_device *entry;
	int ret = 0;

	list_for_each_entry(entry, &group->devices, list)
		ret++;

	return ret;
}

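/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */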
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
				      int (*fn)(struct device *, void *))
{
	struct group_device *device;
	int ret = 0;

	list_for_each_entry(device, &group->devices, list) {
		ret = fn(device->dev, data);
		if (ret)
			break;
	}
	return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
			     int (*fn)(struct device *, void *))
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_group_for_each_dev(group, data, fn);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);

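/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the
 * group reference is incremented, else NULL.
 */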
struct iommu_group *iommu_group_get(struct device *dev)
{
	struct iommu_group *group = dev->iommu_group;

	if (group)
		kobject_get(group->devices_kobj);

	return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

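/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references
 * on an existing group.  Returns the group for convenience.
 */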
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
	kobject_get(group->devices_kobj);
	return group;
}

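/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */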
void iommu_group_put(struct iommu_group *group)
{
	if (group)
		kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

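/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */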
int iommu_group_register_notifier(struct iommu_group *group,
				  struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

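/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */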
int iommu_group_unregister_notifier(struct iommu_group *group,
				    struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

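/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as argument.  The handler should return 0 on success.
 * If the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should
 * also complete the fault by calling iommu_page_response() with one of the
 * following response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting new faults
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */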
int iommu_register_device_fault_handler(struct device *dev,
					iommu_dev_fault_handler_t handler,
					void *data)
{
	struct iommu_param *param = dev->iommu_param;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (param->fault_param) {
		ret = -EBUSY;
		goto done_unlock;
	}

	get_device(dev);
	param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
	if (!param->fault_param) {
		put_device(dev);
		ret = -ENOMEM;
		goto done_unlock;
	}
	param->fault_param->handler = handler;
	param->fault_param->data = data;
	mutex_init(&param->fault_param->lock);
	INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);

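/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */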
int iommu_unregister_device_fault_handler(struct device *dev)
{
	struct iommu_param *param = dev->iommu_param;
	int ret = 0;

	if (!param)
		return -EINVAL;

	mutex_lock(&param->lock);

	if (!param->fault_param)
		goto unlock;

	if (!list_empty(&param->fault_param->faults)) {
		ret = -EBUSY;
		goto unlock;
	}

	kfree(param->fault_param);
	param->fault_param = NULL;
	put_device(dev);
unlock:
	mutex_unlock(&param->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

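/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded
 * IRQ handler.  When this function fails and the fault is recoverable, it
 * is the caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */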
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
	struct iommu_param *param = dev->iommu_param;
	struct iommu_fault_event *evt_pending = NULL;
	struct iommu_fault_param *fparam;
	int ret = 0;

	if (!param || !evt)
		return -EINVAL;

	mutex_lock(&param->lock);
	fparam = param->fault_param;
	if (!fparam || !fparam->handler) {
		ret = -EINVAL;
		goto done_unlock;
	}

	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
		evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
				      GFP_KERNEL);
		if (!evt_pending) {
			ret = -ENOMEM;
			goto done_unlock;
		}
		mutex_lock(&fparam->lock);
		list_add_tail(&evt_pending->list, &fparam->faults);
		mutex_unlock(&fparam->lock);
	}

	ret = fparam->handler(&evt->fault, fparam->data);
	if (ret && evt_pending) {
		mutex_lock(&fparam->lock);
		list_del(&evt_pending->list);
		mutex_unlock(&fparam->lock);
		kfree(evt_pending);
	}
done_unlock:
	mutex_unlock(&param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
			struct iommu_page_response *msg)
{
	bool pasid_valid;
	int ret = -EINVAL;
	struct iommu_fault_event *evt;
	struct iommu_fault_page_request *prm;
	struct iommu_param *param = dev->iommu_param;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain || !domain->ops->page_response)
		return -ENODEV;

	if (!param || !param->fault_param)
		return -EINVAL;

	if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
	    msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
		return -EINVAL;

	mutex_lock(&param->fault_param->lock);
	if (list_empty(&param->fault_param->faults)) {
		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
		goto done_unlock;
	}

	list_for_each_entry(evt, &param->fault_param->faults, list) {
		prm = &evt->fault.prm;
		pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;

		if ((pasid_valid && prm->pasid != msg->pasid) ||
		    prm->grpid != msg->grpid)
			continue;

		msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;

		ret = domain->ops->page_response(dev, evt, msg);
		list_del(&evt->list);
		kfree(evt);
		break;
	}

done_unlock:
	mutex_unlock(&param->fault_param->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

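/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */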
int iommu_group_id(struct iommu_group *group)
{
	return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns);

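/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */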
#define REQ_ACS_FLAGS	(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

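/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function found, look for aliases of those functions as well, so
 * that non-isolated functions end up in the same group.
 */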
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
							unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
		return NULL;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus ||
		    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
		    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
			continue;

		group = get_pci_alias_group(tmp, devfns);
		if (group) {
			pci_dev_put(tmp);
			return group;
		}
	}

	return NULL;
}

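/*
 * Look for aliases to or from the given device for existing groups.  DMA
 * aliases are only supported on the same bus, therefore the search space
 * is quite small (especially since we're really only looking at pcie
 * devices, and therefore only expect multiple slots on the root complex
 * or downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause
 * a loop.  To prevent this, we use a bitmap to track where we've been.
 */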
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
					       unsigned long *devfns)
{
	struct pci_dev *tmp = NULL;
	struct iommu_group *group;

	if (test_and_set_bit(pdev->devfn & 0xff, devfns))
		return NULL;

	group = iommu_group_get(&pdev->dev);
	if (group)
		return group;

	for_each_pci_dev(tmp) {
		if (tmp == pdev || tmp->bus != pdev->bus)
			continue;

		if (pci_devs_are_dma_aliases(pdev, tmp)) {
			group = get_pci_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}

			group = get_pci_function_alias_group(tmp, devfns);
			if (group) {
				pci_dev_put(tmp);
				return group;
			}
		}
	}

	return NULL;
}

struct group_for_pci_data {
	struct pci_dev *pdev;
	struct iommu_group *group;
};

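/*
 * DMA alias iterator callback, return the last seen device.  Stop and
 * return the IOMMU group if we find one along the way.
 */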
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
	struct group_for_pci_data *data = opaque;

	data->pdev = pdev;
	data->group = iommu_group_get(&pdev->dev);

	return data->group != NULL;
}

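/*
 * Generic device_group call-back function.  It just allocates one
 * iommu-group per device.
 */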
struct iommu_group *generic_device_group(struct device *dev)
{
	return iommu_group_alloc();
}

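/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */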
struct iommu_group *pci_device_group(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct group_for_pci_data data;
	struct pci_bus *bus;
	struct iommu_group *group = NULL;
	u64 devfns[4] = { 0 };

	if (WARN_ON(!dev_is_pci(dev)))
		return ERR_PTR(-EINVAL);

	if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
		return data.group;

	pdev = data.pdev;

	for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
		if (!bus->self)
			continue;

		if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
			break;

		pdev = bus->self;

		group = iommu_group_get(&pdev->dev);
		if (group)
			return group;
	}

	group = get_pci_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
	if (group)
		return group;

	return iommu_group_alloc();
}

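/* Get the IOMMU group for device on fsl-mc bus */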
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
	struct device *cont_dev = fsl_mc_cont_dev(dev);
	struct iommu_group *group;

	group = iommu_group_get(cont_dev);
	if (!group)
		group = iommu_group_alloc();
	return group;
}

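/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */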
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (group)
		return group;

	if (!ops)
		return ERR_PTR(-EINVAL);

	group = ops->device_group(dev);
	if (WARN_ON_ONCE(group == NULL))
		return ERR_PTR(-EINVAL);

	if (IS_ERR(group))
		return group;

	if (!group->default_domain) {
		struct iommu_domain *dom;

		dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
		if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
			dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
			if (dom) {
				dev_warn(dev,
					 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
					 iommu_def_domain_type);
			}
		}

		group->default_domain = dom;
		if (!group->domain)
			group->domain = dom;

		if (dom && !iommu_dma_strict) {
			int attr = 1;

			iommu_domain_set_attr(dom,
					      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
					      &attr);
		}
	}

	ret = iommu_group_add_device(group, dev);
	if (ret) {
		iommu_group_put(group);
		return ERR_PTR(ret);
	}

	return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
	return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
	int ret = iommu_probe_device(dev);

	if (ret == -ENODEV)
		ret = 0;

	return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
	iommu_release_device(dev);

	return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	unsigned long group_action = 0;
	struct device *dev = data;
	struct iommu_group *group;

	if (action == BUS_NOTIFY_ADD_DEVICE) {
		int ret;

		ret = iommu_probe_device(dev);
		return (ret) ? NOTIFY_DONE : NOTIFY_OK;
	} else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
		iommu_release_device(dev);
		return NOTIFY_OK;
	}

	group = iommu_group_get(dev);
	if (!group)
		return 0;

	switch (action) {
	case BUS_NOTIFY_BIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
		break;
	case BUS_NOTIFY_BOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
		break;
	case BUS_NOTIFY_UNBIND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
		break;
	case BUS_NOTIFY_UNBOUND_DRIVER:
		group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
		break;
	}

	if (group_action)
		blocking_notifier_call_chain(&group->notifier,
					     group_action, dev);

	iommu_group_put(group);
	return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;
	struct notifier_block *nb;

	nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	nb->notifier_call = iommu_bus_notifier;

	err = bus_register_notifier(bus, nb);
	if (err)
		goto out_free;

	err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
	if (err)
		goto out_err;

	return 0;

out_err:
	bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
	bus_unregister_notifier(bus, nb);

out_free:
	kfree(nb);

	return err;
}

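/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus.  Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up.  With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */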
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
	int err;

	if (bus->iommu_ops != NULL)
		return -EBUSY;

	bus->iommu_ops = ops;

	err = iommu_bus_init(bus, ops);
	if (err)
		bus->iommu_ops = NULL;

	return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);

bool iommu_present(struct bus_type *bus)
{
	return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
	if (!bus->iommu_ops || !bus->iommu_ops->capable)
		return false;

	return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

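/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */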
void iommu_set_fault_handler(struct iommu_domain *domain,
			     iommu_fault_handler_t handler,
			     void *token)
{
	BUG_ON(!domain);

	domain->handler = handler;
	domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
						 unsigned type)
{
	struct iommu_domain *domain;

	if (bus == NULL || bus->iommu_ops == NULL)
		return NULL;

	domain = bus->iommu_ops->domain_alloc(type);
	if (!domain)
		return NULL;

	domain->ops  = bus->iommu_ops;
	domain->type = type;

	domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;

	return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
	return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
	domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
				 struct device *dev)
{
	int ret;

	if ((domain->ops->is_attach_deferred != NULL) &&
	    domain->ops->is_attach_deferred(domain, dev))
		return 0;

	if (unlikely(domain->ops->attach_dev == NULL))
		return -ENODEV;

	ret = domain->ops->attach_dev(domain, dev);
	if (!ret)
		trace_attach_device_to_domain(dev);
	return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -ENODEV;

	mutex_lock(&group->mutex);
	ret = -EINVAL;
	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	ret = __iommu_attach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

static void __iommu_detach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	if ((domain->ops->is_attach_deferred != NULL) &&
	    domain->ops->is_attach_deferred(domain, dev))
		return;

	if (unlikely(domain->ops->detach_dev == NULL))
		return;

	domain->ops->detach_dev(domain, dev);
	trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	if (iommu_group_device_count(group) != 1) {
		WARN_ON(1);
		goto out_unlock;
	}

	__iommu_detach_group(domain, group);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	struct iommu_domain *domain;
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;

	domain = group->domain;

	iommu_group_put(group);

	return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

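/*
 * For IOMMU_DOMAIN_DMA implementations which already provide their own
 * guarantees that the group and its default domain are valid and correct.
 */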
struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->iommu_group->default_domain;
}

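/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */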
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
				struct iommu_group *group)
{
	int ret;

	if (group->default_domain && group->domain != group->default_domain)
		return -EBUSY;

	ret = __iommu_group_for_each_dev(group, domain,
					 iommu_group_do_attach_device);
	if (ret == 0)
		group->domain = domain;

	return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	int ret;

	mutex_lock(&group->mutex);
	ret = __iommu_attach_group(domain, group);
	mutex_unlock(&group->mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
	struct iommu_domain *domain = data;

	__iommu_detach_device(domain, dev);

	return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
				 struct iommu_group *group)
{
	int ret;

	if (!group->default_domain) {
		__iommu_group_for_each_dev(group, domain,
					   iommu_group_do_detach_device);
		group->domain = NULL;
		return;
	}

	if (group->domain == group->default_domain)
		return;

	ret = __iommu_group_for_each_dev(group, group->default_domain,
					 iommu_group_do_attach_device);
	if (ret != 0)
		WARN_ON(1);
	else
		group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
	mutex_lock(&group->mutex);
	__iommu_detach_group(domain, group);
	mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	if (unlikely(domain->ops->iova_to_phys == NULL))
		return 0;

	return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
			   unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx;
	size_t pgsize;

	pgsize_idx = __fls(size);

	if (likely(addr_merge)) {
		unsigned int align_pgsize_idx = __ffs(addr_merge);

		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
	}

	pgsize = (1UL << (pgsize_idx + 1)) - 1;

	pgsize &= domain->pgsize_bitmap;

	BUG_ON(!pgsize);

	pgsize_idx = __fls(pgsize);
	pgsize = 1UL << pgsize_idx;

	return pgsize;
}

int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	const struct iommu_ops *ops = domain->ops;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(ops->map == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;

	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}

	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);

		ret = ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	if (ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain);

	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		trace_map(orig_iova, orig_paddr, orig_size);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

static size_t __iommu_unmap(struct iommu_domain *domain,
			    unsigned long iova, size_t size,
			    struct iommu_iotlb_gather *iotlb_gather)
{
	const struct iommu_ops *ops = domain->ops;
	size_t unmapped_page, unmapped = 0;
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;

	if (unlikely(ops->unmap == NULL ||
		     domain->pgsize_bitmap == 0UL))
		return 0;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return 0;

	min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

	if (!IS_ALIGNED(iova | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
		       iova, size, min_pagesz);
		return 0;
	}

	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

	while (unmapped < size) {
		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

		unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
		if (!unmapped_page)
			break;

		pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
			 iova, unmapped_page);

		iova += unmapped_page;
		unmapped += unmapped_page;
	}

	trace_unmap(orig_iova, size, unmapped);
	return unmapped;
}

size_t iommu_unmap(struct iommu_domain *domain,
		   unsigned long iova, size_t size)
{
	struct iommu_iotlb_gather iotlb_gather;
	size_t ret;

	iommu_iotlb_gather_init(&iotlb_gather);
	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
	iommu_tlb_sync(domain, &iotlb_gather);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
			unsigned long iova, size_t size,
			struct iommu_iotlb_gather *iotlb_gather)
{
	return __iommu_unmap(domain, iova, size, iotlb_gather);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);

size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
		    struct scatterlist *sg, unsigned int nents, int prot)
{
	size_t len = 0, mapped = 0;
	phys_addr_t start;
	unsigned int i = 0;
	int ret;

	while (i <= nents) {
		phys_addr_t s_phys = sg_phys(sg);

		if (len && s_phys != start + len) {
			ret = iommu_map(domain, iova + mapped, start, len, prot);
			if (ret)
				goto out_err;

			mapped += len;
			len = 0;
		}

		if (len) {
			len += sg->length;
		} else {
			len = sg->length;
			start = s_phys;
		}

		if (++i < nents)
			sg = sg_next(sg);
	}

	return mapped;

out_err:
	iommu_unmap(domain, iova, mapped);

	return 0;

}
EXPORT_SYMBOL_GPL(iommu_map_sg);
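/*
 * Illustrative usage sketch (not part of the original file): a typical
 * consumer of the mapping API above allocates an unmanaged domain,
 * attaches a group to it, and then maps and unmaps IOVA ranges.  The
 * iova/paddr values below are placeholders.
 *
 *	struct iommu_domain *d = iommu_domain_alloc(dev->bus);
 *
 *	if (d && !iommu_attach_group(d, group)) {
 *		iommu_map(d, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *		...
 *		iommu_unmap(d, iova, SZ_4K);
 *		iommu_detach_group(d, group);
 *	}
 *	if (d)
 *		iommu_domain_free(d);
 */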
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
			       phys_addr_t paddr, u64 size, int prot)
{
	if (unlikely(domain->ops->domain_window_enable == NULL))
		return -ENODEV;

	return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
						 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
	if (unlikely(domain->ops->domain_window_disable == NULL))
		return;

	return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

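/**
 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - if restarting of the faulting device is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */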
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
		       unsigned long iova, int flags)
{
	int ret = -ENOSYS;

	if (domain->handler)
		ret = domain->handler(domain, dev, iova, flags,
				      domain->handler_token);

	trace_io_page_fault(dev, iova, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);

static int __init iommu_init(void)
{
	iommu_group_kset = kset_create_and_add("iommu_groups",
					       NULL, kernel_kobj);
	BUG_ON(!iommu_group_kset);

	iommu_debugfs_setup();

	return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	struct iommu_domain_geometry *geometry;
	bool *paging;
	int ret = 0;

	switch (attr) {
	case DOMAIN_ATTR_GEOMETRY:
		geometry  = data;
		*geometry = domain->geometry;

		break;
	case DOMAIN_ATTR_PAGING:
		paging  = data;
		*paging = (domain->pgsize_bitmap != 0UL);
		break;
	default:
		if (!domain->ops->domain_get_attr)
			return -EINVAL;

		ret = domain->ops->domain_get_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
			  enum iommu_attr attr, void *data)
{
	int ret = 0;

	switch (attr) {
	default:
		if (domain->ops->domain_set_attr == NULL)
			return -EINVAL;

		ret = domain->ops->domain_set_attr(domain, attr, data);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->get_resv_regions)
		ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->put_resv_regions)
		ops->put_resv_regions(dev, list);
}

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
						  size_t length, int prot,
						  enum iommu_resv_type type)
{
	struct iommu_resv_region *region;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return NULL;

	INIT_LIST_HEAD(&region->list);
	region->start = start;
	region->length = length;
	region->prot = prot;
	region->type = type;
	return region;
}

static int
request_default_domain_for_dev(struct device *dev, unsigned long type)
{
	struct iommu_domain *domain;
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);
	if (!group)
		return -EINVAL;

	mutex_lock(&group->mutex);

	ret = 0;
	if (group->default_domain && group->default_domain->type == type)
		goto out;

	ret = -EBUSY;
	if (iommu_group_device_count(group) != 1)
		goto out;

	ret = -ENOMEM;
	domain = __iommu_domain_alloc(dev->bus, type);
	if (!domain)
		goto out;

	ret = __iommu_attach_group(domain, group);
	if (ret) {
		iommu_domain_free(domain);
		goto out;
	}

	if (group->default_domain)
		iommu_domain_free(group->default_domain);
	group->default_domain = domain;

	iommu_group_create_direct_mappings(group, dev);

	dev_info(dev, "Using iommu %s mapping\n",
		 type == IOMMU_DOMAIN_DMA ? "dma" : "direct");

	ret = 0;
out:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return ret;
}

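/* Request that a device is direct mapped by the IOMMU */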
int iommu_request_dm_for_dev(struct device *dev)
{
	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
}

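/* Request that a device can't be direct mapped by the IOMMU */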
int iommu_request_dma_domain_for_dev(struct device *dev)
{
	return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
}

void iommu_set_default_passthrough(bool cmd_line)
{
	if (cmd_line)
		iommu_set_cmd_line_dma_api();

	iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
}

void iommu_set_default_translated(bool cmd_line)
{
	if (cmd_line)
		iommu_set_cmd_line_dma_api();

	iommu_def_domain_type = IOMMU_DOMAIN_DMA;
}

bool iommu_default_passthrough(void)
{
	return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
}
EXPORT_SYMBOL_GPL(iommu_default_passthrough);

const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
	const struct iommu_ops *ops = NULL;
	struct iommu_device *iommu;

	spin_lock(&iommu_device_lock);
	list_for_each_entry(iommu, &iommu_device_list, list)
		if (iommu->fwnode == fwnode) {
			ops = iommu->ops;
			break;
		}
	spin_unlock(&iommu_device_lock);
	return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
		      const struct iommu_ops *ops)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec)
		return ops == fwspec->ops ? 0 : -EINVAL;

	fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
	if (!fwspec)
		return -ENOMEM;

	of_node_get(to_of_node(iommu_fwnode));
	fwspec->iommu_fwnode = iommu_fwnode;
	fwspec->ops = ops;
	dev_iommu_fwspec_set(dev, fwspec);
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (fwspec) {
		fwnode_handle_put(fwspec->iommu_fwnode);
		kfree(fwspec);
		dev_iommu_fwspec_set(dev, NULL);
	}
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	size_t size;
	int i;

	if (!fwspec)
		return -EINVAL;

	size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
	if (size > sizeof(*fwspec)) {
		fwspec = krealloc(fwspec, size, GFP_KERNEL);
		if (!fwspec)
			return -ENOMEM;

		dev_iommu_fwspec_set(dev, fwspec);
	}

	for (i = 0; i < num_ids; i++)
		fwspec->ids[fwspec->num_ids + i] = ids[i];

	fwspec->num_ids += num_ids;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);

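/*
 * Per device IOMMU features.
 */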
bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_has_feat)
		return ops->dev_has_feat(dev, feat);

	return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_has_feature);

int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_enable_feat)
		return ops->dev_enable_feat(dev, feat);

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);

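/*
 * The device drivers should do the necessary cleanups before calling this.
 * For example, before disabling the aux-domain feature, the device driver
 * should detach all aux-domains.  Otherwise, this will return -EBUSY.
 */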
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_disable_feat)
		return ops->dev_disable_feat(dev, feat);

	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);

bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
{
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (ops && ops->dev_feat_enabled)
		return ops->dev_feat_enabled(dev, feat);

	return false;
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);

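/*
 * Aux-domain specific attach/detach.
 *
 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
 * true.  Also, as long as domains are attached to a device through this
 * interface, any tries to call iommu_attach_device() should fail
 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
 * This should make us safe against a device being attached to a guest as a
 * whole while there are still pasid users on it (aux and sva).
 */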
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_attach_dev)
		ret = domain->ops->aux_attach_dev(domain, dev);

	if (!ret)
		trace_attach_device_to_domain(dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_attach_device);

void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
	if (domain->ops->aux_detach_dev) {
		domain->ops->aux_detach_dev(domain, dev);
		trace_detach_device_from_domain(dev);
	}
}
EXPORT_SYMBOL_GPL(iommu_aux_detach_device);

int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
	int ret = -ENODEV;

	if (domain->ops->aux_get_pasid)
		ret = domain->ops->aux_get_pasid(domain, dev);

	return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);

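/**
 * iommu_sva_bind_device() - Bind a process address space to a device
 * @dev: the device
 * @mm: the mm to bind, caller must hold a reference to it
 * @drvdata: opaque data pointer to pass to the bind callback
 *
 * Create a bond between device and address space, allowing the device to
 * access the mm using the returned PASID.  If a bond already exists
 * between @dev and @mm, it is returned and an additional reference is
 * taken.  Caller must call iommu_sva_unbind_device() to release each
 * reference.
 *
 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first,
 * to initialize the required SVA features.
 *
 * On error, returns an ERR_PTR value.
 */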
struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
	struct iommu_group *group;
	struct iommu_sva *handle = ERR_PTR(-EINVAL);
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_bind)
		return ERR_PTR(-ENODEV);

	group = iommu_group_get(dev);
	if (!group)
		return ERR_PTR(-ENODEV);

	mutex_lock(&group->mutex);

	if (iommu_group_device_count(group) != 1)
		goto out_unlock;

	handle = ops->sva_bind(dev, mm, drvdata);

out_unlock:
	mutex_unlock(&group->mutex);
	iommu_group_put(group);

	return handle;
}
EXPORT_SYMBOL_GPL(iommu_sva_bind_device);

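/**
 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
 * @handle: the handle returned by iommu_sva_bind_device()
 *
 * Put reference to a bond between device and address space.  The device
 * should not be issuing any more transaction for this PASID.  All
 * outstanding page requests for this PASID must have been flushed to the
 * IOMMU.
 */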
void iommu_sva_unbind_device(struct iommu_sva *handle)
{
	struct iommu_group *group;
	struct device *dev = handle->dev;
	const struct iommu_ops *ops = dev->bus->iommu_ops;

	if (!ops || !ops->sva_unbind)
		return;

	group = iommu_group_get(dev);
	if (!group)
		return;

	mutex_lock(&group->mutex);
	ops->sva_unbind(handle);
	mutex_unlock(&group->mutex);

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);

int iommu_sva_set_ops(struct iommu_sva *handle,
		      const struct iommu_sva_ops *sva_ops)
{
	if (handle->ops && handle->ops != sva_ops)
		return -EEXIST;

	handle->ops = sva_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(iommu_sva_set_ops);

int iommu_sva_get_pasid(struct iommu_sva *handle)
{
	const struct iommu_ops *ops = handle->dev->bus->iommu_ops;

	if (!ops || !ops->sva_get_pasid)
		return IOMMU_PASID_INVALID;

	return ops->sva_get_pasid(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);