This source file includes the following definitions:
- to_p2p_pgmap
- size_show
- available_show
- published_show
- pci_p2pdma_release
- pci_p2pdma_setup
- pci_p2pdma_add_resource
- find_parent_pci_dev
- pci_bridge_has_acs_redir
- seq_buf_print_bus_devfn
- __host_bridge_whitelist
- host_bridge_whitelist
- __upstream_bridge_distance
- map_types_idx
- upstream_bridge_distance
- upstream_bridge_distance_warn
- pci_p2pdma_distance_many
- pci_has_p2pmem
- pci_p2pmem_find_many
- pci_alloc_p2pmem
- pci_free_p2pmem
- pci_p2pmem_virt_to_bus
- pci_p2pmem_alloc_sgl
- pci_p2pmem_free_sgl
- pci_p2pmem_publish
- __pci_p2pdma_map_sg
- pci_p2pdma_map_sg_attrs
- pci_p2pdma_unmap_sg_attrs
- pci_p2pdma_enable_store
- pci_p2pdma_enable_show
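
Taken together, these functions form the provider-facing and client-facing halves of the PCI peer-to-peer DMA API implemented below. The following is a minimal, hedged usage sketch (not part of this file) of how a hypothetical provider driver might expose a BAR as p2p memory and carve a buffer out of it; the BAR index, allocation size, and the function name example_expose_p2pmem() are illustrative assumptions, not code from this file.

/* Hypothetical provider-side usage sketch; BAR index and sizes are assumptions. */
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#include <linux/sizes.h>

static int example_expose_p2pmem(struct pci_dev *pdev)
{
	pci_bus_addr_t bus_addr;
	void *buf;
	int rc;

	/* Register all of BAR 4 (size 0 means "whole BAR") as p2p memory. */
	rc = pci_p2pdma_add_resource(pdev, 4, 0, 0);
	if (rc)
		return rc;

	/* Make the memory discoverable by other drivers via pci_p2pmem_find_many(). */
	pci_p2pmem_publish(pdev, true);

	/* Carve out a 4 KiB buffer and translate it to a bus address a peer could target. */
	buf = pci_alloc_p2pmem(pdev, SZ_4K);
	if (!buf)
		return -ENOMEM;

	bus_addr = pci_p2pmem_virt_to_bus(pdev, buf);
	pci_info(pdev, "p2p buffer at bus address %#llx\n",
		 (unsigned long long)bus_addr);

	pci_free_p2pmem(pdev, buf, SZ_4K);
	return 0;
}
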
// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Peer-to-Peer DMA support.
 */

#define pr_fmt(fmt) "pci-p2pdma: " fmt
#include <linux/ctype.h>
#include <linux/pci-p2pdma.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/memremap.h>
#include <linux/percpu-refcount.h>
#include <linux/random.h>
#include <linux/seq_buf.h>
#include <linux/xarray.h>

enum pci_p2pdma_map_type {
	PCI_P2PDMA_MAP_UNKNOWN = 0,
	PCI_P2PDMA_MAP_NOT_SUPPORTED,
	PCI_P2PDMA_MAP_BUS_ADDR,
	PCI_P2PDMA_MAP_THRU_HOST_BRIDGE,
};

struct pci_p2pdma {
	struct gen_pool *pool;
	bool p2pmem_published;
	struct xarray map_types;
};

struct pci_p2pdma_pagemap {
	struct dev_pagemap pgmap;
	struct pci_dev *provider;
	u64 bus_offset;
};

static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap)
{
	return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap);
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t size = 0;

	if (pdev->p2pdma->pool)
		size = gen_pool_size(pdev->p2pdma->pool);

	return snprintf(buf, PAGE_SIZE, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t available_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	size_t avail = 0;

	if (pdev->p2pdma->pool)
		avail = gen_pool_avail(pdev->p2pdma->pool);

	return snprintf(buf, PAGE_SIZE, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);

static ssize_t published_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	return snprintf(buf, PAGE_SIZE, "%d\n",
			pdev->p2pdma->p2pmem_published);
}
static DEVICE_ATTR_RO(published);

static struct attribute *p2pmem_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_available.attr,
	&dev_attr_published.attr,
	NULL,
};

static const struct attribute_group p2pmem_group = {
	.attrs = p2pmem_attrs,
	.name = "p2pmem",
};

static void pci_p2pdma_release(void *data)
{
	struct pci_dev *pdev = data;
	struct pci_p2pdma *p2pdma = pdev->p2pdma;

	if (!p2pdma)
		return;

	/* Flush and disable pci_alloc_p2pmem() */
	pdev->p2pdma = NULL;
	synchronize_rcu();

	gen_pool_destroy(p2pdma->pool);
	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
	xa_destroy(&p2pdma->map_types);
}

static int pci_p2pdma_setup(struct pci_dev *pdev)
{
	int error = -ENOMEM;
	struct pci_p2pdma *p2p;

	p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL);
	if (!p2p)
		return -ENOMEM;

	xa_init(&p2p->map_types);

	p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev));
	if (!p2p->pool)
		goto out;

	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
	if (error)
		goto out_pool_destroy;

	pdev->p2pdma = p2p;

	error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group);
	if (error)
		goto out_pool_destroy;

	return 0;

out_pool_destroy:
	pdev->p2pdma = NULL;
	gen_pool_destroy(p2p->pool);
out:
	devm_kfree(&pdev->dev, p2p);
	return error;
}

/**
 * pci_p2pdma_add_resource - add memory for use as p2p memory
 * @pdev: the device to add the memory to
 * @bar: PCI BAR to add
 * @size: size of the memory to add, may be zero to use the whole BAR
 * @offset: offset into the PCI BAR
 *
 * The memory will be given ZONE_DEVICE struct pages so that it may
 * be used with any DMA request.
 */
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
			    u64 offset)
{
	struct pci_p2pdma_pagemap *p2p_pgmap;
	struct dev_pagemap *pgmap;
	void *addr;
	int error;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EINVAL;

	if (offset >= pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!size)
		size = pci_resource_len(pdev, bar) - offset;

	if (size + offset > pci_resource_len(pdev, bar))
		return -EINVAL;

	if (!pdev->p2pdma) {
		error = pci_p2pdma_setup(pdev);
		if (error)
			return error;
	}

	p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
	if (!p2p_pgmap)
		return -ENOMEM;

	pgmap = &p2p_pgmap->pgmap;
	pgmap->res.start = pci_resource_start(pdev, bar) + offset;
	pgmap->res.end = pgmap->res.start + size - 1;
	pgmap->res.flags = pci_resource_flags(pdev, bar);
	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;

	p2p_pgmap->provider = pdev;
	p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) -
		pci_resource_start(pdev, bar);

	addr = devm_memremap_pages(&pdev->dev, pgmap);
	if (IS_ERR(addr)) {
		error = PTR_ERR(addr);
		goto pgmap_free;
	}

	error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
			pci_bus_address(pdev, bar) + offset,
			resource_size(&pgmap->res), dev_to_node(&pdev->dev),
			pgmap->ref);
	if (error)
		goto pages_free;

	pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
		 &pgmap->res);

	return 0;

pages_free:
	devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
	devm_kfree(&pdev->dev, pgmap);
	return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);

/*
 * Walk up the device hierarchy to find the first parent that is a PCI
 * device. A reference is taken on the returned device; the caller is
 * responsible for dropping it with pci_dev_put().
 */
static struct pci_dev *find_parent_pci_dev(struct device *dev)
{
	struct device *parent;

	dev = get_device(dev);

	while (dev) {
		if (dev_is_pci(dev))
			return to_pci_dev(dev);

		parent = get_device(dev->parent);
		put_device(dev);
		dev = parent;
	}

	return NULL;
}

/*
 * Check if a PCI bridge has its ACS redirection bits set to redirect P2P
 * TLPs upstream via ACS. Returns 1 if the packets will be redirected
 * upstream, 0 otherwise.
 */
static int pci_bridge_has_acs_redir(struct pci_dev *pdev)
{
	int pos;
	u16 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return 0;

	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);

	if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC))
		return 1;

	return 0;
}

static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev)
{
	if (!buf)
		return;

	seq_buf_printf(buf, "%s;", pci_name(pdev));
}

static const struct pci_p2pdma_whitelist_entry {
	unsigned short vendor;
	unsigned short device;
	enum {
		REQ_SAME_HOST_BRIDGE = 1 << 0,
	} flags;
} pci_p2pdma_whitelist[] = {
	/* AMD ZEN */
	{PCI_VENDOR_ID_AMD, 0x1450, 0},

	/* Intel Xeon E5/Core i7 */
	{PCI_VENDOR_ID_INTEL, 0x3c00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL, 0x3c01, REQ_SAME_HOST_BRIDGE},
	/* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */
	{PCI_VENDOR_ID_INTEL, 0x2f00, REQ_SAME_HOST_BRIDGE},
	{PCI_VENDOR_ID_INTEL, 0x2f01, REQ_SAME_HOST_BRIDGE},
	{}
};

static bool __host_bridge_whitelist(struct pci_host_bridge *host,
				    bool same_host_bridge)
{
	struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0));
	const struct pci_p2pdma_whitelist_entry *entry;
	unsigned short vendor, device;

	if (!root)
		return false;

	vendor = root->vendor;
	device = root->device;
	pci_dev_put(root);

	for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) {
		if (vendor != entry->vendor || device != entry->device)
			continue;
		if (entry->flags & REQ_SAME_HOST_BRIDGE && !same_host_bridge)
			return false;

		return true;
	}

	return false;
}

/*
 * If we can't find a common upstream bridge, take a look at the root
 * complex and compare it to a whitelist of known good hardware.
 */
static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b)
{
	struct pci_host_bridge *host_a = pci_find_host_bridge(a->bus);
	struct pci_host_bridge *host_b = pci_find_host_bridge(b->bus);

	if (host_a == host_b)
		return __host_bridge_whitelist(host_a, true);

	if (__host_bridge_whitelist(host_a, false) &&
	    __host_bridge_whitelist(host_b, false))
		return true;

	return false;
}

static enum pci_p2pdma_map_type
__upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client,
		int *dist, bool *acs_redirects, struct seq_buf *acs_list)
{
	struct pci_dev *a = provider, *b = client, *bb;
	int dist_a = 0;
	int dist_b = 0;
	int acs_cnt = 0;

	if (acs_redirects)
		*acs_redirects = false;

	/*
	 * Walk up from the provider one bridge at a time and, for each step,
	 * walk up from the client looking for a common upstream bridge.
	 * No references need to be taken on the bridges returned by
	 * pci_upstream_bridge() because each device already holds a
	 * reference to its parent.
	 */
	while (a) {
		dist_b = 0;

		if (pci_bridge_has_acs_redir(a)) {
			seq_buf_print_bus_devfn(acs_list, a);
			acs_cnt++;
		}

		bb = b;

		while (bb) {
			if (a == bb)
				goto check_b_path_acs;

			bb = pci_upstream_bridge(bb);
			dist_b++;
		}

		a = pci_upstream_bridge(a);
		dist_a++;
	}

	if (dist)
		*dist = dist_a + dist_b;

	return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;

check_b_path_acs:
	bb = b;

	while (bb) {
		if (a == bb)
			break;

		if (pci_bridge_has_acs_redir(bb)) {
			seq_buf_print_bus_devfn(acs_list, bb);
			acs_cnt++;
		}

		bb = pci_upstream_bridge(bb);
	}

	if (dist)
		*dist = dist_a + dist_b;

	if (acs_cnt) {
		if (acs_redirects)
			*acs_redirects = true;

		return PCI_P2PDMA_MAP_THRU_HOST_BRIDGE;
	}

	return PCI_P2PDMA_MAP_BUS_ADDR;
}

static unsigned long map_types_idx(struct pci_dev *client)
{
	return (pci_domain_nr(client->bus) << 16) |
		(client->bus->number << 8) | client->devfn;
}

/*
 * Find the distance through the nearest common upstream bridge between
 * two PCI devices.
 *
 * If the two devices are the same device then 0 will be returned.
 *
 * If there are two virtual functions of the same device behind the same
 * bridge port then 2 will be returned (one step down to the PCIe switch,
 * then one step back to the same device).
 *
 * In the case where two devices are connected to the same PCIe switch,
 * the value 4 will be returned. This corresponds to the following PCI tree:
 *
 *     -+  Root Port
 *      \+ Switch Upstream Port
 *       +-+ Switch Downstream Port
 *       + \- Device A
 *       \-+ Switch Downstream Port
 *         \- Device B
 *
 * The distance is 4 because we traverse from Device A through the downstream
 * port of the switch, to the common upstream port, back up to the second
 * downstream port and then to Device B.
 *
 * Any two devices whose data path goes through the host bridge (including
 * paths forced upstream by ACS redirect) are checked against the host
 * bridge whitelist: if the host bridge is whitelisted the distance is
 * returned with type PCI_P2PDMA_MAP_THRU_HOST_BRIDGE, otherwise
 * PCI_P2PDMA_MAP_NOT_SUPPORTED is returned.
 *
 * The resulting map type is cached in the provider's map_types xarray,
 * keyed on the client, so it can be looked up again at map time.
 */
static enum pci_p2pdma_map_type
upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client,
		int *dist, bool *acs_redirects, struct seq_buf *acs_list)
{
	enum pci_p2pdma_map_type map_type;

	map_type = __upstream_bridge_distance(provider, client, dist,
					      acs_redirects, acs_list);

	if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE) {
		if (!host_bridge_whitelist(provider, client))
			map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED;
	}

	if (provider->p2pdma)
		xa_store(&provider->p2pdma->map_types, map_types_idx(client),
			 xa_mk_value(map_type), GFP_KERNEL);

	return map_type;
}

static enum pci_p2pdma_map_type
upstream_bridge_distance_warn(struct pci_dev *provider, struct pci_dev *client,
		int *dist)
{
	struct seq_buf acs_list;
	bool acs_redirects;
	int ret;

	seq_buf_init(&acs_list, kmalloc(PAGE_SIZE, GFP_KERNEL), PAGE_SIZE);
	if (!acs_list.buffer)
		return -ENOMEM;

	ret = upstream_bridge_distance(provider, client, dist, &acs_redirects,
				       &acs_list);
	if (acs_redirects) {
		pci_warn(client, "ACS redirect is set between the client and provider (%s)\n",
			 pci_name(provider));
		/* Drop the trailing semicolon appended by seq_buf_print_bus_devfn() */
		acs_list.buffer[acs_list.len-1] = 0;
		pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n",
			 acs_list.buffer);
	}

	if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED) {
		pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n",
			 pci_name(provider));
	}

	kfree(acs_list.buffer);

	return ret;
}

/**
 * pci_p2pdma_distance_many - determine the cumulative distance between
 *	a p2pdma provider and the clients in use
 * @provider: p2pdma provider to check against the client list
 * @clients: array of devices to check
 * @num_clients: number of clients in the array
 * @verbose: if true, print warnings for devices when we return -1
 *
 * Returns -1 if any of the clients are not compatible, otherwise returns a
 * non-negative distance where a lower number is the preferable choice
 * (0 if one of the clients is the provider itself).
 *
 * "Compatible" means the provider and every client either share a common
 * upstream bridge or the host bridges connected to each of the devices
 * are listed in pci_p2pdma_whitelist.
 */
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose)
{
	bool not_supported = false;
	struct pci_dev *pci_client;
	int total_dist = 0;
	int distance;
	int i, ret;

	if (num_clients == 0)
		return -1;

	for (i = 0; i < num_clients; i++) {
		if (IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
		    clients[i]->dma_ops == &dma_virt_ops) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA because the driver makes use of dma_virt_ops\n");
			return -1;
		}

		pci_client = find_parent_pci_dev(clients[i]);
		if (!pci_client) {
			if (verbose)
				dev_warn(clients[i],
					 "cannot be used for peer-to-peer DMA as it is not a PCI device\n");
			return -1;
		}

		if (verbose)
			ret = upstream_bridge_distance_warn(provider,
					pci_client, &distance);
		else
			ret = upstream_bridge_distance(provider, pci_client,
						       &distance, NULL, NULL);

		pci_dev_put(pci_client);

		if (ret == PCI_P2PDMA_MAP_NOT_SUPPORTED)
			not_supported = true;

		if (not_supported && !verbose)
			break;

		total_dist += distance;
	}

	if (not_supported)
		return -1;

	return total_dist;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many);

/**
 * pci_has_p2pmem - check if a given PCI device has published any p2pmem
 * @pdev: PCI device to check
 */
bool pci_has_p2pmem(struct pci_dev *pdev)
{
	return pdev->p2pdma && pdev->p2pdma->p2pmem_published;
}
EXPORT_SYMBOL_GPL(pci_has_p2pmem);

/**
 * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible
 *	with the specified list of clients and at the shortest distance
 *	(as determined by pci_p2pdma_distance_many())
 * @clients: array of devices to check
 * @num_clients: number of client devices in the list
 *
 * If multiple providers are an equal distance from the clients, one is
 * chosen at random; a provider that is closer (for example, one that is
 * itself one of the clients) is always preferred.
 *
 * Returns a pointer to the PCI device with a reference taken (use
 * pci_dev_put() to return the reference) or NULL if no compatible device
 * is found.
 */
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients)
{
	struct pci_dev *pdev = NULL;
	int distance;
	int closest_distance = INT_MAX;
	struct pci_dev **closest_pdevs;
	int dev_cnt = 0;
	const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs);
	int i;

	closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!closest_pdevs)
		return NULL;

	while ((pdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
		if (!pci_has_p2pmem(pdev))
			continue;

		distance = pci_p2pdma_distance_many(pdev, clients,
						    num_clients, false);
		if (distance < 0 || distance > closest_distance)
			continue;

		if (distance == closest_distance && dev_cnt >= max_devs)
			continue;

		if (distance < closest_distance) {
			for (i = 0; i < dev_cnt; i++)
				pci_dev_put(closest_pdevs[i]);

			dev_cnt = 0;
			closest_distance = distance;
		}

		closest_pdevs[dev_cnt++] = pci_dev_get(pdev);
	}

	if (dev_cnt)
		pdev = pci_dev_get(closest_pdevs[prandom_u32_max(dev_cnt)]);

	for (i = 0; i < dev_cnt; i++)
		pci_dev_put(closest_pdevs[i]);

	kfree(closest_pdevs);
	return pdev;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);

/**
 * pci_alloc_p2pmem - allocate peer-to-peer DMA memory
 * @pdev: the device to allocate memory from
 * @size: number of bytes to allocate
 *
 * Returns the allocated memory or NULL on error.
 */
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	void *ret = NULL;
	struct percpu_ref *ref;

	/*
	 * Pairs with synchronize_rcu() in pci_p2pdma_release() to
	 * ensure pdev->p2pdma is non-NULL for the duration of the
	 * read-lock.
	 */
	rcu_read_lock();
	if (unlikely(!pdev->p2pdma))
		goto out;

	ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size,
			(void **) &ref);
	if (!ret)
		goto out;

	if (unlikely(!percpu_ref_tryget_live(ref))) {
		gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size);
		ret = NULL;
		goto out;
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);

/**
 * pci_free_p2pmem - free peer-to-peer DMA memory
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 * @size: number of bytes that were allocated
 */
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
	struct percpu_ref *ref;

	gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size,
			(void **) &ref);
	percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);

/**
 * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual
 *	address obtained with pci_alloc_p2pmem()
 * @pdev: the device the memory was allocated from
 * @addr: address of the memory that was allocated
 */
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr)
{
	if (!addr)
		return 0;
	if (!pdev->p2pdma)
		return 0;

	/*
	 * Note: when the memory was added to the pool, the PCI bus address
	 * was used as the "physical" address, so gen_pool_virt_to_phys()
	 * actually returns the bus address despite the misleading name.
	 */
	return gen_pool_virt_to_phys(pdev->p2pdma->pool, (unsigned long)addr);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus);

/**
 * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist
 * @pdev: the device to allocate memory from
 * @nents: returns the number of SG entries in the list
 * @length: number of bytes to allocate
 *
 * Returns NULL on error or a &struct scatterlist pointer and @nents on
 * success.
 */
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length)
{
	struct scatterlist *sg;
	void *addr;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return NULL;

	sg_init_table(sg, 1);

	addr = pci_alloc_p2pmem(pdev, length);
	if (!addr)
		goto out_free_sg;

	sg_set_buf(sg, addr, length);
	*nents = 1;
	return sg;

out_free_sg:
	kfree(sg);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl);

/**
 * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl()
 * @pdev: the device the memory was allocated from
 * @sgl: the allocated scatterlist
 */
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl)
{
	struct scatterlist *sg;
	int count;

	for_each_sg(sgl, sg, INT_MAX, count) {
		if (!sg)
			break;

		pci_free_p2pmem(pdev, sg_virt(sg), sg->length);
	}
	kfree(sgl);
}
EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl);

/**
 * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by
 *	other devices with pci_p2pmem_find_many()
 * @pdev: the device with peer-to-peer DMA memory to publish
 * @publish: set to true to publish the memory, false to unpublish it
 *
 * Published memory can be used by other PCI device drivers for
 * peer-to-peer DMA operations. Unpublished memory is reserved for
 * exclusive use by the driver that registered it.
 */
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
	if (pdev->p2pdma)
		pdev->p2pdma->p2pmem_published = publish;
}
EXPORT_SYMBOL_GPL(pci_p2pmem_publish);

static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct pci_dev *provider,
						    struct pci_dev *client)
{
	if (!provider->p2pdma)
		return PCI_P2PDMA_MAP_NOT_SUPPORTED;

	return xa_to_value(xa_load(&provider->p2pdma->map_types,
				   map_types_idx(client)));
}

static int __pci_p2pdma_map_sg(struct pci_p2pdma_pagemap *p2p_pgmap,
		struct device *dev, struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	phys_addr_t paddr;
	int i;

	/*
	 * p2pdma mappings are not compatible with devices that use
	 * dma_virt_ops. If the upper layers do the right thing this
	 * should never happen because it will be prevented by the
	 * check in pci_p2pdma_distance_many().
	 */
	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_DMA_VIRT_OPS) &&
			 dev->dma_ops == &dma_virt_ops))
		return 0;

	for_each_sg(sg, s, nents, i) {
		paddr = sg_phys(s);

		s->dma_address = paddr - p2p_pgmap->bus_offset;
		sg_dma_len(s) = s->length;
	}

	return nents;
}

/**
 * pci_p2pdma_map_sg_attrs - map a PCI peer-to-peer scatterlist for DMA
 * @dev: device doing the DMA request
 * @sg: scatterlist to map
 * @nents: number of elements in the scatterlist
 * @dir: DMA direction
 * @attrs: DMA attributes passed to dma_map_sg_attrs() (if called)
 *
 * Scatterlists mapped with this function should be unmapped using
 * pci_p2pdma_unmap_sg_attrs().
 *
 * Returns the number of SG entries mapped or 0 on error.
 */
int pci_p2pdma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_pagemap *p2p_pgmap =
		to_p2p_pgmap(sg_page(sg)->pgmap);
	struct pci_dev *client;

	if (WARN_ON_ONCE(!dev_is_pci(dev)))
		return 0;

	client = to_pci_dev(dev);

	switch (pci_p2pdma_map_type(p2p_pgmap->provider, client)) {
	case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
		return dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	case PCI_P2PDMA_MAP_BUS_ADDR:
		return __pci_p2pdma_map_sg(p2p_pgmap, dev, sg, nents);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(pci_p2pdma_map_sg_attrs);

/**
 * pci_p2pdma_unmap_sg_attrs - unmap a PCI peer-to-peer scatterlist that was
 *	mapped with pci_p2pdma_map_sg_attrs()
 * @dev: device doing the DMA request
 * @sg: scatterlist to unmap
 * @nents: number of elements returned by pci_p2pdma_map_sg_attrs()
 * @dir: DMA direction
 * @attrs: DMA attributes passed to dma_unmap_sg_attrs() (if called)
 */
void pci_p2pdma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct pci_p2pdma_pagemap *p2p_pgmap =
		to_p2p_pgmap(sg_page(sg)->pgmap);
	enum pci_p2pdma_map_type map_type;
	struct pci_dev *client;

	if (WARN_ON_ONCE(!dev_is_pci(dev)))
		return;

	client = to_pci_dev(dev);

	map_type = pci_p2pdma_map_type(p2p_pgmap->provider, client);

	if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE)
		dma_unmap_sg_attrs(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL_GPL(pci_p2pdma_unmap_sg_attrs);

/**
 * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store
 *	to enable p2pdma
 * @page: contents of the value to be stored
 * @p2p_dev: returns the PCI device that was selected to be used
 *	(if one was specified in the stored value)
 * @use_p2pdma: returns whether to enable p2pdma or not
 *
 * Parses an attribute value to decide whether to enable p2pdma.
 * The value can select a PCI device (using its full BDF device name)
 * or a boolean (in any format strtobool() accepts). A false value
 * disables p2pdma, a true value lets the caller find a compatible
 * device automatically, and a PCI device name selects that specific
 * provider.
 *
 * pci_p2pdma_enable_show() should be used as the show operation for
 * the attribute.
 *
 * Returns 0 on success.
 */
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma)
{
	struct device *dev;

	dev = bus_find_device_by_name(&pci_bus_type, NULL, page);
	if (dev) {
		*use_p2pdma = true;
		*p2p_dev = to_pci_dev(dev);

		if (!pci_has_p2pmem(*p2p_dev)) {
			pci_err(*p2p_dev,
				"PCI device has no peer-to-peer memory: %s\n",
				page);
			pci_dev_put(*p2p_dev);
			return -ENODEV;
		}

		return 0;
	} else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) {
		/*
		 * If the user enters a PCI device name that doesn't exist,
		 * such as "0000:01:00.1", we don't want strtobool() to treat
		 * it as a '0' when that is clearly not what the user wanted,
		 * so require '0' and '1' to be exactly one character long.
		 */
	} else if (!strtobool(page, use_p2pdma)) {
		return 0;
	}

	pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page);
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store);

/**
 * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating
 *	whether p2pdma is enabled
 * @page: contents of the stored value
 * @p2p_dev: the selected p2p device (NULL if no device is selected)
 * @use_p2pdma: whether p2pdma has been enabled
 *
 * Attributes that use pci_p2pdma_enable_store() should use this function
 * as their show operation.
 */
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma)
{
	if (!use_p2pdma)
		return sprintf(page, "0\n");

	if (!p2p_dev)
		return sprintf(page, "1\n");

	return sprintf(page, "%s\n", pci_name(p2p_dev));
}
EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);