This source file includes the following definitions:
- mk_iommu_pte
- size_for_memory
- iommu_arena_new_node
- iommu_arena_new
- iommu_arena_find_pages
- iommu_arena_alloc
- iommu_arena_free
- pci_dac_dma_supported
- pci_map_single_1
- alpha_gendev_to_pci
- alpha_pci_map_page
- alpha_pci_unmap_page
- alpha_pci_alloc_coherent
- alpha_pci_free_coherent
- sg_classify
- sg_fill
- alpha_pci_map_sg
- alpha_pci_unmap_sg
- alpha_pci_supported
- iommu_reserve
- iommu_release
- iommu_bind
- iommu_unbind
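
The functions listed above are not called by drivers directly; they back the generic DMA API through the alpha_pci_ops table at the end of this file, so dma_map_single() and friends dispatch to alpha_pci_map_page(), alpha_pci_map_sg(), and so on. A minimal usage sketch, assuming a hypothetical driver that already holds a struct device and a kernel buffer:

#include <linux/dma-mapping.h>

/* Hypothetical driver fragment: map `buf' for device-to-memory DMA.
 * On Alpha this lands in alpha_pci_map_page() below, which picks the
 * direct-map window, DAC addressing, or an IOMMU arena as appropriate. */
static int example_dma_transfer(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with `handle' and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}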
1
2 /*
3  * arch/alpha/kernel/pci_iommu.c -- IOMMU and DMA mapping support
4  * for PCI on Alpha.
5  */
6 #include <linux/kernel.h>
7 #include <linux/mm.h>
8 #include <linux/pci.h>
9 #include <linux/gfp.h>
10 #include <linux/memblock.h>
11 #include <linux/export.h>
12 #include <linux/scatterlist.h>
13 #include <linux/log2.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/iommu-helper.h>
16
17 #include <asm/io.h>
18 #include <asm/hwrpb.h>
19
20 #include "proto.h"
21 #include "pci_impl.h"
22
23
24 #define DEBUG_ALLOC 0
25 #if DEBUG_ALLOC > 0
26 # define DBGA(args...) printk(KERN_DEBUG args)
27 #else
28 # define DBGA(args...)
29 #endif
30 #if DEBUG_ALLOC > 1
31 # define DBGA2(args...) printk(KERN_DEBUG args)
32 #else
33 # define DBGA2(args...)
34 #endif
35
36 #define DEBUG_NODIRECT 0
37
38 #define ISA_DMA_MASK 0x00ffffff
39
40 static inline unsigned long
41 mk_iommu_pte(unsigned long paddr)
42 {
43 return (paddr >> (PAGE_SHIFT-1)) | 1;
44 }
45
46
47
48
49 unsigned long
50 size_for_memory(unsigned long max)
51 {
52 unsigned long mem = max_low_pfn << PAGE_SHIFT;
53 if (mem < max)
54 max = roundup_pow_of_two(mem);
55 return max;
56 }
57
58 struct pci_iommu_arena * __init
59 iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
60 unsigned long window_size, unsigned long align)
61 {
62 unsigned long mem_size;
63 struct pci_iommu_arena *arena;
64
65 mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));
66
67
68 /* The PTE table is aligned to at least its own size (mem_size);
69    the `align' argument lets particular systems request an even
70    larger alignment.  */
71 if (align < mem_size)
72 align = mem_size;
73
74
75 #ifdef CONFIG_DISCONTIGMEM
76
77 arena = memblock_alloc_node(sizeof(*arena), align, nid);
78 if (!NODE_DATA(nid) || !arena) {
79 printk("%s: couldn't allocate arena from node %d\n"
80 " falling back to system-wide allocation\n",
81 __func__, nid);
82 arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
83 if (!arena)
84 panic("%s: Failed to allocate %zu bytes\n", __func__,
85 sizeof(*arena));
86 }
87
88 arena->ptes = memblock_alloc_node(mem_size, align, nid);
89 if (!NODE_DATA(nid) || !arena->ptes) {
90 printk("%s: couldn't allocate arena ptes from node %d\n"
91 " falling back to system-wide allocation\n",
92 __func__, nid);
93 arena->ptes = memblock_alloc(mem_size, align);
94 if (!arena->ptes)
95 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
96 __func__, mem_size, align);
97 }
98
99 #else
100
101 arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES);
102 if (!arena)
103 panic("%s: Failed to allocate %zu bytes\n", __func__,
104 sizeof(*arena));
105 arena->ptes = memblock_alloc(mem_size, align);
106 if (!arena->ptes)
107 panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
108 __func__, mem_size, align);
109
110 #endif
111
112 spin_lock_init(&arena->lock);
113 arena->hose = hose;
114 arena->dma_base = base;
115 arena->size = window_size;
116 arena->next_entry = 0;
117
118 /* Align allocations to a multiple of a page size.  Not needed
119    unless there are chip bugs.  */
120 arena->align_entry = 1;
121
122 return arena;
123 }
124
125 struct pci_iommu_arena * __init
126 iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
127 unsigned long window_size, unsigned long align)
128 {
129 return iommu_arena_new_node(0, hose, base, window_size, align);
130 }
131
132 /* Must be called with the arena lock held.  */
133 static long
134 iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
135 long n, long mask)
136 {
137 unsigned long *ptes;
138 long i, p, nent;
139 int pass = 0;
140 unsigned long base;
141 unsigned long boundary_size;
142
143 base = arena->dma_base >> PAGE_SHIFT;
144 if (dev) {
145 boundary_size = dma_get_seg_boundary(dev) + 1;
146 boundary_size >>= PAGE_SHIFT;
147 } else {
148 boundary_size = 1UL << (32 - PAGE_SHIFT);
149 }
150
151 /* Search forward for the first mask-aligned sequence of N free ptes.  */
152 ptes = arena->ptes;
153 nent = arena->size >> PAGE_SHIFT;
154 p = ALIGN(arena->next_entry, mask + 1);
155 i = 0;
156
157 again:
158 while (i < n && p+i < nent) {
159 if (!i && iommu_is_span_boundary(p, n, base, boundary_size)) {
160 p = ALIGN(p + 1, mask + 1);
161 goto again;
162 }
163
164 if (ptes[p+i])
165 p = ALIGN(p + i + 1, mask + 1), i = 0;
166 else
167 i = i + 1;
168 }
169
170 if (i < n) {
171 if (pass < 1) {
172
173
174 /* Reached the end of the arena.  Flush the TLB and restart
175    the search from the beginning.  */
176 alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
177
178 pass++;
179 p = 0;
180 i = 0;
181 goto again;
182 } else
183 return -1;
184 }
185
186 /* Success.  It's the responsibility of the caller to mark the
187    entries in use before releasing the lock.  */
188 return p;
189 }
190
191 static long
192 iommu_arena_alloc(struct device *dev, struct pci_iommu_arena *arena, long n,
193 unsigned int align)
194 {
195 unsigned long flags;
196 unsigned long *ptes;
197 long i, p, mask;
198
199 spin_lock_irqsave(&arena->lock, flags);
200
201 /* Search for N empty ptes.  */
202 ptes = arena->ptes;
203 mask = max(align, arena->align_entry) - 1;
204 p = iommu_arena_find_pages(dev, arena, n, mask);
205 if (p < 0) {
206 spin_unlock_irqrestore(&arena->lock, flags);
207 return -1;
208 }
209
210 /* Success.  Mark them all in use, ie not zero and invalid
211    for the iommu tlb that could load them from under us.
212    The chip specific bits will fill this in with something
213    kosher when we return.  */
214 for (i = 0; i < n; ++i)
215 ptes[p+i] = IOMMU_INVALID_PTE;
216
217 arena->next_entry = p + n;
218 spin_unlock_irqrestore(&arena->lock, flags);
219
220 return p;
221 }
222
223 static void
224 iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
225 {
226 unsigned long *p;
227 long i;
228
229 p = arena->ptes + ofs;
230 for (i = 0; i < n; ++i)
231 p[i] = 0;
232 }
233
234
235 /* True if the machine supports DAC addressing, and DEV can
236    make use of it given MASK.  */
237
238 static int pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
239 {
240 dma_addr_t dac_offset = alpha_mv.pci_dac_offset;
241 int ok = 1;
242
243 /* If this is not set, the machine doesn't support DAC at all.  */
244 if (dac_offset == 0)
245 ok = 0;
246
247 /* The device has to be able to address our DAC bit.  */
248 if ((dac_offset & dev->dma_mask) != dac_offset)
249 ok = 0;
250
251
252 DBGA("pci_dac_dma_supported %s from %ps\n",
253 ok ? "yes" : "no", __builtin_return_address(0));
254
255 return ok;
256 }
257
258
259 /* Map a single buffer of the indicated size for PCI DMA in streaming
260    mode.  The 32-bit PCI bus mastering address to use is returned.
261    Once the device is given the dma address, the device owns this memory
262    until either pci_unmap_single or pci_dma_sync_single is performed.  */
263 static dma_addr_t
264 pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
265 int dac_allowed)
266 {
267 struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
268 dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
269 struct pci_iommu_arena *arena;
270 long npages, dma_ofs, i;
271 unsigned long paddr;
272 dma_addr_t ret;
273 unsigned int align = 0;
274 struct device *dev = pdev ? &pdev->dev : NULL;
275
276 paddr = __pa(cpu_addr);
277
278 #if !DEBUG_NODIRECT
279 /* First check to see if we can use the direct map window.  */
280 if (paddr + size + __direct_map_base - 1 <= max_dma
281 && paddr + size <= __direct_map_size) {
282 ret = paddr + __direct_map_base;
283
284 DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %ps\n",
285 cpu_addr, size, ret, __builtin_return_address(0));
286
287 return ret;
288 }
289 #endif
290
291 /* Next, use DAC if selected earlier.  */
292 if (dac_allowed) {
293 ret = paddr + alpha_mv.pci_dac_offset;
294
295 DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %ps\n",
296 cpu_addr, size, ret, __builtin_return_address(0));
297
298 return ret;
299 }
300
301 /* If the machine doesn't define a pci_tbi routine, we have to
302    assume it doesn't support sg mapping, and, since we tried to
303    use the direct map above, it now must be considered an error.  */
304 if (! alpha_mv.mv_pci_tbi) {
305 printk_once(KERN_WARNING "pci_map_single: no HW sg\n");
306 return DMA_MAPPING_ERROR;
307 }
308
309 arena = hose->sg_pci;
310 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
311 arena = hose->sg_isa;
312
313 npages = iommu_num_pages(paddr, size, PAGE_SIZE);
314
315 /* Force allocation to a 64KB boundary for ISA bridges.  */
316 if (pdev && pdev == isa_bridge)
317 align = 8;
318 dma_ofs = iommu_arena_alloc(dev, arena, npages, align);
319 if (dma_ofs < 0) {
320 printk(KERN_WARNING "pci_map_single failed: "
321 "could not allocate dma page tables\n");
322 return DMA_MAPPING_ERROR;
323 }
324
325 paddr &= PAGE_MASK;
326 for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
327 arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
328
329 ret = arena->dma_base + dma_ofs * PAGE_SIZE;
330 ret += (unsigned long)cpu_addr & ~PAGE_MASK;
331
332 DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %ps\n",
333 cpu_addr, size, npages, ret, __builtin_return_address(0));
334
335 return ret;
336 }
337
338 /* Helper for generic DMA-mapping functions.  */
339 static struct pci_dev *alpha_gendev_to_pci(struct device *dev)
340 {
341 if (dev && dev_is_pci(dev))
342 return to_pci_dev(dev);
343
344 /* Assume that non-PCI devices asking for DMA are either ISA or EISA,
345    BUG() otherwise.  */
346 BUG_ON(!isa_bridge);
347
348 /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
349    bridge is bus master then).  */
350 if (!dev || !dev->dma_mask || !*dev->dma_mask)
351 return isa_bridge;
352
353 /* A non-PCI device whose mask covers at least the ISA bridge's
354    mask is handled through the bridge.  */
355 if (*dev->dma_mask >= isa_bridge->dma_mask)
356 return isa_bridge;
357
358 /* Otherwise assume an ISA bus master with dma_mask 0x00ffffff.  */
359 return NULL;
360 }
361
362 static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
363 unsigned long offset, size_t size,
364 enum dma_data_direction dir,
365 unsigned long attrs)
366 {
367 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
368 int dac_allowed;
369
370 BUG_ON(dir == PCI_DMA_NONE);
371
372 dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
373 return pci_map_single_1(pdev, (char *)page_address(page) + offset,
374 size, dac_allowed);
375 }
376
377 /* Unmap a single streaming-mode DMA translation.  The DMA_ADDR and
378    SIZE must match what was provided in a previous pci_map_single
379    call.  All other usages are undefined.  After this call, reads by
380    the cpu to the buffer are guaranteed to see whatever the device
381    wrote there.  */
382
383 static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
384 size_t size, enum dma_data_direction dir,
385 unsigned long attrs)
386 {
387 unsigned long flags;
388 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
389 struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
390 struct pci_iommu_arena *arena;
391 long dma_ofs, npages;
392
393 BUG_ON(dir == PCI_DMA_NONE);
394
395 if (dma_addr >= __direct_map_base
396 && dma_addr < __direct_map_base + __direct_map_size) {
397 /* Direct-map address: nothing to unmap.  */
398
399 DBGA2("pci_unmap_single: direct [%llx,%zx] from %ps\n",
400 dma_addr, size, __builtin_return_address(0));
401
402 return;
403 }
404
405 if (dma_addr > 0xffffffff) {
406 DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %ps\n",
407 dma_addr, size, __builtin_return_address(0));
408 return;
409 }
410
411 arena = hose->sg_pci;
412 if (!arena || dma_addr < arena->dma_base)
413 arena = hose->sg_isa;
414
415 dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
416 if (dma_ofs * PAGE_SIZE >= arena->size) {
417 printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %llx "
418 " base %llx size %x\n",
419 dma_addr, arena->dma_base, arena->size);
420 return;
421 BUG();
422 }
423
424 npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
425
426 spin_lock_irqsave(&arena->lock, flags);
427
428 iommu_arena_free(arena, dma_ofs, npages);
429
430 /* If we're freeing ptes above the `next_entry' pointer (they
431    may have snuck back into the TLB since the last wrap flush),
432    we need to flush the TLB before reallocating the latter.  */
433 if (dma_ofs >= arena->next_entry)
434 alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);
435
436 spin_unlock_irqrestore(&arena->lock, flags);
437
438 DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %ps\n",
439 dma_addr, size, npages, __builtin_return_address(0));
440 }
441
442
443 /* Allocate and map a kernel buffer using consistent-mode DMA for a
444    PCI device.  Returns a non-NULL cpu-view pointer to the buffer if
445    successful, and sets *DMA_ADDRP to the PCI-side dma address as well;
446    otherwise NULL is returned.  */
447 static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
448 dma_addr_t *dma_addrp, gfp_t gfp,
449 unsigned long attrs)
450 {
451 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
452 void *cpu_addr;
453 long order = get_order(size);
454
455 gfp &= ~GFP_DMA;
456
457 try_again:
458 cpu_addr = (void *)__get_free_pages(gfp | __GFP_ZERO, order);
459 if (! cpu_addr) {
460 printk(KERN_INFO "pci_alloc_consistent: "
461 "get_free_pages failed from %ps\n",
462 __builtin_return_address(0));
463
464
465 return NULL;
466 }
467 memset(cpu_addr, 0, size);
468
469 *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
470 if (*dma_addrp == DMA_MAPPING_ERROR) {
471 free_pages((unsigned long)cpu_addr, order);
472 if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
473 return NULL;
474 /* The address doesn't fit the required mask and there is no
475    iommu to fix it up; try again from ZONE_DMA.  */
476 gfp |= GFP_DMA;
477 goto try_again;
478 }
479
480 DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %ps\n",
481 size, cpu_addr, *dma_addrp, __builtin_return_address(0));
482
483 return cpu_addr;
484 }
485
486
487 /* Free and unmap a consistent DMA buffer.  CPU_ADDR and DMA_ADDR must
488    be values that were returned from pci_alloc_consistent.  SIZE must
489    be the same as what was passed into pci_alloc_consistent.
490    References to the memory and mappings associated with CPU_ADDR or
491    DMA_ADDR past this call are illegal.  */
492 static void alpha_pci_free_coherent(struct device *dev, size_t size,
493 void *cpu_addr, dma_addr_t dma_addr,
494 unsigned long attrs)
495 {
496 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
497 pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
498 free_pages((unsigned long)cpu_addr, get_order(size));
499
500 DBGA2("pci_free_consistent: [%llx,%zx] from %ps\n",
501 dma_addr, size, __builtin_return_address(0));
502 }
503
504
505 /* Classify the elements of the scatterlist.  Write dma_address
506    of each element with:
507      0   : Followers all physically adjacent.
508      1   : Followers all virtually adjacent.
509     -1   : Not leader, physically adjacent to previous.
510     -2   : Not leader, virtually adjacent to previous.
511    Write dma_length of each leader with the combined lengths of
512    the mergeable followers.  */
513 #define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG)))
514 #define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
515
516 static void
517 sg_classify(struct device *dev, struct scatterlist *sg, struct scatterlist *end,
518 int virt_ok)
519 {
520 unsigned long next_paddr;
521 struct scatterlist *leader;
522 long leader_flag, leader_length;
523 unsigned int max_seg_size;
524
525 leader = sg;
526 leader_flag = 0;
527 leader_length = leader->length;
528 next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;
529
530 /* Without a device we have no max segment size, so never merge.  */
531 max_seg_size = dev ? dma_get_max_seg_size(dev) : 0;
532 for (++sg; sg < end; ++sg) {
533 unsigned long addr, len;
534 addr = SG_ENT_PHYS_ADDRESS(sg);
535 len = sg->length;
536
537 if (leader_length + len > max_seg_size)
538 goto new_segment;
539
540 if (next_paddr == addr) {
541 sg->dma_address = -1;
542 leader_length += len;
543 } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
544 sg->dma_address = -2;
545 leader_flag = 1;
546 leader_length += len;
547 } else {
548 new_segment:
549 leader->dma_address = leader_flag;
550 leader->dma_length = leader_length;
551 leader = sg;
552 leader_flag = 0;
553 leader_length = len;
554 }
555
556 next_paddr = addr + len;
557 }
558
559 leader->dma_address = leader_flag;
560 leader->dma_length = leader_length;
561 }
562
563 /* Given a scatterlist leader, choose an allocation method
564    and fill in the blanks.  */
565
566 static int
567 sg_fill(struct device *dev, struct scatterlist *leader, struct scatterlist *end,
568 struct scatterlist *out, struct pci_iommu_arena *arena,
569 dma_addr_t max_dma, int dac_allowed)
570 {
571 unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
572 long size = leader->dma_length;
573 struct scatterlist *sg;
574 unsigned long *ptes;
575 long npages, dma_ofs, i;
576
577 #if !DEBUG_NODIRECT
578 /* If everything is physically contiguous, and the addresses
579    fall into the direct-map window, use it.  */
580 if (leader->dma_address == 0
581 && paddr + size + __direct_map_base - 1 <= max_dma
582 && paddr + size <= __direct_map_size) {
583 out->dma_address = paddr + __direct_map_base;
584 out->dma_length = size;
585
586 DBGA(" sg_fill: [%p,%lx] -> direct %llx\n",
587 __va(paddr), size, out->dma_address);
588
589 return 0;
590 }
591 #endif
592
593 /* If physically contiguous and DAC is available, use it.  */
594 if (leader->dma_address == 0 && dac_allowed) {
595 out->dma_address = paddr + alpha_mv.pci_dac_offset;
596 out->dma_length = size;
597
598 DBGA(" sg_fill: [%p,%lx] -> DAC %llx\n",
599 __va(paddr), size, out->dma_address);
600
601 return 0;
602 }
603
604
605 /* Otherwise, we'll use the iommu to make the pages virtually
606    contiguous.  */
607 paddr &= ~PAGE_MASK;
608 npages = iommu_num_pages(paddr, size, PAGE_SIZE);
609 dma_ofs = iommu_arena_alloc(dev, arena, npages, 0);
610 if (dma_ofs < 0) {
611 /* If the segment was only physically merged, splitting it further won't help; fail.  */
612 if (leader->dma_address == 0)
613 return -1;
614
615 /* Otherwise, re-classify without virtual merging so the smaller
616    physically contiguous chunks can be retried.  */
617 sg_classify(dev, leader, end, 0);
618 return sg_fill(dev, leader, end, out, arena, max_dma, dac_allowed);
619 }
620
621 out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
622 out->dma_length = size;
623
624 DBGA(" sg_fill: [%p,%lx] -> sg %llx np %ld\n",
625 __va(paddr), size, out->dma_address, npages);
626
627 /* All virtually contiguous.  We need the length of each
628    physically contiguous subsegment to fill in the ptes.  */
629 ptes = &arena->ptes[dma_ofs];
630 sg = leader;
631 do {
632 #if DEBUG_ALLOC > 0
633 struct scatterlist *last_sg = sg;
634 #endif
635
636 size = sg->length;
637 paddr = SG_ENT_PHYS_ADDRESS(sg);
638
639 while (sg+1 < end && (int) sg[1].dma_address == -1) {
640 size += sg[1].length;
641 sg++;
642 }
643
644 npages = iommu_num_pages(paddr, size, PAGE_SIZE);
645
646 paddr &= PAGE_MASK;
647 for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
648 *ptes++ = mk_iommu_pte(paddr);
649
650 #if DEBUG_ALLOC > 0
651 DBGA(" (%ld) [%p,%x] np %ld\n",
652 last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
653 last_sg->length, npages);
654 while (++last_sg <= sg) {
655 DBGA(" (%ld) [%p,%x] cont\n",
656 last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
657 last_sg->length);
658 }
659 #endif
660 } while (++sg < end && (int) sg->dma_address < 0);
661
662 return 1;
663 }
664
665 static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
666 int nents, enum dma_data_direction dir,
667 unsigned long attrs)
668 {
669 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
670 struct scatterlist *start, *end, *out;
671 struct pci_controller *hose;
672 struct pci_iommu_arena *arena;
673 dma_addr_t max_dma;
674 int dac_allowed;
675
676 BUG_ON(dir == PCI_DMA_NONE);
677
678 dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
679
680 /* Fast path for single-entry scatterlists.  */
681 if (nents == 1) {
682 sg->dma_length = sg->length;
683 sg->dma_address
684 = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
685 sg->length, dac_allowed);
686 return sg->dma_address != DMA_MAPPING_ERROR;
687 }
688
689 start = sg;
690 end = sg + nents;
691
692 /* First, prepare information about the entries.  */
693 sg_classify(dev, sg, end, alpha_mv.mv_pci_tbi != 0);
694
695 /* Second, figure out where we're going to map things.  */
696 if (alpha_mv.mv_pci_tbi) {
697 hose = pdev ? pdev->sysdata : pci_isa_hose;
698 max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
699 arena = hose->sg_pci;
700 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
701 arena = hose->sg_isa;
702 } else {
703 max_dma = -1;
704 arena = NULL;
705 hose = NULL;
706 }
707
708 /* Third, iterate over the scatterlist leaders and allocate
709    dma space as needed.  */
710 for (out = sg; sg < end; ++sg) {
711 if ((int) sg->dma_address < 0)
712 continue;
713 if (sg_fill(dev, sg, end, out, arena, max_dma, dac_allowed) < 0)
714 goto error;
715 out++;
716 }
717
718 /* Mark the end of the list for pci_unmap_sg.  */
719 if (out < end)
720 out->dma_length = 0;
721
722 if (out - start == 0)
723 printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
724 DBGA("pci_map_sg: %ld entries\n", out - start);
725
726 return out - start;
727
728 error:
729 printk(KERN_WARNING "pci_map_sg failed: "
730 "could not allocate dma page tables\n");
731
732 /* Some allocation failed while mapping the scatterlist
733    entries.  Unmap them now.  */
734 if (out > start)
735 pci_unmap_sg(pdev, start, out - start, dir);
736 return 0;
737 }
738
739
740 /* Unmap a set of streaming-mode DMA translations.  Again, cpu read
741    rules concerning calls here are the same as for pci_unmap_single()
742    above.  */
743 static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
744 int nents, enum dma_data_direction dir,
745 unsigned long attrs)
746 {
747 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
748 unsigned long flags;
749 struct pci_controller *hose;
750 struct pci_iommu_arena *arena;
751 struct scatterlist *end;
752 dma_addr_t max_dma;
753 dma_addr_t fbeg, fend;
754
755 BUG_ON(dir == PCI_DMA_NONE);
756
757 if (! alpha_mv.mv_pci_tbi)
758 return;
759
760 hose = pdev ? pdev->sysdata : pci_isa_hose;
761 max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
762 arena = hose->sg_pci;
763 if (!arena || arena->dma_base + arena->size - 1 > max_dma)
764 arena = hose->sg_isa;
765
766 fbeg = -1, fend = 0;
767
768 spin_lock_irqsave(&arena->lock, flags);
769
770 for (end = sg + nents; sg < end; ++sg) {
771 dma_addr_t addr;
772 size_t size;
773 long npages, ofs;
774 dma_addr_t tend;
775
776 addr = sg->dma_address;
777 size = sg->dma_length;
778 if (!size)
779 break;
780
781 if (addr > 0xffffffff) {
782 /* It's a DAC address -- nothing to do.  */
783 DBGA(" (%ld) DAC [%llx,%zx]\n",
784 sg - end + nents, addr, size);
785 continue;
786 }
787
788 if (addr >= __direct_map_base
789 && addr < __direct_map_base + __direct_map_size) {
790 /* It's a direct-map address -- nothing to do.  */
791 DBGA(" (%ld) direct [%llx,%zx]\n",
792 sg - end + nents, addr, size);
793 continue;
794 }
795
796 DBGA(" (%ld) sg [%llx,%zx]\n",
797 sg - end + nents, addr, size);
798
799 npages = iommu_num_pages(addr, size, PAGE_SIZE);
800 ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
801 iommu_arena_free(arena, ofs, npages);
802
803 tend = addr + size - 1;
804 if (fbeg > addr) fbeg = addr;
805 if (fend < tend) fend = tend;
806 }
807
808 /* If we're freeing ptes above the `next_entry' pointer (they
809    may have snuck back into the TLB since the last wrap flush),
810    we need to flush the TLB before reallocating the latter.  */
811 if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
812 alpha_mv.mv_pci_tbi(hose, fbeg, fend);
813
814 spin_unlock_irqrestore(&arena->lock, flags);
815
816 DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
817 }
818
819
820 /* Return whether the given PCI device DMA address mask can
821    be supported properly.  */
822 static int alpha_pci_supported(struct device *dev, u64 mask)
823 {
824 struct pci_dev *pdev = alpha_gendev_to_pci(dev);
825 struct pci_controller *hose;
826 struct pci_iommu_arena *arena;
827
828 /* If there exists a direct map, and the mask fits either
829    the entire direct-mapped space or the total system memory as
830    shifted by the map base.  */
831 if (__direct_map_size != 0
832 && (__direct_map_base + __direct_map_size - 1 <= mask ||
833 __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
834 return 1;
835
836 /* Check that we have a scatter-gather arena that fits.  */
837 hose = pdev ? pdev->sysdata : pci_isa_hose;
838 arena = hose->sg_isa;
839 if (arena && arena->dma_base + arena->size - 1 <= mask)
840 return 1;
841 arena = hose->sg_pci;
842 if (arena && arena->dma_base + arena->size - 1 <= mask)
843 return 1;
844
845 /* As a last resort try ZONE_DMA.  */
846 if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
847 return 1;
848
849 return 0;
850 }
851
852
853 /*
854  * AGP GART extensions to the IOMMU
855  */
856 int
857 iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
858 {
859 unsigned long flags;
860 unsigned long *ptes;
861 long i, p;
862
863 if (!arena) return -EINVAL;
864
865 spin_lock_irqsave(&arena->lock, flags);
866
867
868 ptes = arena->ptes;
869 p = iommu_arena_find_pages(NULL, arena, pg_count, align_mask);
870 if (p < 0) {
871 spin_unlock_irqrestore(&arena->lock, flags);
872 return -1;
873 }
874
875 /* Success.  Mark them all reserved (ie not zero and invalid)
876    for the iommu tlb that could load them from under us.
877    They will be filled in with valid bits by _bind().  */
878 for (i = 0; i < pg_count; ++i)
879 ptes[p+i] = IOMMU_RESERVED_PTE;
880
881 arena->next_entry = p + pg_count;
882 spin_unlock_irqrestore(&arena->lock, flags);
883
884 return p;
885 }
886
887 int
888 iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
889 {
890 unsigned long *ptes;
891 long i;
892
893 if (!arena) return -EINVAL;
894
895 ptes = arena->ptes;
896
897 /* Make sure they're all reserved first.  */
898 for(i = pg_start; i < pg_start + pg_count; i++)
899 if (ptes[i] != IOMMU_RESERVED_PTE)
900 return -EBUSY;
901
902 iommu_arena_free(arena, pg_start, pg_count);
903 return 0;
904 }
905
906 int
907 iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
908 struct page **pages)
909 {
910 unsigned long flags;
911 unsigned long *ptes;
912 long i, j;
913
914 if (!arena) return -EINVAL;
915
916 spin_lock_irqsave(&arena->lock, flags);
917
918 ptes = arena->ptes;
919
920 for(j = pg_start; j < pg_start + pg_count; j++) {
921 if (ptes[j] != IOMMU_RESERVED_PTE) {
922 spin_unlock_irqrestore(&arena->lock, flags);
923 return -EBUSY;
924 }
925 }
926
927 for(i = 0, j = pg_start; i < pg_count; i++, j++)
928 ptes[j] = mk_iommu_pte(page_to_phys(pages[i]));
929
930 spin_unlock_irqrestore(&arena->lock, flags);
931
932 return 0;
933 }
934
935 int
936 iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
937 {
938 unsigned long *p;
939 long i;
940
941 if (!arena) return -EINVAL;
942
943 p = arena->ptes + pg_start;
944 for(i = 0; i < pg_count; i++)
945 p[i] = IOMMU_RESERVED_PTE;
946
947 return 0;
948 }
949
950 const struct dma_map_ops alpha_pci_ops = {
951 .alloc = alpha_pci_alloc_coherent,
952 .free = alpha_pci_free_coherent,
953 .map_page = alpha_pci_map_page,
954 .unmap_page = alpha_pci_unmap_page,
955 .map_sg = alpha_pci_map_sg,
956 .unmap_sg = alpha_pci_unmap_sg,
957 .dma_supported = alpha_pci_supported,
958 .mmap = dma_common_mmap,
959 .get_sgtable = dma_common_get_sgtable,
960 };
961 EXPORT_SYMBOL(alpha_pci_ops);
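
The iommu_reserve()/iommu_release() and iommu_bind()/iommu_unbind() helpers are not part of alpha_pci_ops; they let other Alpha code (the AGP GART support) carve a window out of an arena and point it at arbitrary pages. A rough sketch of the intended lifecycle, assuming the caller already has an arena (for example hose->sg_pci) and an array of pages; the function name is illustrative:

/* Illustrative only: reserve a window, back it with pages, tear it down. */
static long example_gart_window(struct pci_iommu_arena *arena,
				struct page **pages, long npages)
{
	long pg, err;

	pg = iommu_reserve(arena, npages, 0);		/* no extra alignment */
	if (pg < 0)
		return pg;				/* arena full or invalid */

	err = iommu_bind(arena, pg, npages, pages);	/* fill in the ptes */
	if (err < 0) {
		iommu_release(arena, pg, npages);
		return err;
	}

	/* Device DMA to arena->dma_base + pg * PAGE_SIZE now hits `pages'. */

	iommu_unbind(arena, pg, npages);	/* ptes back to IOMMU_RESERVED_PTE */
	iommu_release(arena, pg, npages);	/* and finally back to free (zero) */
	return 0;
}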