This source file includes the following definitions:
- sba_dump_ranges
- sba_dump_tlb
- sba_dump_pdir_entry
- sba_check_pdir
- sba_dump_sg
- ptr_to_pide
- sba_search_bitmap
- sba_alloc_range
- sba_free_range
- sba_io_pdir_entry
- sba_mark_invalid
- sba_dma_supported
- sba_map_single
- sba_map_page
- sba_unmap_page
- sba_alloc
- sba_free
- sba_map_sg
- sba_unmap_sg
- sba_get_pat_resources
- sba_alloc_pdir
- setup_ibase_imask_callback
- setup_ibase_imask
- sba_ioc_find_quicksilver
- sba_ioc_init_pluto
- sba_ioc_init
- ioc_remap
- sba_hw_init
- sba_common_init
- sba_proc_info
- sba_proc_bitmap_info
- sba_driver_callback
- sba_init
- sba_get_iommu
- sba_directed_lmmio
- sba_distributed_lmmio
19 #include <linux/types.h>
20 #include <linux/kernel.h>
21 #include <linux/spinlock.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24
25 #include <linux/mm.h>
26 #include <linux/string.h>
27 #include <linux/pci.h>
28 #include <linux/scatterlist.h>
29 #include <linux/iommu-helper.h>
30
31 #include <asm/byteorder.h>
32 #include <asm/io.h>
33 #include <asm/dma.h>
34
35 #include <asm/hardware.h>
36
37 #include <linux/proc_fs.h>
38 #include <linux/seq_file.h>
39 #include <linux/module.h>
40
41 #include <asm/ropes.h>
42 #include <asm/mckinley.h>
43 #include <asm/runway.h>
44 #include <asm/page.h>
45 #include <asm/pdc.h>
46 #include <asm/pdcpat.h>
47 #include <asm/parisc-device.h>
48
49 #include "iommu.h"
50
51 #define MODULE_NAME "SBA"
52
53
54
55
56
57
58 #undef DEBUG_SBA_INIT
59 #undef DEBUG_SBA_RUN
60 #undef DEBUG_SBA_RUN_SG
61 #undef DEBUG_SBA_RESOURCE
62 #undef ASSERT_PDIR_SANITY
63 #undef DEBUG_LARGE_SG_ENTRIES
64 #undef DEBUG_DMB_TRAP
65
66 #ifdef DEBUG_SBA_INIT
67 #define DBG_INIT(x...) printk(x)
68 #else
69 #define DBG_INIT(x...)
70 #endif
71
72 #ifdef DEBUG_SBA_RUN
73 #define DBG_RUN(x...) printk(x)
74 #else
75 #define DBG_RUN(x...)
76 #endif
77
78 #ifdef DEBUG_SBA_RUN_SG
79 #define DBG_RUN_SG(x...) printk(x)
80 #else
81 #define DBG_RUN_SG(x...)
82 #endif
83
84
85 #ifdef DEBUG_SBA_RESOURCE
86 #define DBG_RES(x...) printk(x)
87 #else
88 #define DBG_RES(x...)
89 #endif
90
91 #define SBA_INLINE __inline__
92
93 #define DEFAULT_DMA_HINT_REG 0
94
95 struct sba_device *sba_list;
96 EXPORT_SYMBOL_GPL(sba_list);
97
98 static unsigned long ioc_needs_fdc = 0;
99
100
101 static unsigned int global_ioc_cnt = 0;
102
103
104 static unsigned long piranha_bad_128k = 0;
105
106
107 #define SBA_DEV(d) ((struct sba_device *) (d))
108
109 #ifdef CONFIG_AGP_PARISC
110 #define SBA_AGP_SUPPORT
111 #endif
112
113 #ifdef SBA_AGP_SUPPORT
114 static int sba_reserve_agpgart = 1;
115 module_param(sba_reserve_agpgart, int, 0444);
116 MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
117 #endif
118
119
120
121
122
123
124
125
126
127
128 #define READ_REG32(addr) readl(addr)
129 #define READ_REG64(addr) readq(addr)
130 #define WRITE_REG32(val, addr) writel((val), (addr))
131 #define WRITE_REG64(val, addr) writeq((val), (addr))
132
133 #ifdef CONFIG_64BIT
134 #define READ_REG(addr) READ_REG64(addr)
135 #define WRITE_REG(value, addr) WRITE_REG64(value, addr)
136 #else
137 #define READ_REG(addr) READ_REG32(addr)
138 #define WRITE_REG(value, addr) WRITE_REG32(value, addr)
139 #endif
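/*
 * Register accessors: the SBA/IOC registers are 64 bits wide, so
 * READ_REG/WRITE_REG resolve to the 64-bit readq/writeq accessors on
 * 64-bit kernels and fall back to 32-bit accesses on 32-bit kernels.
 */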
140
141 #ifdef DEBUG_SBA_INIT
142
143
144
145
146
147
148
149
150
151
152 static void
153 sba_dump_ranges(void __iomem *hpa)
154 {
155 DBG_INIT("SBA at 0x%p\n", hpa);
156 DBG_INIT("IOS_DIST_BASE : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
157 DBG_INIT("IOS_DIST_MASK : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
158 DBG_INIT("IOS_DIST_ROUTE : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
159 DBG_INIT("\n");
160 DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
161 DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
162 DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
163 }
164
165
166
167
168
169
170
171 static void sba_dump_tlb(void __iomem *hpa)
172 {
173 DBG_INIT("IO TLB at 0x%p\n", hpa);
174 DBG_INIT("IOC_IBASE : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
175 DBG_INIT("IOC_IMASK : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
176 DBG_INIT("IOC_TCNFG : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
177 DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
178 DBG_INIT("\n");
179 }
180 #else
181 #define sba_dump_ranges(x)
182 #define sba_dump_tlb(x)
183 #endif
184
185
186 #ifdef ASSERT_PDIR_SANITY
187
188
189
190
191
192
193
194
195
196 static void
197 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
198 {
199
200 u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
201 unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
202 uint rcnt;
203
204 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
205 msg,
206 rptr, pide & (BITS_PER_LONG - 1), *rptr);
207
208 rcnt = 0;
209 while (rcnt < BITS_PER_LONG) {
210 printk(KERN_DEBUG "%s %2d %p %016Lx\n",
211 (rcnt == (pide & (BITS_PER_LONG - 1)))
212 ? " -->" : " ",
213 rcnt, ptr, *ptr );
214 rcnt++;
215 ptr++;
216 }
217 printk(KERN_DEBUG "%s", msg);
218 }
219
220
221
222
223
224
225
226
227
228 static int
229 sba_check_pdir(struct ioc *ioc, char *msg)
230 {
231 u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
232 u32 *rptr = (u32 *) ioc->res_map;
233 u64 *pptr = ioc->pdir_base;
234 uint pide = 0;
235
236 while (rptr < rptr_end) {
237 u32 rval = *rptr;
238 int rcnt = 32;
239
240 while (rcnt) {
241
242 u32 pde = ((u32) (((char *)pptr)[7])) << 24;
243 if ((rval ^ pde) & 0x80000000)
244 {
245
246
247
248
249 sba_dump_pdir_entry(ioc, msg, pide);
250 return(1);
251 }
252 rcnt--;
253 rval <<= 1;
254 pptr++;
255 pide++;
256 }
257 rptr++;
258 }
259
260 return 0;
261 }
262
263
264
265
266
267
268
269
270
271
272 static void
273 sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
274 {
275 while (nents-- > 0) {
276 printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
277 nents,
278 (unsigned long) sg_dma_address(startsg),
279 sg_dma_len(startsg),
280 sg_virt(startsg), startsg->length);
281 startsg++;
282 }
283 }
284
285 #endif
286
287
288
289
290
291
292
293
294
295
296
297
298
299 #define PAGES_PER_RANGE 1
300
301
302
303 #ifdef ZX1_SUPPORT
304
305 #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
306 #define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
307 #else
308
309 #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
310 #define SBA_IOVP(ioc,iova) (iova)
311 #endif
312
313 #define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
314
315 #define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n)))
316 #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
317
318 static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
319 unsigned int bitshiftcnt)
320 {
321 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
322 + bitshiftcnt;
323 }
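/*
 * ptr_to_pide() converts a word pointer into the resource bitmap plus a
 * bit offset within that word back into an IO pdir index: each bit in
 * res_map represents one IOVP_SIZE page of IOVA space.
 */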
324
325
326
327
328
329
330
331
332
333
334 static SBA_INLINE unsigned long
335 sba_search_bitmap(struct ioc *ioc, struct device *dev,
336 unsigned long bits_wanted)
337 {
338 unsigned long *res_ptr = ioc->res_hint;
339 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
340 unsigned long pide = ~0UL, tpide;
341 unsigned long boundary_size;
342 unsigned long shift;
343 int ret;
344
345 boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
346 1ULL << IOVP_SHIFT) >> IOVP_SHIFT;
347
348 #if defined(ZX1_SUPPORT)
349 BUG_ON(ioc->ibase & ~IOVP_MASK);
350 shift = ioc->ibase >> IOVP_SHIFT;
351 #else
352 shift = 0;
353 #endif
354
355 if (bits_wanted > (BITS_PER_LONG/2)) {
356
357 for(; res_ptr < res_end; ++res_ptr) {
358 tpide = ptr_to_pide(ioc, res_ptr, 0);
359 ret = iommu_is_span_boundary(tpide, bits_wanted,
360 shift,
361 boundary_size);
362 if ((*res_ptr == 0) && !ret) {
363 *res_ptr = RESMAP_MASK(bits_wanted);
364 pide = tpide;
365 break;
366 }
367 }
368
369 res_ptr++;
370 ioc->res_bitshift = 0;
371 } else {
372
373
374
375
376
377
378 unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
379 uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
380 unsigned long mask;
381
382 if (bitshiftcnt >= BITS_PER_LONG) {
383 bitshiftcnt = 0;
384 res_ptr++;
385 }
386 mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;
387
388 DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
389 while(res_ptr < res_end)
390 {
391 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
392 WARN_ON(mask == 0);
393 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
394 ret = iommu_is_span_boundary(tpide, bits_wanted,
395 shift,
396 boundary_size);
397 if ((((*res_ptr) & mask) == 0) && !ret) {
398 *res_ptr |= mask;
399 pide = tpide;
400 break;
401 }
402 mask >>= o;
403 bitshiftcnt += o;
404 if (mask == 0) {
405 mask = RESMAP_MASK(bits_wanted);
406 bitshiftcnt=0;
407 res_ptr++;
408 }
409 }
410
411 ioc->res_bitshift = bitshiftcnt + bits_wanted;
412 }
413
414
415 if (res_end <= res_ptr) {
416 ioc->res_hint = (unsigned long *) ioc->res_map;
417 ioc->res_bitshift = 0;
418 } else {
419 ioc->res_hint = res_ptr;
420 }
421 return (pide);
422 }
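/*
 * sba_search_bitmap() looks for "bits_wanted" consecutive free bits in
 * the resource bitmap.  Large requests (more than BITS_PER_LONG/2 pages)
 * are only satisfied from completely free bitmap words; smaller requests
 * slide a mask, aligned to the request's power-of-two size, across each
 * word.  iommu_is_span_boundary() rejects candidates that would cross
 * the device's DMA segment boundary.  res_hint/res_bitshift implement a
 * next-fit policy; pide stays ~0UL if nothing suitable was found.
 */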
423
424
425
426
427
428
429
430
431
432
433 static int
434 sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
435 {
436 unsigned int pages_needed = size >> IOVP_SHIFT;
437 #ifdef SBA_COLLECT_STATS
438 unsigned long cr_start = mfctl(16);
439 #endif
440 unsigned long pide;
441
442 pide = sba_search_bitmap(ioc, dev, pages_needed);
443 if (pide >= (ioc->res_size << 3)) {
444 pide = sba_search_bitmap(ioc, dev, pages_needed);
445 if (pide >= (ioc->res_size << 3))
446 panic("%s: I/O MMU @ %p is out of mapping resources\n",
447 __FILE__, ioc->ioc_hpa);
448 }
449
450 #ifdef ASSERT_PDIR_SANITY
451
452 if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
453 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
454 }
455 #endif
456
457 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
458 __func__, size, pages_needed, pide,
459 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
460 ioc->res_bitshift );
461
462 #ifdef SBA_COLLECT_STATS
463 {
464 unsigned long cr_end = mfctl(16);
465 unsigned long tmp = cr_end - cr_start;
466
467 cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
468 }
469 ioc->avg_search[ioc->avg_idx++] = cr_start;
470 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
471
472 ioc->used_pages += pages_needed;
473 #endif
474
475 return (pide);
476 }
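/*
 * sba_alloc_range() reserves enough IO pdir entries to map "size" bytes
 * (size is already a multiple of IOVP_SIZE).  The search starts at the
 * rotating hint; on failure the hint has been reset, so the search is
 * retried once from the start of the bitmap before panicking.
 */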
477
478
479
480
481
482
483
484
485
486
487 static SBA_INLINE void
488 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
489 {
490 unsigned long iovp = SBA_IOVP(ioc, iova);
491 unsigned int pide = PDIR_INDEX(iovp);
492 unsigned int ridx = pide >> 3;
493 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
494
495 int bits_not_wanted = size >> IOVP_SHIFT;
496
497
498 unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));
499
500 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
501 __func__, (uint) iova, size,
502 bits_not_wanted, m, pide, res_ptr, *res_ptr);
503
504 #ifdef SBA_COLLECT_STATS
505 ioc->used_pages -= bits_not_wanted;
506 #endif
507
508 *res_ptr &= ~m;
509 }
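/*
 * sba_free_range() is the inverse of sba_alloc_range(): it recomputes
 * the pdir index from the IOVA and clears the corresponding bits in the
 * resource bitmap.  The IO pdir entries themselves are invalidated
 * separately by sba_mark_invalid().
 */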
510
511
512
513
514
515
516
517
518 #ifdef SBA_HINT_SUPPORT
519 #define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
520 #endif
521
522 typedef unsigned long space_t;
523 #define KERNEL_SPACE 0
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565 static void SBA_INLINE
566 sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
567 unsigned long hint)
568 {
569 u64 pa;
570 register unsigned ci;
571
572 pa = lpa(vba);
573 pa &= IOVP_MASK;
574
575 asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
576 pa |= (ci >> PAGE_SHIFT) & 0xff;
577
578 pa |= SBA_PDIR_VALID_BIT;
579 *pdir_ptr = cpu_to_le64(pa);
580
581
582
583
584
585
586 asm_io_fdc(pdir_ptr);
587 }
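/*
 * sba_io_pdir_entry() builds one 64-bit IO pdir entry for the virtual
 * address "vba": the physical page from lpa() masked with IOVP_MASK,
 * the coherence index returned by the LCI instruction (placed in the
 * low byte), and SBA_PDIR_VALID_BIT.  The entry is stored little-endian
 * and flushed with asm_io_fdc() so the IOMMU, which fetches pdir
 * entries from memory, sees the update even when the pdir is not cache
 * coherent.  (The sid/hint arguments are unused in this version; the
 * exact entry layout is hardware-specific and inferred from the code.)
 */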
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606 static SBA_INLINE void
607 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
608 {
609 u32 iovp = (u32) SBA_IOVP(ioc,iova);
610 u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
611
612 #ifdef ASSERT_PDIR_SANITY
613
614
615
616
617
618
619 if (0x80 != (((u8 *) pdir_ptr)[7])) {
620 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
621 }
622 #endif
623
624 if (byte_cnt > IOVP_SIZE)
625 {
626 #if 0
627 unsigned long entries_per_cacheline = ioc_needs_fdc ?
628 L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
629 - (unsigned long) pdir_ptr
630 : 262144;
631 #endif
632
633
634 iovp |= get_order(byte_cnt) + PAGE_SHIFT;
635
636 do {
637
638 ((u8 *) pdir_ptr)[7] = 0;
639 asm_io_fdc(pdir_ptr);
640 if (ioc_needs_fdc) {
641 #if 0
642 entries_per_cacheline = L1_CACHE_SHIFT - 3;
643 #endif
644 }
645 pdir_ptr++;
646 byte_cnt -= IOVP_SIZE;
647 } while (byte_cnt > IOVP_SIZE);
648 } else
649 iovp |= IOVP_SHIFT;
650
651
652
653
654
655
656
657
658 ((u8 *) pdir_ptr)[7] = 0;
659 asm_io_fdc(pdir_ptr);
660
661 WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
662 }
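/*
 * sba_mark_invalid() clears the "valid" byte of every pdir entry that
 * covers [iova, iova + byte_cnt), flushes the modified entries, and
 * then purges the IO TLB by writing the base IOVA (with the range size
 * encoded in its low bits) to the PCOM register.
 */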
663
664
665
666
667
668
669
670
671 static int sba_dma_supported( struct device *dev, u64 mask)
672 {
673 struct ioc *ioc;
674
675 if (dev == NULL) {
676 printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
677 BUG();
678 return(0);
679 }
680
681 ioc = GET_IOC(dev);
682 if (!ioc)
683 return 0;
684
685
686
687
688
689 return((int)(mask >= (ioc->ibase - 1 +
690 (ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
691 }
692
693
694
695
696
697
698
699
700
701
702
703 static dma_addr_t
704 sba_map_single(struct device *dev, void *addr, size_t size,
705 enum dma_data_direction direction)
706 {
707 struct ioc *ioc;
708 unsigned long flags;
709 dma_addr_t iovp;
710 dma_addr_t offset;
711 u64 *pdir_start;
712 int pide;
713
714 ioc = GET_IOC(dev);
715 if (!ioc)
716 return DMA_MAPPING_ERROR;
717
718
719 offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
720
721
722 size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
723
724 spin_lock_irqsave(&ioc->res_lock, flags);
725 #ifdef ASSERT_PDIR_SANITY
726 sba_check_pdir(ioc,"Check before sba_map_single()");
727 #endif
728
729 #ifdef SBA_COLLECT_STATS
730 ioc->msingle_calls++;
731 ioc->msingle_pages += size >> IOVP_SHIFT;
732 #endif
733 pide = sba_alloc_range(ioc, dev, size);
734 iovp = (dma_addr_t) pide << IOVP_SHIFT;
735
736 DBG_RUN("%s() 0x%p -> 0x%lx\n",
737 __func__, addr, (long) iovp | offset);
738
739 pdir_start = &(ioc->pdir_base[pide]);
740
741 while (size > 0) {
742 sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
743
744 DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
745 pdir_start,
746 (u8) (((u8 *) pdir_start)[7]),
747 (u8) (((u8 *) pdir_start)[6]),
748 (u8) (((u8 *) pdir_start)[5]),
749 (u8) (((u8 *) pdir_start)[4]),
750 (u8) (((u8 *) pdir_start)[3]),
751 (u8) (((u8 *) pdir_start)[2]),
752 (u8) (((u8 *) pdir_start)[1]),
753 (u8) (((u8 *) pdir_start)[0])
754 );
755
756 addr += IOVP_SIZE;
757 size -= IOVP_SIZE;
758 pdir_start++;
759 }
760
761
762 asm_io_sync();
763
764 #ifdef ASSERT_PDIR_SANITY
765 sba_check_pdir(ioc,"Check after sba_map_single()");
766 #endif
767 spin_unlock_irqrestore(&ioc->res_lock, flags);
768
769
770 return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
771 }
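/*
 * sba_map_single() is the core mapping path: it rounds the request up
 * to whole IOVP_SIZE pages, allocates a pdir range under ioc->res_lock,
 * writes one pdir entry per page, forces the updates out with
 * asm_io_sync(), and returns ibase | iovp | offset as the DMA address.
 */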
772
773
774 static dma_addr_t
775 sba_map_page(struct device *dev, struct page *page, unsigned long offset,
776 size_t size, enum dma_data_direction direction,
777 unsigned long attrs)
778 {
779 return sba_map_single(dev, page_address(page) + offset, size,
780 direction);
781 }
782
783
784
785
786
787
788
789
790
791
792
793 static void
794 sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
795 enum dma_data_direction direction, unsigned long attrs)
796 {
797 struct ioc *ioc;
798 #if DELAYED_RESOURCE_CNT > 0
799 struct sba_dma_pair *d;
800 #endif
801 unsigned long flags;
802 dma_addr_t offset;
803
804 DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
805
806 ioc = GET_IOC(dev);
807 if (!ioc) {
808 WARN_ON(!ioc);
809 return;
810 }
811 offset = iova & ~IOVP_MASK;
812 iova ^= offset;
813 size += offset;
814 size = ALIGN(size, IOVP_SIZE);
815
816 spin_lock_irqsave(&ioc->res_lock, flags);
817
818 #ifdef SBA_COLLECT_STATS
819 ioc->usingle_calls++;
820 ioc->usingle_pages += size >> IOVP_SHIFT;
821 #endif
822
823 sba_mark_invalid(ioc, iova, size);
824
825 #if DELAYED_RESOURCE_CNT > 0
826
827
828
829 d = &(ioc->saved[ioc->saved_cnt]);
830 d->iova = iova;
831 d->size = size;
832 if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
833 int cnt = ioc->saved_cnt;
834 while (cnt--) {
835 sba_free_range(ioc, d->iova, d->size);
836 d--;
837 }
838 ioc->saved_cnt = 0;
839
840 READ_REG(ioc->ioc_hpa+IOC_PCOM);
841 }
842 #else
843 sba_free_range(ioc, iova, size);
844
845
846 asm_io_sync();
847
848 READ_REG(ioc->ioc_hpa+IOC_PCOM);
849 #endif
850
851 spin_unlock_irqrestore(&ioc->res_lock, flags);
852
853
854
855
856
857
858
859
860
861 }
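/*
 * sba_unmap_page() recovers the page-aligned IOVA and length, marks the
 * pdir entries invalid (which also purges the IO TLB), and releases the
 * bitmap range.  When DELAYED_RESOURCE_CNT is non-zero the frees are
 * batched, so the cost of waiting for the purge to complete (the PCOM
 * read-back) is amortized over several unmaps.
 */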
862
863
864
865
866
867
868
869
870
871
872 static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
873 gfp_t gfp, unsigned long attrs)
874 {
875 void *ret;
876
877 if (!hwdev) {
878
879 *dma_handle = 0;
880 return NULL;
881 }
882
883 ret = (void *) __get_free_pages(gfp, get_order(size));
884
885 if (ret) {
886 memset(ret, 0, size);
887 *dma_handle = sba_map_single(hwdev, ret, size, 0);
888 }
889
890 return ret;
891 }
892
893
894
895
896
897
898
899
900
901
902
903 static void
904 sba_free(struct device *hwdev, size_t size, void *vaddr,
905 dma_addr_t dma_handle, unsigned long attrs)
906 {
907 sba_unmap_page(hwdev, dma_handle, size, 0, 0);
908 free_pages((unsigned long) vaddr, get_order(size));
909 }
910
911
912
913
914
915
916
917 #define PIDE_FLAG 0x80000000UL
918
919 #ifdef SBA_COLLECT_STATS
920 #define IOMMU_MAP_STATS
921 #endif
922 #include "iommu-helpers.h"
923
924 #ifdef DEBUG_LARGE_SG_ENTRIES
925 int dump_run_sg = 0;
926 #endif
927
928
929
930
931
932
933
934
935
936
937
938 static int
939 sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
940 enum dma_data_direction direction, unsigned long attrs)
941 {
942 struct ioc *ioc;
943 int coalesced, filled = 0;
944 unsigned long flags;
945
946 DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
947
948 ioc = GET_IOC(dev);
949 if (!ioc)
950 return 0;
951
952
953 if (nents == 1) {
954 sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
955 sglist->length, direction);
956 sg_dma_len(sglist) = sglist->length;
957 return 1;
958 }
959
960 spin_lock_irqsave(&ioc->res_lock, flags);
961
962 #ifdef ASSERT_PDIR_SANITY
963 if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
964 {
965 sba_dump_sg(ioc, sglist, nents);
966 panic("Check before sba_map_sg()");
967 }
968 #endif
969
970 #ifdef SBA_COLLECT_STATS
971 ioc->msg_calls++;
972 #endif
973
974
975
976
977
978
979
980
981
982 coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
983
984
985
986
987
988
989
990
991
992 filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
993
994
995 asm_io_sync();
996
997 #ifdef ASSERT_PDIR_SANITY
998 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
999 {
1000 sba_dump_sg(ioc, sglist, nents);
1001 panic("Check after sba_map_sg()\n");
1002 }
1003 #endif
1004
1005 spin_unlock_irqrestore(&ioc->res_lock, flags);
1006
1007 DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
1008
1009 return filled;
1010 }
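/*
 * sba_map_sg() has a fast path for single-entry lists (a plain
 * sba_map_single()).  Longer lists are handled in two passes:
 * iommu_coalesce_chunks() merges virtually contiguous entries and
 * allocates an IOVA range per chunk via sba_alloc_range(), then
 * iommu_fill_pdir() writes the pdir entries via sba_io_pdir_entry().
 * The return value is the number of DMA mappings produced, which may
 * be smaller than nents.
 */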
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022 static void
1023 sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
1024 enum dma_data_direction direction, unsigned long attrs)
1025 {
1026 struct ioc *ioc;
1027 #ifdef ASSERT_PDIR_SANITY
1028 unsigned long flags;
1029 #endif
1030
1031 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1032 __func__, nents, sg_virt(sglist), sglist->length);
1033
1034 ioc = GET_IOC(dev);
1035 if (!ioc) {
1036 WARN_ON(!ioc);
1037 return;
1038 }
1039
1040 #ifdef SBA_COLLECT_STATS
1041 ioc->usg_calls++;
1042 #endif
1043
1044 #ifdef ASSERT_PDIR_SANITY
1045 spin_lock_irqsave(&ioc->res_lock, flags);
1046 sba_check_pdir(ioc,"Check before sba_unmap_sg()");
1047 spin_unlock_irqrestore(&ioc->res_lock, flags);
1048 #endif
1049
1050 while (sg_dma_len(sglist) && nents--) {
1051
1052 sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
1053 direction, 0);
1054 #ifdef SBA_COLLECT_STATS
1055 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
1056 ioc->usingle_calls--;
1057 #endif
1058 ++sglist;
1059 }
1060
1061 DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
1062
1063 #ifdef ASSERT_PDIR_SANITY
1064 spin_lock_irqsave(&ioc->res_lock, flags);
1065 sba_check_pdir(ioc,"Check after sba_unmap_sg()");
1066 spin_unlock_irqrestore(&ioc->res_lock, flags);
1067 #endif
1068
1069 }
1070
1071 static const struct dma_map_ops sba_ops = {
1072 .dma_supported = sba_dma_supported,
1073 .alloc = sba_alloc,
1074 .free = sba_free,
1075 .map_page = sba_map_page,
1076 .unmap_page = sba_unmap_page,
1077 .map_sg = sba_map_sg,
1078 .unmap_sg = sba_unmap_sg,
1079 .get_sgtable = dma_common_get_sgtable,
1080 };
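/*
 * These ops are installed as hppa_dma_ops by sba_driver_callback(), so
 * device drivers never call them directly; they reach them through the
 * generic DMA API.  Illustrative sketch only (pdev and buf below are
 * hypothetical, not names from this file):
 *
 *	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len,
 *					   DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 */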
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092 static void
1093 sba_get_pat_resources(struct sba_device *sba_dev)
1094 {
1095 #if 0
1096
1097
1098
1099
1100
1101
1102 PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp);
1103 FIXME : ???
1104 PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp);
1105 Tells where the dvi bits are located in the address.
1106 PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
1107 FIXME : ???
1108 #endif
1109 }
1110
1111
1112
1113
1114
1115
1116
1117 #define PIRANHA_ADDR_MASK 0x00160000UL
1118 #define PIRANHA_ADDR_VAL 0x00060000UL
1119 static void *
1120 sba_alloc_pdir(unsigned int pdir_size)
1121 {
1122 unsigned long pdir_base;
1123 unsigned long pdir_order = get_order(pdir_size);
1124
1125 pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
1126 if (NULL == (void *) pdir_base) {
1127 panic("%s() could not allocate I/O Page Table\n",
1128 __func__);
1129 }
1130
1131
1132
1133
1134
1135
1136
1137 if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
1138 || (boot_cpu_data.pdc.versions > 0x202)
1139 || (boot_cpu_data.pdc.capabilities & 0x08L) )
1140 return (void *) pdir_base;
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160 if (pdir_order <= (19-12)) {
1161 if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
1162
1163 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
1164
1165 free_pages(pdir_base, pdir_order);
1166
1167 pdir_base = new_pdir;
1168
1169
1170 while (pdir_order < (19-12)) {
1171 new_pdir += pdir_size;
1172 free_pages(new_pdir, pdir_order);
1173 pdir_order +=1;
1174 pdir_size <<=1;
1175 }
1176 }
1177 } else {
1178
1179
1180
1181
1182 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1);
1183
1184
1185 free_pages( pdir_base, pdir_order);
1186
1187
1188 free_pages(new_pdir, 20-12);
1189
1190 pdir_base = new_pdir + 1024*1024;
1191
1192 if (pdir_order > (20-12)) {
1193
1194
1195
1196
1197
1198
1199 piranha_bad_128k = 1;
1200
1201 new_pdir += 3*1024*1024;
1202
1203 free_pages(new_pdir, 20-12);
1204
1205
1206 free_pages(new_pdir - 128*1024 , 17-12);
1207
1208 pdir_size -= 128*1024;
1209 }
1210 }
1211
1212 memset((void *) pdir_base, 0, pdir_size);
1213 return (void *) pdir_base;
1214 }
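/*
 * sba_alloc_pdir() allocates the IO pdir itself.  On certain PA8700
 * (PCX-W2/Piranha) revisions, placing the pdir in particular physical
 * 128kB regions (PIRANHA_ADDR_MASK/PIRANHA_ADDR_VAL) is unsafe, so the
 * routine over-allocates, frees the unusable pieces, and for large
 * pdirs records the bad 128kB chunk in piranha_bad_128k so that
 * sba_common_init() can reserve it in the resource bitmap.  This
 * description is inferred from the cpuid/firmware checks above; the
 * erratum details are in HP's processor documentation.
 */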
1215
1216 struct ibase_data_struct {
1217 struct ioc *ioc;
1218 int ioc_num;
1219 };
1220
1221 static int setup_ibase_imask_callback(struct device *dev, void *data)
1222 {
1223
1224 extern void lba_set_iregs(struct parisc_device *, u32, u32);
1225 struct parisc_device *lba = to_parisc_device(dev);
1226 struct ibase_data_struct *ibd = data;
1227 int rope_num = (lba->hpa.start >> 13) & 0xf;
1228 if (rope_num >> 3 == ibd->ioc_num)
1229 lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
1230 return 0;
1231 }
1232
1233
1234 static void
1235 setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1236 {
1237 struct ibase_data_struct ibase_data = {
1238 .ioc = ioc,
1239 .ioc_num = ioc_num,
1240 };
1241
1242 device_for_each_child(&sba->dev, &ibase_data,
1243 setup_ibase_imask_callback);
1244 }
1245
1246 #ifdef SBA_AGP_SUPPORT
1247 static int
1248 sba_ioc_find_quicksilver(struct device *dev, void *data)
1249 {
1250 int *agp_found = data;
1251 struct parisc_device *lba = to_parisc_device(dev);
1252
1253 if (IS_QUICKSILVER(lba))
1254 *agp_found = 1;
1255 return 0;
1256 }
1257 #endif
1258
1259 static void
1260 sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1261 {
1262 u32 iova_space_mask;
1263 u32 iova_space_size;
1264 int iov_order, tcnfg;
1265 #ifdef SBA_AGP_SUPPORT
1266 int agp_found = 0;
1267 #endif
1268
1269
1270
1271
1272
1273 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
1274 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
1275
1276 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
1277 printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
1278 iova_space_size /= 2;
1279 }
1280
1281
1282
1283
1284
1285 iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
1286 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1287
1288 DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
1289 __func__, ioc->ioc_hpa, iova_space_size >> 20,
1290 iov_order + PAGE_SHIFT);
1291
1292 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1293 get_order(ioc->pdir_size));
1294 if (!ioc->pdir_base)
1295 panic("Couldn't allocate I/O Page Table\n");
1296
1297 memset(ioc->pdir_base, 0, ioc->pdir_size);
1298
1299 DBG_INIT("%s() pdir %p size %x\n",
1300 __func__, ioc->pdir_base, ioc->pdir_size);
1301
1302 #ifdef SBA_HINT_SUPPORT
1303 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1304 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1305
1306 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1307 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1308 #endif
1309
1310 WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
1311 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1312
1313
1314 iova_space_mask = 0xffffffff;
1315 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1316 ioc->imask = iova_space_mask;
1317 #ifdef ZX1_SUPPORT
1318 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1319 #endif
1320 sba_dump_tlb(ioc->ioc_hpa);
1321
1322 setup_ibase_imask(sba, ioc, ioc_num);
1323
1324 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
1325
1326 #ifdef CONFIG_64BIT
1327
1328
1329
1330
1331 ioc->imask |= 0xFFFFFFFF00000000UL;
1332 #endif
1333
1334
1335 switch (PAGE_SHIFT) {
1336 case 12: tcnfg = 0; break;
1337 case 13: tcnfg = 1; break;
1338 case 14: tcnfg = 2; break;
1339 case 16: tcnfg = 3; break;
1340 default:
1341 panic(__FILE__ "Unsupported system page size %d",
1342 1 << PAGE_SHIFT);
1343 break;
1344 }
1345 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1346
1347
1348
1349
1350
1351 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1352
1353
1354
1355
1356
1357 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
1358
1359 #ifdef SBA_AGP_SUPPORT
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369 device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);
1370
1371 if (agp_found && sba_reserve_agpgart) {
1372 printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
1373 __func__, (iova_space_size/2) >> 20);
1374 ioc->pdir_size /= 2;
1375 ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
1376 }
1377 #endif
1378 }
1379
1380 static void
1381 sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1382 {
1383 u32 iova_space_size, iova_space_mask;
1384 unsigned int pdir_size, iov_order, tcnfg;
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400 iova_space_size = (u32) (totalram_pages()/global_ioc_cnt);
1401
1402
1403 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
1404 iova_space_size = 1 << (20 - PAGE_SHIFT);
1405 }
1406 else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
1407 iova_space_size = 1 << (30 - PAGE_SHIFT);
1408 }
1409
1410
1411
1412
1413
1414
1415 iov_order = get_order(iova_space_size << PAGE_SHIFT);
1416
1417
1418 iova_space_size = 1 << (iov_order + PAGE_SHIFT);
1419
1420 ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
1421
1422 DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
1423 __func__,
1424 ioc->ioc_hpa,
1425 (unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
1426 iova_space_size>>20,
1427 iov_order + PAGE_SHIFT);
1428
1429 ioc->pdir_base = sba_alloc_pdir(pdir_size);
1430
1431 DBG_INIT("%s() pdir %p size %x\n",
1432 __func__, ioc->pdir_base, pdir_size);
1433
1434 #ifdef SBA_HINT_SUPPORT
1435
1436 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1437 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1438
1439 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1440 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1441 #endif
1442
1443 WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1444
1445
1446 iova_space_mask = 0xffffffff;
1447 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1448
1449
1450
1451
1452
1453 ioc->ibase = 0;
1454 ioc->imask = iova_space_mask;
1455 #ifdef ZX1_SUPPORT
1456 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1457 #endif
1458
1459 DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
1460 __func__, ioc->ibase, ioc->imask);
1461
1462
1463
1464
1465
1466
1467
1468 setup_ibase_imask(sba, ioc, ioc_num);
1469
1470
1471
1472
1473 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
1474 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
1475
1476
1477 switch (PAGE_SHIFT) {
1478 case 12: tcnfg = 0; break;
1479 case 13: tcnfg = 1; break;
1480 case 14: tcnfg = 2; break;
1481 case 16: tcnfg = 3; break;
1482 default:
1483 panic(__FILE__ "Unsupported system page size %d",
1484 1 << PAGE_SHIFT);
1485 break;
1486 }
1487
1488 WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);
1489
1490
1491
1492
1493
1494 WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
1495
1496 ioc->ibase = 0;
1497
1498 DBG_INIT("%s() DONE\n", __func__);
1499 }
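/*
 * sba_ioc_init() (Astro/Ike/REO) sizes the IOVA space as a fraction of
 * system RAM, clamped between 1MB and 1GB, allocates the pdir with
 * sba_alloc_pdir(), programs IBASE/IMASK/TCNFG/PDIR_BASE, pushes the
 * ibase/imask values down to the LBAs via setup_ibase_imask(), and
 * finishes with a full IO TLB purge.  sba_ioc_init_pluto() above does
 * the same for Pluto/McKinley, except the IOVA range comes from the
 * firmware-programmed IBASE/IMASK and half of the pdir can be reserved
 * for AGPGART.
 */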
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514 static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
1515 {
1516 return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
1517 }
1518
1519 static void sba_hw_init(struct sba_device *sba_dev)
1520 {
1521 int i;
1522 int num_ioc;
1523 u64 ioc_ctl;
1524
1525 if (!is_pdc_pat()) {
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543 if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
1544 pdc_io_reset_devices();
1545 }
1546
1547 }
1548
1549
1550 #if 0
1551 printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
1552 PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563 if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
1564 && (PAGE0->mem_boot.cl_class != CL_SEQU)) {
1565 pdc_io_reset();
1566 }
1567 #endif
1568
1569 if (!IS_PLUTO(sba_dev->dev)) {
1570 ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
1571 DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
1572 __func__, sba_dev->sba_hpa, ioc_ctl);
1573 ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
1574 ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
1575
1576
1577
1578 WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);
1579
1580 #ifdef DEBUG_SBA_INIT
1581 ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
1582 DBG_INIT(" 0x%Lx\n", ioc_ctl);
1583 #endif
1584 }
1585
1586 if (IS_ASTRO(sba_dev->dev)) {
1587 int err;
1588 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
1589 num_ioc = 1;
1590
1591 sba_dev->chip_resv.name = "Astro Intr Ack";
1592 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
1593 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
1594 err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
1595 BUG_ON(err < 0);
1596
1597 } else if (IS_PLUTO(sba_dev->dev)) {
1598 int err;
1599
1600 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
1601 num_ioc = 1;
1602
1603 sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
1604 sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
1605 sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
1606 err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
1607 WARN_ON(err < 0);
1608
1609 sba_dev->iommu_resv.name = "IOVA Space";
1610 sba_dev->iommu_resv.start = 0x40000000UL;
1611 sba_dev->iommu_resv.end = 0x50000000UL - 1;
1612 err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
1613 WARN_ON(err < 0);
1614 } else {
1615
1616 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
1617 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
1618 num_ioc = 2;
1619
1620
1621 }
1622
1623
1624 sba_dev->num_ioc = num_ioc;
1625 for (i = 0; i < num_ioc; i++) {
1626 void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
1627 unsigned int j;
1628
1629 for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {
1630
1631
1632
1633
1634
1635
1636
1637 if (IS_PLUTO(sba_dev->dev)) {
1638 void __iomem *rope_cfg;
1639 unsigned long cfg_val;
1640
1641 rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
1642 cfg_val = READ_REG(rope_cfg);
1643 cfg_val &= ~IOC_ROPE_AO;
1644 WRITE_REG(cfg_val, rope_cfg);
1645 }
1646
1647
1648
1649
1650 WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
1651 }
1652
1653
1654 READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
1655
1656 DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
1657 i,
1658 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
1659 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
1660 );
1661 DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
1662 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
1663 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
1664 );
1665
1666 if (IS_PLUTO(sba_dev->dev)) {
1667 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
1668 } else {
1669 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
1670 }
1671 }
1672 }
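/*
 * sba_hw_init() figures out how many IOCs this SBA has (one for Astro
 * and Pluto, two for Ike/REO), maps their register sets, reserves the
 * chip-specific MMIO windows, programs IOC_CTRL, enables each rope
 * (clearing the IOC_ROPE_AO bit on Pluto and writing HF_ENABLE so rope
 * errors hard-fail), and then hands each IOC to sba_ioc_init() or
 * sba_ioc_init_pluto().
 */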
1673
1674 static void
1675 sba_common_init(struct sba_device *sba_dev)
1676 {
1677 int i;
1678
1679
1680
1681
1682 sba_dev->next = sba_list;
1683 sba_list = sba_dev;
1684
1685 for(i=0; i< sba_dev->num_ioc; i++) {
1686 int res_size;
1687 #ifdef DEBUG_DMB_TRAP
1688 extern void iterate_pages(unsigned long , unsigned long ,
1689 void (*)(pte_t * , unsigned long),
1690 unsigned long );
1691 void set_data_memory_break(pte_t * , unsigned long);
1692 #endif
1693
1694 res_size = sba_dev->ioc[i].pdir_size/sizeof(u64);
1695
1696
1697 if (piranha_bad_128k) {
1698 res_size -= (128*1024)/sizeof(u64);
1699 }
1700
1701 res_size >>= 3;
1702 DBG_INIT("%s() res_size 0x%x\n",
1703 __func__, res_size);
1704
1705 sba_dev->ioc[i].res_size = res_size;
1706 sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
1707
1708 #ifdef DEBUG_DMB_TRAP
1709 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1710 set_data_memory_break, 0);
1711 #endif
1712
1713 if (NULL == sba_dev->ioc[i].res_map)
1714 {
1715 panic("%s:%s() could not allocate resource map\n",
1716 __FILE__, __func__ );
1717 }
1718
1719 memset(sba_dev->ioc[i].res_map, 0, res_size);
1720
1721 sba_dev->ioc[i].res_hint = (unsigned long *)
1722 &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);
1723
1724 #ifdef ASSERT_PDIR_SANITY
1725
1726 sba_dev->ioc[i].res_map[0] = 0x80;
1727 sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
1728 #endif
1729
1730
1731 if (piranha_bad_128k) {
1732
1733
1734 int idx_start = (1408*1024/sizeof(u64)) >> 3;
1735 int idx_end = (1536*1024/sizeof(u64)) >> 3;
1736 long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
1737 long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);
1738
1739
1740 while (p_start < p_end)
1741 *p_start++ = -1;
1742
1743 }
1744
1745 #ifdef DEBUG_DMB_TRAP
1746 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1747 set_data_memory_break, 0);
1748 iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
1749 set_data_memory_break, 0);
1750 #endif
1751
1752 DBG_INIT("%s() %d res_map %x %p\n",
1753 __func__, i, res_size, sba_dev->ioc[i].res_map);
1754 }
1755
1756 spin_lock_init(&sba_dev->sba_lock);
1757 ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;
1758
1759 #ifdef DEBUG_SBA_INIT
1760
1761
1762
1763
1764
1765 if (ioc_needs_fdc) {
1766 printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
1767 } else {
1768 printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
1769 }
1770 #endif
1771 }
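/*
 * sba_common_init() links the new sba_device into sba_list, allocates
 * and zeroes one resource bitmap per IOC (marking the Piranha bad 128kB
 * region as in-use when needed), initializes the allocation hint and
 * lock, and records in ioc_needs_fdc whether pdir updates must be
 * flushed with FDC/SYNC because the IOC cannot snoop the CPU caches.
 */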
1772
1773 #ifdef CONFIG_PROC_FS
1774 static int sba_proc_info(struct seq_file *m, void *p)
1775 {
1776 struct sba_device *sba_dev = sba_list;
1777 struct ioc *ioc = &sba_dev->ioc[0];
1778 int total_pages = (int) (ioc->res_size << 3);
1779 #ifdef SBA_COLLECT_STATS
1780 unsigned long avg = 0, min, max;
1781 #endif
1782 int i;
1783
1784 seq_printf(m, "%s rev %d.%d\n",
1785 sba_dev->name,
1786 (sba_dev->hw_rev & 0x7) + 1,
1787 (sba_dev->hw_rev & 0x18) >> 3);
1788 seq_printf(m, "IO PDIR size : %d bytes (%d entries)\n",
1789 (int)((ioc->res_size << 3) * sizeof(u64)),
1790 total_pages);
1791
1792 seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
1793 ioc->res_size, ioc->res_size << 3);
1794
1795 seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
1796 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
1797 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
1798 READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));
1799
1800 for (i=0; i<4; i++)
1801 seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n",
1802 i,
1803 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE + i*0x18),
1804 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK + i*0x18),
1805 READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));
1806
1807 #ifdef SBA_COLLECT_STATS
1808 seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
1809 total_pages - ioc->used_pages, ioc->used_pages,
1810 (int)(ioc->used_pages * 100 / total_pages));
1811
1812 min = max = ioc->avg_search[0];
1813 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1814 avg += ioc->avg_search[i];
1815 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1816 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1817 }
1818 avg /= SBA_SEARCH_SAMPLE;
1819 seq_printf(m, " Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
1820 min, avg, max);
1821
1822 seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
1823 ioc->msingle_calls, ioc->msingle_pages,
1824 (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1825
1826
1827 min = ioc->usingle_calls;
1828 max = ioc->usingle_pages - ioc->usg_pages;
1829 seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
1830 min, max, (int)((max * 1000)/min));
1831
1832 seq_printf(m, "pci_map_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1833 ioc->msg_calls, ioc->msg_pages,
1834 (int)((ioc->msg_pages * 1000)/ioc->msg_calls));
1835
1836 seq_printf(m, "pci_unmap_sg() : %12ld calls %12ld pages (avg %d/1000)\n",
1837 ioc->usg_calls, ioc->usg_pages,
1838 (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
1839 #endif
1840
1841 return 0;
1842 }
1843
1844 static int
1845 sba_proc_bitmap_info(struct seq_file *m, void *p)
1846 {
1847 struct sba_device *sba_dev = sba_list;
1848 struct ioc *ioc = &sba_dev->ioc[0];
1849
1850 seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
1851 ioc->res_size, false);
1852 seq_putc(m, '\n');
1853
1854 return 0;
1855 }
1856 #endif
1857
1858 static const struct parisc_device_id sba_tbl[] __initconst = {
1859 { HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
1860 { HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
1861 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
1862 { HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
1863 { HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
1864 { 0, }
1865 };
1866
1867 static int sba_driver_callback(struct parisc_device *);
1868
1869 static struct parisc_driver sba_driver __refdata = {
1870 .name = MODULE_NAME,
1871 .id_table = sba_tbl,
1872 .probe = sba_driver_callback,
1873 };
1874
1875
1876
1877
1878
1879
1880 static int __init sba_driver_callback(struct parisc_device *dev)
1881 {
1882 struct sba_device *sba_dev;
1883 u32 func_class;
1884 int i;
1885 char *version;
1886 void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
1887 #ifdef CONFIG_PROC_FS
1888 struct proc_dir_entry *root;
1889 #endif
1890
1891 sba_dump_ranges(sba_addr);
1892
1893
1894 func_class = READ_REG(sba_addr + SBA_FCLASS);
1895
1896 if (IS_ASTRO(dev)) {
1897 unsigned long fclass;
1898 static char astro_rev[]="Astro ?.?";
1899
1900
1901 fclass = READ_REG(sba_addr);
1902
1903 astro_rev[6] = '1' + (char) (fclass & 0x7);
1904 astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
1905 version = astro_rev;
1906
1907 } else if (IS_IKE(dev)) {
1908 static char ike_rev[] = "Ike rev ?";
1909 ike_rev[8] = '0' + (char) (func_class & 0xff);
1910 version = ike_rev;
1911 } else if (IS_PLUTO(dev)) {
1912 static char pluto_rev[]="Pluto ?.?";
1913 pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
1914 pluto_rev[8] = '0' + (char) (func_class & 0x0f);
1915 version = pluto_rev;
1916 } else {
1917 static char reo_rev[] = "REO rev ?";
1918 reo_rev[8] = '0' + (char) (func_class & 0xff);
1919 version = reo_rev;
1920 }
1921
1922 if (!global_ioc_cnt) {
1923 global_ioc_cnt = count_parisc_driver(&sba_driver);
1924
1925
1926 if ((!IS_ASTRO(dev)) || (!IS_PLUTO(dev)))
1927 global_ioc_cnt *= 2;
1928 }
1929
1930 printk(KERN_INFO "%s found %s at 0x%llx\n",
1931 MODULE_NAME, version, (unsigned long long)dev->hpa.start);
1932
1933 sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
1934 if (!sba_dev) {
1935 printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
1936 return -ENOMEM;
1937 }
1938
1939 parisc_set_drvdata(dev, sba_dev);
1940
1941 for(i=0; i<MAX_IOC; i++)
1942 spin_lock_init(&(sba_dev->ioc[i].res_lock));
1943
1944 sba_dev->dev = dev;
1945 sba_dev->hw_rev = func_class;
1946 sba_dev->name = dev->name;
1947 sba_dev->sba_hpa = sba_addr;
1948
1949 sba_get_pat_resources(sba_dev);
1950 sba_hw_init(sba_dev);
1951 sba_common_init(sba_dev);
1952
1953 hppa_dma_ops = &sba_ops;
1954
1955 #ifdef CONFIG_PROC_FS
1956 switch (dev->id.hversion) {
1957 case PLUTO_MCKINLEY_PORT:
1958 root = proc_mckinley_root;
1959 break;
1960 case ASTRO_RUNWAY_PORT:
1961 case IKE_MERCED_PORT:
1962 default:
1963 root = proc_runway_root;
1964 break;
1965 }
1966
1967 proc_create_single("sba_iommu", 0, root, sba_proc_info);
1968 proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
1969 #endif
1970 return 0;
1971 }
1972
1973
1974
1975
1976
1977
1978 void __init sba_init(void)
1979 {
1980 register_parisc_driver(&sba_driver);
1981 }
1982
1983
1984
1985
1986
1987
1988
1989
1990
1991 void * sba_get_iommu(struct parisc_device *pci_hba)
1992 {
1993 struct parisc_device *sba_dev = parisc_parent(pci_hba);
1994 struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
1995 char t = sba_dev->id.hw_type;
1996 int iocnum = (pci_hba->hw_path >> 3);
1997
1998 WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));
1999
2000 return &(sba->ioc[iocnum]);
2001 }
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012 void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
2013 {
2014 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2015 struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2016 char t = sba_dev->id.hw_type;
2017 int i;
2018 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));
2019
2020 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
2021
2022 r->start = r->end = 0;
2023
2024
2025 for (i=0; i<4; i++) {
2026 int base, size;
2027 void __iomem *reg = sba->sba_hpa + i*0x18;
2028
2029 base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
2030 if ((base & 1) == 0)
2031 continue;
2032
2033 size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);
2034
2035 if ((size & (ROPES_PER_IOC-1)) != rope)
2036 continue;
2037
2038 r->start = (base & ~1UL) | PCI_F_EXTEND;
2039 size = ~ READ_REG32(reg + LMMIO_DIRECT0_MASK);
2040 r->end = r->start + size;
2041 r->flags = IORESOURCE_MEM;
2042 }
2043 }
2044
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055 void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r )
2056 {
2057 struct parisc_device *sba_dev = parisc_parent(pci_hba);
2058 struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
2059 char t = sba_dev->id.hw_type;
2060 int base, size;
2061 int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));
2062
2063 BUG_ON((t!=HPHW_IOA) && (t!=HPHW_BCPORT));
2064
2065 r->start = r->end = 0;
2066
2067 base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
2068 if ((base & 1) == 0) {
2069 BUG();
2070 return;
2071 }
2072
2073 r->start = (base & ~1UL) | PCI_F_EXTEND;
2074
2075 size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
2076 r->start += rope * (size + 1);
2077 r->end = r->start + size;
2078 r->flags = IORESOURCE_MEM;
2079 }