This source file includes the following definitions:
- LBA_DEV
- lba_dump_res
- lba_device_present
- lba_rd_cfg
- elroy_cfg_read
- lba_wr_cfg
- elroy_cfg_write
- mercury_cfg_read
- mercury_cfg_write
- lba_bios_init
- truncate_pat_collision
- extend_lmmio_len
- pcibios_allocate_bridge_resources
- pcibios_allocate_bus_resources
- lba_fixup_bus
- lba_pat_resources
- lba_legacy_resources
- lba_hw_init
- lba_driver_probe
- lba_init
- lba_set_iregs
- quirk_diva_ati_card
- quirk_diva_aux_disable
- quirk_tosca_aux_disable
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30 #include <linux/delay.h>
31 #include <linux/types.h>
32 #include <linux/kernel.h>
33 #include <linux/spinlock.h>
34 #include <linux/init.h>
35 #include <linux/pci.h>
36 #include <linux/ioport.h>
37 #include <linux/slab.h>
38
39 #include <asm/byteorder.h>
40 #include <asm/pdc.h>
41 #include <asm/pdcpat.h>
42 #include <asm/page.h>
43
44 #include <asm/ropes.h>
45 #include <asm/hardware.h>
46 #include <asm/parisc-device.h>
47 #include <asm/io.h>
48
49 #include "iommu.h"
50
51 #undef DEBUG_LBA
52 #undef DEBUG_LBA_PORT
53 #undef DEBUG_LBA_CFG
54 #undef DEBUG_LBA_PAT
55
56 #undef FBB_SUPPORT
57
58
59 #ifdef DEBUG_LBA
60 #define DBG(x...) printk(x)
61 #else
62 #define DBG(x...)
63 #endif
64
65 #ifdef DEBUG_LBA_PORT
66 #define DBG_PORT(x...) printk(x)
67 #else
68 #define DBG_PORT(x...)
69 #endif
70
71 #ifdef DEBUG_LBA_CFG
72 #define DBG_CFG(x...) printk(x)
73 #else
74 #define DBG_CFG(x...)
75 #endif
76
77 #ifdef DEBUG_LBA_PAT
78 #define DBG_PAT(x...) printk(x)
79 #else
80 #define DBG_PAT(x...)
81 #endif
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
/* Name prefix used in this driver's log messages. */
#define MODULE_NAME "LBA"


/* Legacy (Astro) I/O port space: 0xfee00000 in the F-extended region. */
#define LBA_PORT_BASE (PCI_F_EXTEND | 0xfee00000UL)
static void __iomem *astro_iop_base __read_mostly;

/* Scratch sink for dummy register read-backs that flush posted writes. */
static u32 lba_t32;


/* lba_device flag: filter config cycles through lba_device_present(). */
#define LBA_FLAG_SKIP_PROBE 0x10

#define LBA_SKIP_PROBE(d) ((d)->flags & LBA_FLAG_SKIP_PROBE)
110
111 static inline struct lba_device *LBA_DEV(struct pci_hba_data *hba)
112 {
113 return container_of(hba, struct lba_device, hba);
114 }
115
116
117
118
119
/* Largest (bus - first_bus) span accepted by lba_device_present(). */
#define LBA_MAX_NUM_BUSES 8


/* "U" accessors are raw (__raw_*: no ordering/byte-swap guarantees
 * beyond the bare access); "REG" accessors use the full readX/writeX
 * API.  Both operate on LBA register space. */
#define READ_U8(addr) __raw_readb(addr)
#define READ_U16(addr) __raw_readw(addr)
#define READ_U32(addr) __raw_readl(addr)
#define WRITE_U8(value, addr) __raw_writeb(value, addr)
#define WRITE_U16(value, addr) __raw_writew(value, addr)
#define WRITE_U32(value, addr) __raw_writel(value, addr)

#define READ_REG8(addr) readb(addr)
#define READ_REG16(addr) readw(addr)
#define READ_REG32(addr) readl(addr)
#define READ_REG64(addr) readq(addr)
#define WRITE_REG8(value, addr) writeb(value, addr)
#define WRITE_REG16(value, addr) writew(value, addr)
#define WRITE_REG32(value, addr) writel(value, addr)


/* Pack bus/devfn into the config-address token written to
 * LBA_PCI_CFG_ADDR: bus in bits [23:16], devfn in bits [15:8]. */
#define LBA_CFG_TOK(bus,dfn) ((u32) ((bus)<<16 | (dfn)<<8))
#define LBA_CFG_BUS(tok) ((u8) ((tok)>>16))
#define LBA_CFG_DEV(tok) ((u8) ((tok)>>11) & 0x1f)
#define LBA_CFG_FUNC(tok) ((u8) ((tok)>>8 ) & 0x7)


/* Which rope (0..7) an LBA hangs off, decoded from its HPA. */
#define ROPES_PER_IOC 8
#define LBA_NUM(x) ((((unsigned long) x) >> 13) & (ROPES_PER_IOC-1))
156
157
158 static void
159 lba_dump_res(struct resource *r, int d)
160 {
161 int i;
162
163 if (NULL == r)
164 return;
165
166 printk(KERN_DEBUG "(%p)", r->parent);
167 for (i = d; i ; --i) printk(" ");
168 printk(KERN_DEBUG "%p [%lx,%lx]/%lx\n", r,
169 (long)r->start, (long)r->end, r->flags);
170 lba_dump_res(r->child, d+2);
171 lba_dump_res(r->sibling, d);
172 }
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189 static int lba_device_present(u8 bus, u8 dfn, struct lba_device *d)
190 {
191 u8 first_bus = d->hba.hba_bus->busn_res.start;
192 u8 last_sub_bus = d->hba.hba_bus->busn_res.end;
193
194 if ((bus < first_bus) ||
195 (bus > last_sub_bus) ||
196 ((bus - first_bus) >= LBA_MAX_NUM_BUSES)) {
197 return 0;
198 }
199
200 return 1;
201 }
202
203
204
/*
 * LBA_CFG_SETUP: save LBA bus state and quiesce the PCI bus so a config
 * probe of a possibly-absent device cannot be disturbed by other
 * masters.  Expects u32 locals error_config, status_control, arb_mask
 * in the enclosing scope; pair with LBA_CFG_RESTORE().
 */
#define LBA_CFG_SETUP(d, tok) { \
	/* Save contents of the error config register. */ \
	error_config = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG); \
\
	/* Save contents of the status control register. */ \
	status_control = READ_REG32(d->hba.base_addr + LBA_STAT_CTL); \
\
	/* Save the arbitration mask so it can be restored later. */ \
	arb_mask = READ_REG32(d->hba.base_addr + LBA_ARB_MASK); \
\
	/* Grant arbitration to mask bit 0 only, shutting out all \
	 * other PCI masters for the duration of the probe. */ \
	WRITE_REG32(0x1, d->hba.base_addr + LBA_ARB_MASK); \
\
	/* Set LBA_SMART_MODE in the error config register so the \
	 * probe's possible master abort is tolerated. */ \
	WRITE_REG32(error_config | LBA_SMART_MODE, d->hba.base_addr + LBA_ERROR_CONFIG); \
}
230
231
/*
 * LBA_CFG_PROBE: generate a config cycle targeting the vendor ID
 * register of 'tok'.  If no device answers, the resulting master abort
 * is detected afterwards by LBA_CFG_MASTER_ABORT_CHECK().  Each write
 * is followed by a read-back of the address register (into the global
 * lba_t32 sink) to force the posted write to complete.
 */
#define LBA_CFG_PROBE(d, tok) { \
	/* Select the target's vendor ID register. */ \
	WRITE_REG32(tok | PCI_VENDOR_ID, (d)->hba.base_addr + LBA_PCI_CFG_ADDR);\
\
	/* Flush the posted address write. */ \
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
\
	/* Generate the config cycle; the data value is irrelevant \
	 * (vendor ID is read-only). */ \
	WRITE_REG32(~0, (d)->hba.base_addr + LBA_PCI_CFG_DATA); \
\
	/* Make sure the cycle completed before checking for errors. */ \
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
}
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
/* Error status register bit values used below. */
#define LBA_MASTER_ABORT_ERROR 0xc
#define LBA_FATAL_ERROR 0x10

/*
 * LBA_CFG_MASTER_ABORT_CHECK: after LBA_CFG_PROBE(), look at the error
 * status register and set 'error' to 1 if any error (e.g. a master
 * abort from an empty slot) was logged.  Non-fatal errors are cleared
 * so subsequent cycles start clean.  Uses the 'status_control' local
 * captured by LBA_CFG_SETUP().
 */
#define LBA_CFG_MASTER_ABORT_CHECK(d, base, tok, error) { \
	u32 error_status = 0; \
\
	/* Arm error-log clearing, then sample the logged status. */ \
	WRITE_REG32(status_control | CLEAR_ERRLOG_ENABLE, base + LBA_STAT_CTL); \
	error_status = READ_REG32(base + LBA_ERROR_STATUS); \
	if ((error_status & 0x1f) != 0) { \
		/* Some error was logged during the config cycle. */ \
		error = 1; \
		if ((error_status & LBA_FATAL_ERROR) == 0) { \
			/* Non-fatal: clear the error log so later \
			 * cycles are not confused by stale state. */ \
			WRITE_REG32(status_control | CLEAR_ERRLOG, base + LBA_STAT_CTL); \
		} \
	} \
}
305
/* TR4+ hardware: write the (word-aligned) config address; no read-back
 * flush is required on these revisions. */
#define LBA_CFG_TR4_ADDR_SETUP(d, addr) \
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR);

/* Pre-TR4 hardware: same address setup, but read the address register
 * back (into the lba_t32 sink) to flush the posted write before the
 * data port is touched. */
#define LBA_CFG_ADDR_SETUP(d, addr) { \
	WRITE_REG32(((addr) & ~3), (d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
\
	/* Flush the posted address write. */ \
	lba_t32 = READ_REG32((d)->hba.base_addr + LBA_PCI_CFG_ADDR); \
}
317
318
/*
 * LBA_CFG_RESTORE: undo LBA_CFG_SETUP() by writing back the three
 * registers saved in the status_control/error_config/arb_mask locals,
 * re-enabling normal arbitration last.
 */
#define LBA_CFG_RESTORE(d, base) { \
	/* Restore status control (error log enables). */ \
	WRITE_REG32(status_control, base + LBA_STAT_CTL); \
\
	/* Restore error config (drops LBA_SMART_MODE). */ \
	WRITE_REG32(error_config, base + LBA_ERROR_CONFIG); \
\
	/* Restore the arbitration mask for all masters. */ \
	WRITE_REG32(arb_mask, base + LBA_ARB_MASK); \
}
333
334
335
/*
 * Carefully read one config register: quiesce the bus, probe whether
 * the target answers, and only access the data port when no master
 * abort was logged.  Returns all-ones (~0U) for absent devices, which
 * matches standard PCI "no device" semantics.
 */
static unsigned int
lba_rd_cfg(struct lba_device *d, u32 tok, u8 reg, u32 size)
{
	u32 data = ~0U;
	int error = 0;
	/* Scratch locals written and read by the LBA_CFG_* macros. */
	u32 arb_mask = 0;
	u32 error_config = 0;
	u32 status_control = 0;

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_PROBE(d, tok);
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	if (!error) {
		void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

		LBA_CFG_ADDR_SETUP(d, tok | reg);
		/* Sub-word accesses select the proper byte lane(s)
		 * within the 32-bit data port. */
		switch (size) {
		case 1: data = (u32) READ_REG8(data_reg + (reg & 3)); break;
		case 2: data = (u32) READ_REG16(data_reg+ (reg & 2)); break;
		case 4: data = READ_REG32(data_reg); break;
		}
	}
	LBA_CFG_RESTORE(d, d->hba.base_addr);
	return(data);
}
361
362
/*
 * pci_ops.read for Elroy (pre-TR4) LBAs.
 *
 * NOTE(review): the bare { } block below executes unconditionally and
 * returns, so everything after it is dead code.  This reads as a
 * deliberate "always use the careful lba_rd_cfg() path" workaround,
 * with the skip-probe fast path kept for reference -- confirm before
 * re-enabling it.
 */
static int elroy_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus, devfn);
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	/* Standard config space is 256 bytes; devfn fits in 8 bits. */
	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	/* Unconditional slow-but-safe path (see NOTE above). */
	{
		*data = lba_rd_cfg(d, tok, pos, size);
		DBG_CFG("%s(%x+%2x) -> 0x%x (a)\n", __func__, tok, pos, *data);
		return 0;
	}

	/* ---- dead code below this point ---- */

	if (LBA_SKIP_PROBE(d) && !lba_device_present(bus->busn_res.start, devfn, d)) {
		DBG_CFG("%s(%x+%2x) -> -1 (b)\n", __func__, tok, pos);
		/* Absent device: report all ones. */
		*data = ~0U;
		return(0);
	}

	/* Fast path: direct access without the quiesce/probe dance. */
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1: *data = READ_REG8 (data_reg + (pos & 3)); break;
	case 2: *data = READ_REG16(data_reg + (pos & 2)); break;
	case 4: *data = READ_REG32(data_reg); break;
	}
	DBG_CFG("%s(%x+%2x) -> 0x%x (c)\n", __func__, tok, pos, *data);
	return 0;
}
402
403
/*
 * Carefully write one config register: quiesce the bus, perform the
 * write, then check for (and clear) any logged master abort before
 * restoring bus state.  Errors are detected into 'error' but not
 * reported to the caller.
 */
static void
lba_wr_cfg(struct lba_device *d, u32 tok, u8 reg, u32 data, u32 size)
{
	int error = 0;
	/* Scratch locals written and read by the LBA_CFG_* macros. */
	u32 arb_mask = 0;
	u32 error_config = 0;
	u32 status_control = 0;
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;

	LBA_CFG_SETUP(d, tok);
	LBA_CFG_ADDR_SETUP(d, tok | reg);
	/* Sub-word accesses select the proper byte lane(s). */
	switch (size) {
	case 1: WRITE_REG8 (data, data_reg + (reg & 3)); break;
	case 2: WRITE_REG16(data, data_reg + (reg & 2)); break;
	case 4: WRITE_REG32(data, data_reg); break;
	}
	LBA_CFG_MASTER_ABORT_CHECK(d, d->hba.base_addr, tok, error);
	LBA_CFG_RESTORE(d, d->hba.base_addr);
}
423
424
425
426
427
428
429
/*
 * pci_ops.write for Elroy (pre-TR4) LBAs.  Takes the careful
 * lba_wr_cfg() path unless the LBA is marked skip-probe; in that case
 * writes to absent devices are silently dropped and present devices
 * are written directly.
 */
static int elroy_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus,devfn);

	/* Standard config space is 256 bytes; devfn fits in 8 bits. */
	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	if (!LBA_SKIP_PROBE(d)) {
		/* Slow-but-safe path. */
		lba_wr_cfg(d, tok, pos, (u32) data, size);
		DBG_CFG("%s(%x+%2x) = 0x%x (a)\n", __func__, tok, pos,data);
		return 0;
	}

	if (LBA_SKIP_PROBE(d) && (!lba_device_present(bus->busn_res.start, devfn, d))) {
		DBG_CFG("%s(%x+%2x) = 0x%x (b)\n", __func__, tok, pos,data);
		/* Absent device: drop the write. */
		return 1;
	}

	DBG_CFG("%s(%x+%2x) = 0x%x (c)\n", __func__, tok, pos, data);

	/* Fast path: direct write, then flush. */
	LBA_CFG_ADDR_SETUP(d, tok | pos);
	switch(size) {
	case 1: WRITE_REG8 (data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 3));
		   break;
	case 2: WRITE_REG16(data, d->hba.base_addr + LBA_PCI_CFG_DATA + (pos & 2));
		   break;
	case 4: WRITE_REG32(data, d->hba.base_addr + LBA_PCI_CFG_DATA);
		   break;
	}
	/* Read-back flushes the posted config write. */
	lba_t32 = READ_REG32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}
467
468
/* Config-space accessors for Elroy (pre-TR4) LBAs. */
static struct pci_ops elroy_cfg_ops = {
	.read = elroy_cfg_read,
	.write = elroy_cfg_write,
};
473
474
475
476
477
478
479
480 static int mercury_cfg_read(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 *data)
481 {
482 struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
483 u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
484 u32 tok = LBA_CFG_TOK(local_bus, devfn);
485 void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
486
487 if ((pos > 255) || (devfn > 255))
488 return -EINVAL;
489
490 LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
491 switch(size) {
492 case 1:
493 *data = READ_REG8(data_reg + (pos & 3));
494 break;
495 case 2:
496 *data = READ_REG16(data_reg + (pos & 2));
497 break;
498 case 4:
499 *data = READ_REG32(data_reg); break;
500 break;
501 }
502
503 DBG_CFG("mercury_cfg_read(%x+%2x) -> 0x%x\n", tok, pos, *data);
504 return 0;
505 }
506
507
508
509
510
511
/*
 * pci_ops.write for Mercury/Quicksilver (TR4+) LBAs: address setup,
 * the data write, then a raw read-back of the address register to
 * flush the posted write.
 */
static int mercury_cfg_write(struct pci_bus *bus, unsigned int devfn, int pos, int size, u32 data)
{
	struct lba_device *d = LBA_DEV(parisc_walk_tree(bus->bridge));
	void __iomem *data_reg = d->hba.base_addr + LBA_PCI_CFG_DATA;
	u32 local_bus = (bus->parent == NULL) ? 0 : bus->busn_res.start;
	u32 tok = LBA_CFG_TOK(local_bus,devfn);

	/* Standard config space is 256 bytes; devfn fits in 8 bits. */
	if ((pos > 255) || (devfn > 255))
		return -EINVAL;

	DBG_CFG("%s(%x+%2x) <- 0x%x (c)\n", __func__, tok, pos, data);

	LBA_CFG_TR4_ADDR_SETUP(d, tok | pos);
	/* Sub-word accesses select the proper byte lane(s). */
	switch(size) {
	case 1:
		WRITE_REG8 (data, data_reg + (pos & 3));
		break;
	case 2:
		WRITE_REG16(data, data_reg + (pos & 2));
		break;
	case 4:
		WRITE_REG32(data, data_reg);
		break;
	}

	/* Raw read-back flushes the posted config write. */
	lba_t32 = READ_U32(d->hba.base_addr + LBA_PCI_CFG_ADDR);
	return 0;
}
541
/* Config-space accessors for Mercury/Quicksilver (TR4+) LBAs. */
static struct pci_ops mercury_cfg_ops = {
	.read = mercury_cfg_read,
	.write = mercury_cfg_write,
};
546
547
/* pci_bios_ops.init hook: nothing to do for LBA beyond a debug trace. */
static void
lba_bios_init(void)
{
	DBG(MODULE_NAME ": lba_bios_init\n");
}
553
554
555 #ifdef CONFIG_64BIT
556
557
558
559
560
561
562
563
564
565
566
567
568
569
/*
 * Check a prospective resource 'new' against root's existing children
 * and shrink 'new' in place so it does not overlap one of them.
 *
 * Returns 1 when an existing entry completely covers 'new' (caller
 * should discard the range), 0 otherwise ('new' may have been
 * truncated).  Assumes root's child list is address-sorted.
 */
static unsigned long
truncate_pat_collision(struct resource *root, struct resource *new)
{
	unsigned long start = new->start;
	unsigned long end = new->end;
	struct resource *tmp = root->child;

	/* Degenerate range, out of bounds, or nothing registered yet. */
	if (end <= start || start < root->start || !tmp)
		return 0;

	/* Find the first entry ending at or after our start. */
	while (tmp && tmp->end < start)
		tmp = tmp->sibling;

	/* No entry reaches our range. */
	if (!tmp) return 0;

	/* The first candidate begins after we end: no collision. */
	if (tmp->start >= end) return 0;

	if (tmp->start <= start) {
		/* Overlap at the front: push our start past the entry. */
		new->start = tmp->end + 1;

		if (tmp->end >= end) {
			/* The entry swallows us completely. */
			return 1;
		}
	}

	if (tmp->end < end ) {
		/* Overlap at the back: pull our end before the entry. */
		new->end = tmp->start - 1;
	}
	/* NOTE(review): if tmp starts inside 'new' but extends past its
	 * end, neither branch above truncates new->end -- confirm this
	 * cannot happen with sorted, non-overlapping children. */

	printk(KERN_WARNING "LBA: Truncating lmmio_space [%lx/%lx] "
					"to [%lx,%lx]\n",
			start, end,
			(long)new->start, (long)new->end );

	return 0;
}
614
615
616
617
618
619
620
/*
 * Reconcile a mismatch between the LMMIO length reported by PAT
 * firmware and the length implied by the LBA's LMMIO mask register:
 * extend 'end' by the mask-derived length (capped at 256 MB), then
 * clip the extended range against every other top-level iomem
 * resource so it collides with nothing.  Returns the new end address.
 */
static unsigned long
extend_lmmio_len(unsigned long start, unsigned long end, unsigned long lba_len)
{
	struct resource *tmp;

	/* Nothing to do on pre-Mako CPUs. */
	if (boot_cpu_data.cpu_type < mako)
		return end;

	pr_debug("LMMIO mismatch: PAT length = 0x%lx, MASK register = 0x%lx\n",
		end - start, lba_len);

	/* Cap the extension at 256 MB. */
	lba_len = min(lba_len+1, 256UL*1024*1024);

	pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - original\n", start, end);

	/* Extend, watching for address wrap-around. */
	end += lba_len;
	if (end < start)
		end = -1ULL;

	pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - current\n", start, end);

	/* Walk every top-level iomem range and clip against each. */
	for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
		pr_debug("LBA: testing %pR\n", tmp);
		if (tmp->start == start)
			continue; /* skip the range starting where ours does */
		if (tmp->end < start)
			continue; /* ends before us */
		if (tmp->start > end)
			continue; /* begins after us */
		if (end >= tmp->start)
			end = tmp->start - 1;
	}

	pr_info("LBA: lmmio_space [0x%lx-0x%lx] - new\n", start, end);

	/* NOTE(review): 'end' could drop below 'start' if a neighbour
	 * begins immediately after it -- callers appear to tolerate
	 * this; confirm. */
	return end;
}
662
663 #else
664 #define truncate_pat_collision(r,n) (0)
665 #endif
666
667 static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
668 {
669 int idx;
670 struct resource *r;
671
672 for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
673 r = &dev->resource[idx];
674 if (!r->flags)
675 continue;
676 if (r->parent)
677 continue;
678 if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) {
679
680
681
682
683
684
685 r->start = r->end = 0;
686 r->flags = 0;
687 }
688 }
689 }
690
/* Depth-first walk: claim this bus's bridge windows, then recurse
 * into every child bus. */
static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *child;

	/* The root bus has no upstream bridge (bus->self is NULL). */
	if (bus->self)
		pcibios_allocate_bridge_resources(bus->self);
	list_for_each_entry(child, &bus->children, node)
		pcibios_allocate_bus_resources(child);
}
701
702
703
704
705
706
707
708
709
710
711
/*
 * pci_bios_ops.fixup_bus hook, called for every PCI bus found.
 *
 * Root bus: insert this LBA's port/LMMIO/ELMMIO/GMMIO windows into
 * the global ioport/iomem resource trees (BUG on failures we cannot
 * live without).  Child bus: read the bridge windows and claim them.
 * Then claim each device's BARs, initialise PCI-PCI bridges, and fix
 * up IRQ routing through the IOSAPIC.
 */
static void
lba_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
#ifdef FBB_SUPPORT
	u16 status;
#endif
	struct lba_device *ldev = LBA_DEV(parisc_walk_tree(bus->bridge));

	DBG("lba_fixup_bus(0x%p) bus %d platform_data 0x%p\n",
		bus, (int)bus->busn_res.start, bus->bridge->platform_data);

	if (bus->parent) {
		/* Bus behind a PCI-PCI bridge: pick up its windows and
		 * claim them into the tree. */
		pci_read_bridge_bases(bus);
		pcibios_allocate_bus_resources(bus);
	} else {
		/* Root bus: register this host bridge's windows. */
		int err;

		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.io_space.name,
			ldev->hba.io_space.start, ldev->hba.io_space.end,
			ldev->hba.io_space.flags);
		DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
			ldev->hba.lmmio_space.name,
			ldev->hba.lmmio_space.start, ldev->hba.lmmio_space.end,
			ldev->hba.lmmio_space.flags);

		/* Port space is mandatory -- die loudly if we can't
		 * register it. */
		err = request_resource(&ioport_resource, &(ldev->hba.io_space));
		if (err < 0) {
			lba_dump_res(&ioport_resource, 2);
			BUG();
		}

		if (ldev->hba.elmmio_space.flags) {
			err = request_resource(&iomem_resource,
					&(ldev->hba.elmmio_space));
			if (err < 0) {
				/* Non-fatal: warn and continue without
				 * the directed LMMIO range. */
				printk("FAILED: lba_fixup_bus() request for "
						"elmmio_space [%lx/%lx]\n",
						(long)ldev->hba.elmmio_space.start,
						(long)ldev->hba.elmmio_space.end);
			}
		}

		if (ldev->hba.lmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
			if (err < 0) {
				/* Non-fatal as well. */
				printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
					"lmmio_space [%lx/%lx]\n",
					(long)ldev->hba.lmmio_space.start,
					(long)ldev->hba.lmmio_space.end);
			}
		}

#ifdef CONFIG_64BIT
		/* GMMIO is only set up by PAT firmware (64-bit). */
		if (ldev->hba.gmmio_space.flags) {
			err = request_resource(&iomem_resource, &(ldev->hba.gmmio_space));
			if (err < 0) {
				printk("FAILED: lba_fixup_bus() request for "
					"gmmio_space [%lx/%lx]\n",
					(long)ldev->hba.gmmio_space.start,
					(long)ldev->hba.gmmio_space.end);
				lba_dump_res(&iomem_resource, 2);
				BUG();
			}
		}
#endif

	}

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		DBG("lba_fixup_bus() %s\n", pci_name(dev));

		/* Claim each BAR that firmware already programmed. */
		for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
			struct resource *res = &dev->resource[i];

			/* Unprogrammed BARs are left for later
			 * assignment. */
			if (!res->start)
				continue;

			pci_claim_resource(dev, i);
		}

#ifdef FBB_SUPPORT
		/* A single device without fast back-to-back support
		 * disqualifies the whole bus. */
		(void) pci_read_config_word(dev, PCI_STATUS, &status);
		bus->bridge_ctl &= ~(status & PCI_STATUS_FAST_BACK);
#endif

		/* PCI-PCI bridges get bridge init instead of IRQ fixup. */
		if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
			pcibios_init_bridge(dev);
			continue;
		}

		/* Route the device's interrupt through the IOSAPIC. */
		iosapic_fixup_irq(ldev->iosapic_obj, dev);
	}

#ifdef FBB_SUPPORT
	/* If every device supports FBB, enable it bus-wide. */
	if (fbb_enable) {
		if (bus->parent) {
			u8 control;

			/* Enable on the upstream bridge. */
			(void) pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &control);
			(void) pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, control | PCI_STATUS_FAST_BACK);

		} else {
			/* Root bus: nothing upstream to configure. */
		}
		fbb_enable = PCI_COMMAND_FAST_BACK;
	}

	/* Force parity/SERR (and possibly FBB) on in every device. */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		(void) pci_read_config_word(dev, PCI_COMMAND, &status);
		status |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR | fbb_enable;
		(void) pci_write_config_word(dev, PCI_COMMAND, status);
	}
#endif
}
863
864
/* Hooks handed to the parisc PCI core (assigned to pci_bios). */
static struct pci_bios_ops lba_bios_ops = {
	.init = lba_bios_init,
	.fixup_bus = lba_fixup_bus,
};
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
/*
 * Legacy (Astro) I/O port read: port space lives in the memory-mapped
 * window at astro_iop_base.  The 'mask' parameter is unused here and
 * kept for symmetry with the PAT variants further down.
 */
#define LBA_PORT_IN(size, mask) \
static u##size lba_astro_in##size (struct pci_hba_data *d, u16 addr) \
{ \
	u##size t; \
	t = READ_REG##size(astro_iop_base + addr); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
/*
 * Legacy (Astro) I/O port write.  On Elroy revisions < 3 the posted
 * write is flushed by a dummy raw read of the LBA function ID
 * register.
 */
#define LBA_PORT_OUT(size, mask) \
static void lba_astro_out##size (struct pci_hba_data *d, u16 addr, u##size val) \
{ \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, d, addr, val); \
	WRITE_REG##size(val, astro_iop_base + addr); \
	/* pre-TR3 parts need the posted write flushed */ \
	if (LBA_DEV(d)->hw_rev < 3) \
		lba_t32 = READ_U32(d->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)


/* Port-space accessors used on legacy (non-PAT) platforms. */
static struct pci_port_ops lba_astro_port_ops = {
	.inb = lba_astro_in8,
	.inw = lba_astro_in16,
	.inl = lba_astro_in32,
	.outb = lba_astro_out8,
	.outw = lba_astro_out16,
	.outl = lba_astro_out32
};
950
951
#ifdef CONFIG_64BIT
/*
 * On PAT platforms, PCI port space is reached through a per-LBA GMMIO
 * window: bits [15:2] of the port address select a 1 KB-spaced slot
 * and the low two bits select the byte lane within it.
 */
#define PIOP_TO_GMMIO(lba, addr) \
	((lba)->iop_base + (((addr)&0xFFFC)<<10) + ((addr)&3))

/* PAT-variant port read through the PIOP window. */
#undef LBA_PORT_IN
#define LBA_PORT_IN(size, mask) \
static u##size lba_pat_in##size (struct pci_hba_data *l, u16 addr) \
{ \
	u##size t; \
	DBG_PORT("%s(0x%p, 0x%x) ->", __func__, l, addr); \
	t = READ_REG##size(PIOP_TO_GMMIO(LBA_DEV(l), addr)); \
	DBG_PORT(" 0x%x\n", t); \
	return (t); \
}

LBA_PORT_IN( 8, 3)
LBA_PORT_IN(16, 2)
LBA_PORT_IN(32, 0)


/* PAT-variant port write; the LBA_FUNC_ID read-back flushes the
 * posted write. */
#undef LBA_PORT_OUT
#define LBA_PORT_OUT(size, mask) \
static void lba_pat_out##size (struct pci_hba_data *l, u16 addr, u##size val) \
{ \
	void __iomem *where = PIOP_TO_GMMIO(LBA_DEV(l), addr); \
	DBG_PORT("%s(0x%p, 0x%x, 0x%x)\n", __func__, l, addr, val); \
	WRITE_REG##size(val, where); \
	/* flush the posted write */ \
	lba_t32 = READ_U32(l->base_addr + LBA_FUNC_ID); \
}

LBA_PORT_OUT( 8, 3)
LBA_PORT_OUT(16, 2)
LBA_PORT_OUT(32, 0)


/* Port-space accessors used on PAT PDC (64-bit firmware) platforms. */
static struct pci_port_ops lba_pat_port_ops = {
	.inb = lba_pat_in8,
	.inw = lba_pat_in16,
	.inl = lba_pat_in32,
	.outb = lba_pat_out8,
	.outw = lba_pat_out16,
	.outl = lba_pat_out32
};
1008
1009
1010
1011
1012
1013
1014
1015
1016
/*
 * Ask PAT PDC firmware which address ranges this LBA decodes, in both
 * the processor (PA) view and the PCI (IO) view, and populate the hba
 * resource structures from the returned (type, start, end) triples.
 * Silently returns on allocation failure; panics if firmware refuses
 * to describe the module.
 */
static void
lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	unsigned long bytecnt;
	long io_count;
	long status;	/* accumulated PDC call status */
	long pa_count;
	pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell;	/* PA view */
	pdc_pat_cell_mod_maddr_block_t *io_pdc_cell;	/* IO view */
	int i;

	/* The PDC return blocks are large: heap-allocate them. */
	pa_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!pa_pdc_cell)
		return;

	io_pdc_cell = kzalloc(sizeof(pdc_pat_cell_mod_maddr_block_t), GFP_KERNEL);
	if (!io_pdc_cell) {
		kfree(pa_pdc_cell);
		return;
	}

	/* Query both views of this module's address ranges. */
	status = pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				PA_VIEW, pa_pdc_cell);
	pa_count = pa_pdc_cell->mod[1];

	status |= pdc_pat_cell_module(&bytecnt, pa_dev->pcell_loc, pa_dev->mod_index,
				IO_VIEW, io_pdc_cell);
	io_count = io_pdc_cell->mod[1];

	/* Without firmware data we cannot size this LBA at all. */
	if (status != PDC_OK) {
		panic("pdc_pat_cell_module() call failed for LBA!\n");
	}

	if (PAT_GET_ENTITY(pa_pdc_cell->mod_info) != PAT_ENTITY_LBA) {
		panic("pdc_pat_cell_module() entity returned != PAT_ENTITY_LBA!\n");
	}

	/* Each range is a (type, start, end) triple starting at mod[2]. */
	for (i = 0; i < pa_count; i++) {
		struct {
			unsigned long type;
			unsigned long start;
			unsigned long end;
		} *p, *io;
		struct resource *r;

		p = (void *) &(pa_pdc_cell->mod[2+i*3]);
		io = (void *) &(io_pdc_cell->mod[2+i*3]);

		/* The low byte of 'type' selects the range flavour. */
		switch(p->type & 0xff) {
		case PAT_PBNUM:
			/* PCI bus number range decoded by this LBA. */
			lba_dev->hba.bus_num.start = p->start;
			lba_dev->hba.bus_num.end = p->end;
			lba_dev->hba.bus_num.flags = IORESOURCE_BUS;
			break;

		case PAT_LMMIO:
			/* First LMMIO range fills lmmio_space, a second
			 * fills elmmio_space; more are unsupported. */
			if (!lba_dev->hba.lmmio_space.flags) {
				unsigned long lba_len;

				/* Cross-check firmware length against the
				 * chip's LMMIO mask register. */
				lba_len = ~READ_REG32(lba_dev->hba.base_addr
						+ LBA_LMMIO_MASK);
				if ((p->end - p->start) != lba_len)
					p->end = extend_lmmio_len(p->start,
						p->end, lba_len);

				sprintf(lba_dev->hba.lmmio_name,
						"PCI%02x LMMIO",
						(int)lba_dev->hba.bus_num.start);
				/* Offset between the PA and PCI views of
				 * the same range. */
				lba_dev->hba.lmmio_space_offset = p->start -
					io->start;
				r = &lba_dev->hba.lmmio_space;
				r->name = lba_dev->hba.lmmio_name;
			} else if (!lba_dev->hba.elmmio_space.flags) {
				sprintf(lba_dev->hba.elmmio_name,
						"PCI%02x ELMMIO",
						(int)lba_dev->hba.bus_num.start);
				r = &lba_dev->hba.elmmio_space;
				r->name = lba_dev->hba.elmmio_name;
			} else {
				printk(KERN_WARNING MODULE_NAME
					" only supports 2 LMMIO resources!\n");
				break;
			}

			r->start = p->start;
			r->end = p->end;
			r->flags = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_GMMIO:
			/* Greater MMIO range. */
			sprintf(lba_dev->hba.gmmio_name, "PCI%02x GMMIO",
				(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.gmmio_space;
			r->name = lba_dev->hba.gmmio_name;
			r->start = p->start;
			r->end = p->end;
			r->flags = IORESOURCE_MEM;
			r->parent = r->sibling = r->child = NULL;
			break;

		case PAT_NPIOP:
			/* Non-postable port space: not supported. */
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : ignoring NPIOP (0x%lx)\n",
				i, p->start);
			break;

		case PAT_PIOP:
			/* Map the 64 MB port-space window; accessed via
			 * PIOP_TO_GMMIO() in the PAT port ops above. */
			lba_dev->iop_base = ioremap_nocache(p->start, 64 * 1024 * 1024);

			sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
				(int)lba_dev->hba.bus_num.start);
			r = &lba_dev->hba.io_space;
			r->name = lba_dev->hba.io_name;
			r->start = HBA_PORT_BASE(lba_dev->hba.hba_num);
			r->end = r->start + HBA_PORT_SPACE_SIZE - 1;
			r->flags = IORESOURCE_IO;
			r->parent = r->sibling = r->child = NULL;
			break;

		default:
			printk(KERN_WARNING MODULE_NAME
				" range[%d] : unknown pat range type (0x%lx)\n",
				i, p->type & 0xff);
			break;
		}
	}

	kfree(pa_pdc_cell);
	kfree(io_pdc_cell);
}
1160 #else
1161
1162 #define lba_pat_port_ops lba_astro_port_ops
1163 #define lba_pat_resources(pa_dev, lba_dev)
1164 #endif
1165
1166
1167 extern void sba_distributed_lmmio(struct parisc_device *, struct resource *);
1168 extern void sba_directed_lmmio(struct parisc_device *, struct resource *);
1169
1170
/*
 * Non-PAT (legacy PDC) resource discovery: read the bus-number range
 * from firmware scratch, obtain the LMMIO/ELMMIO ranges from the SBA
 * (which owns the routing on these platforms), and derive the port
 * space window from the LBA's IOS registers.  The #else branches keep
 * the old direct-register sizing code for reference.
 */
static void
lba_legacy_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev)
{
	struct resource *r;
	int lba_num;

	/* On legacy platforms the PCI view is the low 32 bits of the
	 * F-extended processor address. */
	lba_dev->hba.lmmio_space_offset = PCI_F_EXTEND;

	/* Firmware leaves the first/last bus numbers in the scratch
	 * register: low byte = first bus, next byte = last bus. */
	lba_num = READ_REG32(lba_dev->hba.base_addr + LBA_FW_SCRATCH);
	r = &(lba_dev->hba.bus_num);
	r->name = "LBA PCI Busses";
	r->start = lba_num & 0xff;
	r->end = (lba_num>>8) & 0xff;
	r->flags = IORESOURCE_BUS;

	/* Distributed LMMIO range, as routed by the SBA. */
	r = &(lba_dev->hba.lmmio_space);
	sprintf(lba_dev->hba.lmmio_name, "PCI%02x LMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.lmmio_name;

#if 1
	/* Ask the SBA for this rope's share of the distributed range. */
	sba_distributed_lmmio(pa_dev, r);
#else
	/* Old direct-register sizing path, kept for reference. */
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_BASE);
	if (r->start & 1) {
		unsigned long rsize;

		r->flags = IORESOURCE_MEM;
		/* mmio_mask strips the enable bit and offset bits */
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(&lba_dev->hba, r->start);
		rsize = ~ READ_REG32(lba_dev->hba.base_addr + LBA_LMMIO_MASK);

		/* Each rope gets an equal slice of the range. */
		rsize /= ROPES_PER_IOC;
		r->start += (rsize + 1) * LBA_NUM(pa_dev->hpa.start);
		r->end = r->start + rsize;
	} else {
		r->end = r->start = 0;
	}
#endif

	/* Directed (per-rope) LMMIO range. */
	r = &(lba_dev->hba.elmmio_space);
	sprintf(lba_dev->hba.elmmio_name, "PCI%02x ELMMIO",
					(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.elmmio_name;

#if 1
	/* Ask the SBA for the directed range aimed at this rope. */
	sba_directed_lmmio(pa_dev, r);
#else
	/* Old direct-register sizing path, kept for reference. */
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_BASE);

	if (r->start & 1) {
		unsigned long rsize;
		r->flags = IORESOURCE_MEM;
		r->start &= mmio_mask;
		r->start = PCI_HOST_ADDR(&lba_dev->hba, r->start);
		rsize = READ_REG32(lba_dev->hba.base_addr + LBA_ELMMIO_MASK);
		r->end = r->start + ~rsize;
	}
#endif

	/* Port space: base/mask come from the IOS registers; the range
	 * is then relocated into this HBA's slot of the global port
	 * address space. */
	r = &(lba_dev->hba.io_space);
	sprintf(lba_dev->hba.io_name, "PCI%02x Ports",
					(int)lba_dev->hba.bus_num.start);
	r->name = lba_dev->hba.io_name;
	r->flags = IORESOURCE_IO;
	r->start = READ_REG32(lba_dev->hba.base_addr + LBA_IOS_BASE) & ~1L;
	r->end = r->start + (READ_REG32(lba_dev->hba.base_addr + LBA_IOS_MASK) ^ (HBA_PORT_SPACE_SIZE - 1));

	/* Fold in this HBA's port-space base offset. */
	lba_num = HBA_PORT_BASE(lba_dev->hba.hba_num);
	r->start |= lba_num;
	r->end |= lba_num;
}
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
/*
 * One-time hardware initialisation for an LBA: clear lingering bus
 * reset and SMART mode, configure hard-fail, wait out a bus reset if
 * one was pending, and make sure PCI arbitration is enabled.
 * Always returns 0.
 */
static int __init
lba_hw_init(struct lba_device *d)
{
	u32 stat;
	u32 bus_reset;	/* PCI bus reset still asserted? */

#if 0
	/* Register dump kept for bring-up debugging. */
	printk(KERN_DEBUG "LBA %lx STAT_CTL %Lx ERROR_CFG %Lx STATUS %Lx DMA_CTL %Lx\n",
		d->hba.base_addr,
		READ_REG64(d->hba.base_addr + LBA_STAT_CTL),
		READ_REG64(d->hba.base_addr + LBA_ERROR_CONFIG),
		READ_REG64(d->hba.base_addr + LBA_ERROR_STATUS),
		READ_REG64(d->hba.base_addr + LBA_DMA_CTL) );
	printk(KERN_DEBUG " ARB mask %Lx pri %Lx mode %Lx mtlt %Lx\n",
		READ_REG64(d->hba.base_addr + LBA_ARB_MASK),
		READ_REG64(d->hba.base_addr + LBA_ARB_PRI),
		READ_REG64(d->hba.base_addr + LBA_ARB_MODE),
		READ_REG64(d->hba.base_addr + LBA_ARB_MTLT) );
	printk(KERN_DEBUG " HINT cfg 0x%Lx\n",
		READ_REG64(d->hba.base_addr + LBA_HINT_CFG));
	printk(KERN_DEBUG " HINT reg ");
	{ int i;
	for (i=LBA_HINT_BASE; i< (14*8 + LBA_HINT_BASE); i+=8)
		printk(" %Lx", READ_REG64(d->hba.base_addr + i));
	}
	printk("\n");
#endif

#ifdef CONFIG_64BIT
	/* (No 64-bit-specific setup currently performed here.) */
#endif

	/* Bit 0 of the STAT_CTL high word flags an asserted bus reset. */
	bus_reset = READ_REG32(d->hba.base_addr + LBA_STAT_CTL + 4) & 1;
	if (bus_reset) {
		printk(KERN_DEBUG "NOTICE: PCI bus reset still asserted! (clearing)\n");
	}

	/* SMART mode must not be left on for normal operation. */
	stat = READ_REG32(d->hba.base_addr + LBA_ERROR_CONFIG);
	if (stat & LBA_SMART_MODE) {
		printk(KERN_DEBUG "NOTICE: LBA in SMART mode! (cleared)\n");
		stat &= ~LBA_SMART_MODE;
		WRITE_REG32(stat, d->hba.base_addr + LBA_ERROR_CONFIG);
	}

	/* Set hard-fail according to the build configuration. */
	stat = READ_REG32(d->hba.base_addr + LBA_STAT_CTL);
#if defined(ENABLE_HARDFAIL)
	WRITE_REG32(stat | HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
#else
	WRITE_REG32(stat & ~HF_ENABLE, d->hba.base_addr + LBA_STAT_CTL);
#endif

	/* Give devices time to come out of reset before config cycles. */
	if (bus_reset)
		mdelay(pci_post_reset_delay);

	if (0 == READ_REG32(d->hba.base_addr + LBA_ARB_MASK)) {
		/* Firmware left arbitration disabled; enable it so
		 * config cycles (and bus masters) can run. */
		printk(KERN_DEBUG "NOTICE: Enabling PCI Arbitration\n");
		WRITE_REG32(0x3, d->hba.base_addr + LBA_ARB_MASK);
	}

	/* Always succeeds: conditions above are reported, not fatal. */
	return 0;
}
1454
1455
1456
1457
1458
1459
1460
1461
/* Next root-bus number to hand out, so bus numbers stay monotonically
 * increasing across multiple LBA instances. */
static unsigned int lba_next_bus = 0;
1463
1464
1465
1466
1467
1468
1469 static int __init
1470 lba_driver_probe(struct parisc_device *dev)
1471 {
1472 struct lba_device *lba_dev;
1473 LIST_HEAD(resources);
1474 struct pci_bus *lba_bus;
1475 struct pci_ops *cfg_ops;
1476 u32 func_class;
1477 void *tmp_obj;
1478 char *version;
1479 void __iomem *addr = ioremap_nocache(dev->hpa.start, 4096);
1480 int max;
1481
1482
1483 func_class = READ_REG32(addr + LBA_FCLASS);
1484
1485 if (IS_ELROY(dev)) {
1486 func_class &= 0xf;
1487 switch (func_class) {
1488 case 0: version = "TR1.0"; break;
1489 case 1: version = "TR2.0"; break;
1490 case 2: version = "TR2.1"; break;
1491 case 3: version = "TR2.2"; break;
1492 case 4: version = "TR3.0"; break;
1493 case 5: version = "TR4.0"; break;
1494 default: version = "TR4+";
1495 }
1496
1497 printk(KERN_INFO "Elroy version %s (0x%x) found at 0x%lx\n",
1498 version, func_class & 0xf, (long)dev->hpa.start);
1499
1500 if (func_class < 2) {
1501 printk(KERN_WARNING "Can't support LBA older than "
1502 "TR2.1 - continuing under adversity.\n");
1503 }
1504
1505 #if 0
1506
1507
1508
1509 if (func_class > 4) {
1510 cfg_ops = &mercury_cfg_ops;
1511 } else
1512 #endif
1513 {
1514 cfg_ops = &elroy_cfg_ops;
1515 }
1516
1517 } else if (IS_MERCURY(dev) || IS_QUICKSILVER(dev)) {
1518 int major, minor;
1519
1520 func_class &= 0xff;
1521 major = func_class >> 4, minor = func_class & 0xf;
1522
1523
1524
1525
1526 printk(KERN_INFO "%s version TR%d.%d (0x%x) found at 0x%lx\n",
1527 IS_MERCURY(dev) ? "Mercury" : "Quicksilver", major,
1528 minor, func_class, (long)dev->hpa.start);
1529
1530 cfg_ops = &mercury_cfg_ops;
1531 } else {
1532 printk(KERN_ERR "Unknown LBA found at 0x%lx\n",
1533 (long)dev->hpa.start);
1534 return -ENODEV;
1535 }
1536
1537
1538 tmp_obj = iosapic_register(dev->hpa.start + LBA_IOSAPIC_BASE);
1539
1540
1541
1542
1543
1544 lba_dev = kzalloc(sizeof(struct lba_device), GFP_KERNEL);
1545 if (!lba_dev) {
1546 printk(KERN_ERR "lba_init_chip - couldn't alloc lba_device\n");
1547 return(1);
1548 }
1549
1550
1551
1552
1553 lba_dev->hw_rev = func_class;
1554 lba_dev->hba.base_addr = addr;
1555 lba_dev->hba.dev = dev;
1556 lba_dev->iosapic_obj = tmp_obj;
1557 lba_dev->hba.iommu = sba_get_iommu(dev);
1558 parisc_set_drvdata(dev, lba_dev);
1559
1560
1561 pci_bios = &lba_bios_ops;
1562 pcibios_register_hba(&lba_dev->hba);
1563 spin_lock_init(&lba_dev->lba_lock);
1564
1565 if (lba_hw_init(lba_dev))
1566 return(1);
1567
1568
1569
1570 if (is_pdc_pat()) {
1571
1572 pci_port = &lba_pat_port_ops;
1573
1574 lba_pat_resources(dev, lba_dev);
1575 } else {
1576 if (!astro_iop_base) {
1577
1578 astro_iop_base = ioremap_nocache(LBA_PORT_BASE, 64 * 1024);
1579 pci_port = &lba_astro_port_ops;
1580 }
1581
1582
1583 lba_legacy_resources(dev, lba_dev);
1584 }
1585
1586 if (lba_dev->hba.bus_num.start < lba_next_bus)
1587 lba_dev->hba.bus_num.start = lba_next_bus;
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598 if (truncate_pat_collision(&iomem_resource,
1599 &(lba_dev->hba.lmmio_space))) {
1600 printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
1601 (long)lba_dev->hba.lmmio_space.start,
1602 (long)lba_dev->hba.lmmio_space.end);
1603 lba_dev->hba.lmmio_space.flags = 0;
1604 }
1605
1606 pci_add_resource_offset(&resources, &lba_dev->hba.io_space,
1607 HBA_PORT_BASE(lba_dev->hba.hba_num));
1608 if (lba_dev->hba.elmmio_space.flags)
1609 pci_add_resource_offset(&resources, &lba_dev->hba.elmmio_space,
1610 lba_dev->hba.lmmio_space_offset);
1611 if (lba_dev->hba.lmmio_space.flags)
1612 pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space,
1613 lba_dev->hba.lmmio_space_offset);
1614 if (lba_dev->hba.gmmio_space.flags) {
1615
1616
1617
1618 }
1619
1620 pci_add_resource(&resources, &lba_dev->hba.bus_num);
1621
1622 dev->dev.platform_data = lba_dev;
1623 lba_bus = lba_dev->hba.hba_bus =
1624 pci_create_root_bus(&dev->dev, lba_dev->hba.bus_num.start,
1625 cfg_ops, NULL, &resources);
1626 if (!lba_bus) {
1627 pci_free_resource_list(&resources);
1628 return 0;
1629 }
1630
1631 max = pci_scan_child_bus(lba_bus);
1632
1633
1634 if (is_pdc_pat()) {
1635
1636
1637 DBG_PAT("LBA pci_bus_size_bridges()\n");
1638 pci_bus_size_bridges(lba_bus);
1639
1640 DBG_PAT("LBA pci_bus_assign_resources()\n");
1641 pci_bus_assign_resources(lba_bus);
1642
1643 #ifdef DEBUG_LBA_PAT
1644 DBG_PAT("\nLBA PIOP resource tree\n");
1645 lba_dump_res(&lba_dev->hba.io_space, 2);
1646 DBG_PAT("\nLBA LMMIO resource tree\n");
1647 lba_dump_res(&lba_dev->hba.lmmio_space, 2);
1648 #endif
1649 }
1650
1651
1652
1653
1654
1655
1656 if (cfg_ops == &elroy_cfg_ops) {
1657 lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
1658 }
1659
1660 lba_next_bus = max + 1;
1661 pci_bus_add_devices(lba_bus);
1662
1663
1664 return 0;
1665 }
1666
/*
 * Device-ID match table: the three PA-RISC PCI host bridge variants this
 * driver handles.  All are HPHW_BRIDGE-class devices; any hardware
 * revision is accepted (HVERSION_REV_ANY_ID), distinguished by hversion.
 */
static const struct parisc_device_id lba_tbl[] __initconst = {
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, ELROY_HVERS, 0xa },	/* Elroy */
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, MERCURY_HVERS, 0xa },	/* Mercury */
	{ HPHW_BRIDGE, HVERSION_REV_ANY_ID, QUICKSILVER_HVERS, 0xa },	/* Quicksilver */
	{ 0, }				/* terminating sentinel entry */
};
1673
/*
 * PA-RISC bus driver descriptor for the LBA bridge.  Matching devices
 * from lba_tbl are handed to lba_driver_probe().  Marked __refdata
 * because it references __init/__initconst data (id_table/probe).
 */
static struct parisc_driver lba_driver __refdata = {
	.name = MODULE_NAME,
	.id_table = lba_tbl,
	.probe = lba_driver_probe,
};
1679
1680
1681
1682
1683
/*
 * One-time boot registration of the LBA driver with the PA-RISC bus
 * layer; probe callbacks fire as matching bridges are discovered.
 */
void __init lba_init(void)
{
	register_parisc_driver(&lba_driver);
}
1688
1689
1690
1691
1692
1693
1694 void lba_set_iregs(struct parisc_device *lba, u32 ibase, u32 imask)
1695 {
1696 void __iomem * base_addr = ioremap_nocache(lba->hpa.start, 4096);
1697
1698 imask <<= 2;
1699
1700
1701 WARN_ON((ibase & 0x001fffff) != 0);
1702 WARN_ON((imask & 0x001fffff) != 0);
1703
1704 DBG("%s() ibase 0x%x imask 0x%x\n", __func__, ibase, imask);
1705 WRITE_REG32( imask, base_addr + LBA_IMASK);
1706 WRITE_REG32( ibase, base_addr + LBA_IBASE);
1707 iounmap(base_addr);
1708 }
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718
1719
1720 static void quirk_diva_ati_card(struct pci_dev *dev)
1721 {
1722 if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
1723 dev->subsystem_device != 0x1292)
1724 return;
1725
1726 dev_info(&dev->dev, "Hiding Diva built-in ATI card");
1727 dev->device = 0;
1728 }
1729 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RADEON_QY,
1730 quirk_diva_ati_card);
1731
1732 static void quirk_diva_aux_disable(struct pci_dev *dev)
1733 {
1734 if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
1735 dev->subsystem_device != 0x1291)
1736 return;
1737
1738 dev_info(&dev->dev, "Hiding Diva built-in AUX serial device");
1739 dev->device = 0;
1740 }
1741 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA_AUX,
1742 quirk_diva_aux_disable);
1743
1744 static void quirk_tosca_aux_disable(struct pci_dev *dev)
1745 {
1746 if (dev->subsystem_vendor != PCI_VENDOR_ID_HP ||
1747 dev->subsystem_device != 0x104a)
1748 return;
1749
1750 dev_info(&dev->dev, "Hiding Tosca secondary built-in AUX serial device");
1751 dev->device = 0;
1752 }
1753 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_DIVA,
1754 quirk_tosca_aux_disable);