This source file includes the following definitions:
- pci_fixup_i450nx
- pci_fixup_i450gx
- pci_fixup_umc_ide
- pci_fixup_latency
- pci_fixup_piix4_acpi
- pci_fixup_via_northbridge_bug
- pci_fixup_transparent_bridge
- pci_fixup_nforce2
- quirk_pcie_aspm_read
- quirk_pcie_aspm_write
- pcie_rootport_aspm_quirk
- pci_fixup_video
- pci_fixup_msi_k8t_onboard_sound
- pci_pre_fixup_toshiba_ohci1394
- pci_post_fixup_toshiba_ohci1394
- pci_early_fixup_cyrix_5530
- pci_siemens_interrupt_controller
- sb600_disable_hpet_bar
- sb600_hpet_quirk
- twinhead_reserve_killing_zone
- pci_invalid_bar
- pci_fixup_amd_ehci_pme
- pci_fixup_amd_fch_xhci_pme
- quirk_apple_mbp_poweroff
- quirk_no_aersid
- quirk_intel_th_dnv
- pci_amd_enable_64bit_bar
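/*
 * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
 */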

#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <asm/hpet.h>
#include <asm/pci_x86.h>

static void pci_fixup_i450nx(struct pci_dev *d)
{
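	/*
	 * i450NX -- Find and scan all secondary buses on all PXB's.
	 */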
	int pxb, reg;
	u8 busno, suba, subb;

	dev_warn(&d->dev, "Searching for i450NX host bridges\n");
	reg = 0xd0;
	for (pxb = 0; pxb < 2; pxb++) {
		pci_read_config_byte(d, reg++, &busno);
		pci_read_config_byte(d, reg++, &suba);
		pci_read_config_byte(d, reg++, &subb);
		dev_dbg(&d->dev, "i450NX PXB %d: %02x/%02x/%02x\n", pxb, busno,
			suba, subb);
		if (busno)
			pcibios_scan_root(busno);	/* Bus A */
		if (suba < subb)
			pcibios_scan_root(suba+1);	/* Bus B */
	}
	pcibios_last_bus = -1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82451NX, pci_fixup_i450nx);

static void pci_fixup_i450gx(struct pci_dev *d)
{
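	/*
	 * i450KX/GX -- Find and scan all secondary buses.
	 * (called separately for each host bridge found)
	 */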
	u8 busno;

	pci_read_config_byte(d, 0x4a, &busno);
	dev_info(&d->dev, "i440KX/GX host bridge; secondary bus %02x\n", busno);
	pcibios_scan_root(busno);
	pcibios_last_bus = -1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454GX, pci_fixup_i450gx);

static void pci_fixup_umc_ide(struct pci_dev *d)
{
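	/*
	 * The UM8886BF IDE controller sets region type bits incorrectly,
	 * making its BARs look like memory even though they are I/O.
	 */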
	int i;

	dev_warn(&d->dev, "Fixing base address flags\n");
	for (i = 0; i < 4; i++)
		d->resource[i].flags |= PCI_BASE_ADDRESS_SPACE_IO;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide);

static void pci_fixup_latency(struct pci_dev *d)
{
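	/*
	 * SiS 5597 and 5598 chipsets require the latency timer to be set
	 * to at most 32 to avoid lockups.
	 */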
	dev_dbg(&d->dev, "Setting max latency to 32\n");
	pcibios_max_latency = 32;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency);

static void pci_fixup_piix4_acpi(struct pci_dev *d)
{
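	/*
	 * PIIX4 ACPI device: hardwired IRQ9
	 */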
	d->irq = 9;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, pci_fixup_piix4_acpi);
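
/*
 * Addresses issues with problems in the memory write queue timer in
 * certain VIA Northbridges.  This bugfix is per VIA's specifications,
 * except for the KL133/KM133: clearing bit 5 on those Northbridges seems
 * to trigger a bug in the integrated ProSavage video card, which
 * causes screen corruption.  We only clear bits 6 and 7 for that chipset,
 * until VIA can provide us with definitive information on why screen
 * corruption occurs, and what exactly those bits do.
 *
 * VIA 8363, 8622, 8361 Northbridges:
 *  - bits 5, 6, 7 at offset 0x55 need to be turned off
 * VIA 8367 (KT266x) Northbridges:
 *  - bits 5, 6, 7 at offset 0x95 need to be turned off
 * VIA 8363 rev. 0x81/0x84 (Athlon4) Northbridges:
 *  - bits 5, 6, 7 at offset 0x55 need to be turned off
 */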
#define VIA_8363_KL133_REVISION_ID	0x81
#define VIA_8363_KM133_REVISION_ID	0x84

static void pci_fixup_via_northbridge_bug(struct pci_dev *d)
{
	u8 v;
	int where = 0x55;
	int mask = 0x1f;	/* clear bits 5, 6, 7 by default */

	if (d->device == PCI_DEVICE_ID_VIA_8367_0) {
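		/*
		 * Fix PCI bus latency issues caused by a northbridge BIOS
		 * error: some KT266x BIOSes force the NB latency timer to
		 * zero.
		 */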
		pci_write_config_byte(d, PCI_LATENCY_TIMER, 0);
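
		/*
		 * The memory write queue timer register is different for the
		 * KT266x: 0x95 instead of 0x55.
		 */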
		where = 0x95;
	} else if (d->device == PCI_DEVICE_ID_VIA_8363_0 &&
		   (d->revision == VIA_8363_KL133_REVISION_ID ||
		    d->revision == VIA_8363_KM133_REVISION_ID)) {
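		/*
		 * Clear only bits 6 and 7; clearing bit 5 causes screen
		 * corruption on the KL133/KM133.
		 */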
		mask = 0x3f;
	}

	pci_read_config_byte(d, where, &v);
	if (v & ~mask) {
		dev_warn(&d->dev, "Disabling VIA memory write queue (PCI ID %04x, rev %02x): [%02x] %02x & %02x -> %02x\n",
			 d->device, d->revision, where, v, mask, v & mask);
		v &= mask;
		pci_write_config_byte(d, where, v);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8622, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, pci_fixup_via_northbridge_bug);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8367_0, pci_fixup_via_northbridge_bug);
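
/*
 * Certain Intel chipset parts (device IDs 0x24xx) are subtractive-decode
 * PCI-to-PCI bridges but do not report themselves as transparent; mark
 * them explicitly so child resources can be allocated from the parent bus.
 */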
static void pci_fixup_transparent_bridge(struct pci_dev *dev)
{
	if ((dev->device & 0xff00) == 0x2400)
		dev->transparent = 1;
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			       PCI_CLASS_BRIDGE_PCI, 8, pci_fixup_transparent_bridge);
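
/*
 * Fixup for C1 Halt Disconnect problem on nForce2 systems.
 *
 * From information provided by "Allen Martin <AMartin@nvidia.com>":
 *
 * A hang is caused when the CPU generates a very fast CONNECT/HALT cycle
 * sequence.  Workaround is to set the SYSTEM_IDLE_TIMEOUT to 80 ns.
 * This allows the state machine and timer to return to a proper state within
 * 80 ns of the CONNECT and probe appearing together.  Since the CPU will not
 * issue another HALT within 80 ns of the initial HALT, the failure condition
 * is avoided.
 */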
static void pci_fixup_nforce2(struct pci_dev *dev)
{
	u32 val;
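
	/*
	 * Chip  Old value   New value
	 * C17   0x1F0FFF01  0x1F01FF01
	 * C18D  0x9F0FFF01  0x9F01FF01
	 *
	 * Northbridge chip version may be determined by
	 * reading the PCI revision ID (0xC1 or greater is C18D).
	 */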
	pci_read_config_dword(dev, 0x6c, &val);
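
	/*
	 * Apply fixup if needed, but don't touch the disconnect state.
	 */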
	if ((val & 0x00FF0000) != 0x00010000) {
		dev_warn(&dev->dev, "nForce2 C1 Halt Disconnect fixup\n");
		pci_write_config_dword(dev, 0x6c, (val & 0xFF00FFFF) | 0x00010000);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2, pci_fixup_nforce2);
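
/* Max PCI Express root ports */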
#define MAX_PCIEROOT	6
static int quirk_aspm_offset[MAX_PCIEROOT << 3];

#define GET_INDEX(a, b)	((((a) - PCI_DEVICE_ID_INTEL_MCH_PA) << 3) + ((b) & 7))

static int quirk_pcie_aspm_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
			    devfn, where, size, value);
}
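
/*
 * Replace the original pci bus ops for write with a new one that will filter
 * the request to ensure ASPM cannot be enabled.
 */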
static int quirk_pcie_aspm_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
	u8 offset;

	offset = quirk_aspm_offset[GET_INDEX(bus->self->device, devfn)];

	if ((offset) && (where == offset))
		value = value & ~PCI_EXP_LNKCTL_ASPMC;

	return raw_pci_write(pci_domain_nr(bus), bus->number,
			     devfn, where, size, value);
}

static struct pci_ops quirk_pcie_aspm_ops = {
	.read = quirk_pcie_aspm_read,
	.write = quirk_pcie_aspm_write,
};
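
/**
 * pcie_rootport_aspm_quirk - disallow ASPM on devices below affected root ports
 * @pdev: Intel MCH PCI Express root port
 *
 * Intercepts config-space writes to the Link Control register of devices
 * behind the affected root ports so the ASPM control bits can never be set,
 * since enabling ASPM there is known to cause hangs.
 */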
static void pcie_rootport_aspm_quirk(struct pci_dev *pdev)
{
	int i;
	struct pci_bus *pbus;
	struct pci_dev *dev;

	if ((pbus = pdev->subordinate) == NULL)
		return;
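
	/*
	 * Check if the DID of pdev matches one of the six root port IDs.
	 */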
	if ((pdev->device < PCI_DEVICE_ID_INTEL_MCH_PA) ||
	    (pdev->device > PCI_DEVICE_ID_INTEL_MCH_PC1))
		return;

	if (list_empty(&pbus->devices)) {
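		/*
		 * If no device is attached to the root port at power-up or
		 * after hot-remove, the pbus->devices list is empty: reset
		 * the saved offsets to zero and restore the parent's
		 * unmodified bus ops.
		 */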
		for (i = GET_INDEX(pdev->device, 0); i <= GET_INDEX(pdev->device, 7); ++i)
			quirk_aspm_offset[i] = 0;

		pci_bus_set_ops(pbus, pbus->parent->ops);
	} else {
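		/*
		 * If devices are attached to the root port at power-up or
		 * after hot-add, save the Link Control register offset of
		 * each device and replace the bus ops so writes can be
		 * filtered.
		 */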
		list_for_each_entry(dev, &pbus->devices, bus_list)
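			/* There are 0 to 8 devices attached to this bus */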
			quirk_aspm_offset[GET_INDEX(pdev->device, dev->devfn)] =
				dev->pcie_cap + PCI_EXP_LNKCTL;

		pci_bus_set_ops(pbus, &quirk_pcie_aspm_ops);
		dev_info(&pbus->dev, "writes to ASPM control bits will be ignored\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PA1, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PB1, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC, pcie_rootport_aspm_quirk);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MCH_PC1, pcie_rootport_aspm_quirk);
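
/*
 * Fixup to mark boot BIOS video selected by BIOS before it changes
 *
 * From information provided by "Jon Smirl" <jonsmirl@gmail.com>
 *
 * The standard boot ROM sequence for an x86 machine uses the BIOS
 * to select an initial video card for boot display.  This boot video
 * card will have its BIOS copied to 0xC0000 in system RAM.
 * IORESOURCE_ROM_SHADOW is used to associate the boot video card
 * with this copy.  On laptops this copy has to be used since the main
 * ROM may be compressed or combined with another image.  See
 * pci_map_rom() for use of this flag.  Before marking the device with
 * IORESOURCE_ROM_SHADOW, check if a vga_default_device is already set
 * by either arch code or vga-arbitration; if so, only apply the fixup
 * to this already-determined primary video card.
 */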
static void pci_fixup_video(struct pci_dev *pdev)
{
	struct pci_dev *bridge;
	struct pci_bus *bus;
	u16 config;
	struct resource *res;

	/* Is VGA routed to us? */
	bus = pdev->bus;
	while (bus) {
		bridge = bus->self;
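
		/*
		 * From information provided by
		 * "David Miller <davem@davemloft.net>"
		 * The bridge control register is valid for PCI header
		 * type BRIDGE, or CARDBUS.  Host to PCI controllers use
		 * PCI header type NORMAL.
		 */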
		if (bridge && (pci_is_bridge(bridge))) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &config);
			if (!(config & PCI_BRIDGE_CTL_VGA))
				return;
		}
		bus = bus->parent;
	}
	if (!vga_default_device() || pdev == vga_default_device()) {
		pci_read_config_word(pdev, PCI_COMMAND, &config);
		if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			res = &pdev->resource[PCI_ROM_RESOURCE];

			pci_disable_rom(pdev);
			if (res->parent)
				release_resource(res);

			res->start = 0xC0000;
			res->end = res->start + 0x20000 - 1;
			res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
				     IORESOURCE_PCI_FIXED;
			dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n",
				 res);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			      PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);

static const struct dmi_system_id msi_k8t_dmi_table[] = {
	{
		.ident = "MSI-K8T-Neo2Fir",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "MSI"),
			DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"),
		},
	},
	{}
};
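
/*
 * On the MSI K8T Neo2-FIR board, the BIOS disables the onboard sound card
 * when an add-on PCI sound card is present; it only offers "DISABLED" and
 * "AUTO" as options.  Users may want both cards, so clear bit 0x40 of
 * config register 0x50 on the VIA 8237 southbridge to re-enable the
 * onboard sound device.
 */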
static void pci_fixup_msi_k8t_onboard_sound(struct pci_dev *dev)
{
	unsigned char val;

	if (!dmi_check_system(msi_k8t_dmi_table))
		return;

	pci_read_config_byte(dev, 0x50, &val);
	if (val & 0x40) {
		pci_write_config_byte(dev, 0x50, val & (~0x40));
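
		/* verify the change for status output */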
		pci_read_config_byte(dev, 0x50, &val);
		if (val & 0x40)
			dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; can't enable onboard soundcard!\n");
		else
			dev_info(&dev->dev, "Detected MSI K8T Neo2-FIR; enabled onboard soundcard\n");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
			 pci_fixup_msi_k8t_onboard_sound);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
			 pci_fixup_msi_k8t_onboard_sound);
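
/*
 * Some Toshiba laptops need extra code to enable their TI TSB43AB22/A.
 *
 * We pretend to bring them out of full D3 state, and restore the proper
 * IRQ, PCI cache line size, and BARs, otherwise the device won't function
 * properly.  In some cases, the device will generate an interrupt on
 * the wrong IRQ line, causing any devices sharing the line it's
 * *supposed* to use to be disabled by the kernel's IRQ debug code.
 */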
static u16 toshiba_line_size;

static const struct dmi_system_id toshiba_ohci1394_dmi_table[] = {
	{
		.ident = "Toshiba PS5 based laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "PS5"),
		},
	},
	{
		.ident = "Toshiba PSM4 based laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "PSM4"),
		},
	},
	{
		.ident = "Toshiba A40 based laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"),
		},
	},
	{ }
};

static void pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev)
{
	if (!dmi_check_system(toshiba_ohci1394_dmi_table))
		return;

	dev->current_state = PCI_D3cold;
	pci_read_config_word(dev, PCI_CACHE_LINE_SIZE, &toshiba_line_size);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0x8032,
			 pci_pre_fixup_toshiba_ohci1394);

static void pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev)
{
	if (!dmi_check_system(toshiba_ohci1394_dmi_table))
		return;
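
	/* Restore config space on Toshiba laptops */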
	pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, toshiba_line_size);
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, (u8 *)&dev->irq);
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
			       pci_resource_start(dev, 0));
	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
			       pci_resource_start(dev, 1));
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_TI, 0x8032,
			 pci_post_fixup_toshiba_ohci1394);
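
/*
 * Prevent the BIOS trapping accesses to the Cyrix CS5530A video device
 * configuration space.
 */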
static void pci_early_fixup_cyrix_5530(struct pci_dev *dev)
{
	u8 r;
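
	/* clear 'F4 Video Configuration Trap' bit */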
	pci_read_config_byte(dev, 0x42, &r);
	r &= 0xfd;
	pci_write_config_byte(dev, 0x42, r);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
			pci_early_fixup_cyrix_5530);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY,
			 pci_early_fixup_cyrix_5530);
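
/*
 * Siemens Nixdorf AG FSC Multiprocessor Interrupt Controller:
 * prevent update of the BAR0, which doesn't look like a normal BAR.
 */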
static void pci_siemens_interrupt_controller(struct pci_dev *dev)
{
	dev->resource[0].flags |= IORESOURCE_PCI_FIXED;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
			 pci_siemens_interrupt_controller);
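
/*
 * SB600: Disable BAR1 on device 14.0 to eliminate a BIOS workaround
 * that confuses the PCI engine.
 */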
static void sb600_disable_hpet_bar(struct pci_dev *dev)
{
	u8 val;
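
	/*
	 * The SB600 and SB700 both share the same device ID, but the PM
	 * register 0x55 does something different for the SB700, so make
	 * sure we are dealing with the SB600 before touching the bit:
	 */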
	pci_read_config_byte(dev, 0x08, &val);

	if (val < 0x2F) {
		outb(0x55, 0xCD6);
		val = inb(0xCD7);
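
		/* Set bit 7 in PM register 0x55 */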
		outb(0x55, 0xCD6);
		outb(val | 0x80, 0xCD7);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, 0x4385, sb600_disable_hpet_bar);

#ifdef CONFIG_HPET_TIMER
static void sb600_hpet_quirk(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[1];

	if (r->flags & IORESOURCE_MEM && r->start == hpet_address) {
		r->flags |= IORESOURCE_PCI_FIXED;
		dev_info(&dev->dev, "reg 0x14 contains HPET; making it immovable\n");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, 0x4385, sb600_hpet_quirk);
#endif
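
/*
 * Twinhead H12Y needs us to block out a region otherwise we map devices
 * there and any access kills the box.
 *
 *   See: https://bugzilla.kernel.org/show_bug.cgi?id=10231
 *
 * Match off the LPC and svid/sdid (older kernels lose the bridge subvendor)
 */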
static void twinhead_reserve_killing_zone(struct pci_dev *dev)
{
	if (dev->subsystem_vendor == 0x14FF && dev->subsystem_device == 0xA003) {
		pr_info("Reserving memory on Twinhead H12Y\n");
		request_mem_region(0xFFB00000, 0x100000, "twinhead");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x27B9, twinhead_reserve_killing_zone);
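
/*
 * Device [8086:2fc0]
 * Erratum HSE43
 * CONFIG_TDP_NOMINAL CSR Implemented at Incorrect Offset
 * https://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v3-spec-update.html
 *
 * Devices [8086:6f60,6fa0,6fc0]
 * Erratum BDF2
 * PCI BARs in the Home Agent Will Return Non-Zero Values During Enumeration
 * https://www.intel.com/content/www/us/en/processors/xeon/xeon-e5-v4-spec-update.html
 */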
static void pci_invalid_bar(struct pci_dev *dev)
{
	dev->non_compliant_bars = 1;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6f60, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fa0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ec, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa1ed, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26c, pci_invalid_bar);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0xa26d, pci_invalid_bar);
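
/*
 * Device [1022:7808]
 * 23. USB Wake on Connect/Disconnect with Low Speed Devices
 * https://support.amd.com/TechDocs/46837.pdf
 * Appendix A2
 * https://support.amd.com/TechDocs/42413.pdf
 */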
static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
{
	dev_info(&dev->dev, "PME# does not work under D3, disabling it\n");
	dev->pme_support &= ~((PCI_PM_CAP_PME_D3 | PCI_PM_CAP_PME_D3cold)
		>> PCI_PM_CAP_PME_SHIFT);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
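
/*
 * Device [1022:7914]
 * When in D0, PME# doesn't get asserted when plugging a USB 2.0 device.
 */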
static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev)
{
	dev_info(&dev->dev, "PME# does not work under D0, disabling it\n");
	dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme);
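
/*
 * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
 *
 * Using the [mem 0x7fa00000-0x7fbfffff] region, e.g., by assigning it to
 * the 00:1c.0 Root Port, causes a conflict with [io 0x1804], which is used
 * for soft poweroff and suspend-to-RAM.
 *
 * As far as we know, this is related to the address space, not to the Root
 * Port itself.  Attaching the quirk to the Root Port is a convenience, but
 * it could probably also be a standalone DMI quirk.
 *
 * https://bugzilla.kernel.org/show_bug.cgi?id=103211
 */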
static void quirk_apple_mbp_poweroff(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;

	if ((!dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,4") &&
	     !dmi_match(DMI_PRODUCT_NAME, "MacBookPro11,5")) ||
	    pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x1c, 0))
		return;

	res = request_mem_region(0x7fa00000, 0x200000,
				 "MacBook Pro poweroff workaround");
	if (res)
		dev_info(dev, "claimed %s %pR\n", res->name, res);
	else
		dev_info(dev, "can't work around MacBook Pro poweroff issue\n");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8c10, quirk_apple_mbp_poweroff);
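
/*
 * VMD-enabled root ports will change the source ID for all messages
 * to the VMD device.  Rather than doing device matching with the source
 * ID, the AER driver should traverse the child device tree, reading
 * AER registers to find the faulting device.
 */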
static void quirk_no_aersid(struct pci_dev *pdev)
{
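	/* VMD Domain */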
	if (is_vmd(pdev->bus) && pci_is_root_bus(pdev->bus))
		pdev->bus->bus_flags |= PCI_BUS_FLAGS_NO_AERSID;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_no_aersid);

static void quirk_intel_th_dnv(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[4];
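
	/*
	 * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
	 * appears to be 4 MB in reality.
	 */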
	if (r->end == r->start + 0x7ff) {
		r->start = 0;
		r->end   = 0x3fffff;
		r->flags |= IORESOURCE_UNSET;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);

#ifdef CONFIG_PHYS_ADDR_T_64BIT

#define AMD_141b_MMIO_BASE(x)			(0x80 + (x) * 0x8)
#define AMD_141b_MMIO_BASE_RE_MASK		BIT(0)
#define AMD_141b_MMIO_BASE_WE_MASK		BIT(1)
#define AMD_141b_MMIO_BASE_MMIOBASE_MASK	GENMASK(31, 8)

#define AMD_141b_MMIO_LIMIT(x)			(0x84 + (x) * 0x8)
#define AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK	GENMASK(31, 8)

#define AMD_141b_MMIO_HIGH(x)			(0x180 + (x) * 0x4)
#define AMD_141b_MMIO_HIGH_MMIOBASE_MASK	GENMASK(7, 0)
#define AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT	16
#define AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK	GENMASK(23, 16)
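
/*
 * Some BIOSes on AMD family 15h/16h host bridges do not set up a 64-bit
 * MMIO window for devices with large BARs.  When the user asks for it via
 * pci=big_root_window, program a spare host-bridge MMIO routing slot with
 * a 256 GB window directly below the 0xfd00000000 hardware limit and
 * expose it as a root bus resource.
 */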
static void pci_amd_enable_64bit_bar(struct pci_dev *dev)
{
	static const char *name = "PCI Bus 0000:00";
	struct resource *res, *conflict;
	u32 base, limit, high;
	struct pci_dev *other;
	unsigned int i;

	if (!(pci_probe & PCI_BIG_ROOT_WINDOW))
		return;
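
	/* Check that we are the only device of that type */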
	other = pci_get_device(dev->vendor, dev->device, NULL);
	if (other != dev ||
	    (other = pci_get_device(dev->vendor, dev->device, other))) {
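		/* This is a multi-socket system, don't touch it for now */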
		pci_dev_put(other);
		return;
	}

	for (i = 0; i < 8; i++) {
		pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base);
		pci_read_config_dword(dev, AMD_141b_MMIO_HIGH(i), &high);
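
		/* Is this slot free? */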
		if (!(base & (AMD_141b_MMIO_BASE_RE_MASK |
			      AMD_141b_MMIO_BASE_WE_MASK)))
			break;

		base >>= 8;
		base |= high << 24;
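
		/* Abort if a slot already configures a 64bit BAR. */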
		if (base > 0x10000)
			return;
	}
	if (i == 8)
		return;

	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return;
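
	/*
	 * Allocate a 256GB window directly below the 0xfd00000000 hardware
	 * limit (see AMD Family 15h Models 30h-3Fh BKDG, 2.4.6).
	 */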
	res->name = name;
	res->flags = IORESOURCE_PREFETCH | IORESOURCE_MEM |
		IORESOURCE_MEM_64 | IORESOURCE_WINDOW;
	res->start = 0xbd00000000ull;
	res->end = 0xfd00000000ull - 1;

	conflict = request_resource_conflict(&iomem_resource, res);
	if (conflict) {
		kfree(res);
		if (conflict->name != name)
			return;
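
		/* We are resuming from suspend; just reenable the window */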
		res = conflict;
	} else {
		dev_info(&dev->dev, "adding root bus resource %pR (tainting kernel)\n",
			 res);
		add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
		pci_bus_add_resource(dev->bus, res, 0);
	}

	base = ((res->start >> 8) & AMD_141b_MMIO_BASE_MMIOBASE_MASK) |
		AMD_141b_MMIO_BASE_RE_MASK | AMD_141b_MMIO_BASE_WE_MASK;
	limit = ((res->end + 1) >> 8) & AMD_141b_MMIO_LIMIT_MMIOLIMIT_MASK;
	high = ((res->start >> 40) & AMD_141b_MMIO_HIGH_MMIOBASE_MASK) |
		((((res->end + 1) >> 40) << AMD_141b_MMIO_HIGH_MMIOLIMIT_SHIFT)
		 & AMD_141b_MMIO_HIGH_MMIOLIMIT_MASK);

	pci_write_config_dword(dev, AMD_141b_MMIO_HIGH(i), high);
	pci_write_config_dword(dev, AMD_141b_MMIO_LIMIT(i), limit);
	pci_write_config_dword(dev, AMD_141b_MMIO_BASE(i), base);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);

#endif