This source file includes following definitions.
- fixup_debug_start
- fixup_debug_report
- pci_do_fixups
- pci_fixup_device
- pci_apply_final_quirks
- quirk_mmio_always_on
- quirk_mellanox_tavor
- quirk_passive_release
- quirk_isa_dma_hangs
- quirk_tigerpoint_bm_sts
- quirk_nopcipci
- quirk_nopciamd
- quirk_triton
- quirk_vialatency
- quirk_viaetbf
- quirk_vsfx
- quirk_alimagik
- quirk_natoma
- quirk_citrine
- quirk_nfp6000
- quirk_extend_bar_to_page
- quirk_s3_64M
- quirk_io
- quirk_cs5536_vsa
- quirk_io_region
- quirk_ati_exploding_mce
- quirk_amd_nl_class
- quirk_synopsys_haps
- quirk_ali7101_acpi
- piix4_io_quirk
- piix4_mem_quirk
- quirk_piix4_acpi
- quirk_ich4_lpc_acpi
- ich6_lpc_acpi_gpio
- ich6_lpc_generic_decode
- quirk_ich6_lpc
- ich7_lpc_generic_decode
- quirk_ich7_lpc
- quirk_vt82c586_acpi
- quirk_vt82c686_acpi
- quirk_vt8235_acpi
- quirk_xio2000a
- quirk_via_ioapic
- quirk_via_vt8237_bypass_apic_deassert
- quirk_amd_ioapic
- quirk_cavium_sriov_rnm_link
- quirk_amd_8131_mmrbc
- quirk_via_acpi
- quirk_via_bridge
- quirk_via_vlink
- quirk_vt82c598_id
- quirk_cardbus_legacy
- quirk_amd_ordering
- quirk_dunord
- quirk_transparent_bridge
- quirk_mediagx_master
- quirk_disable_pxb
- quirk_amd_ide_mode
- quirk_svwks_csb5ide
- quirk_ide_samemode
- quirk_no_ata_d3
- quirk_eisa_bridge
- asus_hides_smbus_hostbridge
- asus_hides_smbus_lpc
- asus_hides_smbus_lpc_ich6_suspend
- asus_hides_smbus_lpc_ich6_resume_early
- asus_hides_smbus_lpc_ich6_resume
- asus_hides_smbus_lpc_ich6
- quirk_sis_96x_smbus
- quirk_sis_503
- asus_hides_ac97_lpc
- quirk_jmicron_ata
- quirk_jmicron_async_suspend
- quirk_alder_ioapic
- quirk_pcie_mch
- quirk_pcie_pxh
- quirk_intel_pcie_pm
- quirk_d3hot_delay
- quirk_radeon_pm
- quirk_ryzen_xhci_d3hot
- dmi_disable_ioapicreroute
- quirk_reroute_to_boot_interrupts_intel
- quirk_disable_intel_boot_interrupt
- quirk_disable_broadcom_boot_interrupt
- quirk_disable_amd_813x_boot_interrupt
- quirk_disable_amd_8111_boot_interrupt
- quirk_tc86c001_ide
- quirk_plx_pci9050
- quirk_netmos
- quirk_e100_interrupt
- quirk_disable_aspm_l0s
- quirk_enable_clear_retrain_link
- fixup_rev1_53c810
- quirk_p64h2_1k_io
- quirk_nvidia_ck804_pcie_aer_ext_cap
- quirk_via_cx700_pci_parking_caching
- quirk_brcm_5719_limit_mrrs
- quirk_unhide_mch_dev6
- quirk_disable_all_msi
- quirk_disable_msi
- quirk_amd_780_apc_msi
- msi_ht_cap_enabled
- quirk_msi_ht_cap
- quirk_nvidia_ck804_msi_ht_cap
- ht_enable_msi_mapping
- nvenet_msi_disable
- pci_quirk_nvidia_tegra_disable_rp_msi
- nvbridge_check_legacy_irq_routing
- ht_check_msi_mapping
- host_bridge_with_leaf
- is_end_of_ht_chain
- nv_ht_enable_msi_mapping
- ht_disable_msi_mapping
- __nv_msi_ht_cap_quirk
- nv_msi_ht_cap_quirk_all
- nv_msi_ht_cap_quirk_leaf
- quirk_msi_intx_disable_bug
- quirk_msi_intx_disable_ati_bug
- quirk_msi_intx_disable_qca_bug
- quirk_al_msi_disable
- quirk_hotplug_bridge
- ricoh_mmc_fixup_rl5c476
- ricoh_mmc_fixup_r5c832
- vtd_mask_spec_errors
- fixup_ti816x_class
- fixup_mpss_256
- quirk_intel_mc_errata
- quirk_intel_ntb
- disable_igfx_irq
- quirk_remove_d3_delay
- quirk_broken_intx_masking
- mellanox_check_broken_intx_masking
- quirk_no_bus_reset
- quirk_no_pm_reset
- quirk_thunderbolt_hotplug_msi
- quirk_apple_poweroff_thunderbolt
- quirk_apple_wait_for_thunderbolt
- reset_intel_82599_sfp_virtfn
- reset_ivb_igd
- reset_chelsio_generic_dev
- nvme_disable_and_flr
- delay_250ms_after_flr
- pci_dev_specific_reset
- quirk_dma_func0_alias
- quirk_dma_func1_alias
- quirk_fixed_dma_alias
- quirk_use_pcie_bridge_dma_alias
- quirk_mic_x200_dma_alias
- quirk_pex_vca_alias
- quirk_bridge_cavm_thrx2_pcie_root
- quirk_tw686x_class
- quirk_relaxedordering_disable
- quirk_disable_root_port_attributes
- quirk_chelsio_T5_disable_root_port_attributes
- pci_acs_ctrl_enabled
- pci_quirk_zhaoxin_pcie_ports_acs
- pci_quirk_amd_sb_acs
- pci_quirk_cavium_acs_match
- pci_quirk_cavium_acs
- pci_quirk_xgene_acs
- pci_quirk_intel_pch_acs_match
- pci_quirk_intel_pch_acs
- pci_quirk_qcom_rp_acs
- pci_quirk_al_acs
- pci_quirk_intel_spt_pch_acs_match
- pci_quirk_intel_spt_pch_acs
- pci_quirk_mf_endpoint_acs
- pci_quirk_brcm_acs
- pci_dev_specific_acs_enabled
- pci_quirk_enable_intel_lpc_acs
- pci_quirk_enable_intel_rp_mpc_acs
- pci_quirk_enable_intel_pch_acs
- pci_quirk_enable_intel_spt_pch_acs
- pci_quirk_disable_intel_spt_pch_acs_redir
- pci_dev_specific_enable_acs
- pci_dev_specific_disable_acs_redir
- quirk_intel_qat_vf_cap
- quirk_intel_no_flr
- quirk_no_ext_tags
- quirk_amd_harvest_no_ats
- quirk_fsl_no_msi
- pci_create_device_link
- quirk_gpu_hda
- quirk_gpu_usb
- quirk_gpu_usb_typec_ucsi
- quirk_nvidia_hda
- pci_idt_bus_quirk
- quirk_switchtec_ntb_dma_alias
- quirk_plx_ntb_dma_alias
- quirk_reset_lenovo_thinkpad_p50_nvgpu
- pci_fixup_no_d0_pme
- apex_pci_fixup_class
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15 #include <linux/types.h>
16 #include <linux/kernel.h>
17 #include <linux/export.h>
18 #include <linux/pci.h>
19 #include <linux/init.h>
20 #include <linux/delay.h>
21 #include <linux/acpi.h>
22 #include <linux/dmi.h>
23 #include <linux/ioport.h>
24 #include <linux/sched.h>
25 #include <linux/ktime.h>
26 #include <linux/mm.h>
27 #include <linux/nvme.h>
28 #include <linux/platform_data/x86/apple.h>
29 #include <linux/pm_runtime.h>
30 #include <linux/switchtec.h>
31 #include <asm/dma.h>
32 #include "pci.h"
33
/*
 * Log which fixup hook is about to run (only when booted with
 * initcall_debug) and return a start timestamp for fixup_debug_report().
 * The message is printed before taking the timestamp so printk time is
 * not charged to the hook.
 */
static ktime_t fixup_debug_start(struct pci_dev *dev,
				 void (*fn)(struct pci_dev *dev))
{
	if (initcall_debug)
		pci_info(dev, "calling %pS @ %i\n", fn, task_pid_nr(current));

	return ktime_get();
}
42
43 static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime,
44 void (*fn)(struct pci_dev *dev))
45 {
46 ktime_t delta, rettime;
47 unsigned long long duration;
48
49 rettime = ktime_get();
50 delta = ktime_sub(rettime, calltime);
51 duration = (unsigned long long) ktime_to_ns(delta) >> 10;
52 if (initcall_debug || duration > 10000)
53 pci_info(dev, "%pS took %lld usecs\n", fn, duration);
54 }
55
/*
 * Run every fixup in the table slice [f, end) that matches @dev.
 * An entry matches when its class equals dev->class shifted right by the
 * entry's class_shift (or is PCI_ANY_ID), and vendor/device match or are
 * the PCI_ANY_ID wildcard.  Each matching hook is timed via the
 * fixup_debug_start()/fixup_debug_report() pair.
 */
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
			  struct pci_fixup *end)
{
	ktime_t calltime;

	for (; f < end; f++)
		if ((f->class == (u32) (dev->class >> f->class_shift) ||
		     f->class == (u32) PCI_ANY_ID) &&
		    (f->vendor == dev->vendor ||
		     f->vendor == (u16) PCI_ANY_ID) &&
		    (f->device == dev->device ||
		     f->device == (u16) PCI_ANY_ID)) {
			void (*hook)(struct pci_dev *dev);
#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
			/* Hook is stored as a 32-bit self-relative offset */
			hook = offset_to_ptr(&f->hook_offset);
#else
			hook = f->hook;
#endif
			calltime = fixup_debug_start(dev, hook);
			hook(dev);
			fixup_debug_report(dev, calltime, hook);
		}
}
79
80 extern struct pci_fixup __start_pci_fixups_early[];
81 extern struct pci_fixup __end_pci_fixups_early[];
82 extern struct pci_fixup __start_pci_fixups_header[];
83 extern struct pci_fixup __end_pci_fixups_header[];
84 extern struct pci_fixup __start_pci_fixups_final[];
85 extern struct pci_fixup __end_pci_fixups_final[];
86 extern struct pci_fixup __start_pci_fixups_enable[];
87 extern struct pci_fixup __end_pci_fixups_enable[];
88 extern struct pci_fixup __start_pci_fixups_resume[];
89 extern struct pci_fixup __end_pci_fixups_resume[];
90 extern struct pci_fixup __start_pci_fixups_resume_early[];
91 extern struct pci_fixup __end_pci_fixups_resume_early[];
92 extern struct pci_fixup __start_pci_fixups_suspend[];
93 extern struct pci_fixup __end_pci_fixups_suspend[];
94 extern struct pci_fixup __start_pci_fixups_suspend_late[];
95 extern struct pci_fixup __end_pci_fixups_suspend_late[];
96
97 static bool pci_apply_fixup_final_quirks;
98
/*
 * pci_fixup_device - run all fixups of the given pass for one device
 * @pass: which fixup phase (early/header/final/enable/resume/suspend...)
 * @dev:  the device the fixups are matched against
 *
 * Selects the linker-section table for @pass and hands it to
 * pci_do_fixups().  Final-pass fixups are suppressed until
 * pci_apply_final_quirks() has set pci_apply_fixup_final_quirks, so
 * they run exactly once at the right point in boot.
 */
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
{
	struct pci_fixup *start, *end;

	switch (pass) {
	case pci_fixup_early:
		start = __start_pci_fixups_early;
		end = __end_pci_fixups_early;
		break;

	case pci_fixup_header:
		start = __start_pci_fixups_header;
		end = __end_pci_fixups_header;
		break;

	case pci_fixup_final:
		if (!pci_apply_fixup_final_quirks)
			return;
		start = __start_pci_fixups_final;
		end = __end_pci_fixups_final;
		break;

	case pci_fixup_enable:
		start = __start_pci_fixups_enable;
		end = __end_pci_fixups_enable;
		break;

	case pci_fixup_resume:
		start = __start_pci_fixups_resume;
		end = __end_pci_fixups_resume;
		break;

	case pci_fixup_resume_early:
		start = __start_pci_fixups_resume_early;
		end = __end_pci_fixups_resume_early;
		break;

	case pci_fixup_suspend:
		start = __start_pci_fixups_suspend;
		end = __end_pci_fixups_suspend;
		break;

	case pci_fixup_suspend_late:
		start = __start_pci_fixups_suspend_late;
		end = __end_pci_fixups_suspend_late;
		break;

	default:
		/* stupid compiler warning, you would think with an enum... */
		return;
	}
	pci_do_fixups(dev, start, end);
}
EXPORT_SYMBOL(pci_fixup_device);
153
/*
 * Run the "final" pass of quirks over every PCI device, then settle on a
 * system-wide cache line size: if pci_cache_line_size was not set by the
 * arch, adopt the value all devices agree on, or fall back to
 * pci_dfl_cache_line_size on any mismatch.  Registered as an fs-level
 * initcall so it runs after device enumeration.
 */
static int __init pci_apply_final_quirks(void)
{
	struct pci_dev *dev = NULL;
	u8 cls = 0;
	u8 tmp;

	if (pci_cache_line_size)
		pr_info("PCI: CLS %u bytes\n", pci_cache_line_size << 2);

	pci_apply_fixup_final_quirks = true;
	for_each_pci_dev(dev) {
		pci_fixup_device(pci_fixup_final, dev);

		/*
		 * If arch hasn't set it explicitly yet, check all devices
		 * for a consistent CLS value (CLS registers hold dwords,
		 * hence the << 2 when printing bytes).
		 */
		if (!pci_cache_line_size) {
			pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp);
			if (!cls)
				cls = tmp;
			if (!tmp || cls == tmp)
				continue;

			/* Devices disagree: use the safe default instead */
			pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n",
			         cls << 2, tmp << 2,
			         pci_dfl_cache_line_size << 2);
			pci_cache_line_size = pci_dfl_cache_line_size;
		}
	}

	if (!pci_cache_line_size) {
		pr_info("PCI: CLS %u bytes, default %u\n", cls << 2,
			pci_dfl_cache_line_size << 2);
		pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
	}

	return 0;
}
fs_initcall_sync(pci_apply_final_quirks);
194
195
196
197
198
199
200
/*
 * Decoding should be disabled while sizing BARs, but some host bridges
 * misbehave when MMIO decoding is turned off; keep it always on for
 * host-bridge class devices.
 */
static void quirk_mmio_always_on(struct pci_dev *dev)
{
	dev->mmio_always_on = 1;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on);
207
208
209
210
211
212
/*
 * Mellanox Tavor devices report false-positive parity errors; mark the
 * parity status as broken so the core ignores them.
 */
static void quirk_mellanox_tavor(struct pci_dev *dev)
{
	dev->broken_parity_status = 1;	/* This device gives false positives */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, quirk_mellanox_tavor);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, quirk_mellanox_tavor);
219
220
221
222
223
/*
 * Seen on an Intel 82441 (Natoma) host bridge: enable "Passive Release"
 * (bit 1 of config register 0x82) on every 82371SB (PIIX3) ISA bridge in
 * the system.  pci_get_device() in the loop drops the previous reference
 * and takes a new one, so no explicit pci_dev_put() is needed.
 */
static void quirk_passive_release(struct pci_dev *dev)
{
	struct pci_dev *d = NULL;
	unsigned char dlc;

	while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) {
		pci_read_config_byte(d, 0x82, &dlc);
		if (!(dlc & 1<<1)) {
			pci_info(d, "PIIX3: Enabling Passive Release\n");
			dlc |= 1<<1;
			pci_write_config_byte(d, 0x82, dlc);
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release);
244
245
246
247
248
249
250
251
252
/*
 * Some chipsets hang if ISA DMA and PCI activity overlap; set the global
 * isa_dma_bridge_buggy flag (once) so the DMA code applies workarounds.
 */
static void quirk_isa_dma_hangs(struct pci_dev *dev)
{
	if (!isa_dma_bridge_buggy) {
		isa_dma_bridge_buggy = 1;
		pci_info(dev, "Activating ISA DMA hang workarounds\n");
	}
}

/* Known-affected ISA bridges (VIA, Intel PIIX3, ALi, NEC C-bus) */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs);
271
272
273
274
275
/*
 * Intel Tiger Point LPC: a stuck BM_STS (bit 4) in the PM1a status port
 * prevents entry into deeper C-states.  Read the PM base from config
 * offset 0x40 (low bits masked off to get the I/O base) and, if BM_STS
 * is set, write it back — the bit appears to be write-1-to-clear.
 */
static void quirk_tigerpoint_bm_sts(struct pci_dev *dev)
{
	u32 pmbase;
	u16 pm1a;

	pci_read_config_dword(dev, 0x40, &pmbase);
	pmbase = pmbase & 0xff80;	/* strip enable/flag bits from the base */
	pm1a = inw(pmbase);

	if (pm1a & 0x10) {	/* BM_STS set? */
		pci_info(dev, FW_BUG "TigerPoint LPC.BM_STS cleared\n");
		outw(0x10, pmbase);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts);
291
292
293 static void quirk_nopcipci(struct pci_dev *dev)
294 {
295 if ((pci_pci_problems & PCIPCI_FAIL) == 0) {
296 pci_info(dev, "Disabling direct PCI/PCI transfers\n");
297 pci_pci_problems |= PCIPCI_FAIL;
298 }
299 }
300 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci);
301 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci);
302
303 static void quirk_nopciamd(struct pci_dev *dev)
304 {
305 u8 rev;
306 pci_read_config_byte(dev, 0x08, &rev);
307 if (rev == 0x13) {
308
309 pci_info(dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n");
310 pci_pci_problems |= PCIAGP_FAIL;
311 }
312 }
313 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd);
314
315
316 static void quirk_triton(struct pci_dev *dev)
317 {
318 if ((pci_pci_problems&PCIPCI_TRITON) == 0) {
319 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
320 pci_pci_problems |= PCIPCI_TRITON;
321 }
322 }
323 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton);
324 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton);
325 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton);
326 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton);
327
328
329
330
331
332
333
334
335
336
337
/*
 * VIA northbridge (8363/8371/8361) latency workaround.  Only applied
 * when paired with an affected southbridge: a VT82C686 at revision
 * 0x40-0x42, or a VT8231 at revision 0x10-0x12.  For those combinations,
 * adjust the bus arbitration register at config offset 0x76:
 * clear bit 5 and set bit 4.
 */
static void quirk_vialatency(struct pci_dev *dev)
{
	struct pci_dev *p;
	u8 busarb;

	/* Is an affected VT82C686 southbridge present? */
	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL);
	if (p != NULL) {
		/* Only revisions 0x40..0x42 of the 686 need the fix */
		if (p->revision < 0x40 || p->revision > 0x42)
			goto exit;
	} else {
		/* Otherwise look for an affected VT8231 (revs 0x10..0x12) */
		p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL);
		if (p == NULL)	/* No problem parts */
			goto exit;

		if (p->revision < 0x10 || p->revision > 0x12)
			goto exit;
	}

	/*
	 * Rework the bus arbitration setting on the northbridge
	 * (config reg 0x76): clear bit 5, set bit 4.
	 */
	pci_read_config_byte(dev, 0x76, &busarb);

	busarb &= ~(1<<5);
	busarb |= (1<<4);
	pci_write_config_byte(dev, 0x76, busarb);
	pci_info(dev, "Applying VIA southbridge workaround\n");
exit:
	pci_dev_put(p);	/* free(NULL)-style: safe when p is NULL */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
/* Must restore this on a resume from RAM */
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency);
399
400
401 static void quirk_viaetbf(struct pci_dev *dev)
402 {
403 if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) {
404 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
405 pci_pci_problems |= PCIPCI_VIAETBF;
406 }
407 }
408 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf);
409
410 static void quirk_vsfx(struct pci_dev *dev)
411 {
412 if ((pci_pci_problems&PCIPCI_VSFX) == 0) {
413 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
414 pci_pci_problems |= PCIPCI_VSFX;
415 }
416 }
417 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx);
418
419
420
421
422
423
424 static void quirk_alimagik(struct pci_dev *dev)
425 {
426 if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) {
427 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
428 pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON;
429 }
430 }
431 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik);
432 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik);
433
434
435 static void quirk_natoma(struct pci_dev *dev)
436 {
437 if ((pci_pci_problems&PCIPCI_NATOMA) == 0) {
438 pci_info(dev, "Limiting direct PCI/PCI transfers\n");
439 pci_pci_problems |= PCIPCI_NATOMA;
440 }
441 }
442 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma);
443 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma);
444 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma);
445 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma);
446 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma);
447 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma);
448
449
450
451
452
/*
 * IBM Citrine: accesses to config space above offset 0xA0 misbehave,
 * so cap the visible config space size.
 */
static void quirk_citrine(struct pci_dev *dev)
{
	dev->cfg_size = 0xA0;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine);
458
459
460
461
462
/*
 * Netronome NFP4000/NFP5000/NFP6000: limit config space to 0x600 bytes;
 * accesses beyond that are problematic on these parts.
 */
static void quirk_nfp6000(struct pci_dev *dev)
{
	dev->cfg_size = 0x600;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, quirk_nfp6000);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000);
471
472
/*
 * IBM 0x034a: expand any memory BAR smaller than a page to a full page
 * and mark it IORESOURCE_UNSET so the core reassigns it; sub-page BARs
 * on this device cause problems when sharing a page with neighbors.
 */
static void quirk_extend_bar_to_page(struct pci_dev *dev)
{
	int i;

	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
		struct resource *r = &dev->resource[i];

		if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
			r->end = PAGE_SIZE - 1;
			r->start = 0;
			r->flags |= IORESOURCE_UNSET;	/* force reallocation */
			pci_info(dev, "expanded BAR %d to page size: %pR\n",
				 i, r);
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
490
491
492
493
494
/*
 * S3 868/968: BAR 0 must be a 64MB region aligned on a 64MB boundary.
 * If it is not, zero it and mark IORESOURCE_UNSET so the core assigns
 * a properly aligned 64MB window.
 */
static void quirk_s3_64M(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[0];

	if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) {
		r->flags |= IORESOURCE_UNSET;
		r->start = 0;
		r->end = 0x3ffffff;	/* 64MB - 1 */
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M);
507
508 static void quirk_io(struct pci_dev *dev, int pos, unsigned size,
509 const char *name)
510 {
511 u32 region;
512 struct pci_bus_region bus_region;
513 struct resource *res = dev->resource + pos;
514
515 pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), ®ion);
516
517 if (!region)
518 return;
519
520 res->name = pci_name(dev);
521 res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK;
522 res->flags |=
523 (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN);
524 region &= ~(size - 1);
525
526
527 bus_region.start = region;
528 bus_region.end = region + size - 1;
529 pcibios_bus_to_resource(dev->bus, res, &bus_region);
530
531 pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n",
532 name, PCI_BASE_ADDRESS_0 + (pos << 2), res);
533 }
534
535
536
537
538
539
540
541
542
543
/*
 * AMD CS5536 ISA bridge reports an incorrect header: BARs 0-2 are I/O
 * regions of 8, 256 and 64 bytes respectively.  If BAR 0's length is
 * wrong (not 8), rebuild all three via quirk_io().
 */
static void quirk_cs5536_vsa(struct pci_dev *dev)
{
	static char *name = "CS5536 ISA bridge";

	if (pci_resource_len(dev, 0) != 8) {
		quirk_io(dev, 0,   8, name);	/* SMB */
		quirk_io(dev, 1, 256, name);	/* GPIO */
		quirk_io(dev, 2,  64, name);	/* MFGPT */
		pci_info(dev, "%s bug detected (incorrect header); workaround applied\n",
			 name);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa);
557
558 static void quirk_io_region(struct pci_dev *dev, int port,
559 unsigned size, int nr, const char *name)
560 {
561 u16 region;
562 struct pci_bus_region bus_region;
563 struct resource *res = dev->resource + nr;
564
565 pci_read_config_word(dev, port, ®ion);
566 region &= ~(size - 1);
567
568 if (!region)
569 return;
570
571 res->name = pci_name(dev);
572 res->flags = IORESOURCE_IO;
573
574
575 bus_region.start = region;
576 bus_region.end = region + size - 1;
577 pcibios_bus_to_resource(dev->bus, res, &bus_region);
578
579 if (!pci_claim_resource(dev, nr))
580 pci_info(dev, "quirk: %pR claimed by %s\n", res, name);
581 }
582
583
584
585
586
/*
 * ATI RS100 northbridge: reserve the legacy VGA register ports so
 * nothing else touches them (accesses can trigger machine checks).
 */
static void quirk_ati_exploding_mce(struct pci_dev *dev)
{
	pci_info(dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n");
	/* Mae rhaid i ni beidio ag edrych ar y lleoliadiau I/O hyn */
	request_region(0x3b0, 0x0C, "RadeonIGP");
	request_region(0x3d3, 0x01, "RadeonIGP");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce);
595
596
597
598
599
600
601
602
603
604
605
606
/*
 * AMD NL USB controller advertises an XHCI class, but it is actually a
 * USB device controller; override the class so the dwc3 gadget driver
 * binds instead of xhci.
 */
static void quirk_amd_nl_class(struct pci_dev *pdev)
{
	u32 class = pdev->class;

	/* Use "USB Device (not host controller)" class */
	pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
	pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
		 class, pdev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB,
		quirk_amd_nl_class);
618
619
620
621
622
623
624
625
/*
 * Synopsys HAPS USB prototyping platforms expose an XHCI class, but
 * only the listed device IDs are actually USB 3.x device controllers;
 * rewrite their class so the dwc3 driver claims them instead of xhci.
 */
static void quirk_synopsys_haps(struct pci_dev *pdev)
{
	u32 class = pdev->class;

	switch (pdev->device) {
	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3:
	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI:
	case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31:
		pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
		pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
			 class, pdev->class);
		break;
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID,
			       PCI_CLASS_SERIAL_USB_XHCI, 0,
			       quirk_synopsys_haps);
643
644
645
646
647
648
649
650
651
652
653
/*
 * ALi M7101: the ACPI (reg 0xE0, 64 bytes) and SMBus (reg 0xE2, 32
 * bytes) I/O regions are not reported as BARs; claim them explicitly so
 * nothing else gets placed there.
 */
static void quirk_ali7101_acpi(struct pci_dev *dev)
{
	quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI");
	quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi);
660
/*
 * Log a PIIX4 "device resource" I/O window.  The config dword at @port
 * holds the 16-bit base in the low word and 4 mask bits at 16..19; the
 * quirk only reports the window when all bits in @enable are set.  The
 * loop shrinks a power-of-two size (starting at 16) until the size's
 * half-bit is present in the mask, deriving the decoded window size
 * from the mask.  The region is only logged, not reserved.
 */
static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
	u32 devres;
	u32 mask, size, base;

	pci_read_config_dword(dev, port, &devres);
	if ((devres & enable) != enable)
		return;
	mask = (devres >> 16) & 15;
	base = devres & 0xffff;
	size = 16;
	for (;;) {
		unsigned bit = size >> 1;
		if ((bit & mask) == bit)
			break;
		size = bit;
	}
	/*
	 * For now we only print it out. Eventually we'll want to
	 * reserve it, but let's get enough confirmation reports first.
	 */
	base &= -size;	/* align base down to the derived size */
	pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1);
}
686
/*
 * Log a PIIX4 "device resource" memory window.  The config dword at
 * @port holds the base in the high word and 6 mask bits in the low
 * byte (shifted up to align with the base); only reported when all
 * bits in @enable are set.  The loop derives the decoded window size
 * (starting from 128 << 16) from the mask, as in piix4_io_quirk().
 * The region is only logged, not reserved.
 */
static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable)
{
	u32 devres;
	u32 mask, size, base;

	pci_read_config_dword(dev, port, &devres);
	if ((devres & enable) != enable)
		return;
	base = devres & 0xffff0000;
	mask = (devres & 0x3f) << 16;
	size = 128 << 16;
	for (;;) {
		unsigned bit = size >> 1;
		if ((bit & mask) == bit)
			break;
		size = bit;
	}
	/*
	 * For now we only print it out. Eventually we'll want to
	 * reserve it (at least if it's in the 0x1000+ range), but
	 * let's get enough confirmation reports first.
	 */
	base &= -size;	/* align base down to the derived size */
	pci_info(dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1);
}
712
713
714
715
716
717
718
/*
 * PIIX4 ACPI: claim the hidden ACPI (reg 0x40, 64 bytes) and SMBus
 * (reg 0x90, 16 bytes) I/O regions, then report the "device resource"
 * windows B..J.  Resources E/F and G/H are only decoded when the
 * corresponding enable bits (29 and 30) are set in resource A (0x5c).
 */
static void quirk_piix4_acpi(struct pci_dev *dev)
{
	u32 res_a;

	quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI");
	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB");

	/* Device resource A has enables for some of the other ones */
	pci_read_config_dword(dev, 0x5c, &res_a);

	piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21);
	piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21);

	/* Device resource D is just bitfields for static resources */

	/* Device 12 enabled? */
	if (res_a & (1 << 29)) {
		piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20);
		piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7);
	}
	/* Device 13 enabled? */
	if (res_a & (1 << 30)) {
		piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20);
		piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7);
	}
	piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20);
	piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi);
749
/* ICH LPC config-space registers and enable bits used by the quirks below */
#define ICH_PMBASE	0x40		/* ACPI/PM I/O base (ICH4 and ICH6) */
#define ICH_ACPI_CNTL	0x44		/* ACPI control register */
#define ICH4_ACPI_EN	0x10		/* ACPI decode enable bit, ICH4 */
#define ICH6_ACPI_EN	0x80		/* ACPI decode enable bit, ICH6 */
#define ICH4_GPIOBASE	0x58		/* GPIO I/O base, ICH4 */
#define ICH4_GPIO_CNTL	0x5c		/* GPIO control register, ICH4 */
#define ICH4_GPIO_EN	0x10		/* GPIO decode enable bit, ICH4 */
#define ICH6_GPIOBASE	0x48		/* GPIO I/O base, ICH6 */
#define ICH6_GPIO_CNTL	0x4c		/* GPIO control register, ICH6 */
#define ICH6_GPIO_EN	0x10		/* GPIO decode enable bit, ICH6 */
760
761
762
763
764
765
/*
 * ICH4/ICH5-era LPC bridges hide ACPI/GPIO/TCO and GPIO I/O regions in
 * non-BAR config registers; claim them when the corresponding decode
 * enable bits are set so they are visible as resources.
 */
static void quirk_ich4_lpc_acpi(struct pci_dev *dev)
{
	u8 enable;

	/* Claim the 128-byte ACPI/GPIO/TCO block if decoding is enabled */
	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
	if (enable & ICH4_ACPI_EN)
		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
				"ICH4 ACPI/GPIO/TCO");

	/* Claim the 64-byte GPIO block if decoding is enabled */
	pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable);
	if (enable & ICH4_GPIO_EN)
		quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
				"ICH4 GPIO");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi);
797
/*
 * Shared ICH6+ helper: claim the hidden ACPI/GPIO/TCO (128 bytes at
 * ICH_PMBASE) and GPIO (64 bytes at ICH6_GPIOBASE) I/O regions when
 * their decode-enable bits are set.
 */
static void ich6_lpc_acpi_gpio(struct pci_dev *dev)
{
	u8 enable;

	pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable);
	if (enable & ICH6_ACPI_EN)
		quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES,
				"ICH6 ACPI/GPIO/TCO");

	pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable);
	if (enable & ICH6_GPIO_EN)
		quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1,
				"ICH6 GPIO");
}
812
/*
 * Report an ICH6 LPC generic I/O decode range.  The config dword at
 * @reg has an enable in bit 0 and the base in bits 2..15.  @dynsize
 * selects between a dynamically-sized decode window and a fixed
 * 128-byte one.  The range is only logged, not reserved.
 */
static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
				    const char *name, int dynsize)
{
	u32 val;
	u32 size, base;

	pci_read_config_dword(dev, reg, &val);

	/* Enabled? */
	if (!(val & 1))
		return;
	base = val & 0xfffc;
	if (dynsize) {
		/*
		 * NOTE(review): the true dynamic size is presumably
		 * larger in some configurations; 16 is used here as a
		 * conservative lower bound — confirm against the ICH6
		 * datasheet if this ever needs to reserve the range.
		 */
		size = 16;
	} else {
		size = 128;
	}
	base &= ~(size-1);

	/* Just print it out for now. We should reserve it after more debugging */
	pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1);
}
844
/* ICH6 LPC: claim ACPI/GPIO regions and report the generic decodes. */
static void quirk_ich6_lpc(struct pci_dev *dev)
{
	/* Shared ACPI/GPIO decode with all ICH6+ parts */
	ich6_lpc_acpi_gpio(dev);

	/* ICH6-specific generic decodes (decode 2 is dynamically sized) */
	ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0);
	ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc);
856
/*
 * Report an ICH7+ LPC generic I/O decode range.  The config dword at
 * @reg has an enable in bit 0, the base in bits 2..15, and a decode
 * mask in bits 18..23 (low 2 mask bits always set).  The range is only
 * logged, not reserved.
 */
static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned reg,
				    const char *name)
{
	u32 val;
	u32 mask, base;

	pci_read_config_dword(dev, reg, &val);

	/* Enabled? */
	if (!(val & 1))
		return;

	/* IO base in bits 15:2, mask in bits 23:18, both 4 bytes aligned */
	base = val & 0xfffc;
	mask = (val >> 16) & 0xfc;
	mask |= 3;

	/* Just print it out for now. We should reserve it after more debugging */
	pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask);
}
880
881
/* ICH7-10 LPC: claim ACPI/GPIO regions and report all four generic decodes. */
static void quirk_ich7_lpc(struct pci_dev *dev)
{
	/* We share the common ACPI/GPIO decode with ICH6 */
	ich6_lpc_acpi_gpio(dev);

	/* And have 4 ICH7+ generic decodes */
	ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1");
	ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2");
	ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3");
	ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc);
906
907
908
909
910
/*
 * VIA VT82C586: on sufficiently new revisions (bit 4 of the revision
 * set) the ACPI I/O region lives at config reg 0x48 (256 bytes); claim it.
 */
static void quirk_vt82c586_acpi(struct pci_dev *dev)
{
	if (dev->revision & 0x10)
		quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES,
				"vt82c586 ACPI");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi);
918
919
920
921
922
923
924
/*
 * VIA VT82C686: same ACPI region as the VT82C586, plus a hardware
 * monitor block at reg 0x70 (128 bytes) and SMBus at reg 0x90 (16 bytes).
 */
static void quirk_vt82c686_acpi(struct pci_dev *dev)
{
	quirk_vt82c586_acpi(dev);

	quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1,
				"vt82c686 HW-mon");

	quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi);
935
936
937
938
939
940
/*
 * VIA VT8235: claim the PM (reg 0x88, 128 bytes) and SMBus (reg 0xd0,
 * 16 bytes) I/O regions hidden in config space.
 */
static void quirk_vt8235_acpi(struct pci_dev *dev)
{
	quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM");
	quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi);
947
948
949
950
951
/*
 * TI XIO2000a PCIe-PCI bridge: fast back-to-back transfers on the
 * secondary bus are broken; clear PCI_COMMAND_FAST_BACK on every
 * device behind the bridge.
 */
static void quirk_xio2000a(struct pci_dev *dev)
{
	struct pci_dev *pdev;
	u16 command;

	pci_warn(dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n");
	list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) {
		pci_read_config_word(pdev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_FAST_BACK)
			pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A,
			quirk_xio2000a);
966
967 #ifdef CONFIG_X86_IO_APIC
968
969 #include <asm/io_apic.h>
970
971
972
973
974
975
976
977
978 static void quirk_via_ioapic(struct pci_dev *dev)
979 {
980 u8 tmp;
981
982 if (nr_ioapics < 1)
983 tmp = 0;
984 else
985 tmp = 0x1f;
986
987 pci_info(dev, "%sbling VIA external APIC routing\n",
988 tmp == 0 ? "Disa" : "Ena");
989
990
991 pci_write_config_byte(dev, 0x58, tmp);
992 }
993 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
994 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic);
995
996
997
998
999
1000
1001
/*
 * VIA VT8237: set the "bypass APIC de-assert message" bit in misc
 * control register 2 (config reg 0x5B) if it is not already set.
 */
static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev)
{
	u8 misc_control2;
#define BYPASS_APIC_DEASSERT 8

	pci_read_config_byte(dev, 0x5B, &misc_control2);
	if (!(misc_control2 & BYPASS_APIC_DEASSERT)) {
		pci_info(dev, "Bypassing VIA 8237 APIC De-Assert Message\n");
		pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert);
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
/*
 * AMD VIPER 7410 I/O APIC, revision 0x02 and later: warn the user that
 * AMD Erratum #22 may apply and suggest the "noapic" workaround.
 */
static void quirk_amd_ioapic(struct pci_dev *dev)
{
	if (dev->revision >= 0x02) {
		pci_warn(dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n");
		pci_warn(dev, " : booting with the \"noapic\" option\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
1033 #endif
1034
1035 #if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS)
1036
/*
 * Cavium device 0xa018: for the 0xa118 subsystem variant, point the
 * SR-IOV link at the device's own devfn.
 */
static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev)
{
	/* NOTE(review): only subsystem 0xa118 appears affected -- confirm */
	if (dev->subsystem_device == 0xa118)
		dev->sriov->link = dev->devfn;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link);
1044 #endif
1045
1046
1047
1048
1049
/*
 * AMD 8131 bridge, revisions <= 0x12: PCI-X MMRBC (maximum memory read
 * byte count) changes are unsafe, so flag the secondary bus to forbid
 * them.
 */
static void quirk_amd_8131_mmrbc(struct pci_dev *dev)
{
	if (dev->subordinate && dev->revision <= 0x12) {
		pci_info(dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n",
			 dev->revision);
		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc);
1059
1060
1061
1062
1063
1064
1065
1066
1067 static void quirk_via_acpi(struct pci_dev *d)
1068 {
1069 u8 irq;
1070
1071
1072 pci_read_config_byte(d, 0x42, &irq);
1073 irq &= 0xf;
1074 if (irq && (irq != 2))
1075 d->irq = irq;
1076 }
1077 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi);
1078 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi);
1079
1080
/*
 * Slot-number window (on bus 0) of devices that sit on the VIA VLink;
 * -1 for the low bound means "no VLink bridge detected yet".  Set by
 * quirk_via_bridge(), consumed by quirk_via_vlink().
 */
static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18;

/*
 * Record, per detected VIA south bridge, which device slots are behind
 * the VLink and therefore need the IRQ fixup in quirk_via_vlink().
 */
static void quirk_via_bridge(struct pci_dev *dev)
{
	switch (dev->device) {
	case PCI_DEVICE_ID_VIA_82C686:
		/* Only the bridge itself is on the VLink */
		via_vlink_dev_lo = PCI_SLOT(dev->devfn);
		via_vlink_dev_hi = PCI_SLOT(dev->devfn);
		break;
	case PCI_DEVICE_ID_VIA_8237:
	case PCI_DEVICE_ID_VIA_8237A:
		via_vlink_dev_lo = 15;
		break;
	case PCI_DEVICE_ID_VIA_8235:
		via_vlink_dev_lo = 16;
		break;
	case PCI_DEVICE_ID_VIA_8231:
	case PCI_DEVICE_ID_VIA_8233_0:
	case PCI_DEVICE_ID_VIA_8233A:
	case PCI_DEVICE_ID_VIA_8233C_0:
		via_vlink_dev_lo = 17;
		break;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge);
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
/*
 * Rewrite PCI_INTERRUPT_LINE on devices behind the VIA VLink so it
 * matches the IRQ the kernel assigned (dev->irq).  Only applies to
 * bus 0 devices inside the slot window recorded by quirk_via_bridge().
 */
static void quirk_via_vlink(struct pci_dev *dev)
{
	u8 irq, new_irq;

	/* No VLink south bridge was detected */
	if (via_vlink_dev_lo == -1)
		return;

	new_irq = dev->irq;

	/* Only legacy IRQs 1-15 can be programmed into the line register */
	if (!new_irq || new_irq > 15)
		return;

	/* Restrict to the VLink slot window on bus 0 */
	if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi ||
	    PCI_SLOT(dev->devfn) < via_vlink_dev_lo)
		return;

	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	if (new_irq != irq) {
		pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n",
			 irq, new_irq);
		/* Short settle delay before reprogramming the line register */
		udelay(15);
		pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink);
1163
1164
1165
1166
1167
1168
/*
 * VIA VT82C597: writing 0 to config register 0xfc makes the chip report
 * its true device ID; re-read it so the PCI core sees the correct ID.
 */
static void quirk_vt82c598_id(struct pci_dev *dev)
{
	pci_write_config_byte(dev, 0xfc, 0);
	pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id);
1175
1176
1177
1178
1179
1180
1181
/*
 * CardBus controllers: clear the legacy-mode base address register so
 * the bridge does not claim legacy I/O ranges.  Also reapplied on
 * resume, since firmware may restore the register.
 */
static void quirk_cardbus_legacy(struct pci_dev *dev)
{
	pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID,
			PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy);
1190
1191
1192
1193
1194
1195
1196
1197
/*
 * AMD FE Gate 700C: ensure bits 1-2 of register 0x4C and bit 23 of
 * register 0x84 are set, restoring PCI-ordering compliance the BIOS
 * may have left disabled.
 */
static void quirk_amd_ordering(struct pci_dev *dev)
{
	u32 pcic;
	pci_read_config_dword(dev, 0x4C, &pcic);
	if ((pcic & 6) != 6) {
		pcic |= 6;
		pci_warn(dev, "BIOS failed to enable PCI standards compliance; fixing this error\n");
		pci_write_config_dword(dev, 0x4C, pcic);
		pci_read_config_dword(dev, 0x84, &pcic);
		pcic |= (1 << 23);
		pci_write_config_dword(dev, 0x84, pcic);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering);
1213
1214
1215
1216
1217
1218
1219
1220
/*
 * Dunord I3000: BAR 1 reports a bogus size; mark the resource unset and
 * force a 16 MB window so the allocator reassigns it.
 */
static void quirk_dunord(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[1];

	r->flags |= IORESOURCE_UNSET;
	r->start = 0;
	r->end = 0xffffff;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord);
1230
1231
1232
1233
1234
1235
/*
 * Mark bridges that forward subtractive-decode (transparent) traffic
 * but do not advertise it, so resource assignment treats them correctly.
 */
static void quirk_transparent_bridge(struct pci_dev *dev)
{
	dev->transparent = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge);
1242
1243
1244
1245
1246
1247
1248
1249 static void quirk_mediagx_master(struct pci_dev *dev)
1250 {
1251 u8 reg;
1252
1253 pci_read_config_byte(dev, 0x41, ®);
1254 if (reg & 2) {
1255 reg &= ~2;
1256 pci_info(dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n",
1257 reg);
1258 pci_write_config_byte(dev, 0x41, reg);
1259 }
1260 }
1261 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1262 DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master);
1263
1264
1265
1266
1267
1268
/*
 * Intel 450NX PXB, C0 stepping (revision 0x04) only: disable PCI
 * restreaming by clearing bit 6 of config register 0x40.
 */
static void quirk_disable_pxb(struct pci_dev *pdev)
{
	u16 config;

	if (pdev->revision != 0x04)
		return;
	pci_read_config_word(pdev, 0x40, &config);
	if (config & (1<<6)) {
		config &= ~(1<<6);
		pci_write_config_word(pdev, 0x40, config);
		pci_info(pdev, "C0 revision 450NX. Disabling PCI restreaming\n");
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb);
1284
/*
 * AMD/ATI SATA controllers left in IDE mode by the BIOS: switch them to
 * AHCI.  The write sequence is order-sensitive: unlock via bit 0 of
 * register 0x40, rewrite the class code bytes, then relock.
 */
static void quirk_amd_ide_mode(struct pci_dev *pdev)
{
	/* Sub-class byte of the class code */
	u8 tmp;

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp);
	if (tmp == 0x01) {	/* 0x01 = IDE sub-class */
		pci_read_config_byte(pdev, 0x40, &tmp);
		pci_write_config_byte(pdev, 0x40, tmp|1);	/* unlock */
		pci_write_config_byte(pdev, 0x9, 1);	/* prog-if: AHCI */
		pci_write_config_byte(pdev, 0xa, 6);	/* sub-class: SATA */
		pci_write_config_byte(pdev, 0x40, tmp);	/* relock */

		pdev->class = PCI_CLASS_STORAGE_SATA_AHCI;
		pci_info(pdev, "set SATA to AHCI mode\n");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode);
1310
1311
/*
 * ServerWorks CSB5 IDE: force both channels into legacy (compatibility)
 * mode by clearing the native-mode bits (0 and 2) of the programming
 * interface, in both the hardware register and the cached class.
 */
static void quirk_svwks_csb5ide(struct pci_dev *pdev)
{
	u8 prog;
	pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
	if (prog & 5) {
		prog &= ~5;
		pdev->class &= ~5;
		pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide);
1324
1325
1326 static void quirk_ide_samemode(struct pci_dev *pdev)
1327 {
1328 u8 prog;
1329
1330 pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
1331
1332 if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) {
1333 pci_info(pdev, "IDE mode mismatch; forcing legacy mode\n");
1334 prog &= ~5;
1335 pdev->class &= ~5;
1336 pci_write_config_byte(pdev, PCI_CLASS_PROG, prog);
1337 }
1338 }
1339 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode);
1340
1341
/* Some ATA controllers hang or misbehave in D3; forbid that state. */
static void quirk_no_ata_d3(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3;
}
/* Applies to all IDE-class devices from these vendors */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);

DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);

DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3);
1358
1359
1360
1361
1362
/* Intel 82375 reports the wrong class; force it to EISA bridge. */
static void quirk_eisa_bridge(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_EISA << 8;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge);
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
/* Set when the host bridge indicates a BIOS that hides the SMBus
 * controller; consumed by the asus_hides_smbus_lpc* quirks below. */
static int asus_hides_smbus;

/*
 * Certain ASUS/HP/Samsung/Compaq boards ship firmware that hides the
 * Intel SMBus controller.  Detect affected boards by matching the host
 * bridge device ID against known subsystem vendor/device pairs and
 * latch the result in asus_hides_smbus.  The grouped case labels fall
 * through to the assignment intentionally.
 */
static void asus_hides_smbus_hostbridge(struct pci_dev *dev)
{
	if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
			switch (dev->subsystem_device) {
			case 0x8025:
			case 0x8070:
			case 0x8088:
			case 0x1626:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB)
			switch (dev->subsystem_device) {
			case 0x80b1:
			case 0x80b2:
			case 0x8093:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB)
			switch (dev->subsystem_device) {
			case 0x8030:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0)
			switch (dev->subsystem_device) {
			case 0x8070:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH)
			switch (dev->subsystem_device) {
			case 0x80c9:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB)
			switch (dev->subsystem_device) {
			case 0x1751:
			case 0x1821:
			case 0x1897:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x184b:
			case 0x186a:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
			switch (dev->subsystem_device) {
			case 0x80f2:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB)
			switch (dev->subsystem_device) {
			case 0x1882:
			case 0x1977:
				asus_hides_smbus = 1;
			}
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x088C:
			case 0x0890:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB)
			switch (dev->subsystem_device) {
			case 0x12bc:
			case 0x12bd:
			case 0x006a:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB)
			switch (dev->subsystem_device) {
			case 0x12bf:
				asus_hides_smbus = 1;
			}
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0xC00C:
				asus_hides_smbus = 1;
			}
	} else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) {
		if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB)
			switch (dev->subsystem_device) {
			case 0x0058:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3)
			switch (dev->subsystem_device) {
			case 0xB16C:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2)
			switch (dev->subsystem_device) {
			case 0x00b8:
			case 0x00b9:
			case 0x00ba:
				asus_hides_smbus = 1;
			}
		else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC)
			switch (dev->subsystem_device) {
			case 0x001A:
				asus_hides_smbus = 1;
			}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge);
1528
/*
 * On affected boards (asus_hides_smbus set), clear bit 3 of LPC config
 * register 0xF2 to unhide the i801 SMBus device, and verify the bit
 * actually stuck.
 */
static void asus_hides_smbus_lpc(struct pci_dev *dev)
{
	u16 val;

	if (likely(!asus_hides_smbus))
		return;

	pci_read_config_word(dev, 0xF2, &val);
	if (val & 0x8) {
		pci_write_config_word(dev, 0xF2, val & (~0x8));
		pci_read_config_word(dev, 0xF2, &val);
		if (val & 0x8)
			pci_info(dev, "i801 SMBus device continues to play 'hide and seek'! 0x%x\n",
				 val);
		else
			pci_info(dev, "Enabled i801 SMBus device\n");
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc);
1561
1562
/* Mapping of the ICH6 RCBA (root complex base address) space, held
 * between the suspend and resume halves of this quirk. */
static void __iomem *asus_rcba_base;

/*
 * Suspend half: map the 16 KB RCBA window (base in config dword 0xF0,
 * low 14 bits masked off) so the resume-early half can poke it.
 * NOTE(review): ioremap_nocache() is the legacy spelling of ioremap();
 * the trailing NULL check is redundant at the end of a void function.
 */
static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev)
{
	u32 rcba;

	if (likely(!asus_hides_smbus))
		return;
	/* A leftover mapping means suspend ran twice without resume */
	WARN_ON(asus_rcba_base);

	pci_read_config_dword(dev, 0xF0, &rcba);

	asus_rcba_base = ioremap_nocache(rcba & 0xFFFFC000, 0x4000);
	if (asus_rcba_base == NULL)
		return;
}
1578
/*
 * Resume-early half: clear bit 3 of RCBA register 0x3418 to unhide the
 * SMBus function.  Requires the mapping made by the suspend half.
 */
static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev)
{
	u32 val;

	if (likely(!asus_hides_smbus || !asus_rcba_base))
		return;

	val = readl(asus_rcba_base + 0x3418);

	/* Clear the "function disable" bit for the SMBus device */
	writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418);
}
1592
/* Resume half: drop the RCBA mapping created at suspend time. */
static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev)
{
	if (likely(!asus_hides_smbus || !asus_rcba_base))
		return;

	iounmap(asus_rcba_base);
	asus_rcba_base = NULL;
	pci_info(dev, "Enabled ICH6/i801 SMBus device\n");
}
1602
/* Boot-time variant: run the full suspend/resume sequence once. */
static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev)
{
	asus_hides_smbus_lpc_ich6_suspend(dev);
	asus_hides_smbus_lpc_ich6_resume_early(dev);
	asus_hides_smbus_lpc_ich6_resume(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6);
DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early);
1613
1614
/*
 * SiS 96x south bridges: clear bit 4 of config register 0x77 to unhide
 * the SMBus controller.
 */
static void quirk_sis_96x_smbus(struct pci_dev *dev)
{
	u8 val = 0;
	pci_read_config_byte(dev, 0x77, &val);
	if (val & 0x10) {
		pci_info(dev, "Enabling SiS 96x SMBus\n");
		pci_write_config_byte(dev, 0x77, val & ~0x10);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus);
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641 #define SIS_DETECT_REGISTER 0x40
1642
1643 static void quirk_sis_503(struct pci_dev *dev)
1644 {
1645 u8 reg;
1646 u16 devid;
1647
1648 pci_read_config_byte(dev, SIS_DETECT_REGISTER, ®);
1649 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6));
1650 pci_read_config_word(dev, PCI_DEVICE_ID, &devid);
1651 if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) {
1652 pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg);
1653 return;
1654 }
1655
1656
1657
1658
1659
1660
1661 dev->device = devid;
1662 quirk_sis_96x_smbus(dev);
1663 }
1664 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1665 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503);
1666
1667
1668
1669
1670
1671
1672
1673 static void asus_hides_ac97_lpc(struct pci_dev *dev)
1674 {
1675 u8 val;
1676 int asus_hides_ac97 = 0;
1677
1678 if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
1679 if (dev->device == PCI_DEVICE_ID_VIA_8237)
1680 asus_hides_ac97 = 1;
1681 }
1682
1683 if (!asus_hides_ac97)
1684 return;
1685
1686 pci_read_config_byte(dev, 0x50, &val);
1687 if (val & 0xc0) {
1688 pci_write_config_byte(dev, 0x50, val & (~0xc0));
1689 pci_read_config_byte(dev, 0x50, &val);
1690 if (val & 0xc0)
1691 pci_info(dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n",
1692 val);
1693 else
1694 pci_info(dev, "Enabled onboard AC97/MC97 devices\n");
1695 }
1696 }
1697 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1698 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc);
1699
1700 #if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
1701
1702
1703
1704
1705
/*
 * JMicron combined SATA/PATA controllers: program config registers 0x40
 * and 0x80 to select the function layout, then refresh the cached
 * header type and class, which change as a result.  Only function 0
 * carries the control registers.
 */
static void quirk_jmicron_ata(struct pci_dev *pdev)
{
	u32 conf1, conf5, class;
	u8 hdr;

	/* Control registers exist on function 0 only */
	if (PCI_FUNC(pdev->devfn))
		return;

	pci_read_config_dword(pdev, 0x40, &conf1);
	pci_read_config_dword(pdev, 0x80, &conf5);

	/* Clear the mode bits before setting the per-device layout */
	conf1 &= ~0x00CFF302;
	conf5 &= ~(1 << 24);

	switch (pdev->device) {
	case PCI_DEVICE_ID_JMICRON_JMB360:
	case PCI_DEVICE_ID_JMICRON_JMB362:
	case PCI_DEVICE_ID_JMICRON_JMB364:
		/* SATA-only parts: single AHCI function */
		conf1 |= 0x0002A100;
		break;

	case PCI_DEVICE_ID_JMICRON_JMB365:
	case PCI_DEVICE_ID_JMICRON_JMB366:
		/* Dual-PATA parts: enable the second PATA channel ... */
		conf5 |= (1 << 24);
		/* fall through - ... then apply the combined layout */
	case PCI_DEVICE_ID_JMICRON_JMB361:
	case PCI_DEVICE_ID_JMICRON_JMB363:
	case PCI_DEVICE_ID_JMICRON_JMB369:
		/* SATA on function 0, PATA on function 1 */
		conf1 |= 0x00C2A1B3;
		break;

	case PCI_DEVICE_ID_JMICRON_JMB368:
		/* PATA-only part */
		conf1 |= 0x00C00000;
		break;
	}

	pci_write_config_dword(pdev, 0x40, conf1);
	pci_write_config_dword(pdev, 0x80, conf5);

	/* The writes above change the header type and class; re-read them */
	pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr);
	pdev->hdr_type = hdr & 0x7f;
	pdev->multifunction = !!(hdr & 0x80);

	pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class);
	pdev->class = class >> 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata);
1777
1778 #endif
1779
/*
 * JMicron multi-function parts must power on their functions in order;
 * disable async suspend so the PM core serializes them.
 */
static void quirk_jmicron_async_suspend(struct pci_dev *dev)
{
	if (dev->multifunction) {
		device_disable_async_suspend(&dev->dev);
		pci_info(dev, "async suspend disabled to avoid multi-function power-on ordering issue\n");
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend);
1791
1792 #ifdef CONFIG_X86_IO_APIC
/*
 * Intel EESSC (Alder) I/O APIC function: claim BAR 0 for the APIC and
 * wipe the remaining BARs, which do not describe real resources.
 */
static void quirk_alder_ioapic(struct pci_dev *pdev)
{
	int i;

	/* Only the "other" (0xff) class variant carries the I/O APIC */
	if ((pdev->class >> 8) != 0xff00)
		return;

	/* BAR 0 is the I/O APIC window; register it directly */
	if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0))
		insert_resource(&iomem_resource, &pdev->resource[0]);

	/* The remaining standard BARs are bogus; clear them */
	for (i = 1; i < 6; i++)
		memset(&pdev->resource[i], 0, sizeof(pdev->resource[i]));
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic);
1816 #endif
1817
/* MSI is broken on these chipset devices; force INTx. */
static void quirk_pcie_mch(struct pci_dev *pdev)
{
	pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch);

DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch);
1827
1828
1829
1830
1831
/* Intel PXH bridges: MSI for the SHPC controller is broken. */
static void quirk_pcie_pxh(struct pci_dev *dev)
{
	dev->no_msi = 1;
	pci_warn(dev, "PXH quirk detected; SHPC device MSI disabled\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh);
1842
1843
1844
1845
1846
/*
 * Intel PCIe ports that need extra settle time after D3: lengthen the
 * global D3 transition delay to 120 ms and forbid the D1/D2 states.
 */
static void quirk_intel_pcie_pm(struct pci_dev *dev)
{
	pci_pm_d3_delay = 120;
	dev->no_d1d2 = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e2, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e3, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e4, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e5, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e6, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25e7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25f7, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25f8, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25f9, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x25fa, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2601, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2602, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2603, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2604, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2605, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2606, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2607, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2608, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x2609, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x260a, quirk_intel_pcie_pm);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL,	0x260b, quirk_intel_pcie_pm);
1873
1874 static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
1875 {
1876 if (dev->d3_delay >= delay)
1877 return;
1878
1879 dev->d3_delay = delay;
1880 pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
1881 dev->d3_delay);
1882 }
1883
/*
 * Apple-built Radeon HD 6450M (subsystem 0x00e2) needs at least 20 ms
 * after leaving D3hot before config access works.
 */
static void quirk_radeon_pm(struct pci_dev *dev)
{
	if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
	    dev->subsystem_device == 0x00e2)
		quirk_d3hot_delay(dev, 20);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm);
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
/* AMD Ryzen xHCI controllers need 20 ms after leaving D3hot. */
static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
{
	quirk_d3hot_delay(dev, 20);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
1907
1908 #ifdef CONFIG_X86_IO_APIC
/* DMI callback: turn off boot-interrupt rerouting on matching boards. */
static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
{
	noioapicreroute = 1;
	pr_info("%s detected: disable boot interrupt reroute\n", d->ident);

	return 0;
}
1916
/* Boards on which boot-interrupt rerouting must stay disabled. */
static const struct dmi_system_id boot_interrupt_dmi_table[] = {
	{
		.callback = dmi_disable_ioapicreroute,
		.ident = "ASUSTek Computer INC. M2N-LR",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"),
		},
	},
	{}
};
1931
1932
1933
1934
1935
1936
1937
/*
 * Intel bridges whose INTx lines must be rerouted to the boot
 * interrupts unless disabled by command line (noioapicquirk /
 * noioapicreroute) or by a DMI table match.
 */
static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev)
{
	dmi_check_system(boot_interrupt_dmi_table);
	if (noioapicquirk || noioapicreroute)
		return;

	dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT;
	pci_info(dev, "rerouting interrupts for [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel);
1964
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975
1976
1977
1978
1979
1980
1981
1982
1983
/* Intel 6300ESB I/O APIC: boot-IRQ disable bit in the word-sized ABAR reg */
#define INTEL_6300_IOAPIC_ABAR		0x40
#define INTEL_6300_DISABLE_BOOT_IRQ	(1<<14)

/* Other Intel devices: INTx-to-ICH disable bit in the dword CIPINTRC reg */
#define INTEL_CIPINTRC_CFG_OFFSET	0x14C
#define INTEL_CIPINTRC_DIS_INTX_ICH	(1<<25)

/*
 * Disable the legacy boot interrupt on Intel devices that have a
 * device-specific disable bit for it.  Read-modify-write the relevant
 * config register; no-op when the user passed noioapicquirk or the
 * device ID is not recognized.
 */
static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev)
{
	u16 pci_config_word;
	u32 pci_config_dword;

	if (noioapicquirk)
		return;

	switch (dev->device) {
	case PCI_DEVICE_ID_INTEL_ESB_10:
		/* 6300ESB: set the disable bit in ABAR */
		pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR,
				     &pci_config_word);
		pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ;
		pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR,
				      pci_config_word);
		break;
	/* Device IDs handled via the CIPINTRC register (presumably Xeon-era
	   uncore/IIO parts -- confirm against Intel datasheets) */
	case 0x3c28:
	case 0x0e28:
	case 0x2f28:
	case 0x6f28:
	case 0x2034:
		pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
				      &pci_config_dword);
		pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH;
		pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET,
				       pci_config_dword);
		break;
	default:
		return;
	}
	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
/* Run at final fixup and again on resume (register state lost in suspend) */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28,
			quirk_disable_intel_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034,
			quirk_disable_intel_boot_interrupt);
2059
2060
/* Broadcom HT1000: feature register bit exposing PIC regs, and the
   index/data I/O port pair used to program the IRQ mapping */
#define BC_HT1000_FEATURE_REG		0x64
#define BC_HT1000_PIC_REGS_ENABLE	(1<<0)
#define BC_HT1000_MAP_IDX		0xC00
#define BC_HT1000_MAP_DATA		0xC01

/*
 * Disable boot interrupts on the Broadcom/ServerWorks HT1000 southbridge:
 * temporarily expose the PIC mapping registers, zero the map entries for
 * IRQs 0x10..0x2f via the index/data ports, then restore the feature
 * register to its original value.
 */
static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev)
{
	u32 pci_config_dword;
	u8 irq;

	if (noioapicquirk)
		return;

	/* Save feature reg, then enable access to the PIC mapping regs */
	pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword);
	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword |
			       BC_HT1000_PIC_REGS_ENABLE);

	/* Clear the mapping for each of the 32 IRQs starting at 0x10 */
	for (irq = 0x10; irq < 0x10 + 32; irq++) {
		outb(irq, BC_HT1000_MAP_IDX);
		outb(0x00, BC_HT1000_MAP_DATA);
	}

	/* Restore the saved feature register (hides the PIC regs again) */
	pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword);

	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt);
2090
2091
2092
2093
2094
2095
2096
2097
/* AMD 813x: misc register and the "no IOAPIC mode" bit within it */
#define AMD_813X_MISC			0x40
#define AMD_813X_NOIOAMODE		(1<<0)
#define AMD_813X_REV_B1			0x12
#define AMD_813X_REV_B2			0x13

/*
 * Disable boot interrupts on AMD 8131/8132 bridges by clearing the
 * NOIOAMODE bit in the misc register.  Revisions B1/B2 are explicitly
 * skipped (presumably not affected -- confirm against AMD errata).
 */
static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev)
{
	u32 pci_config_dword;

	if (noioapicquirk)
		return;
	if ((dev->revision == AMD_813X_REV_B1) ||
	    (dev->revision == AMD_813X_REV_B2))
		return;

	pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword);
	pci_config_dword &= ~AMD_813X_NOIOAMODE;
	pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword);

	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt);
2124
/* AMD 8111 SMBus: PCI IRQ routing register */
#define AMD_8111_PCI_IRQ_ROUTING	0x56

/*
 * Disable boot interrupts on the AMD 8111 SMBus device by zeroing its
 * IRQ routing register; if the register already reads zero, just log
 * that it was already disabled.
 */
static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev)
{
	u16 pci_config_word;

	if (noioapicquirk)
		return;

	pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word);
	if (!pci_config_word) {
		pci_info(dev, "boot interrupts on device [%04x:%04x] already disabled\n",
			 dev->vendor, dev->device);
		return;
	}
	pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0);
	pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n",
		 dev->vendor, dev->device);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt);
2146 #endif
2147
2148
2149
2150
2151
2152
/*
 * Toshiba TC86C001 IDE: if BAR 0 was assigned with bit 3 set, mark the
 * resource unset and request a fresh 16-byte (0x0..0xf) window so the
 * core reallocates it (presumably the device misdecodes such addresses
 * -- confirm against the TC86C001 errata).
 */
static void quirk_tc86c001_ide(struct pci_dev *dev)
{
	struct resource *r = &dev->resource[0];

	if (r->start & 0x8) {
		r->flags |= IORESOURCE_UNSET;
		r->start = 0;
		r->end = 0xf;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
			 PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE,
			 quirk_tc86c001_ide);
2166
2167
2168
2169
2170
2171
2172
2173
/*
 * PLX PCI 9050 (revisions < 2) have a bug when BAR 0/1 is a 128-byte
 * region whose assigned base has bit 7 set: mark such BARs unset and
 * request 256 bytes so reallocation avoids the bad alignment.
 */
static void quirk_plx_pci9050(struct pci_dev *dev)
{
	unsigned int bar;

	/* Fixed in revision 2 and later */
	if (dev->revision >= 2)
		return;
	for (bar = 0; bar <= 1; bar++)
		if (pci_resource_len(dev, bar) == 0x80 &&
		    (pci_resource_start(dev, bar) & 0x80)) {
			struct resource *r = &dev->resource[bar];
			pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n",
				 bar);
			r->flags |= IORESOURCE_UNSET;
			r->start = 0;
			r->end = 0xff;
		}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
			 quirk_plx_pci9050);
/* Boards (vendor 0x1402) that apparently embed the same PLX 9050 core */
DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050);
DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050);
2205
/*
 * Netmos combined parallel/serial controllers encode their port counts
 * in the subsystem device ID (high nibble = parallel ports, low nibble
 * = serial ports).  When parallel ports are present, change the class
 * from SERIAL to OTHER so parport_serial binds instead of a pure serial
 * driver.
 */
static void quirk_netmos(struct pci_dev *dev)
{
	unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4;
	unsigned int num_serial = dev->subsystem_device & 0xf;

	switch (dev->device) {
	case PCI_DEVICE_ID_NETMOS_9835:
		/* Leave the IBM 0x0299 subsystem variant untouched */
		if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
		    dev->subsystem_device == 0x0299)
			return;
		/* else: fall through */
	case PCI_DEVICE_ID_NETMOS_9735:
	case PCI_DEVICE_ID_NETMOS_9745:
	case PCI_DEVICE_ID_NETMOS_9845:
	case PCI_DEVICE_ID_NETMOS_9855:
		if (num_parallel) {
			pci_info(dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n",
				 dev->device, num_parallel, num_serial);
			/* Keep the low byte (programming interface) as-is */
			dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) |
			    (dev->class & 0xff);
		}
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
			       PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
2242
/*
 * Some firmware leaves Intel e100 NICs with interrupts enabled before a
 * driver attaches; if so, mask them via the device CSR so a shared IRQ
 * line is not stormed.
 */
static void quirk_e100_interrupt(struct pci_dev *dev)
{
	u16 command, pmcsr;
	u8 __iomem *csr;
	u8 cmd_hi;

	switch (dev->device) {
	/* Device IDs presumably matching the e100 driver's ID table --
	   confirm against the e100 network driver */
	case 0x1029:
	case 0x1030 ... 0x1034:
	case 0x1038 ... 0x103E:
	case 0x1050 ... 0x1057:
	case 0x1059:
	case 0x1064 ... 0x106B:
	case 0x1091 ... 0x1095:
	case 0x1209:
	case 0x1229:
	case 0x2449:
	case 0x2459:
	case 0x245D:
	case 0x27DC:
		break;
	default:
		return;
	}

	/* Only touch the device if memory decoding is on and BAR 0 is
	   assigned; otherwise the CSR cannot be mapped safely. */
	pci_read_config_word(dev, PCI_COMMAND, &command);

	if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0))
		return;

	/* Skip devices not in D0; their MMIO space is not accessible */
	if (dev->pm_cap) {
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0)
			return;
	}

	/* Map just the first 8 bytes of the CSR space */
	csr = ioremap(pci_resource_start(dev, 0), 8);
	if (!csr) {
		pci_warn(dev, "Can't map e100 registers\n");
		return;
	}

	/* CSR byte 3: 0 means interrupts enabled; writing 1 masks them */
	cmd_hi = readb(csr + 3);
	if (cmd_hi == 0) {
		pci_warn(dev, "Firmware left e100 interrupts enabled; disabling\n");
		writeb(1, csr + 3);
	}

	iounmap(csr);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
			      PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt);
2308
2309
2310
2311
2312
/*
 * Disable ASPM L0s on Intel NICs for which it is known to be broken
 * (see the listed device IDs below).
 */
static void quirk_disable_aspm_l0s(struct pci_dev *dev)
{
	pci_info(dev, "Disabling L0s\n");
	pci_disable_link_state(dev, PCIE_LINK_STATE_L0S);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s);
2332
2333
2334
2335
2336
2337
2338
2339
2340
/*
 * Mark devices (Pericom PI7C9X111SL family, by vendor 0x12d8) that need
 * the Retrain Link bit explicitly cleared after link retraining; sets
 * dev->clear_retrain_link for the PCIe core to honor.
 */
static void quirk_enable_clear_retrain_link(struct pci_dev *dev)
{
	dev->clear_retrain_link = 1;
	pci_info(dev, "Enable PCIe Retrain Link quirk\n");
}
DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link);
DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link);
DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link);
2349
/*
 * NCR 53c810 rev 1 reports PCI class 0; override it to STORAGE_SCSI so
 * the SCSI driver binds.  Devices that already report a class are left
 * alone.
 */
static void fixup_rev1_53c810(struct pci_dev *dev)
{
	u32 class = dev->class;

	if (class)
		return;

	dev->class = PCI_CLASS_STORAGE_SCSI << 8;
	pci_info(dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n",
		 class, dev->class);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
2366
2367
/*
 * Intel P64H2 (0x1460): if bit 9 of config register 0x40 is set, the
 * bridge supports 1KB I/O window granularity; record that in
 * dev->io_window_1k for the resource code.
 */
static void quirk_p64h2_1k_io(struct pci_dev *dev)
{
	u16 en1k;

	pci_read_config_word(dev, 0x40, &en1k);

	if (en1k & 0x200) {
		pci_info(dev, "Enable I/O Space to 1KB granularity\n");
		dev->io_window_1k = 1;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io);
2380
2381
2382
2383
2384
2385
/*
 * NVIDIA CK804 PCIe: setting bit 5 of config byte 0xf41 links the AER
 * extended capability into the capability chain (it is unlinked by
 * default).  Applied at final fixup and on early resume.
 */
static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev)
{
	uint8_t b;

	if (pci_read_config_byte(dev, 0xf41, &b) == 0) {
		if (!(b & 0x20)) {
			pci_write_config_byte(dev, 0xf41, b | 0x20);
			pci_info(dev, "Linking AER extended capability\n");
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
			quirk_nvidia_ck804_pcie_aer_ext_cap);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
			       quirk_nvidia_ck804_pcie_aer_ext_cap);
2401
/*
 * VIA CX700 (0x324e): disable PCI bus parking and PCI caching, but only
 * when a *second* VT6212L USB controller is present (the quirk searches
 * past the first, internal instance).  Register meanings (0x76 bit 6 =
 * parking, 0x72/0x75/0x77 = caching controls) are per VIA -- confirm
 * against the CX700 datasheet.
 */
static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
{
	/* First (internal) VT6212L instance, or NULL if none */
	struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
					   PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
	uint8_t b;

	/* Search again starting after p: only act if a second (external)
	   VT6212L exists.  pci_get_device() drops the reference on p. */
	p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
	if (!p)
		return;
	pci_dev_put(p);

	if (pci_read_config_byte(dev, 0x76, &b) == 0) {
		if (b & 0x40) {
			/* Clear bit 6: turn off PCI parking */
			pci_write_config_byte(dev, 0x76, b ^ 0x40);

			pci_info(dev, "Disabling VIA CX700 PCI parking\n");
		}
	}

	if (pci_read_config_byte(dev, 0x72, &b) == 0) {
		if (b != 0) {
			/* Turn off PCI caching (write-order matters) */
			pci_write_config_byte(dev, 0x72, 0x0);

			pci_write_config_byte(dev, 0x75, 0x1);

			pci_write_config_byte(dev, 0x77, 0x0);

			pci_info(dev, "Disabling VIA CX700 PCI caching\n");
		}
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
2453
/*
 * Broadcom Tigon3 5719: for the silicon identified by config dword 0xf4
 * reading 0x05719000, cap the PCIe Max Read Request Size at 2048 bytes.
 */
static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev)
{
	u32 rev;

	pci_read_config_dword(dev, 0xf4, &rev);

	/* Only the A0 chip rev (per the 0x05719000 value) is affected --
	   confirm against Broadcom errata */
	if (rev == 0x05719000) {
		int readrq = pcie_get_readrq(dev);
		if (readrq > 2048)
			pcie_set_readrq(dev, 2048);
	}
}
DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM,
			 PCI_DEVICE_ID_TIGON3_5719,
			 quirk_brcm_5719_limit_mrrs);
2470
2471
2472
2473
2474
2475
2476
2477 static void quirk_unhide_mch_dev6(struct pci_dev *dev)
2478 {
2479 u8 reg;
2480
2481 if (pci_read_config_byte(dev, 0xF4, ®) == 0 && !(reg & 0x02)) {
2482 pci_info(dev, "Enabling MCH 'Overflow' Device\n");
2483 pci_write_config_byte(dev, 0xF4, reg | 0x02);
2484 }
2485 }
2486 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
2487 quirk_unhide_mch_dev6);
2488 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
2489 quirk_unhide_mch_dev6);
2490
2491 #ifdef CONFIG_PCI_MSI
2492
2493
2494
2495
2496
2497
2498
/*
 * Chipsets on which MSI is globally broken: disable MSI for the entire
 * system via pci_no_msi().
 */
static void quirk_disable_all_msi(struct pci_dev *dev)
{
	pci_no_msi();
	pci_warn(dev, "MSI quirk detected; MSI disabled\n");
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi);
2512
2513
/*
 * Bridges that do not forward MSI properly: flag the subordinate bus
 * with PCI_BUS_FLAGS_NO_MSI so devices behind it fall back to INTx.
 */
static void quirk_disable_msi(struct pci_dev *dev)
{
	if (dev->subordinate) {
		pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi);
2524
2525
2526
2527
2528
2529
2530
/*
 * AMD 780 (host bridge 0x9600/0x9601): if device 1 function 0 on the
 * same bus is the 0x9602 APC bridge, apply quirk_disable_msi() to it
 * (MSI broken behind that bridge).
 */
static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge)
{
	struct pci_dev *apc_bridge;

	apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0));
	if (apc_bridge) {
		if (apc_bridge->device == 0x9602)
			quirk_disable_msi(apc_bridge);
		pci_dev_put(apc_bridge);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi);
2544
2545
2546
2547
2548
/*
 * Return 1 if the device has a HyperTransport MSI mapping capability
 * whose flags byte can be read and has the ENABLE bit set, 0 otherwise.
 * Walks the HT capability chain with a TTL to guard against loops.
 */
static int msi_ht_cap_enabled(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		/* Report enabled/disabled for the first readable mapping */
		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			pci_info(dev, "Found %s HT MSI Mapping\n",
				 flags & HT_MSI_FLAGS_ENABLE ?
				 "enabled" : "disabled");
			return (flags & HT_MSI_FLAGS_ENABLE) != 0;
		}

		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
	return 0;
}
2570
2571
/*
 * Bridges whose HT MSI mapping is present but not enabled cannot
 * deliver MSI from subordinate devices: mark the subordinate bus NO_MSI.
 */
static void quirk_msi_ht_cap(struct pci_dev *dev)
{
	if (dev->subordinate && !msi_ht_cap_enabled(dev)) {
		pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE,
			quirk_msi_ht_cap);
2581
2582
2583
2584
2585
/*
 * NVIDIA CK804: MSI works only if the HT MSI mapping is enabled either
 * on the bridge itself or on the device at slot 0 of its bus; otherwise
 * mark the subordinate bus NO_MSI.
 */
static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev)
{
	struct pci_dev *pdev;

	if (!dev->subordinate)
		return;

	/* Check the companion device at devfn(0,0) on the same bus */
	pdev = pci_get_slot(dev->bus, 0);
	if (!pdev)
		return;
	if (!msi_ht_cap_enabled(dev) && !msi_ht_cap_enabled(pdev)) {
		pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n");
		dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI;
	}
	pci_dev_put(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE,
			quirk_nvidia_ck804_msi_ht_cap);
2608
2609
/*
 * Set the ENABLE bit in every HT MSI mapping capability on the device.
 * TTL-bounded walk of the HT capability chain.
 */
static void ht_enable_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			pci_info(dev, "Enabling HT MSI Mapping\n");

			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
					      flags | HT_MSI_FLAGS_ENABLE);
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS,
			 PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB,
			 ht_enable_msi_mapping);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE,
			 ht_enable_msi_mapping);
2634
2635
2636
2637
2638
2639
/*
 * MCP55 onboard NIC: MSI is broken on certain ASUS boards (identified by
 * DMI board name); set dev->no_msi on those systems.
 */
static void nvenet_msi_disable(struct pci_dev *dev)
{
	const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);

	if (board_name &&
	    (strstr(board_name, "P5N32-SLI PREMIUM") ||
	     strstr(board_name, "P5N32-E SLI"))) {
		pci_info(dev, "Disabling MSI for MCP55 NIC on P5N32-SLI\n");
		dev->no_msi = 1;
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
			PCI_DEVICE_ID_NVIDIA_NVENET_15,
			nvenet_msi_disable);
2654
2655
2656
2657
2658
2659
2660
2661
2662
2663
/*
 * NVIDIA Tegra PCIe root ports (listed below by device ID): disable MSI
 * on the root port itself by setting dev->no_msi.  Devices behind the
 * port are unaffected.
 */
static void pci_quirk_nvidia_tegra_disable_rp_msi(struct pci_dev *dev)
{
	dev->no_msi = 1;
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad0,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad1,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad2,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e12,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e13,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0fae,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0faf,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6,
			      PCI_CLASS_BRIDGE_PCI, 8,
			      pci_quirk_nvidia_tegra_disable_rp_msi);
2707
2708
2709
2710
2711
2712
2713
2714
2715
2716
2717
/*
 * MCP55 bridges with a HyperTransport capability: clear bits 2 and 15
 * of config register 0x74 if either is set (legacy IRQ routing bits --
 * confirm against NVIDIA MCP55 documentation).
 */
static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev)
{
	u32 cfg;

	/* Only bridges exposing a HyperTransport capability are affected */
	if (!pci_find_capability(dev, PCI_CAP_ID_HT))
		return;

	pci_read_config_dword(dev, 0x74, &cfg);

	if (cfg & ((1 << 2) | (1 << 15))) {
		pr_info("Rewriting IRQ routing register on MCP55\n");
		cfg &= ~((1 << 2) | (1 << 15));
		pci_write_config_dword(dev, 0x74, cfg);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0,
			nvbridge_check_legacy_irq_routing);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA,
			PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4,
			nvbridge_check_legacy_irq_routing);
2739
/*
 * Classify the device's HT MSI mapping state:
 *   0 = no HT MSI mapping capability found
 *   1 = mapping present but not enabled
 *   2 = at least one mapping present and enabled
 * TTL-bounded walk of the HT capability chain.
 */
static int ht_check_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;
	int found = 0;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		/* A capability exists: at least "present but disabled" */
		if (found < 1)
			found = 1;
		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			if (flags & HT_MSI_FLAGS_ENABLE) {
				if (found < 2) {
					found = 2;
					break;
				}
			}
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}

	return found;
}
2767
/*
 * Return 1 if, scanning the devices after this host bridge on the same
 * bus, a device with an HT MSI mapping is found before any HT slave
 * (i.e. the bridge has a "leaf" device needing MSI mapping); 0
 * otherwise.  Stops at the first HT slave encountered.
 */
static int host_bridge_with_leaf(struct pci_dev *host_bridge)
{
	struct pci_dev *dev;
	int pos;
	int i, dev_no;
	int found = 0;

	dev_no = host_bridge->devfn >> 3;
	for (i = dev_no + 1; i < 0x20; i++) {
		dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0));
		if (!dev)
			continue;

		/* An HT slave ends the chain segment we care about */
		pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);
		if (pos != 0) {
			pci_dev_put(dev);
			break;
		}

		if (ht_check_msi_mapping(dev)) {
			found = 1;
			pci_dev_put(dev);
			break;
		}
		pci_dev_put(dev);
	}

	return found;
}
2798
/* Offsets of the two link-control words in an HT slave capability */
#define PCI_HT_CAP_SLAVE_CTRL0     4
#define PCI_HT_CAP_SLAVE_CTRL1     8

/*
 * Return 1 if this device terminates its HyperTransport chain: selects
 * the link-control word indicated by flags bit 10 and tests bit 6
 * (End-of-Chain -- confirm against the HT I/O Link spec).  Returns 0 if
 * there is no HT slave capability.
 */
static int is_end_of_ht_chain(struct pci_dev *dev)
{
	int pos, ctrl_off;
	int end = 0;
	u16 flags, ctrl;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE);

	if (!pos)
		goto out;

	pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags);

	ctrl_off = ((flags >> 10) & 1) ?
			PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1;
	pci_read_config_word(dev, pos + ctrl_off, &ctrl);

	if (ctrl & (1 << 6))
		end = 1;

out:
	return end;
}
2825
/*
 * Enable the HT MSI mapping on 'dev', but only when it is appropriate:
 * find the nearest HT-slave "host bridge" at or below dev's slot on the
 * same bus, and skip enabling if (a) dev itself is that bridge, ends
 * the HT chain, and has a leaf device needing mapping, or (b) the
 * bridge already has MSI mapping enabled.
 */
static void nv_ht_enable_msi_mapping(struct pci_dev *dev)
{
	struct pci_dev *host_bridge;
	int pos;
	int i, dev_no;
	int found = 0;

	/* Search downward from dev's own slot for an HT slave */
	dev_no = dev->devfn >> 3;
	for (i = dev_no; i >= 0; i--) {
		host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0));
		if (!host_bridge)
			continue;

		pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
		if (pos != 0) {
			/* Keep the reference; released at 'out' below */
			found = 1;
			break;
		}
		pci_dev_put(host_bridge);
	}

	if (!found)
		return;

	/* Case (a): dev is the chain-ending bridge with a mapped leaf */
	if (host_bridge == dev && is_end_of_ht_chain(host_bridge) &&
	    host_bridge_with_leaf(host_bridge))
		goto out;

	/* Case (b): bridge already has the mapping enabled */
	if (msi_ht_cap_enabled(host_bridge))
		goto out;

	ht_enable_msi_mapping(dev);

out:
	pci_dev_put(host_bridge);
}
2864
/*
 * Clear the ENABLE bit in every HT MSI mapping capability on the
 * device.  TTL-bounded walk; mirror of ht_enable_msi_mapping().
 */
static void ht_disable_msi_mapping(struct pci_dev *dev)
{
	int pos, ttl = PCI_FIND_CAP_TTL;

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && ttl--) {
		u8 flags;

		if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS,
					 &flags) == 0) {
			pci_info(dev, "Disabling HT MSI Mapping\n");

			pci_write_config_byte(dev, pos + HT_MSI_FLAGS,
					      flags & ~HT_MSI_FLAGS_ENABLE);
		}
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}
2884
/*
 * Core of the nVidia/ALi HT MSI mapping quirk.  Depending on whether
 * the host bridge (domain's device 00.0) is an HT slave:
 *  - HT slave + dev's mapping present-but-disabled: enable dev's
 *    mapping ('all' selects the unconditional vs. leaf-aware enabler);
 *  - not HT slave + dev's mapping enabled: leave as is;
 *  - not HT slave + mapping present-but-disabled: disable it outright.
 * No-op when MSI is globally off or dev has no HT MSI mapping at all.
 */
static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all)
{
	struct pci_dev *host_bridge;
	int pos;
	int found;

	if (!pci_msi_enabled())
		return;

	/* 0 = none, 1 = present-but-disabled, 2 = enabled */
	found = ht_check_msi_mapping(dev);

	if (found == 0)
		return;

	/* Locate device 00.0 of dev's domain as the host bridge */
	host_bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), 0,
						  PCI_DEVFN(0, 0));
	if (host_bridge == NULL) {
		pci_warn(dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n");
		return;
	}

	pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE);
	if (pos != 0) {
		/* Host bridge is an HT slave: enable mapping if disabled */
		if (found == 1) {
			if (all)
				ht_enable_msi_mapping(dev);
			else
				nv_ht_enable_msi_mapping(dev);
		}
		goto out;
	}

	/* Not an HT slave: a disabled mapping is left disabled */
	if (found == 1)
		goto out;

	/* Mapping was enabled but host bridge is not HT: turn it off */
	ht_disable_msi_mapping(dev);

out:
	pci_dev_put(host_bridge);
}
2935
/* ALi devices: apply the HT MSI quirk with unconditional enabling */
static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev)
{
	return __nv_msi_ht_cap_quirk(dev, 1);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
2942
/* NVIDIA devices: apply the HT MSI quirk with leaf-aware enabling */
static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
{
	return __nv_msi_ht_cap_quirk(dev, 0);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
2949
/* Flag devices where MSI and the INTx-disable bit interact badly */
static void quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
}
2954
/*
 * ATI variant: the MSI/INTx-disable bug applies only when the SBX00
 * SMBus controller's revision is in [0x30, 0x3B) (used here as a proxy
 * for the affected chipset revision).
 */
static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev)
{
	struct pci_dev *p;

	p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
			   NULL);
	if (!p)
		return;

	if ((p->revision < 0x3B) && (p->revision >= 0x30))
		dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
	pci_dev_put(p);
}
2973
/*
 * QCA/Attansic variant: only device revisions below 0x18 have the
 * MSI/INTx-disable bug.
 */
static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev)
{
	if (dev->revision < 0x18) {
		pci_info(dev, "set MSI_INTX_DISABLE_BUG flag\n");
		dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
	}
}
/* Broadcom Tigon3 parts: unconditional flag */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5780,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5780S,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5714,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5714S,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5715,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM,
			PCI_DEVICE_ID_TIGON3_5715S,
			quirk_msi_intx_disable_bug);

/* ATI parts: revision-gated via the SBX00 SMBus revision check */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393,
			quirk_msi_intx_disable_ati_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394,
			quirk_msi_intx_disable_ati_bug);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
			quirk_msi_intx_disable_bug);

/* Attansic/QCA parts: some unconditional, some revision-gated */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
			quirk_msi_intx_disable_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1,
			quirk_msi_intx_disable_qca_bug);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091,
			quirk_msi_intx_disable_qca_bug);
3041
3042
3043
3044
3045
3046
3047
3048
3049
3050
3051
/*
 * Amazon Annapurna Labs root port 0x0031: disable MSI/MSI-X on the
 * device by setting dev->no_msi.
 */
static void quirk_al_msi_disable(struct pci_dev *dev)
{
	dev->no_msi = 1;
	pci_warn(dev, "Disabling MSI/MSI-X\n");
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031,
			      PCI_CLASS_BRIDGE_PCI, 8, quirk_al_msi_disable);
3059 #endif
3060
3061
3062
3063
3064
3065
3066
3067
/* Mark the HINT 0x0020 bridge as a hotplug bridge for resource sizing */
static void quirk_hotplug_bridge(struct pci_dev *dev)
{
	dev->is_hotplug_bridge = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
3073
3074
3075
3076
3077
3078
3079
3080
3081
3082
3083
3084
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094
3095
3096
3097
3098
3099 #ifdef CONFIG_MMC_RICOH_MMC
/*
 * Ricoh RL5C476 CardBus: disable the proprietary MMC controller (config
 * byte 0xB7 bit 1) via the 0x8E/0x8D write-enable/target sequence, so
 * MMC cards are handled by the standard SDHCI controller instead.  Only
 * runs on function 0 (the register set is shared across functions).
 */
static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
{
	u8 write_enable;
	u8 write_target;
	u8 disable;

	if (PCI_FUNC(dev->devfn))
		return;

	/* Already disabled? Nothing to do. */
	pci_read_config_byte(dev, 0xB7, &disable);
	if (disable & 0x02)
		return;

	/* Save unlock registers, unlock 0xB7, set the disable bit, then
	   restore the unlock registers (sequence order matters) */
	pci_read_config_byte(dev, 0x8E, &write_enable);
	pci_write_config_byte(dev, 0x8E, 0xAA);
	pci_read_config_byte(dev, 0x8D, &write_target);
	pci_write_config_byte(dev, 0x8D, 0xB7);
	pci_write_config_byte(dev, 0xB7, disable | 0x02);
	pci_write_config_byte(dev, 0x8E, write_enable);
	pci_write_config_byte(dev, 0x8D, write_target);

	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via CardBus function)\n");
	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
3131
/*
 * Same idea as ricoh_mmc_fixup_rl5c476(), but for the R5C832 family
 * (reached via the FireWire function): disable the proprietary MMC
 * controller so the SDHCI controller takes over.  On R5CE822/R5CE823 the
 * controller base frequency is also raised first.
 *
 * NOTE(review): all register offsets/values (0xf9/0xfc/0xe1 unlock+clock
 * writes, disable bit 0xCB bit 1 keyed by 0xCA) are taken from the code
 * itself — order-sensitive, do not reshuffle.
 */
static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
{
	u8 write_enable;
	u8 disable;

	/* Only act on function 0 of the multifunction device */
	if (PCI_FUNC(dev->devfn))
		return;

	/* R5CE822/823: bump the MMC base clock before disabling */
	if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 ||
	    dev->device == PCI_DEVICE_ID_RICOH_R5CE823) {
		pci_write_config_byte(dev, 0xf9, 0xfc);
		pci_write_config_byte(dev, 0x150, 0x10);
		pci_write_config_byte(dev, 0xf9, 0x00);
		pci_write_config_byte(dev, 0xfc, 0x01);
		pci_write_config_byte(dev, 0xe1, 0x32);
		pci_write_config_byte(dev, 0xfc, 0x00);

		pci_notice(dev, "MMC controller base frequency changed to 50Mhz.\n");
	}

	/* Nothing to do if the MMC function is already disabled */
	pci_read_config_byte(dev, 0xCB, &disable);

	if (disable & 0x02)
		return;

	/* Unlock via 0xCA, set the disable bit, restore the key */
	pci_read_config_byte(dev, 0xCA, &write_enable);
	pci_write_config_byte(dev, 0xCA, 0x57);
	pci_write_config_byte(dev, 0xCB, disable | 0x02);
	pci_write_config_byte(dev, 0xCA, write_enable);

	pci_notice(dev, "proprietary Ricoh MMC controller disabled (via FireWire function)\n");
	pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n");

}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832);
3188 #endif
3189
#ifdef CONFIG_DMAR_TABLE
#define VTUNCERRMSK_REG	0x1ac
#define VTD_MSK_SPEC_ERRORS	(1 << 31)

/*
 * Set bit 31 of the VT-d uncorrectable-error mask register (config offset
 * 0x1ac) on the listed Intel devices, masking "spec errors".  Applied at
 * EARLY fixup time so the errors never surface during enumeration.
 */
static void vtd_mask_spec_errors(struct pci_dev *dev)
{
	u32 word;

	/* Read-modify-write: preserve any bits already masked */
	pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
	pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
#endif
3213
3214 static void fixup_ti816x_class(struct pci_dev *dev)
3215 {
3216 u32 class = dev->class;
3217
3218
3219 dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
3220 pci_info(dev, "PCI class overridden (%#08x -> %#08x)\n",
3221 class, dev->class);
3222 }
3223 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800,
3224 PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class);
3225
3226
3227
3228
3229
/*
 * Clamp the device's supported Max Payload Size.  pcie_mpss holds the
 * encoded MPS exponent; 1 corresponds to 256 bytes (128 << 1), matching
 * the function name.
 */
static void fixup_mpss_256(struct pci_dev *dev)
{
	dev->pcie_mpss = 1;	/* encoded: 256 bytes */
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SOLARFLARE,
			 PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256);
3240
3241
3242
3243
3244
3245
3246
3247
3248
3249 static void quirk_intel_mc_errata(struct pci_dev *dev)
3250 {
3251 int err;
3252 u16 rcc;
3253
3254 if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
3255 pcie_bus_config == PCIE_BUS_DEFAULT)
3256 return;
3257
3258
3259
3260
3261
3262
3263 err = pci_read_config_word(dev, 0x48, &rcc);
3264 if (err) {
3265 pci_err(dev, "Error attempting to read the read completion coalescing register\n");
3266 return;
3267 }
3268
3269 if (!(rcc & (1 << 10)))
3270 return;
3271
3272 rcc &= ~(1 << 10);
3273
3274 err = pci_write_config_word(dev, 0x48, rcc);
3275 if (err) {
3276 pci_err(dev, "Error attempting to write the read completion coalescing register\n");
3277 return;
3278 }
3279
3280 pr_info_once("Read completion coalescing disabled due to hardware erratum relating to 256B MPS\n");
3281 }
3282
/* Intel 25xx-family memory controller device IDs — presumably 5000-series MCHs */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata);

/* Intel 65xx-family memory controller device IDs — presumably 5400-series MCHs */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata);
3309
3310
3311
3312
3313
3314
/*
 * Intel NTB devices report the sizes of BARs 2 and 4 through config
 * registers 0xD0/0xD1 rather than the normal BAR sizing protocol; the
 * registers hold log2 of the window size, so fix the resource ends to
 * start + 2^val - 1.
 */
static void quirk_intel_ntb(struct pci_dev *dev)
{
	int rc;
	u8 val;

	/* BAR 2 size exponent */
	rc = pci_read_config_byte(dev, 0x00D0, &val);
	if (rc)
		return;

	dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1;

	/* BAR 4 size exponent */
	rc = pci_read_config_byte(dev, 0x00D1, &val);
	if (rc)
		return;

	dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb);
3334
3335
3336
3337
3338
3339
3340
3341
3342
3343
3344
3345
3346
/* Display-engine interrupt enable register in the GPU's BAR 0 MMIO space */
#define I915_DEIER_REG 0x4400c
/*
 * Some BIOSes leave the Intel integrated GPU's display-engine interrupts
 * enabled; clear DEIER so stray interrupts don't fire before the i915
 * driver takes over.  NOTE(review): the register offset is assumed valid
 * for all device IDs listed below — confirm against i915 register docs.
 */
static void disable_igfx_irq(struct pci_dev *dev)
{
	void __iomem *regs = pci_iomap(dev, 0, 0);
	if (regs == NULL) {
		pci_warn(dev, "igfx quirk: Can't iomap PCI device\n");
		return;
	}

	/* Only touch the register if the BIOS actually left it non-zero */
	if (readl(regs + I915_DEIER_REG) != 0) {
		pci_warn(dev, "BIOS left Intel GPU interrupts enabled; disabling\n");

		writel(0, regs + I915_DEIER_REG);
	}

	pci_iounmap(dev, regs);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
3371 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
3372
3373
3374
3375
3376
/*
 * Devices that do not need the usual delay after a D3hot->D0 power
 * transition; clearing d3_delay makes the PCI core skip it, speeding up
 * resume on these parts.
 */
static void quirk_remove_d3_delay(struct pci_dev *dev)
{
	dev->d3_delay = 0;
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay);
3407
3408
3409
3410
3411
3412
/*
 * Devices whose Command-register INTx Disable bit cannot be trusted;
 * broken_intx_masking makes the core (and VFIO) avoid relying on INTx
 * masking for these devices.
 */
static void quirk_broken_intx_masking(struct pci_dev *dev)
{
	dev->broken_intx_masking = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030,
			quirk_broken_intx_masking);
/* 1814:0601 and 1b7c:0004 — vendor names not identifiable from this file */
DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601,
			quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004,
			quirk_broken_intx_masking);

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169,
			quirk_broken_intx_masking);

/* Intel network controllers (0x1572..0x37d2 range) with the same issue */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, quirk_broken_intx_masking);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, quirk_broken_intx_masking);
3453
/* Mellanox device IDs with unconditionally broken INTx masking */
static u16 mellanox_broken_intx_devs[] = {
	PCI_DEVICE_ID_MELLANOX_HERMON_SDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_DDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_QDR,
	PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2,
	PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2,
	PCI_DEVICE_ID_MELLANOX_HERMON_EN,
	PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX2,
	PCI_DEVICE_ID_MELLANOX_CONNECTX3,
	PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO,
};

/* ConnectX-4 firmware minor-version window within which INTx masking works */
#define CONNECTX_4_CURR_MAX_MINOR 99
#define CONNECTX_4_INTX_SUPPORT_MINOR 14
3473
3474
3475
3476
3477
3478
3479
/*
 * Decide whether a Mellanox device has broken INTx masking.  Devices in
 * mellanox_broken_intx_devs are flagged unconditionally; ConnectX-4 and
 * ConnectX-4 LX are flagged only when their firmware is older than the
 * version that introduced working INTx masking.  The firmware version is
 * probed by mapping the first 8 bytes of BAR 0 (two big-endian words of
 * the initialization segment).
 */
static void mellanox_check_broken_intx_masking(struct pci_dev *pdev)
{
	__be32 __iomem *fw_ver;
	u16 fw_major;
	u16 fw_minor;
	u16 fw_subminor;
	u32 fw_maj_min;
	u32 fw_sub_min;
	int i;

	/* Known-broken devices: flag and bail */
	for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) {
		if (pdev->device == mellanox_broken_intx_devs[i]) {
			pdev->broken_intx_masking = 1;
			return;
		}
	}

	/* ConnectIB is explicitly exempt — presumably its masking works */
	if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB)
		return;

	/* Only ConnectX-4 / ConnectX-4 LX need the firmware-version probe */
	if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 &&
	    pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX)
		return;

	/* Need memory decode enabled to read the init segment */
	if (pci_enable_device_mem(pdev)) {
		pci_warn(pdev, "Can't enable device memory\n");
		return;
	}

	fw_ver = ioremap(pci_resource_start(pdev, 0), 4);
	if (!fw_ver) {
		pci_warn(pdev, "Can't map ConnectX-4 initialization segment\n");
		goto out;
	}

	/* First word: major/minor; second word: subminor (big-endian) */
	fw_maj_min = ioread32be(fw_ver);
	fw_sub_min = ioread32be(fw_ver + 1);
	fw_major = fw_maj_min & 0xffff;
	fw_minor = fw_maj_min >> 16;
	fw_subminor = fw_sub_min & 0xffff;
	/* Out-of-window minor version => INTx masking unsupported */
	if (fw_minor > CONNECTX_4_CURR_MAX_MINOR ||
	    fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) {
		pci_warn(pdev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. Please upgrade FW to %d.14.1100 and up for INTx support\n",
			 fw_major, fw_minor, fw_subminor, pdev->device ==
			 PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14);
		pdev->broken_intx_masking = 1;
	}

	iounmap(fw_ver);

out:
	pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
			mellanox_check_broken_intx_masking);
3541
/*
 * Mark devices that must not be reset via a secondary bus reset; the core
 * checks PCI_DEV_FLAGS_NO_BUS_RESET before attempting one.
 */
static void quirk_no_bus_reset(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
}

/* Atheros wireless parts — presumably misbehave after a bus reset */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset);
3566
3567 static void quirk_no_pm_reset(struct pci_dev *dev)
3568 {
3569
3570
3571
3572
3573 if (!pci_is_root_bus(dev->bus))
3574 dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET;
3575 }
3576
3577
3578
3579
3580
3581
3582
3583
3584
3585 DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
3586 PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset);
3587
3588
3589
3590
3591
3592
3593 static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev)
3594 {
3595 if (pdev->is_hotplug_bridge &&
3596 (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C ||
3597 pdev->revision <= 1))
3598 pdev->no_msi = 1;
3599 }
3600 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
3601 quirk_thunderbolt_hotplug_msi);
3602 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE,
3603 quirk_thunderbolt_hotplug_msi);
3604 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK,
3605 quirk_thunderbolt_hotplug_msi);
3606 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
3607 quirk_thunderbolt_hotplug_msi);
3608 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
3609 quirk_thunderbolt_hotplug_msi);
3610
#ifdef CONFIG_ACPI
/*
 * On Apple machines, cut power to the Thunderbolt controller at suspend
 * by invoking the firmware's SXIO/SXFP/SXLV ACPI methods found under the
 * controller's upstream bridge.  The method call order and the 300 ms
 * delay are part of the firmware-expected sequence — do not reorder.
 * Only acts on the PCIe upstream port of the controller.
 */
static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev)
{
	acpi_handle bridge, SXIO, SXFP, SXLV;

	if (!x86_apple_machine)
		return;
	if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)
		return;
	bridge = ACPI_HANDLE(&dev->dev);
	if (!bridge)
		return;

	/* All three methods must exist below the bridge, else skip */
	if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO))
	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP))
	    || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV)))
		return;
	pci_info(dev, "quirk: cutting power to Thunderbolt controller...\n");

	/* Firmware power-off sequence (values/ordering per the platform) */
	acpi_execute_simple_method(SXIO, NULL, 1);
	acpi_execute_simple_method(SXFP, NULL, 0);
	msleep(300);
	acpi_execute_simple_method(SXLV, NULL, 0);
	acpi_execute_simple_method(SXIO, NULL, 0);
	acpi_execute_simple_method(SXLV, NULL, 0);
}
DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
			       quirk_apple_poweroff_thunderbolt);
3663
3664
3665
3666
3667
3668
3669
3670
3671
3672
/*
 * On Apple machines, a Thunderbolt downstream port must wait at resume
 * for its sibling NHI (native host interface) device to finish resuming
 * and reestablish the PCI tunnels before the port itself is resumed.
 * The NHI is located as device 0 behind the sibling at slot 0 of the
 * same bus, and is validated by vendor/device/class before waiting.
 */
static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev)
{
	struct pci_dev *sibling = NULL;
	struct pci_dev *nhi = NULL;

	if (!x86_apple_machine)
		return;
	if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)
		return;

	/* Find the sibling downstream port that leads to the NHI */
	sibling = pci_get_slot(dev->bus, 0x0);
	if (sibling == dev)
		goto out;			/* we *are* slot 0 */
	if (!sibling || !sibling->subordinate)
		goto out;
	nhi = pci_get_slot(sibling->subordinate, 0x0);
	if (!nhi)
		goto out;
	/* Only wait for a genuine Intel Thunderbolt NHI */
	if (nhi->vendor != PCI_VENDOR_ID_INTEL
	    || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE &&
		nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C &&
		nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI &&
		nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI)
	    || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8)
		goto out;
	pci_info(dev, "quirk: waiting for Thunderbolt to reestablish PCI tunnels...\n");
	device_pm_wait_for_dev(&dev->dev, &nhi->dev);
out:
	/* Drop the references taken by pci_get_slot() */
	pci_dev_put(nhi);
	pci_dev_put(sibling);
}
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
			       quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
			       quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE,
			       quirk_apple_wait_for_thunderbolt);
DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL,
			       PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE,
			       quirk_apple_wait_for_thunderbolt);
3720 #endif
3721
3722
3723
3724
3725
3726
/*
 * Device-specific reset for the Intel 82599 SR-IOV virtual function:
 * simply issues an FLR.  With probe set, only report that the method is
 * available.  Always returns 0.
 */
static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
{
	if (probe)
		return 0;

	pcie_flr(dev);
	return 0;
}
3741
/* Ivy Bridge integrated-graphics MMIO register offsets (BAR 0) */
#define SOUTH_CHICKEN2		0xc2004
#define PCH_PP_STATUS		0xc7200
#define PCH_PP_CONTROL		0xc7204
#define MSG_CTL			0x45010
#define NSDE_PWR_STATE		0xd0100
#define IGD_OPERATION_TIMEOUT	10000     /* ms, panel-power poll limit */

/*
 * Device-specific reset for Ivy Bridge integrated graphics: force the
 * panel power sequencer off via PCH_PP_CONTROL, poll PCH_PP_STATUS until
 * the relevant status bits clear (up to IGD_OPERATION_TIMEOUT ms), then
 * write NSDE_PWR_STATE.  NOTE(review): the magic constants written to
 * MSG_CTL/SOUTH_CHICKEN2/NSDE_PWR_STATE come from the code itself;
 * confirm against Intel graphics documentation before changing.
 */
static int reset_ivb_igd(struct pci_dev *dev, int probe)
{
	void __iomem *mmio_base;
	unsigned long timeout;
	u32 val;

	if (probe)
		return 0;

	mmio_base = pci_iomap(dev, 0, 0);
	if (!mmio_base)
		return -ENOMEM;

	iowrite32(0x00000002, mmio_base + MSG_CTL);

	/* Unlock/force the panel power override, then clear the power-on bit */
	iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2);

	val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe;
	iowrite32(val, mmio_base + PCH_PP_CONTROL);

	/* Wait for the power sequencer to report idle */
	timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT);
	do {
		val = ioread32(mmio_base + PCH_PP_STATUS);
		if ((val & 0xb0000000) == 0)
			goto reset_complete;
		msleep(10);
	} while (time_before(jiffies, timeout));
	pci_warn(dev, "timeout during reset\n");

reset_complete:
	iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE);

	pci_iounmap(dev, mmio_base);
	return 0;
}
3790
3791
/*
 * Device-specific reset for Chelsio T4-family adapters (device IDs of the
 * form 0x4xxx): perform an FLR with extra care — bus mastering must be on
 * for the FLR to take effect, config state is saved/restored around it,
 * and MSI-X is temporarily enabled with all vectors masked so the reset
 * does not raise stray interrupts.
 */
static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
{
	u16 old_command;
	u16 msix_flags;

	/* Only T4+ parts (device ID 0x4xxx) use this method */
	if ((dev->device & 0xf000) != 0x4000)
		return -ENOTTY;

	/* Probe-only call: report availability */
	if (probe)
		return 0;

	/* FLR requires bus mastering; save the old Command register */
	pci_read_config_word(dev, PCI_COMMAND, &old_command);
	pci_write_config_word(dev, PCI_COMMAND,
			      old_command | PCI_COMMAND_MASTER);

	/* Save config space so it can be restored after the FLR */
	pci_save_state(dev);

	/*
	 * Temporarily enable MSI-X with all vectors masked, if it wasn't
	 * already enabled, so the reset can't generate interrupts.
	 */
	pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
	if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
		pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
				      msix_flags |
				      PCI_MSIX_FLAGS_ENABLE |
				      PCI_MSIX_FLAGS_MASKALL);

	pcie_flr(dev);

	/* Restore config space and the original Command register */
	pci_restore_state(dev);
	pci_write_config_word(dev, PCI_COMMAND, old_command);
	return 0;
}
3852
/* Device IDs used by the pci_dev_reset_methods table below */
#define PCI_DEVICE_ID_INTEL_82599_SFP_VF   0x10ed
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA      0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA     0x0166
3856
3857
3858
3859
3860
3861
3862
3863
3864
3865
3866
3867
3868
3869
/*
 * Device-specific reset for NVMe controllers that need an orderly
 * controller disable before FLR: clear CC.EN (and any shutdown request),
 * wait for CSTS.RDY to drop — bounded by CAP.TO, which the (TO+1)*HZ/2
 * arithmetic treats as 500 ms units per the NVMe spec — then issue the
 * FLR.  Applies only to NVMe-class devices with FLR support and a BAR 0.
 */
static int nvme_disable_and_flr(struct pci_dev *dev, int probe)
{
	void __iomem *bar;
	u16 cmd;
	u32 cfg;

	if (dev->class != PCI_CLASS_STORAGE_EXPRESS ||
	    !pcie_has_flr(dev) || !pci_resource_start(dev, 0))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Map just far enough of BAR 0 to reach the CC register */
	bar = pci_iomap(dev, 0, NVME_REG_CC + sizeof(cfg));
	if (!bar)
		return -ENOTTY;

	/* Memory decode must be on to touch the controller registers */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY);

	cfg = readl(bar + NVME_REG_CC);

	/* Controller enabled: disable it and wait for ready to clear */
	if (cfg & NVME_CC_ENABLE) {
		u32 cap = readl(bar + NVME_REG_CAP);
		unsigned long timeout;

		/* Clear any shutdown request along with the enable bit */
		cfg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE);

		writel(cfg, bar + NVME_REG_CC);

		/* CAP.TO is in 500 ms units; +1 guards against TO == 0 */
		timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;

		for (;;) {
			u32 status = readl(bar + NVME_REG_CSTS);

			/* Ready bit cleared: disable complete */
			if (!(status & NVME_CSTS_RDY))
				break;

			msleep(100);

			if (time_after(jiffies, timeout)) {
				pci_warn(dev, "Timeout waiting for NVMe ready status to clear after disable\n");
				break;
			}
		}
	}

	pci_iounmap(dev, bar);

	pcie_flr(dev);

	return 0;
}
3937
3938
3939
3940
3941
3942
3943
3944 static int delay_250ms_after_flr(struct pci_dev *dev, int probe)
3945 {
3946 if (!pcie_has_flr(dev))
3947 return -ENOTTY;
3948
3949 if (probe)
3950 return 0;
3951
3952 pcie_flr(dev);
3953
3954 msleep(250);
3955
3956 return 0;
3957 }
3958
/*
 * Table of device-specific reset methods, matched by vendor/device in
 * pci_dev_specific_reset(); PCI_ANY_ID acts as a wildcard and the table
 * is terminated by a zeroed entry.
 */
static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
		 reset_intel_82599_sfp_virtfn },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA,
		reset_ivb_igd },
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA,
		reset_ivb_igd },
	{ PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr },
	{ PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr },
	{ PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
		reset_chelsio_generic_dev },
	{ 0 }
};
3972
3973
3974
3975
3976
3977
3978 int pci_dev_specific_reset(struct pci_dev *dev, int probe)
3979 {
3980 const struct pci_dev_reset_methods *i;
3981
3982 for (i = pci_dev_reset_methods; i->reset; i++) {
3983 if ((i->vendor == dev->vendor ||
3984 i->vendor == (u16)PCI_ANY_ID) &&
3985 (i->device == dev->device ||
3986 i->device == (u16)PCI_ANY_ID))
3987 return i->reset(dev, probe);
3988 }
3989
3990 return -ENOTTY;
3991 }
3992
/*
 * Add a DMA alias to function 0 of the device's slot — presumably these
 * Ricoh multifunction parts issue DMA with function 0's requester ID, so
 * the IOMMU must map that ID too.  No-op when already function 0.
 */
static void quirk_dma_func0_alias(struct pci_dev *dev)
{
	if (PCI_FUNC(dev->devfn) != 0)
		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1);
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias);
4006
/*
 * Like quirk_dma_func0_alias(), but aliasing function 1: these devices
 * presumably DMA with function 1's requester ID.  No-op when already
 * function 1.
 */
static void quirk_dma_func1_alias(struct pci_dev *dev)
{
	if (PCI_FUNC(dev->devfn) != 1)
		pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
}

/* Marvell SATA controller family */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
			 quirk_dma_func1_alias);
/* HighPoint RocketRAID parts with the same behavior */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
			 quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
			 quirk_dma_func1_alias);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
			 PCI_DEVICE_ID_JMICRON_JMB388_ESD,
			 quirk_dma_func1_alias);

/* 1c28:0122 — vendor not identifiable from this file */
DECLARE_PCI_FIXUP_HEADER(0x1c28,
			 0x0122,
			 quirk_dma_func1_alias);
4063
4064
4065
4066
4067
4068
4069
4070
4071
4072
4073
4074
4075
4076
4077
4078
/*
 * Adaptec 0x0285 controllers, matched by subsystem ID, DMA with a fixed
 * requester devfn; driver_data carries the devfn to alias.
 */
static const struct pci_device_id fixed_dma_alias_tbl[] = {
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
			 PCI_VENDOR_ID_ADAPTEC2, 0x02bb),
	  .driver_data = PCI_DEVFN(1, 0) },
	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285,
			 PCI_VENDOR_ID_ADAPTEC2, 0x02bc),
	  .driver_data = PCI_DEVFN(1, 0) },
	{ 0 }
};

/* Add the table-specified fixed DMA alias when the device matches */
static void quirk_fixed_dma_alias(struct pci_dev *dev)
{
	const struct pci_device_id *id;

	id = pci_match_id(fixed_dma_alias_tbl, dev);
	if (id)
		pci_add_dma_alias(dev, id->driver_data, 1);
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias);
4099
4100
4101
4102
4103
4104
4105
4106
4107
4108
/*
 * For a conventional-PCI bridge sitting directly below a PCIe port (but
 * not a PCIe-to-PCI bridge port), flag it so DMA from devices behind it
 * is treated as aliased to the bridge itself — presumably because such
 * bridges forward transactions with their own requester ID.
 */
static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev)
{
	if (!pci_is_root_bus(pdev->bus) &&
	    pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) &&
	    pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE)
		pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
			 quirk_use_pcie_bridge_dma_alias);

DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);

DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);

DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias);

DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
4128
4129
4130
4131
4132
4133
4134
/*
 * Intel MIC x200 devices issue DMA with several fixed requester devfns;
 * alias them all so IOMMU mappings cover every ID the hardware may use.
 */
static void quirk_mic_x200_dma_alias(struct pci_dev *pdev)
{
	pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0), 1);
	pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0), 1);
	pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3), 1);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias);
4143
4144
4145
4146
4147
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158 static void quirk_pex_vca_alias(struct pci_dev *pdev)
4159 {
4160 const unsigned int num_pci_slots = 0x20;
4161 unsigned int slot;
4162
4163 for (slot = 0; slot < num_pci_slots; slot++)
4164 pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0), 5);
4165 }
4166 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias);
4167 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias);
4168 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2956, quirk_pex_vca_alias);
4169 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2958, quirk_pex_vca_alias);
4170 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2959, quirk_pex_vca_alias);
4171 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x295A, quirk_pex_vca_alias);
4172
4173
4174
4175
4176
4177
/*
 * Cavium ThunderX2 PCIe root ports (carrying Broadcom IDs): set
 * PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT — presumably so IOMMU/DMA code treats
 * the bridge as the translation root; confirm against the flag's users.
 */
static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev)
{
	pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000,
			 quirk_bridge_cavm_thrx2_pcie_root);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084,
			 quirk_bridge_cavm_thrx2_pcie_root);
4186
4187
4188
4189
4190
4191 static void quirk_tw686x_class(struct pci_dev *pdev)
4192 {
4193 u32 class = pdev->class;
4194
4195
4196 pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01;
4197 pci_info(pdev, "TW686x PCI class overridden (%#08x -> %#08x)\n",
4198 class, pdev->class);
4199 }
4200 DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8,
4201 quirk_tw686x_class);
4202 DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8,
4203 quirk_tw686x_class);
4204 DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8,
4205 quirk_tw686x_class);
4206 DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8,
4207 quirk_tw686x_class);
4208
4209
4210
4211
4212
4213
/*
 * Disable use of Relaxed Ordering toward devices (root ports) with a
 * PCIe completion-handling erratum, per the pci_info message below.
 */
static void quirk_relaxedordering_disable(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING;
	pci_info(dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n");
}

/* Intel root-port device IDs 0x6f01..0x6f0e — presumably Xeon E5 v4 family */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
/* Intel root-port device IDs 0x2f01..0x2f05 with the same erratum */
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8,
			      quirk_relaxedordering_disable);
4263 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8,
4264 quirk_relaxedordering_disable);
4265 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8,
4266 quirk_relaxedordering_disable);
4267 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8,
4268 quirk_relaxedordering_disable);
4269 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8,
4270 quirk_relaxedordering_disable);
4271 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8,
4272 quirk_relaxedordering_disable);
4273 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8,
4274 quirk_relaxedordering_disable);
4275 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8,
4276 quirk_relaxedordering_disable);
4277 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8,
4278 quirk_relaxedordering_disable);
4279 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8,
4280 quirk_relaxedordering_disable);
4281
4282
4283
4284
4285
4286
4287
4288
4289
4290
4291 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8,
4292 quirk_relaxedordering_disable);
4293 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8,
4294 quirk_relaxedordering_disable);
4295 DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8,
4296 quirk_relaxedordering_disable);
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
4307
4308
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
/*
 * Clear the Relaxed Ordering and No Snoop enables on the PCIe Root Port
 * upstream of @pdev so the root complex never receives such TLPs from
 * this device (works around a PCIe Completion erratum).
 */
static void quirk_disable_root_port_attributes(struct pci_dev *pdev)
{
	struct pci_dev *root_port = pci_find_pcie_root_port(pdev);

	/* No root port found (e.g. VM passthrough): we can only warn */
	if (!root_port) {
		pci_warn(pdev, "PCIe Completion erratum may cause device errors\n");
		return;
	}

	pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n",
		 dev_name(&pdev->dev));
	pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL,
					   PCI_EXP_DEVCTL_RELAX_EN |
					   PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
}
4336
4337
4338
4339
4340
/*
 * Chelsio T5-based adapters need quirk_disable_root_port_attributes()
 * applied to their upstream root port.
 */
static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
{
	/*
	 * The fixup is registered for every Chelsio device (PCI_ANY_ID);
	 * restrict it here to T5 parts, whose device IDs have the form
	 * 0x54xx.  Other Chelsio families are unaffected.
	 */
	if ((pdev->device & 0xff00) == 0x5400)
		quirk_disable_root_port_attributes(pdev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
			 quirk_chelsio_T5_disable_root_port_attributes);
4354
4355
4356
4357
4358
4359
4360
4361
4362
4363
4364
4365
4366 static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
4367 {
4368 if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
4369 return 1;
4370 return 0;
4371 }
4372
4373
4374
4375
4376
4377
/*
 * Zhaoxin Root/Downstream Ports in the listed device-ID ranges are
 * treated as providing the isolation of ACS Source Validation, Request
 * Redirect, Completion Redirect and Upstream Forwarding even without an
 * ACS capability.  NOTE(review): the isolation claim comes from the
 * quirk's presence in the table — confirm with vendor documentation.
 */
static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
{
	/* Only Root Ports and Switch Downstream Ports are covered */
	if (!pci_is_pcie(dev) ||
	    ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
	     (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
		return -ENOTTY;

	switch (dev->device) {
	case 0x0710 ... 0x071e:
	case 0x0721:
	case 0x0723 ... 0x0732:
		return pci_acs_ctrl_enabled(acs_flags,
			PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
	}

	/* Other device IDs: report no ACS controls enabled (false == 0) */
	return false;
}
4395
4396
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415
4416
4417
4418
4419
4420
4421 static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
4422 {
4423 #ifdef CONFIG_ACPI
4424 struct acpi_table_header *header = NULL;
4425 acpi_status status;
4426
4427
4428 if (!dev->multifunction || !pci_is_root_bus(dev->bus))
4429 return -ENODEV;
4430
4431
4432 status = acpi_get_table("IVRS", 0, &header);
4433 if (ACPI_FAILURE(status))
4434 return -ENODEV;
4435
4436
4437 acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
4438
4439 return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
4440 #else
4441 return -ENODEV;
4442 #endif
4443 }
4444
4445 static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
4446 {
4447 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4448 return false;
4449
4450 switch (dev->device) {
4451
4452
4453
4454
4455 case 0xa000 ... 0xa7ff:
4456 case 0xaf84:
4457 case 0xb884:
4458 return true;
4459 default:
4460 return false;
4461 }
4462 }
4463
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
	if (!pci_quirk_cavium_acs_match(dev))
		return -ENOTTY;

	/*
	 * Matched Cavium root ports are treated as providing the
	 * isolation of ACS Source Validation, Request Redirect,
	 * Completion Redirect and Upstream Forwarding, so report those
	 * controls as enabled.
	 */
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4480
static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
{
	/*
	 * X-Gene Root Ports matched by device ID in the quirk table are
	 * treated as providing SV/RR/CR/UF-equivalent isolation even
	 * though they expose no ACS capability.
	 */
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4491
4492
4493
4494
4495
4496
4497
/*
 * Root port device IDs of Intel PCH generations whose root ports lack
 * an ACS capability but can be configured for equivalent isolation (see
 * pci_quirk_enable_intel_pch_acs()).  Group labels below are presumed
 * platform names — confirm against Intel datasheets.
 */
static const u16 pci_quirk_intel_pch_acs_ids[] = {
	/* Ibexpeak PCH */
	0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49,
	0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51,
	/* Cougarpoint PCH */
	0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17,
	0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f,
	/* Pantherpoint PCH */
	0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17,
	0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f,
	/* Lynxpoint PCH */
	0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17,
	0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f,
	/* Lynxpoint-LP PCH */
	0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17,
	0x9c18, 0x9c19, 0x9c1a, 0x9c1b,
	/* Wildcat PCH */
	0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97,
	0x9c98, 0x9c99, 0x9c9a, 0x9c9b,
	/* Patsburg (X79) PCH */
	0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e,
	/* Wellsburg (X99) PCH */
	0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17,
	0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e,
	/* Lynx Point (9 series) PCH */
	0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e,
};
4525
4526 static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
4527 {
4528 int i;
4529
4530
4531 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4532 return false;
4533
4534 for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++)
4535 if (pci_quirk_intel_pch_acs_ids[i] == dev->device)
4536 return true;
4537
4538 return false;
4539 }
4540
static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
	if (!pci_quirk_intel_pch_acs_match(dev))
		return -ENOTTY;

	/*
	 * PCI_DEV_FLAGS_ACS_ENABLED_QUIRK is set by
	 * pci_quirk_enable_intel_pch_acs() once the LPC/MPC workaround
	 * has been applied; only then does the port provide the
	 * SV/RR/CR/UF-equivalent isolation.
	 */
	if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
		return pci_acs_ctrl_enabled(acs_flags,
			PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);

	/* Workaround not applied: report no controls enabled */
	return pci_acs_ctrl_enabled(acs_flags, 0);
}
4552
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562
/*
 * Root ports matched by the QCOM/HXT entries in the quirk table are
 * treated as providing SV/RR/CR/UF-equivalent isolation without an ACS
 * capability.  NOTE(review): isolation claim inherited from the quirk
 * table — confirm with SoC documentation.
 */
static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
{
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4568
4569 static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
4570 {
4571 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4572 return -ENOTTY;
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582 acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
4583
4584 return acs_flags ? 0 : 1;
4585 }
4586
4587
4588
4589
4590
4591
4592
4593
4594
4595
4596
4597
4598
4599
4600
4601
4602
4603
4604
4605
4606
4607
4608
4609
4610
4611
4612
4613
4614
4615
4616
4617
4618
4619
4620
4621
4622
4623
4624
4625
4626
4627
4628
4629
4630
4631
4632 static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
4633 {
4634 if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
4635 return false;
4636
4637 switch (dev->device) {
4638 case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a:
4639 case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee:
4640 case 0x9d10 ... 0x9d1b:
4641 return true;
4642 }
4643
4644 return false;
4645 }
4646
/* SPT PCH keeps its writable ACS controls in a dword at CAP + 4 */
#define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4)

static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
	int pos;
	u32 cap, ctrl;

	if (!pci_quirk_intel_spt_pch_acs_match(dev))
		return -ENOTTY;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return -ENOTTY;

	/* Only evaluate controls the capability advertises (plus EC) */
	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
	acs_flags &= (cap | PCI_ACS_EC);

	/* Read the non-standard control dword instead of PCI_ACS_CTRL */
	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);

	return pci_acs_ctrl_enabled(acs_flags, ctrl);
}
4669
static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
{
	/*
	 * Multifunction endpoints matched in the quirk table have no ACS
	 * capability but are treated as not routing transactions between
	 * their functions, which is the isolation ACS SV/TB/RR/CR/UF/DT
	 * would provide — so report all of those controls as enabled.
	 */
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
		PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
}
4685
static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
{
	/*
	 * The matched Broadcom device (0xD714 in the quirk table) is
	 * treated as providing SV/RR/CR/UF-equivalent isolation without
	 * an ACS capability.
	 */
	return pci_acs_ctrl_enabled(acs_flags,
		PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
4697
/*
 * Device-specific ACS quirk table, scanned in order by
 * pci_dev_specific_acs_enabled().  A handler returning a negative value
 * means "not my device — keep scanning".  Group labels are presumed
 * device-family names — confirm against vendor documentation.
 */
static const struct pci_dev_acs_enabled {
	u16 vendor;
	u16 device;
	int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
} pci_dev_acs_enabled[] = {
	{ PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
	/* 82580 */
	{ PCI_VENDOR_ID_INTEL, 0x1509, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150E, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1510, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1511, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1516, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1527, pci_quirk_mf_endpoint_acs },
	/* 82576 */
	{ PCI_VENDOR_ID_INTEL, 0x10C9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E6, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10E8, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150A, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x150D, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1518, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1526, pci_quirk_mf_endpoint_acs },
	/* 82575 */
	{ PCI_VENDOR_ID_INTEL, 0x10A7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10A9, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10D6, pci_quirk_mf_endpoint_acs },
	/* I350 */
	{ PCI_VENDOR_ID_INTEL, 0x1521, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1522, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1523, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1524, pci_quirk_mf_endpoint_acs },
	/* 82571 */
	{ PCI_VENDOR_ID_INTEL, 0x105E, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs },
	/* I219 */
	{ PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs },
	/* QCOM QDF2xxx root ports */
	{ PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs },
	{ PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs },
	/* HXT SD4800 root ports */
	{ PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs },
	/* Intel PCH root ports */
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs },
	{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
	{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
	/* Cavium ThunderX */
	{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
	/* APM X-Gene */
	{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
	/* Ampere Computing */
	{ PCI_VENDOR_ID_AMPERE, 0xE005, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE006, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE007, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE008, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE009, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs },
	{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
	/* Amazon Annapurna Labs */
	{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
	/* Zhaoxin multi-function devices */
	{ PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
	{ PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
	/* Zhaoxin Root/Downstream Ports */
	{ PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
	{ 0 }
};
4802
4803
4804
4805
4806
4807
4808
4809
4810
4811
4812
4813
/*
 * pci_dev_specific_acs_enabled - check for device-specific ACS quirks
 * @dev: PCI device to check
 * @acs_flags: set of PCI_ACS_* controls the caller requires
 *
 * Returns 1 when all requested controls are effectively enabled, 0 when
 * they are not, and -ENOTTY when no quirk in the table claims @dev.
 */
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
	const struct pci_dev_acs_enabled *i;
	int ret;

	/*
	 * Entries are tried in table order; PCI_ANY_ID wildcards match
	 * any vendor/device.  A negative return from a handler means
	 * "not my device", so the scan continues with the next entry —
	 * allowing multiple wildcard entries for the same vendor.
	 */
	for (i = pci_dev_acs_enabled; i->acs_enabled; i++) {
		if ((i->vendor == dev->vendor ||
		     i->vendor == (u16)PCI_ANY_ID) &&
		    (i->device == dev->device ||
		     i->device == (u16)PCI_ANY_ID)) {
			ret = i->acs_enabled(dev, acs_flags);
			if (ret >= 0)
				return ret;
		}
	}

	return -ENOTTY;
}
4838
4839
/* Root Complex Base Address register in the LPC bridge's config space */
#define INTEL_LPC_RCBA_REG 0xf0
/* 31:14 RCBA address */
#define INTEL_LPC_RCBA_MASK 0xffffc000
/* RCBA Enable */
#define INTEL_LPC_RCBA_ENABLE (1 << 0)

/* Backbone Scratch Pad Register */
#define INTEL_BSPR_REG 0x1104
/* Backbone Peer Non-Posted Disable */
#define INTEL_BSPR_REG_BPNPD (1 << 8)
/* Backbone Peer Posted Disable */
#define INTEL_BSPR_REG_BPPD (1 << 9)

/* Upstream Peer Decode Configuration Register */
#define INTEL_UPDCR_REG 0x1014
/* 5:0 Peer Decode Enable bits */
#define INTEL_UPDCR_REG_MASK 0x3f

static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev)
{
	u32 rcba, bspr, updcr;
	void __iomem *rcba_mem;

	/*
	 * Read the RCBA register from the LPC bridge (device 31,
	 * function 0) on the same bus; it points at the chipset register
	 * block containing BSPR and UPDCR.
	 */
	pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0),
				  INTEL_LPC_RCBA_REG, &rcba);
	if (!(rcba & INTEL_LPC_RCBA_ENABLE))
		return -EINVAL;

	rcba_mem = ioremap_nocache(rcba & INTEL_LPC_RCBA_MASK,
				   PAGE_ALIGN(INTEL_UPDCR_REG));
	if (!rcba_mem)
		return -ENOMEM;

	/*
	 * If both peer-decode paths are already disabled in BSPR there
	 * is nothing to do.  Otherwise clear the UPDCR peer decode
	 * enables so upstream requests cannot be routed peer-to-peer —
	 * the isolation ACS request/completion redirect would provide.
	 */
	bspr = readl(rcba_mem + INTEL_BSPR_REG);
	bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD;
	if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) {
		updcr = readl(rcba_mem + INTEL_UPDCR_REG);
		if (updcr & INTEL_UPDCR_REG_MASK) {
			pci_info(dev, "Disabling UPDCR peer decodes\n");
			updcr &= ~INTEL_UPDCR_REG_MASK;
			writel(updcr, rcba_mem + INTEL_UPDCR_REG);
		}
	}

	iounmap(rcba_mem);
	return 0;
}
4899
4900
4901 #define INTEL_MPC_REG 0xd8
4902
4903 #define INTEL_MPC_REG_IRBNCE (1 << 26)
4904
4905 static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
4906 {
4907 u32 mpc;
4908
4909
4910
4911
4912
4913
4914
4915 pci_read_config_dword(dev, INTEL_MPC_REG, &mpc);
4916 if (!(mpc & INTEL_MPC_REG_IRBNCE)) {
4917 pci_info(dev, "Enabling MPC IRBNCE\n");
4918 mpc |= INTEL_MPC_REG_IRBNCE;
4919 pci_write_config_word(dev, INTEL_MPC_REG, mpc);
4920 }
4921 }
4922
static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
{
	if (!pci_quirk_intel_pch_acs_match(dev))
		return -ENOTTY;

	/*
	 * Disabling LPC peer decodes is the essential step; if it fails,
	 * warn and return success WITHOUT setting the flag, so
	 * pci_quirk_intel_pch_acs() keeps treating the port as
	 * unisolated.
	 */
	if (pci_quirk_enable_intel_lpc_acs(dev)) {
		pci_warn(dev, "Failed to enable Intel PCH ACS quirk\n");
		return 0;
	}

	pci_quirk_enable_intel_rp_mpc_acs(dev);

	/* Tell pci_quirk_intel_pch_acs() the workaround is in place */
	dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK;

	pci_info(dev, "Intel PCH root port ACS workaround enabled\n");

	return 0;
}
4941
4942 static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
4943 {
4944 int pos;
4945 u32 cap, ctrl;
4946
4947 if (!pci_quirk_intel_spt_pch_acs_match(dev))
4948 return -ENOTTY;
4949
4950 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
4951 if (!pos)
4952 return -ENOTTY;
4953
4954 pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
4955 pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
4956
4957 ctrl |= (cap & PCI_ACS_SV);
4958 ctrl |= (cap & PCI_ACS_RR);
4959 ctrl |= (cap & PCI_ACS_CR);
4960 ctrl |= (cap & PCI_ACS_UF);
4961
4962 pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
4963
4964 pci_info(dev, "Intel SPT PCH root port ACS workaround enabled\n");
4965
4966 return 0;
4967 }
4968
static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev)
{
	int pos;
	u32 cap, ctrl;

	if (!pci_quirk_intel_spt_pch_acs_match(dev))
		return -ENOTTY;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);

	/* Clear only the redirect-type controls: RR, CR and EC */
	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);

	pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);

	pci_info(dev, "Intel SPT PCH root port workaround: disabled ACS redirect\n");

	return 0;
}
4992
/*
 * Quirks that actively enable (or selectively disable) ACS-equivalent
 * functionality on devices that support it without advertising an ACS
 * capability.  Matching uses the same vendor/device + PCI_ANY_ID rules
 * as pci_dev_acs_enabled[].
 */
static const struct pci_dev_acs_ops {
	u16 vendor;
	u16 device;
	int (*enable_acs)(struct pci_dev *dev);
	int (*disable_acs_redir)(struct pci_dev *dev);
} pci_dev_acs_ops[] = {
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
	    .enable_acs = pci_quirk_enable_intel_pch_acs,
	},
	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
	    .enable_acs = pci_quirk_enable_intel_spt_pch_acs,
	    .disable_acs_redir = pci_quirk_disable_intel_spt_pch_acs_redir,
	},
};
5007
5008 int pci_dev_specific_enable_acs(struct pci_dev *dev)
5009 {
5010 const struct pci_dev_acs_ops *p;
5011 int i, ret;
5012
5013 for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
5014 p = &pci_dev_acs_ops[i];
5015 if ((p->vendor == dev->vendor ||
5016 p->vendor == (u16)PCI_ANY_ID) &&
5017 (p->device == dev->device ||
5018 p->device == (u16)PCI_ANY_ID) &&
5019 p->enable_acs) {
5020 ret = p->enable_acs(dev);
5021 if (ret >= 0)
5022 return ret;
5023 }
5024 }
5025
5026 return -ENOTTY;
5027 }
5028
5029 int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
5030 {
5031 const struct pci_dev_acs_ops *p;
5032 int i, ret;
5033
5034 for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) {
5035 p = &pci_dev_acs_ops[i];
5036 if ((p->vendor == dev->vendor ||
5037 p->vendor == (u16)PCI_ANY_ID) &&
5038 (p->device == dev->device ||
5039 p->device == (u16)PCI_ANY_ID) &&
5040 p->disable_acs_redir) {
5041 ret = p->disable_acs_redir(dev);
5042 if (ret >= 0)
5043 return ret;
5044 }
5045 }
5046
5047 return -ENOTTY;
5048 }
5049
5050
5051
5052
5053
5054
5055
5056
/*
 * Intel QAT VF (device 0x443): the device has a PCIe capability that is
 * not discoverable through the normal capability chain walk.  Locate it
 * at its fixed offset (0x50), fill in the pci_dev PCIe fields the core
 * would normally derive, and register a saved-state area so
 * suspend/resume can save/restore the capability.
 */
static void quirk_intel_qat_vf_cap(struct pci_dev *pdev)
{
	int pos, i = 0;
	u8 next_cap;
	u16 reg16, *cap;
	struct pci_cap_saved_state *state;

	/* Nothing to do if the PCIe capability was already found */
	if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP))
		return;

	/* The broken chain is expected to end at the MSI capability */
	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
	if (!pos)
		return;

	/*
	 * Only apply the fixup when MSI really is the last capability in
	 * the chain (next pointer is zero).
	 */
	pci_read_config_byte(pdev, pos + 1, &next_cap);
	if (next_cap)
		return;

	/*
	 * The PCIe capability lives at fixed offset 0x50 with a zero
	 * next pointer; verify the capability ID before trusting it.
	 */
	pos = 0x50;
	pci_read_config_word(pdev, pos, &reg16);
	if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) {
		u32 status;
#ifndef PCI_EXP_SAVE_REGS
#define PCI_EXP_SAVE_REGS 7
#endif
		int size = PCI_EXP_SAVE_REGS * sizeof(u16);

		/* Populate the fields pci_scan would have filled in */
		pdev->pcie_cap = pos;
		pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
		pdev->pcie_flags_reg = reg16;
		pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
		pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;

		/* Probe for extended config space; fall back to 256 bytes */
		pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE;
		if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) !=
		    PCIBIOS_SUCCESSFUL || (status == 0xffffffff))
			pdev->cfg_size = PCI_CFG_SPACE_SIZE;

		if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP))
			return;

		/* Allocate and seed a saved-state entry for suspend/resume */
		state = kzalloc(sizeof(*state) + size, GFP_KERNEL);
		if (!state)
			return;

		state->cap.cap_nr = PCI_CAP_ID_EXP;
		state->cap.cap_extended = 0;
		state->cap.size = size;
		cap = (u16 *)&state->cap.data[0];
		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_RTCTL, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]);
		pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]);
		hlist_add_head(&state->next, &pdev->saved_cap_space);
	}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
5132
5133
/*
 * FLR is unreliable on these Intel devices (0x1502/0x1503); mark them
 * so the reset code skips Function-Level Reset.
 */
static void quirk_intel_no_flr(struct pci_dev *dev)
{
	dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_intel_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_intel_no_flr);
5140
/*
 * These ServerWorks bridges cannot handle PCIe Extended Tags.  Disable
 * them for the whole hierarchy under the host bridge, since any device
 * there could send an extended-tag TLP through the bridge.
 */
static void quirk_no_ext_tags(struct pci_dev *pdev)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus);

	if (!bridge)
		return;

	bridge->no_ext_tags = 1;
	pci_info(pdev, "disabling Extended Tags (this device can't handle them)\n");

	/* Re-run tag configuration on every device already enumerated */
	pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
5160
#ifdef CONFIG_PCI_ATS
/*
 * Some AMD/ATI GPUs ship in configurations with a broken ATS
 * implementation; using ATS there causes IOMMU trouble, so hide the
 * capability by zeroing the cached ats_cap offset.
 */
static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
{
	/* Device 0x7340: only revision 0xc5 parts are affected */
	if (pdev->device == 0x7340 && pdev->revision != 0xc5)
		return;

	pci_info(pdev, "disabling ATS\n");
	pdev->ats_cap = 0;
}

/* NOTE(review): GPU family attributions for the IDs below not visible here */
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
#endif /* CONFIG_PCI_ATS */
5183
5184
/* Freescale PCIe root ports do not support MSI from downstream devices */
static void quirk_fsl_no_msi(struct pci_dev *pdev)
{
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
		pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
5191
5192
5193
5194
5195
5196
5197
5198
5199
/*
 * Create a runtime-PM device link from a consumer function to a
 * supplier function in the same slot (e.g. a GPU's HDA/USB companion
 * functions depending on the GPU), so the supplier is powered whenever
 * the consumer is.
 *
 * @pdev:        candidate consumer device (fixup runs on every match)
 * @consumer:    function number the consumer must have
 * @supplier:    function number of the supplier in the same slot
 * @class:       expected supplier class (after @class_shift)
 * @class_shift: right-shift applied to the supplier's class before compare
 */
static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer,
				   unsigned int supplier, unsigned int class,
				   unsigned int class_shift)
{
	struct pci_dev *supplier_pdev;

	if (PCI_FUNC(pdev->devfn) != consumer)
		return;

	supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
				pdev->bus->number,
				PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier));
	/* pci_dev_put(NULL) is safe, so one cleanup path covers both checks */
	if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) {
		pci_dev_put(supplier_pdev);
		return;
	}

	if (device_link_add(&pdev->dev, &supplier_pdev->dev,
			    DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME))
		pci_info(pdev, "D0 power state depends on %s\n",
			 pci_name(supplier_pdev));
	else
		pci_err(pdev, "Cannot enforce power dependency on %s\n",
			pci_name(supplier_pdev));

	/* Let the consumer runtime-suspend so the supplier can too */
	pm_runtime_allow(&pdev->dev);
	pci_dev_put(supplier_pdev);
}
5228
5229
5230
5231
5232
/*
 * GPU HD-audio controllers are function 1 of the GPU (function 0);
 * link them so the GPU powers up whenever the audio function does.
 */
static void quirk_gpu_hda(struct pci_dev *hda)
{
	pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
5243
5244
5245
5246
5247
/*
 * NVIDIA GPU USB controllers are function 2 of the GPU (function 0);
 * link them so the GPU powers up whenever the USB function does.
 */
static void quirk_gpu_usb(struct pci_dev *usb)
{
	pci_create_device_link(usb, 2, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb);
5254
5255
5256
5257
5258
5259
5260
/* The UCSI controller enumerates with an "unknown serial bus" class code */
#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80
/*
 * NVIDIA GPU USB Type-C UCSI controllers are function 3 of the GPU
 * (function 0); link them so the GPU powers up whenever UCSI does.
 */
static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi)
{
	pci_create_device_link(ucsi, 3, 0, PCI_BASE_CLASS_DISPLAY, 16);
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			      PCI_CLASS_SERIAL_UNKNOWN, 8,
			      quirk_gpu_usb_typec_ucsi);
5269
5270
5271
5272
5273
/*
 * Some NVIDIA GPUs hide their HDA function unless bit 25 of config
 * register 0x488 is set.  Enable it so the audio function can be
 * enumerated.
 */
static void quirk_nvidia_hda(struct pci_dev *gpu)
{
	u8 hdr_type;
	u32 val;

	/* Devices older than GeForce 320M do not have this register */
	if (gpu->device < PCI_DEVICE_ID_NVIDIA_GEFORCE_320M)
		return;

	/* Bit 25 at offset 0x488 gates the HDA controller */
	pci_read_config_dword(gpu, 0x488, &val);
	if (val & BIT(25))
		return;

	pci_info(gpu, "Enabling HDA controller\n");
	pci_write_config_dword(gpu, 0x488, val | BIT(25));

	/*
	 * The GPU becomes multifunction once HDA appears; refresh the
	 * cached header type so function 1 gets scanned.
	 */
	pci_read_config_byte(gpu, PCI_HEADER_TYPE, &hdr_type);
	gpu->multifunction = !!(hdr_type & 0x80);
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
			       PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
				     PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda);
5299
5300
5301
5302
5303
5304
5305
5306
5307
5308
5309
5310
5311
5312
5313
5314
5315
5316
5317
5318
5319
5320
5321
5322
5323
5324
/*
 * Read the Vendor ID of the device at @devfn on @bus, working around an
 * IDT switch erratum triggered when ACS Source Validation is enabled on
 * the parent bridge: SV is disabled for the duration of the probe and
 * restored afterwards.  Returns the result of the generic vendor-ID
 * read (non-zero when a device was found).
 */
int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout)
{
	int pos;
	u16 ctrl = 0;
	bool found;
	struct pci_dev *bridge = bus->self;

	pos = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ACS);

	/* Temporarily disable ACS Source Validation, if enabled */
	if (pos) {
		pci_read_config_word(bridge, pos + PCI_ACS_CTRL, &ctrl);
		if (ctrl & PCI_ACS_SV)
			pci_write_config_word(bridge, pos + PCI_ACS_CTRL,
					      ctrl & ~PCI_ACS_SV);
	}

	found = pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout);

	/*
	 * Config write to the (read-only) Vendor ID of the found
	 * device.  NOTE(review): harmless for the device itself;
	 * presumably required by the erratum workaround — confirm
	 * against the IDT errata documentation.
	 */
	if (found)
		pci_bus_write_config_word(bus, devfn, PCI_VENDOR_ID, 0);

	/* Restore the original ACS control value */
	if (ctrl & PCI_ACS_SV)
		pci_write_config_word(bridge, pos + PCI_ACS_CTRL, ctrl);

	return found;
}
5354
5355
5356
5357
5358
5359
5360
5361
/*
 * Microsemi Switchtec NTB endpoints forward DMA TLPs on behalf of devices
 * in peer partitions using "proxy" requester IDs.  Walk each peer
 * partition's requester-ID table in the NTB control registers and register
 * every entry as a DMA alias of this device, so the IOMMU accepts those
 * proxied transactions.
 */
static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev)
{
	void __iomem *mmio;
	struct ntb_info_regs __iomem *mmio_ntb;
	struct ntb_ctrl_regs __iomem *mmio_ctrl;
	u64 partition_map;
	u8 partition;
	int pp;

	if (pci_enable_device(pdev)) {
		pci_err(pdev, "Cannot enable Switchtec device\n");
		return;
	}

	mmio = pci_iomap(pdev, 0, 0);	/* maxlen 0: map all of BAR 0 */
	if (mmio == NULL) {
		pci_disable_device(pdev);
		pci_err(pdev, "Cannot iomap Switchtec device\n");
		return;
	}

	pci_info(pdev, "Setting Switchtec proxy ID aliases\n");

	mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET;
	mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET;

	partition = ioread8(&mmio_ntb->partition_id);

	/*
	 * Assemble the 64-bit endpoint map from two 32-bit reads, then clear
	 * our own partition's bit -- only peer partitions need aliasing.
	 *
	 * NOTE(review): "&mmio_ntb->ep_map + 4" advances the pointer by
	 * 4 * sizeof(ep_map), not 4 bytes, unless ep_map is a byte-sized
	 * field.  Verify against struct ntb_info_regs that the high 32 bits
	 * really come from byte offset +4 (a (void __iomem *) cast may be
	 * missing here).
	 */
	partition_map = ioread32(&mmio_ntb->ep_map);
	partition_map |= ((u64) ioread32(&mmio_ntb->ep_map + 4)) << 32;
	partition_map &= ~(1ULL << partition);

	/* One bit per possible peer partition in the 64-bit map */
	for (pp = 0; pp < (sizeof(partition_map) * 8); pp++) {
		struct ntb_ctrl_regs __iomem *mmio_peer_ctrl;
		u32 table_sz = 0;
		int te;

		if (!(partition_map & (1ULL << pp)))
			continue;

		pci_dbg(pdev, "Processing partition %d\n", pp);

		mmio_peer_ctrl = &mmio_ctrl[pp];

		table_sz = ioread16(&mmio_peer_ctrl->req_id_table_size);
		if (!table_sz) {
			pci_warn(pdev, "Partition %d table_sz 0\n", pp);
			continue;
		}

		/* Sanity-check against a corrupt or bogus table size */
		if (table_sz > 512) {
			pci_warn(pdev,
				 "Invalid Switchtec partition %d table_sz %d\n",
				 pp, table_sz);
			continue;
		}

		for (te = 0; te < table_sz; te++) {
			u32 rid_entry;
			u8 devfn;

			rid_entry = ioread32(&mmio_peer_ctrl->req_id_table[te]);
			/* Bits [8:1] of the table entry hold the proxy devfn */
			devfn = (rid_entry >> 1) & 0xFF;
			pci_dbg(pdev,
				"Aliasing Partition %d Proxy ID %02x.%d\n",
				pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
			pci_add_dma_alias(pdev, devfn, 1);
		}
	}

	pci_iounmap(pdev, mmio);
	pci_disable_device(pdev);
}
/*
 * Register quirk_switchtec_ntb_dma_alias() as a FINAL class fixup for
 * every known Microsemi Switchtec NTB device ID (class match:
 * PCI_CLASS_BRIDGE_OTHER, ignoring the low 8 prog-if bits).
 */
#define SWITCHTEC_QUIRK(vid) \
	DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \
		PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias)

SWITCHTEC_QUIRK(0x8531);
SWITCHTEC_QUIRK(0x8532);
SWITCHTEC_QUIRK(0x8533);
SWITCHTEC_QUIRK(0x8534);
SWITCHTEC_QUIRK(0x8535);
SWITCHTEC_QUIRK(0x8536);
SWITCHTEC_QUIRK(0x8541);
SWITCHTEC_QUIRK(0x8542);
SWITCHTEC_QUIRK(0x8543);
SWITCHTEC_QUIRK(0x8544);
SWITCHTEC_QUIRK(0x8545);
SWITCHTEC_QUIRK(0x8546);
SWITCHTEC_QUIRK(0x8551);
SWITCHTEC_QUIRK(0x8552);
SWITCHTEC_QUIRK(0x8553);
SWITCHTEC_QUIRK(0x8554);
SWITCHTEC_QUIRK(0x8555);
SWITCHTEC_QUIRK(0x8556);
SWITCHTEC_QUIRK(0x8561);
SWITCHTEC_QUIRK(0x8562);
SWITCHTEC_QUIRK(0x8563);
SWITCHTEC_QUIRK(0x8564);
SWITCHTEC_QUIRK(0x8565);
SWITCHTEC_QUIRK(0x8566);
SWITCHTEC_QUIRK(0x8571);
SWITCHTEC_QUIRK(0x8572);
SWITCHTEC_QUIRK(0x8573);
SWITCHTEC_QUIRK(0x8574);
SWITCHTEC_QUIRK(0x8575);
SWITCHTEC_QUIRK(0x8576);
5469
5470
5471
5472
5473
5474
5475
/*
 * PLX NTB devices (0x87b0/0x87b1) can emit DMA on behalf of the remote
 * side of the bridge.  Alias every devfn on this device's bus
 * (pci_add_dma_alias() starting at devfn 0, count 256 covers all of
 * them) so the IOMMU accepts those transactions.
 */
static void quirk_plx_ntb_dma_alias(struct pci_dev *pdev)
{
	pci_info(pdev, "Setting PLX NTB proxy ID aliases\n");

	/* devfn 0..255: alias the whole bus */
	pci_add_dma_alias(pdev, 0, 256);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b0, quirk_plx_ntb_dma_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b1, quirk_plx_ntb_dma_alias);
5484
5485
5486
5487
5488
5489
5490
5491
5492
5493
5494
5495
5496
5497
5498
5499
5500
5501 static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
5502 {
5503 void __iomem *map;
5504 int ret;
5505
5506 if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO ||
5507 pdev->subsystem_device != 0x222e ||
5508 !pdev->reset_fn)
5509 return;
5510
5511 if (pci_enable_device_mem(pdev))
5512 return;
5513
5514
5515
5516
5517
5518 map = pci_iomap(pdev, 0, 0x23000);
5519 if (!map) {
5520 pci_err(pdev, "Can't map MMIO space\n");
5521 goto out_disable;
5522 }
5523
5524
5525
5526
5527
5528 if (ioread32(map + 0x2240c) & 0x2) {
5529 pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n");
5530 ret = pci_reset_bus(pdev);
5531 if (ret < 0)
5532 pci_err(pdev, "Failed to reset GPU: %d\n", ret);
5533 }
5534
5535 iounmap(map);
5536 out_disable:
5537 pci_disable_device(pdev);
5538 }
5539 DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
5540 PCI_CLASS_DISPLAY_VGA, 8,
5541 quirk_reset_lenovo_thinkpad_p50_nvgpu);
5542
5543
5544
5545
5546
/*
 * Some devices advertise PME# support in D0 but cannot actually signal
 * it from that state.  Clear the D0 bit in dev->pme_support so the PM
 * core never relies on PME# while the device is in D0.
 * Applied to ASMedia device 0x2142 (see the DECLARE below).
 */
static void pci_fixup_no_d0_pme(struct pci_dev *dev)
{
	pci_info(dev, "PME# does not work under D0, disabling it\n");
	dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
5553
/*
 * Reclassify the Apex device (1ac1:089a) as PCI_CLASS_SYSTEM_OTHER.
 * OR-ing in the existing pdev->class is safe here only because the
 * DECLARE below matches PCI_CLASS_NOT_DEFINED with a class shift of 8:
 * the upper 16 class bits are guaranteed zero, so only the prog-if byte
 * survives the OR.
 */
static void apex_pci_fixup_class(struct pci_dev *pdev)
{
	pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
}
DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a,
			       PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);