arch/x86/kernel/quirks.c


DEFINITIONS

This source file includes the following definitions.
  1. quirk_intel_irqbalance
  2. ich_force_hpet_resume
  3. ich_force_enable_hpet
  4. hpet_print_force_info
  5. old_ich_force_hpet_resume
  6. old_ich_force_enable_hpet
  7. old_ich_force_enable_hpet_user
  8. vt8237_force_hpet_resume
  9. vt8237_force_enable_hpet
  10. ati_force_hpet_resume
  11. ati_ixp4x0_rev
  12. ati_force_enable_hpet
  13. nvidia_force_hpet_resume
  14. nvidia_force_enable_hpet
  15. force_hpet_resume
  16. e6xx_force_enable_hpet
  17. force_disable_hpet_msi
  18. quirk_amd_nb_node
  19. amd_disable_seq_and_redirect_scrub
  20. quirk_intel_brickland_xeon_ras_cap
  21. quirk_intel_purley_xeon_ras_cap
  22. early_platform_quirks

// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains work-arounds for x86 and x86_64 platform bugs.
 */
#include <linux/dmi.h>
#include <linux/pci.h>
#include <linux/irq.h>

#include <asm/hpet.h>
#include <asm/setup.h>

#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)

static void quirk_intel_irqbalance(struct pci_dev *dev)
{
        u8 config;
        u16 word;

        /*
         * The BIOS may enable hardware IRQ balancing on
         * E7520/E7320/E7525 (revision ID 0x9 and below) based platforms.
         * Disable software irqbalance/affinity on those platforms.
         */
        if (dev->revision > 0x9)
                return;

        /* enable access to config space */
        pci_read_config_byte(dev, 0xf4, &config);
        pci_write_config_byte(dev, 0xf4, config|0x2);

        /*
         * read xTPR register.  We may not have a pci_dev for device 8
         * because it might be hidden until the above write.
         */
        pci_bus_read_config_word(dev->bus, PCI_DEVFN(8, 0), 0x4c, &word);

        if (!(word & (1 << 13))) {
                dev_info(&dev->dev, "Intel E7520/7320/7525 detected; "
                        "disabling irq balancing and affinity\n");
                noirqdebug_setup("");
#ifdef CONFIG_PROC_FS
                no_irq_affinity = 1;
#endif
        }

        /* put back the original value for config space */
        if (!(config & 0x2))
                pci_write_config_byte(dev, 0xf4, config);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
                        quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
                        quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
                        quirk_intel_irqbalance);
#endif

#if defined(CONFIG_HPET_TIMER)
unsigned long force_hpet_address;
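/*
 * Note: force_hpet_address is only discovered and published here; the HPET
 * init code (hpet_late_init() in arch/x86/kernel/hpet.c) is what eventually
 * adopts it when ACPI/BIOS did not report an HPET.  force_hpet_resume_type
 * below records which chipset-specific handler force_hpet_resume() must
 * invoke to re-enable the timer after a suspend/resume cycle.
 */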

static enum {
        NONE_FORCE_HPET_RESUME,
        OLD_ICH_FORCE_HPET_RESUME,
        ICH_FORCE_HPET_RESUME,
        VT8237_FORCE_HPET_RESUME,
        NVIDIA_FORCE_HPET_RESUME,
        ATI_FORCE_HPET_RESUME,
} force_hpet_resume_type;

static void __iomem *rcba_base;

static void ich_force_hpet_resume(void)
{
        u32 val;

        if (!force_hpet_address)
                return;

        BUG_ON(rcba_base == NULL);

        /* read the HPTC register, dword mode only */
        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80)) {
                /* HPET disabled in HPTC. Trying to enable */
                writel(val | 0x80, rcba_base + 0x3404);
        }

        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80))
                BUG();
        else
                printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void ich_force_enable_hpet(struct pci_dev *dev)
{
        u32 val;
        u32 uninitialized_var(rcba);
        int err = 0;

        if (hpet_address || force_hpet_address)
                return;

        pci_read_config_dword(dev, 0xF0, &rcba);
        rcba &= 0xFFFFC000;
        if (rcba == 0) {
                dev_printk(KERN_DEBUG, &dev->dev, "RCBA disabled; "
                        "cannot force enable HPET\n");
                return;
        }

        /* use bits 31:14, 16 kB aligned */
        rcba_base = ioremap_nocache(rcba, 0x4000);
        if (rcba_base == NULL) {
                dev_printk(KERN_DEBUG, &dev->dev, "ioremap failed; "
                        "cannot force enable HPET\n");
                return;
        }

        /* read the HPTC register, dword mode only */
        val = readl(rcba_base + 0x3404);

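        /*
         * HPTC (RCBA + 0x3404) layout, per Intel ICH datasheets: bit 7 is
         * the address enable, bits 1:0 select one of four 4 KiB-aligned
         * ranges (00b = 0xFED00000 ... 11b = 0xFED03000), which is why the
         * code below computes 0xFED00000 | (val << 12).
         */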
        if (val & 0x80) {
                /* HPET is enabled in HPTC. Just not reported by BIOS */
                val = val & 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                iounmap(rcba_base);
                return;
        }

        /* HPET disabled in HPTC. Trying to enable */
        writel(val | 0x80, rcba_base + 0x3404);

        val = readl(rcba_base + 0x3404);
        if (!(val & 0x80)) {
                err = 1;
        } else {
                val = val & 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
        }

        if (err) {
                force_hpet_address = 0;
                iounmap(rcba_base);
                dev_printk(KERN_DEBUG, &dev->dev,
                        "Failed to force enable HPET\n");
        } else {
                force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
        }
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
                         ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,   /* ICH10 */
                         ich_force_enable_hpet);

static struct pci_dev *cached_dev;

static void hpet_print_force_info(void)
{
        printk(KERN_INFO "HPET not enabled in BIOS. "
               "You might try hpet=force boot option\n");
}

static void old_ich_force_hpet_resume(void)
{
        u32 val;
        u32 uninitialized_var(gen_cntl);

        if (!force_hpet_address || !cached_dev)
                return;

        pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
        gen_cntl &= (~(0x7 << 15));
        gen_cntl |= (0x4 << 15);

        pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
        pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
        val = gen_cntl >> 15;
        val &= 0x7;
        if (val == 0x4)
                printk(KERN_DEBUG "Force enabled HPET at resume\n");
        else
                BUG();
}

static void old_ich_force_enable_hpet(struct pci_dev *dev)
{
        u32 val;
        u32 uninitialized_var(gen_cntl);

        if (hpet_address || force_hpet_address)
                return;

        pci_read_config_dword(dev, 0xD0, &gen_cntl);
        /*
         * Bit 17 is the HPET enable bit.
         * Bits 16:15 select the HPET base address.
         */
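        /*
         * Worked example of the decode below: GEN_CNTL[17:15] == 100b means
         * the HPET is enabled at 0xFED00000; 101b, 110b and 111b select
         * 0xFED01000, 0xFED02000 and 0xFED03000 respectively.
         */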
        val = gen_cntl >> 15;
        val &= 0x7;
        if (val & 0x4) {
                val &= 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
                        force_hpet_address);
                return;
        }

        /*
         * HPET is disabled. Try enabling it at 0xFED00000 and check
         * whether the setting sticks.
         */
        gen_cntl &= (~(0x7 << 15));
        gen_cntl |= (0x4 << 15);
        pci_write_config_dword(dev, 0xD0, gen_cntl);

        pci_read_config_dword(dev, 0xD0, &gen_cntl);

        val = gen_cntl >> 15;
        val &= 0x7;
        if (val & 0x4) {
                /* HPET is enabled in HPTC. Just not reported by BIOS */
                val &= 0x3;
                force_hpet_address = 0xFED00000 | (val << 12);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                cached_dev = dev;
                force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
                return;
        }

        dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

/*
 * Undocumented chipset features. Only do this when the user explicitly
 * asked for it (hpet=force).
 */
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
{
        if (hpet_force_user)
                old_ich_force_enable_hpet(dev);
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
                         old_ich_force_enable_hpet_user);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
                         old_ich_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
                         old_ich_force_enable_hpet);


static void vt8237_force_hpet_resume(void)
{
        u32 val;

        if (!force_hpet_address || !cached_dev)
                return;

        val = 0xfed00000 | 0x80;
        pci_write_config_dword(cached_dev, 0x68, val);

        pci_read_config_dword(cached_dev, 0x68, &val);
        if (val & 0x80)
                printk(KERN_DEBUG "Force enabled HPET at resume\n");
        else
                BUG();
}

static void vt8237_force_enable_hpet(struct pci_dev *dev)
{
        u32 uninitialized_var(val);

        if (hpet_address || force_hpet_address)
                return;

        if (!hpet_force_user) {
                hpet_print_force_info();
                return;
        }

        pci_read_config_dword(dev, 0x68, &val);
        /*
         * Bit 7 is the HPET enable bit.
         * Bits 31:10 are the HPET base address (contrary to what the
         * datasheet claims).
         */
        if (val & 0x80) {
                force_hpet_address = (val & ~0x3ff);
                dev_printk(KERN_DEBUG, &dev->dev, "HPET at 0x%lx\n",
                        force_hpet_address);
                return;
        }

        /*
         * HPET is disabled. Try enabling it at 0xFED00000 and check
         * whether the setting sticks.
         */
        val = 0xfed00000 | 0x80;
        pci_write_config_dword(dev, 0x68, val);

        pci_read_config_dword(dev, 0x68, &val);
        if (val & 0x80) {
                force_hpet_address = (val & ~0x3ff);
                dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                        "0x%lx\n", force_hpet_address);
                cached_dev = dev;
                force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
                return;
        }

        dev_printk(KERN_DEBUG, &dev->dev, "Failed to force enable HPET\n");
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
                         vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
                         vt8237_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700,
                         vt8237_force_enable_hpet);

static void ati_force_hpet_resume(void)
{
        pci_write_config_dword(cached_dev, 0x14, 0xfed00000);
        printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static u32 ati_ixp4x0_rev(struct pci_dev *dev)
{
        int err = 0;
        u32 d = 0;
        u8  b = 0;

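        /*
         * Undocumented sequence inherited from the original quirk: clearing
         * bit 5 of config reg 0xac and setting bit 8 of reg 0x70 appears to
         * unhide the real silicon revision; the low byte of the standard
         * register at 0x08 (PCI_CLASS_REVISION) is then the revision ID
         * printed below.
         */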
        err = pci_read_config_byte(dev, 0xac, &b);
        b &= ~(1<<5);
        err |= pci_write_config_byte(dev, 0xac, b);
        err |= pci_read_config_dword(dev, 0x70, &d);
        d |= 1<<8;
        err |= pci_write_config_dword(dev, 0x70, d);
        err |= pci_read_config_dword(dev, 0x8, &d);
        d &= 0xff;
        dev_printk(KERN_DEBUG, &dev->dev, "SB4X0 revision 0x%x\n", d);

        WARN_ON_ONCE(err);

        return d;
}

static void ati_force_enable_hpet(struct pci_dev *dev)
{
        u32 d, val;
        u8  b;

        if (hpet_address || force_hpet_address)
                return;

        if (!hpet_force_user) {
                hpet_print_force_info();
                return;
        }

        d = ati_ixp4x0_rev(dev);
        if (d < 0x82)
                return;

        /* base address */
        pci_write_config_dword(dev, 0x14, 0xfed00000);
        pci_read_config_dword(dev, 0x14, &val);

        /* enable interrupt */
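        /*
         * Ports 0xcd6/0xcd7 are the ATI southbridge power-management
         * index/data pair; the accesses below read-modify-write PM register
         * 0x72, whose bit 0 reportedly gates the HPET interrupt.  Bit 10 of
         * PCI config register 0x64 (set afterwards) appears to be the
         * matching decode enable; neither bit is publicly documented.
         */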
        outb(0x72, 0xcd6); b = inb(0xcd7);
        b |= 0x1;
        outb(0x72, 0xcd6); outb(b, 0xcd7);
        outb(0x72, 0xcd6); b = inb(0xcd7);
        if (!(b & 0x1))
                return;
        pci_read_config_dword(dev, 0x64, &d);
        d |= (1<<10);
        pci_write_config_dword(dev, 0x64, d);
        pci_read_config_dword(dev, 0x64, &d);
        if (!(d & (1<<10)))
                return;

        force_hpet_address = val;
        force_hpet_resume_type = ATI_FORCE_HPET_RESUME;
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
                   force_hpet_address);
        cached_dev = dev;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS,
                         ati_force_enable_hpet);

/*
 * Undocumented chipset feature taken from LinuxBIOS.
 */
static void nvidia_force_hpet_resume(void)
{
        pci_write_config_dword(cached_dev, 0x44, 0xfed00001);
        printk(KERN_DEBUG "Force enabled HPET at resume\n");
}

static void nvidia_force_enable_hpet(struct pci_dev *dev)
{
        u32 uninitialized_var(val);

        if (hpet_address || force_hpet_address)
                return;

        if (!hpet_force_user) {
                hpet_print_force_info();
                return;
        }

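        /*
         * Register 0x44 is not publicly documented; judging from the
         * LinuxBIOS origin of this quirk and the masking below, bit 0 acts
         * as an enable bit and the remaining bits hold the HPET base
         * address.
         */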
        pci_write_config_dword(dev, 0x44, 0xfed00001);
        pci_read_config_dword(dev, 0x44, &val);
        force_hpet_address = val & 0xfffffffe;
        force_hpet_resume_type = NVIDIA_FORCE_HPET_RESUME;
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at 0x%lx\n",
                force_hpet_address);
        cached_dev = dev;
}

/* ISA Bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0050,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0051,
                        nvidia_force_enable_hpet);

/* LPC bridges */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0260,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0360,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0361,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0362,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0363,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0364,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0365,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0366,
                        nvidia_force_enable_hpet);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, 0x0367,
                        nvidia_force_enable_hpet);

void force_hpet_resume(void)
{
        switch (force_hpet_resume_type) {
        case ICH_FORCE_HPET_RESUME:
                ich_force_hpet_resume();
                return;
        case OLD_ICH_FORCE_HPET_RESUME:
                old_ich_force_hpet_resume();
                return;
        case VT8237_FORCE_HPET_RESUME:
                vt8237_force_hpet_resume();
                return;
        case NVIDIA_FORCE_HPET_RESUME:
                nvidia_force_hpet_resume();
                return;
        case ATI_FORCE_HPET_RESUME:
                ati_force_hpet_resume();
                return;
        default:
                break;
        }
}

/*
 * According to the datasheet, e6xx systems have the HPET hardwired to
 * 0xfed00000.
 */
static void e6xx_force_enable_hpet(struct pci_dev *dev)
{
        if (hpet_address || force_hpet_address)
                return;

        force_hpet_address = 0xFED00000;
        force_hpet_resume_type = NONE_FORCE_HPET_RESUME;
        dev_printk(KERN_DEBUG, &dev->dev, "Force enabled HPET at "
                "0x%lx\n", force_hpet_address);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
                         e6xx_force_enable_hpet);

/*
 * HPET MSI on some boards (ATI SB700/SB800) has a side effect on
 * floppy DMA. Disable HPET MSI on such platforms.
 * See erratum #27 (Misinterpreted MSI Requests May Result in
 * Corrupted LPC DMA Data) in AMD Publication #46837,
 * "SB700 Family Product Errata", Rev. 1.0, March 2010.
 */
static void force_disable_hpet_msi(struct pci_dev *unused)
{
        hpet_msi_disable = true;
}

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
                         force_disable_hpet_msi);

#endif

#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
/* Set correct numa_node information for AMD NB functions */
static void quirk_amd_nb_node(struct pci_dev *dev)
{
        struct pci_dev *nb_ht;
        unsigned int devfn;
        u32 node;
        u32 val;

        devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0);
        nb_ht = pci_get_slot(dev->bus, devfn);
        if (!nb_ht)
                return;

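        /*
         * Function 0 of the northbridge is the HyperTransport configuration
         * function; its Node ID register at offset 0x60 carries the NodeId
         * in the low three bits, which is ORed with pcibus_to_node() below.
         */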
        pci_read_config_dword(nb_ht, 0x60, &val);
        node = pcibus_to_node(dev->bus) | (val & 7);
        /*
         * Some hardware may return an invalid node ID,
         * so check it first:
         */
        if (node_online(node))
                set_dev_node(&dev->dev, node);
        pci_dev_put(nb_ht);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_ADDRMAP,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_HT,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MAP,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_DRAM,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
                        quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
                        quirk_amd_nb_node);

#endif

/*
 * The processor does not ensure that the DRAM scrub read/write sequence
 * is atomic with respect to accesses to the CC6 save state area. Therefore,
 * if a concurrent scrub read/write access hits the same address, the entry
 * may appear as if it were not written. This quirk applies to Fam16h
 * models 00h-0Fh.
 *
 * See the "Revision Guide" for AMD F16h models 00h-0fh,
 * document 51810 rev. 3.04, Nov 2013.
 */
static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
{
        u32 val;

        /*
         * Suggested workaround:
         * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
         */
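        /*
         * In BKDG terms: D18F3x58[4:0] is the DRAM scrub rate (00h disables
         * sequential scrubbing) and D18F3x5C[0] is the scrub-redirect
         * enable, which is why the two writes below clear exactly those
         * fields.
         */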
        pci_read_config_dword(dev, 0x58, &val);
        if (val & 0x1F) {
                val &= ~(0x1F);
                pci_write_config_dword(dev, 0x58, val);
        }

        pci_read_config_dword(dev, 0x5C, &val);
        if (val & BIT(0)) {
                val &= ~BIT(0);
                pci_write_config_dword(dev, 0x5c, val);
        }
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
                        amd_disable_seq_and_redirect_scrub);

#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
#include <linux/jump_label.h>
#include <asm/string_64.h>

/* Ivy Bridge, Haswell, Broadwell */
static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
{
        u32 capid0;

        pci_read_config_dword(pdev, 0x84, &capid0);

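        /*
         * CAPID0 bit 4 appears to advertise recoverable machine checks on
         * memory reads; bumping mcsafe_key switches memcpy_mcsafe() (see
         * asm/string_64.h, included above) to its fault-tolerant variant.
         */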
        if (capid0 & 0x10)
                static_branch_inc(&mcsafe_key);
}

/* Skylake */
static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
{
        u32 capid0, capid5;

        pci_read_config_dword(pdev, 0x84, &capid0);
        pci_read_config_dword(pdev, 0x98, &capid5);

        /*
         * CAPID0{7:6} indicate whether this is an advanced RAS SKU
         * CAPID5{8:5} indicate that various NVDIMM usage modes are
         * enabled, so memory machine check recovery is also enabled.
         */
        if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
                static_branch_inc(&mcsafe_key);

}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x6fc0, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2083, quirk_intel_purley_xeon_ras_cap);
#endif
#endif

bool x86_apple_machine;
EXPORT_SYMBOL(x86_apple_machine);

void __init early_platform_quirks(void)
{
        x86_apple_machine = dmi_match(DMI_SYS_VENDOR, "Apple Inc.") ||
                            dmi_match(DMI_SYS_VENDOR, "Apple Computer, Inc.");
}
