root/arch/x86/pci/xen.c


DEFINITIONS

This source file includes the following definitions.
  1. xen_pcifront_enable_irq
  2. xen_register_pirq
  3. acpi_register_gsi_xen_hvm
  4. xen_register_gsi
  5. acpi_register_gsi_xen
  6. xen_setup_msi_irqs
  7. xen_msi_compose_msg
  8. xen_hvm_setup_msi_irqs
  9. xen_initdom_setup_msi_irqs
  10. xen_initdom_restore_msi_irqs
  11. xen_teardown_msi_irqs
  12. xen_teardown_msi_irq
  13. pci_xen_init
  14. xen_msi_init
  15. pci_xen_hvm_init
  16. pci_xen_initial_domain
  17. find_device
  18. xen_find_device_domain_owner
  19. xen_register_device_domain_owner
  20. xen_unregister_device_domain_owner

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Xen PCI - handle PCI (INTx) and MSI infrastructure calls for PV, HVM and
   4  * initial domain support. We also handle the DSDT _PRT callbacks for GSIs
   5  * used in HVM and initial domain mode (PV does not parse ACPI, so it has no
   6  * concept of GSIs). Under PV we hook into the pcibios API for IRQs and
   7  * 0xcf8 PCI configuration read/write.
   8  *
   9  *   Author: Ryan Wilson <hap9@epoch.ncsc.mil>
  10  *           Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
  11  *           Stefano Stabellini <stefano.stabellini@eu.citrix.com>
  12  */
  13 #include <linux/export.h>
  14 #include <linux/init.h>
  15 #include <linux/pci.h>
  16 #include <linux/acpi.h>
  17 
  18 #include <linux/io.h>
  19 #include <asm/io_apic.h>
  20 #include <asm/pci_x86.h>
  21 
  22 #include <asm/xen/hypervisor.h>
  23 
  24 #include <xen/features.h>
  25 #include <xen/events.h>
  26 #include <asm/xen/pci.h>
  27 #include <asm/xen/cpuid.h>
  28 #include <asm/apic.h>
  29 #include <asm/i8259.h>
  30 
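/*
 * PV domU INTx path: pcifront has no IO-APIC or ACPI view of the device,
 * so the Xen PCI backend exports the guest PIRQ number through the
 * virtual PCI_INTERRUPT_LINE register.  Roughly:
 *
 *   read PCI_INTERRUPT_LINE        -> value is really a PIRQ
 *   xen_bind_pirq_gsi_to_irq()     -> binds the PIRQ to an event channel
 *                                     and returns the Linux IRQ number
 *
 * which is then stored in dev->irq.  Legacy IRQ numbers (below
 * nr_legacy_irqs()) are registered as non-shareable.
 */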
  31 static int xen_pcifront_enable_irq(struct pci_dev *dev)
  32 {
  33         int rc;
  34         int share = 1;
  35         int pirq;
  36         u8 gsi;
  37 
  38         rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
  39         if (rc) {
  40                 dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
  41                          rc);
  42                 return pcibios_err_to_errno(rc);
  43         }
  44         /* In PV DomU the Xen PCI backend puts the PIRQ in the interrupt line.*/
  45         pirq = gsi;
  46 
  47         if (gsi < nr_legacy_irqs())
  48                 share = 0;
  49 
  50         rc = xen_bind_pirq_gsi_to_irq(gsi, pirq, share, "pcifront");
  51         if (rc < 0) {
  52                 dev_warn(&dev->dev, "Xen PCI: failed to bind GSI%d (PIRQ%d) to IRQ: %d\n",
  53                          gsi, pirq, rc);
  54                 return rc;
  55         }
  56 
  57         dev->irq = rc;
  58         dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
  59         return 0;
  60 }
  61 
  62 #ifdef CONFIG_ACPI
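/*
 * Common helper for HVM and the initial domain: ask Xen to map a GSI to a
 * PIRQ (PHYSDEVOP_map_pirq, MAP_PIRQ_TYPE_GSI) and bind that PIRQ to a
 * Linux IRQ.  With @set_pirq the caller requests an identity GSI == PIRQ
 * mapping (used by dom0 for the legacy IRQs); otherwise pirq is left at -1
 * and Xen picks one.  Edge-triggered GSIs are registered non-shareable
 * ("ioapic-edge"), level-triggered ones shareable ("ioapic-level").
 */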
  63 static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
  64                              bool set_pirq)
  65 {
  66         int rc, pirq = -1, irq = -1;
  67         struct physdev_map_pirq map_irq;
  68         int shareable = 0;
  69         char *name;
  70 
  71         irq = xen_irq_from_gsi(gsi);
  72         if (irq > 0)
  73                 return irq;
  74 
  75         if (set_pirq)
  76                 pirq = gsi;
  77 
  78         map_irq.domid = DOMID_SELF;
  79         map_irq.type = MAP_PIRQ_TYPE_GSI;
  80         map_irq.index = gsi;
  81         map_irq.pirq = pirq;
  82 
  83         rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
  84         if (rc) {
  85                 printk(KERN_WARNING "xen map irq failed %d\n", rc);
  86                 return -1;
  87         }
  88 
  89         if (triggering == ACPI_EDGE_SENSITIVE) {
  90                 shareable = 0;
  91                 name = "ioapic-edge";
  92         } else {
  93                 shareable = 1;
  94                 name = "ioapic-level";
  95         }
  96 
  97         if (gsi_override >= 0)
  98                 gsi = gsi_override;
  99 
 100         irq = xen_bind_pirq_gsi_to_irq(gsi, map_irq.pirq, shareable, name);
 101         if (irq < 0)
 102                 goto out;
 103 
 104         printk(KERN_DEBUG "xen: --> pirq=%d -> irq=%d (gsi=%d)\n", map_irq.pirq, irq, gsi);
 105 out:
 106         return irq;
 107 }
 108 
 109 static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
 110                                      int trigger, int polarity)
 111 {
 112         if (!xen_hvm_domain())
 113                 return -1;
 114 
 115         return xen_register_pirq(gsi, -1 /* no GSI override */, trigger,
 116                                  false /* no mapping of GSI to PIRQ */);
 117 }
 118 
 119 #ifdef CONFIG_XEN_DOM0
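/*
 * Dom0 (PV) only: besides the GSI -> PIRQ -> IRQ mapping done in
 * xen_register_pirq(), dom0 has to report the trigger mode and polarity of
 * the GSI to Xen via PHYSDEVOP_setup_gsi, since under PV it is the
 * hypervisor, not dom0, that programs the physical IO-APIC entry.  The
 * encoding below is 0 = edge / active-high, 1 = level / active-low.
 */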
 120 static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
 121 {
 122         int rc, irq;
 123         struct physdev_setup_gsi setup_gsi;
 124 
 125         if (!xen_pv_domain())
 126                 return -1;
 127 
 128         printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
 129                         gsi, triggering, polarity);
 130 
 131         irq = xen_register_pirq(gsi, gsi_override, triggering, true);
 132 
 133         setup_gsi.gsi = gsi;
 134         setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
 135         setup_gsi.polarity = (polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
 136 
 137         rc = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
 138         if (rc == -EEXIST)
 139                 printk(KERN_INFO "GSI %d is already set up\n", gsi);
 140         else if (rc) {
 141                 printk(KERN_ERR "Failed to set up GSI %d, err %d\n",
 142                                 gsi, rc);
 143         }
 144 
 145         return irq;
 146 }
 147 
 148 static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
 149                                  int trigger, int polarity)
 150 {
 151         return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
 152 }
 153 #endif
 154 #endif
 155 
 156 #if defined(CONFIG_PCI_MSI)
 157 #include <linux/msi.h>
 158 #include <asm/msidef.h>
 159 
 160 struct xen_pci_frontend_ops *xen_pci_frontend;
 161 EXPORT_SYMBOL_GPL(xen_pci_frontend);
 162 
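/*
 * PV domU MSI/MSI-X: the guest cannot touch the real MSI capability, so it
 * asks the PCI backend (through the xen_pci_frontend ops) to enable
 * MSI/MSI-X on its behalf.  The backend hands back the PIRQ(s) in v[], and
 * each one is bound to a Linux IRQ with xen_bind_pirq_msi_to_irq().
 * Multi-vector MSI is not supported on this path; returning 1 tells the
 * generic MSI code to retry with a single vector.
 */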
 163 static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 164 {
 165         int irq, ret, i;
 166         struct msi_desc *msidesc;
 167         int *v;
 168 
 169         if (type == PCI_CAP_ID_MSI && nvec > 1)
 170                 return 1;
 171 
 172         v = kcalloc(max(1, nvec), sizeof(int), GFP_KERNEL);
 173         if (!v)
 174                 return -ENOMEM;
 175 
 176         if (type == PCI_CAP_ID_MSIX)
 177                 ret = xen_pci_frontend_enable_msix(dev, v, nvec);
 178         else
 179                 ret = xen_pci_frontend_enable_msi(dev, v);
 180         if (ret)
 181                 goto error;
 182         i = 0;
 183         for_each_pci_msi_entry(msidesc, dev) {
 184                 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
 185                                                (type == PCI_CAP_ID_MSI) ? nvec : 1,
 186                                                (type == PCI_CAP_ID_MSIX) ?
 187                                                "pcifront-msi-x" :
 188                                                "pcifront-msi",
 189                                                 DOMID_SELF);
 190                 if (irq < 0) {
 191                         ret = irq;
 192                         goto free;
 193                 }
 194                 i++;
 195         }
 196         kfree(v);
 197         return 0;
 198 
 199 error:
 200         if (ret == -ENOSYS)
 201                 dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
 202         else if (ret)
 203                 dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
 204 free:
 205         kfree(v);
 206         return ret;
 207 }
 208 
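/*
 * HVM MSI via PIRQs: instead of a real vector, the guest programs the
 * device with a specially encoded MSI message that Xen recognizes and
 * routes as a PIRQ.  The vector field is 0 ("don't care") and the
 * destination-ID bits of the address (plus the extended destination ID in
 * address_hi) carry the PIRQ number, as the comment in
 * xen_msi_compose_msg() explains.  Delivery then happens over the event
 * channel the PIRQ gets bound to in xen_hvm_setup_msi_irqs().
 */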
 209 #define XEN_PIRQ_MSI_DATA  (MSI_DATA_TRIGGER_EDGE | \
 210                 MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0))
 211 
 212 static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
 213                 struct msi_msg *msg)
 214 {
 215         /* We set vector == 0 to tell the hypervisor we don't care about it,
 216          * but we want a pirq setup instead.
 217          * We use the dest_id field to pass the pirq that we want. */
 218         msg->address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(pirq);
 219         msg->address_lo =
 220                 MSI_ADDR_BASE_LO |
 221                 MSI_ADDR_DEST_MODE_PHYSICAL |
 222                 MSI_ADDR_REDIRECTION_CPU |
 223                 MSI_ADDR_DEST_ID(pirq);
 224 
 225         msg->data = XEN_PIRQ_MSI_DATA;
 226 }
 227 
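/*
 * HVM MSI/MSI-X setup: for each MSI descriptor, allocate a PIRQ from Xen,
 * write the PIRQ-encoded message built by xen_msi_compose_msg() into the
 * device, and bind the PIRQ to a Linux IRQ.  This path is only used when
 * the hypervisor does not provide (x2)APIC virtualization -- see
 * xen_msi_init() below.
 */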
 228 static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 229 {
 230         int irq, pirq;
 231         struct msi_desc *msidesc;
 232         struct msi_msg msg;
 233 
 234         if (type == PCI_CAP_ID_MSI && nvec > 1)
 235                 return 1;
 236 
 237         for_each_pci_msi_entry(msidesc, dev) {
 238                 pirq = xen_allocate_pirq_msi(dev, msidesc);
 239                 if (pirq < 0) {
 240                         irq = -ENODEV;
 241                         goto error;
 242                 }
 243                 xen_msi_compose_msg(dev, pirq, &msg);
 244                 __pci_write_msi_msg(msidesc, &msg);
 245                 dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
 246                 irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
 247                                                (type == PCI_CAP_ID_MSI) ? nvec : 1,
 248                                                (type == PCI_CAP_ID_MSIX) ?
 249                                                "msi-x" : "msi",
 250                                                DOMID_SELF);
 251                 if (irq < 0)
 252                         goto error;
 253                 dev_dbg(&dev->dev,
 254                         "xen: msi --> pirq=%d --> irq=%d\n", pirq, irq);
 255         }
 256         return 0;
 257 
 258 error:
 259         dev_err(&dev->dev, "Failed to create MSI%s! ret=%d!\n",
 260                 type == PCI_CAP_ID_MSI ? "" : "-X", irq);
 261         return irq;
 262 }
 263 
 264 #ifdef CONFIG_XEN_DOM0
 265 static bool __read_mostly pci_seg_supported = true;
 266 
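/*
 * Initial-domain MSI/MSI-X setup.  The device may be assigned to another
 * guest (tracked by the device domain owner list at the bottom of this
 * file), in which case the PIRQ is mapped in that domain instead of
 * DOMID_SELF.  The segment-aware MAP_PIRQ_TYPE_MSI_SEG is tried first,
 * with the PCI segment in the upper 16 bits of map_irq.bus; if the
 * hypervisor rejects it with -EINVAL and the device is on segment 0, fall
 * back to the legacy MAP_PIRQ_TYPE_MSI and record that in
 * pci_seg_supported.  Multi-vector MSI uses MAP_PIRQ_TYPE_MULTI_MSI, and
 * on failure returns 1 so the driver retries with a single MSI.  MSI-X
 * additionally passes the physical base of the MSI-X table plus the entry
 * number so Xen can locate the right table slot.
 */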
 267 static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 268 {
 269         int ret = 0;
 270         struct msi_desc *msidesc;
 271 
 272         for_each_pci_msi_entry(msidesc, dev) {
 273                 struct physdev_map_pirq map_irq;
 274                 domid_t domid;
 275 
 276                 domid = ret = xen_find_device_domain_owner(dev);
 277                 /* N.B. Casting int's -ENODEV to uint16_t results in 0xFFED,
 278                  * hence check ret value for < 0. */
 279                 if (ret < 0)
 280                         domid = DOMID_SELF;
 281 
 282                 memset(&map_irq, 0, sizeof(map_irq));
 283                 map_irq.domid = domid;
 284                 map_irq.type = MAP_PIRQ_TYPE_MSI_SEG;
 285                 map_irq.index = -1;
 286                 map_irq.pirq = -1;
 287                 map_irq.bus = dev->bus->number |
 288                               (pci_domain_nr(dev->bus) << 16);
 289                 map_irq.devfn = dev->devfn;
 290 
 291                 if (type == PCI_CAP_ID_MSI && nvec > 1) {
 292                         map_irq.type = MAP_PIRQ_TYPE_MULTI_MSI;
 293                         map_irq.entry_nr = nvec;
 294                 } else if (type == PCI_CAP_ID_MSIX) {
 295                         int pos;
 296                         unsigned long flags;
 297                         u32 table_offset, bir;
 298 
 299                         pos = dev->msix_cap;
 300                         pci_read_config_dword(dev, pos + PCI_MSIX_TABLE,
 301                                               &table_offset);
 302                         bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
 303                         flags = pci_resource_flags(dev, bir);
 304                         if (!flags || (flags & IORESOURCE_UNSET))
 305                                 return -EINVAL;
 306 
 307                         map_irq.table_base = pci_resource_start(dev, bir);
 308                         map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
 309                 }
 310 
 311                 ret = -EINVAL;
 312                 if (pci_seg_supported)
 313                         ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
 314                                                     &map_irq);
 315                 if (type == PCI_CAP_ID_MSI && nvec > 1 && ret) {
 316                         /*
 317                          * If MAP_PIRQ_TYPE_MULTI_MSI is not available
 318                          * there's nothing else we can do in this case.
 319                          * Just set ret > 0 so driver can retry with
 320                          * single MSI.
 321                          */
 322                         ret = 1;
 323                         goto out;
 324                 }
 325                 if (ret == -EINVAL && !pci_domain_nr(dev->bus)) {
 326                         map_irq.type = MAP_PIRQ_TYPE_MSI;
 327                         map_irq.index = -1;
 328                         map_irq.pirq = -1;
 329                         map_irq.bus = dev->bus->number;
 330                         ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq,
 331                                                     &map_irq);
 332                         if (ret != -EINVAL)
 333                                 pci_seg_supported = false;
 334                 }
 335                 if (ret) {
 336                         dev_warn(&dev->dev, "xen map irq failed %d for domain %d\n",
 337                                  ret, domid);
 338                         goto out;
 339                 }
 340 
 341                 ret = xen_bind_pirq_msi_to_irq(dev, msidesc, map_irq.pirq,
 342                                                (type == PCI_CAP_ID_MSI) ? nvec : 1,
 343                                                (type == PCI_CAP_ID_MSIX) ? "msi-x" : "msi",
 344                                                domid);
 345                 if (ret < 0)
 346                         goto out;
 347         }
 348         ret = 0;
 349 out:
 350         return ret;
 351 }
 352 
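/*
 * Re-establish a device's MSI state in the hypervisor (typically on
 * resume): prefer the segment-aware PHYSDEVOP_restore_msi_ext and fall
 * back to PHYSDEVOP_restore_msi -- clearing pci_seg_supported -- if the
 * hypervisor does not know the extended call (-ENOSYS).
 */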
 353 static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
 354 {
 355         int ret = 0;
 356 
 357         if (pci_seg_supported) {
 358                 struct physdev_pci_device restore_ext;
 359 
 360                 restore_ext.seg = pci_domain_nr(dev->bus);
 361                 restore_ext.bus = dev->bus->number;
 362                 restore_ext.devfn = dev->devfn;
 363                 ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi_ext,
 364                                         &restore_ext);
 365                 if (ret == -ENOSYS)
 366                         pci_seg_supported = false;
 367                 WARN(ret && ret != -ENOSYS, "restore_msi_ext -> %d\n", ret);
 368         }
 369         if (!pci_seg_supported) {
 370                 struct physdev_restore_msi restore;
 371 
 372                 restore.bus = dev->bus->number;
 373                 restore.devfn = dev->devfn;
 374                 ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore);
 375                 WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
 376         }
 377 }
 378 #endif
 379 
 380 static void xen_teardown_msi_irqs(struct pci_dev *dev)
 381 {
 382         struct msi_desc *msidesc;
 383 
 384         msidesc = first_pci_msi_entry(dev);
 385         if (msidesc->msi_attrib.is_msix)
 386                 xen_pci_frontend_disable_msix(dev);
 387         else
 388                 xen_pci_frontend_disable_msi(dev);
 389 
 390         /* Free the IRQ's and the msidesc using the generic code. */
 391         default_teardown_msi_irqs(dev);
 392 }
 393 
 394 static void xen_teardown_msi_irq(unsigned int irq)
 395 {
 396         xen_destroy_irq(irq);
 397 }
 398 
 399 #endif
 400 
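/*
 * PV domU setup: route pcibios_enable_irq through
 * xen_pcifront_enable_irq(), keep ACPI out of IRQ routing (PV guests do
 * not parse ACPI, see the header comment), and take over the MSI
 * setup/teardown ops.  pci_msi_ignore_mask stops the generic MSI code from
 * touching the per-vector mask bits; masking is left to Xen and the event
 * channel layer.
 */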
 401 int __init pci_xen_init(void)
 402 {
 403         if (!xen_pv_domain() || xen_initial_domain())
 404                 return -ENODEV;
 405 
 406         printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");
 407 
 408         pcibios_set_cache_line_size();
 409 
 410         pcibios_enable_irq = xen_pcifront_enable_irq;
 411         pcibios_disable_irq = NULL;
 412 
 413         /* Keep ACPI out of the picture */
 414         acpi_noirq_set();
 415 
 416 #ifdef CONFIG_PCI_MSI
 417         x86_msi.setup_msi_irqs = xen_setup_msi_irqs;
 418         x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
 419         x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs;
 420         pci_msi_ignore_mask = 1;
 421 #endif
 422         return 0;
 423 }
 424 
 425 #ifdef CONFIG_PCI_MSI
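/*
 * Runs as x86_platform.apic_post_init on HVM (set up in
 * pci_xen_hvm_init()): if Xen's CPUID leaf (base + 4) advertises APIC
 * register or x2APIC virtualization and the matching APIC mode is active,
 * native MSI handling is kept; otherwise MSIs are remapped onto
 * PIRQs/event channels via xen_hvm_setup_msi_irqs().
 */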
 426 void __init xen_msi_init(void)
 427 {
 428         if (!disable_apic) {
 429                 /*
 430                  * If hardware supports (x2)APIC virtualization (as indicated
 431                  * by hypervisor's leaf 4) then we don't need to use pirqs/
 432                  * event channels for MSI handling and instead use regular
 433                  * APIC processing
 434                  */
 435                 uint32_t eax = cpuid_eax(xen_cpuid_base() + 4);
 436 
 437                 if (((eax & XEN_HVM_CPUID_X2APIC_VIRT) && x2apic_mode) ||
 438                     ((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && boot_cpu_has(X86_FEATURE_APIC)))
 439                         return;
 440         }
 441 
 442         x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
 443         x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
 444 }
 445 #endif
 446 
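/*
 * HVM setup: only take over GSI registration and MSI handling when the
 * hypervisor offers both the vector callback and XENFEAT_hvm_pirqs;
 * otherwise the emulated IO-APIC/LAPIC path is left alone.  The MSI op
 * switch is deferred to apic_post_init so that x2APIC state is settled
 * before xen_msi_init() makes its decision.
 */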
 447 int __init pci_xen_hvm_init(void)
 448 {
 449         if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
 450                 return 0;
 451 
 452 #ifdef CONFIG_ACPI
 453         /*
 454          * We don't want to change the actual ACPI delivery model,
 455          * just how GSIs get registered.
 456          */
 457         __acpi_register_gsi = acpi_register_gsi_xen_hvm;
 458         __acpi_unregister_gsi = NULL;
 459 #endif
 460 
 461 #ifdef CONFIG_PCI_MSI
 462         /*
 463          * We need to wait until after x2apic is initialized
 464          * before we can set MSI IRQ ops.
 465          */
 466         x86_platform.apic_post_init = xen_msi_init;
 467 #endif
 468         return 0;
 469 }
 470 
 471 #ifdef CONFIG_XEN_DOM0
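/*
 * Dom0 setup: install the initial-domain MSI ops and the ACPI GSI hook,
 * then pre-register the legacy IRQs as identity-mapped PIRQs using the
 * trigger mode ACPI reports for each ISA IRQ (IRQs without an ACPI routing
 * entry are skipped).  On a machine with no IO-APIC the legacy IRQs are
 * bound directly as "xt-pic" PIRQs instead.
 */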
 472 int __init pci_xen_initial_domain(void)
 473 {
 474         int irq;
 475 
 476 #ifdef CONFIG_PCI_MSI
 477         x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs;
 478         x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
 479         x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
 480         pci_msi_ignore_mask = 1;
 481 #endif
 482         __acpi_register_gsi = acpi_register_gsi_xen;
 483         __acpi_unregister_gsi = NULL;
 484         /*
 485          * Pre-allocate the legacy IRQs.  Use NR_IRQS_LEGACY here
 486          * because we don't have a PIC and thus nr_legacy_irqs() is zero.
 487          */
 488         for (irq = 0; irq < NR_IRQS_LEGACY; irq++) {
 489                 int trigger, polarity;
 490 
 491                 if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
 492                         continue;
 493 
 494                 xen_register_pirq(irq, -1 /* no GSI override */,
 495                         trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE,
 496                         true /* Map GSI to PIRQ */);
 497         }
 498         if (0 == nr_ioapics) {
 499                 for (irq = 0; irq < nr_legacy_irqs(); irq++)
 500                         xen_bind_pirq_gsi_to_irq(irq, irq, 0, "xt-pic");
 501         }
 502         return 0;
 503 }
 504 
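/*
 * Tracking of which domain owns a passed-through PCI device, so that
 * xen_initdom_setup_msi_irqs() can map MSIs into the owning domain.  The
 * registration is expected to be done by the PCI backend (pciback) when a
 * device is assigned.  A minimal usage sketch -- hypothetical caller,
 * error handling elided:
 *
 *	err = xen_register_device_domain_owner(pdev, domid);  // on assign
 *	owner = xen_find_device_domain_owner(pdev);           // -ENODEV if unowned
 *	xen_unregister_device_domain_owner(pdev);             // on deassign
 */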
 505 struct xen_device_domain_owner {
 506         domid_t domain;
 507         struct pci_dev *dev;
 508         struct list_head list;
 509 };
 510 
 511 static DEFINE_SPINLOCK(dev_domain_list_spinlock);
 512 static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);
 513 
 514 static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
 515 {
 516         struct xen_device_domain_owner *owner;
 517 
 518         list_for_each_entry(owner, &dev_domain_list, list) {
 519                 if (owner->dev == dev)
 520                         return owner;
 521         }
 522         return NULL;
 523 }
 524 
 525 int xen_find_device_domain_owner(struct pci_dev *dev)
 526 {
 527         struct xen_device_domain_owner *owner;
 528         int domain = -ENODEV;
 529 
 530         spin_lock(&dev_domain_list_spinlock);
 531         owner = find_device(dev);
 532         if (owner)
 533                 domain = owner->domain;
 534         spin_unlock(&dev_domain_list_spinlock);
 535         return domain;
 536 }
 537 EXPORT_SYMBOL_GPL(xen_find_device_domain_owner);
 538 
 539 int xen_register_device_domain_owner(struct pci_dev *dev, uint16_t domain)
 540 {
 541         struct xen_device_domain_owner *owner;
 542 
 543         owner = kzalloc(sizeof(struct xen_device_domain_owner), GFP_KERNEL);
 544         if (!owner)
 545                 return -ENODEV;
 546 
 547         spin_lock(&dev_domain_list_spinlock);
 548         if (find_device(dev)) {
 549                 spin_unlock(&dev_domain_list_spinlock);
 550                 kfree(owner);
 551                 return -EEXIST;
 552         }
 553         owner->domain = domain;
 554         owner->dev = dev;
 555         list_add_tail(&owner->list, &dev_domain_list);
 556         spin_unlock(&dev_domain_list_spinlock);
 557         return 0;
 558 }
 559 EXPORT_SYMBOL_GPL(xen_register_device_domain_owner);
 560 
 561 int xen_unregister_device_domain_owner(struct pci_dev *dev)
 562 {
 563         struct xen_device_domain_owner *owner;
 564 
 565         spin_lock(&dev_domain_list_spinlock);
 566         owner = find_device(dev);
 567         if (!owner) {
 568                 spin_unlock(&dev_domain_list_spinlock);
 569                 return -ENODEV;
 570         }
 571         list_del(&owner->list);
 572         spin_unlock(&dev_domain_list_spinlock);
 573         kfree(owner);
 574         return 0;
 575 }
 576 EXPORT_SYMBOL_GPL(xen_unregister_device_domain_owner);
 577 #endif
