vmd 111 drivers/pci/controller/vmd.c static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
vmd 114 drivers/pci/controller/vmd.c return irqs - vmd->irqs;
vmd 129 drivers/pci/controller/vmd.c struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);
vmd 133 drivers/pci/controller/vmd.c MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq));
vmd 197 drivers/pci/controller/vmd.c static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
vmd 202 drivers/pci/controller/vmd.c if (vmd->msix_count == 1)
vmd 203 drivers/pci/controller/vmd.c return &vmd->irqs[0];
vmd 213 drivers/pci/controller/vmd.c return &vmd->irqs[0];
vmd 217 drivers/pci/controller/vmd.c for (i = 1; i < vmd->msix_count; i++)
vmd 218 drivers/pci/controller/vmd.c if (vmd->irqs[i].count < vmd->irqs[best].count)
vmd 220 drivers/pci/controller/vmd.c vmd->irqs[best].count++;
vmd 223 drivers/pci/controller/vmd.c return &vmd->irqs[best];
vmd 231 drivers/pci/controller/vmd.c struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
vmd 239 drivers/pci/controller/vmd.c vmdirq->irq = vmd_next_irq(vmd, desc);
vmd 241 drivers/pci/controller/vmd.c index = index_from_irqs(vmd, vmdirq->irq);
vmd 242 drivers/pci/controller/vmd.c vector = pci_irq_vector(vmd->dev, index);
vmd 245 drivers/pci/controller/vmd.c handle_untracked_irq, vmd, NULL);
vmd 269 drivers/pci/controller/vmd.c struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
vmd 271 drivers/pci/controller/vmd.c if (nvec > vmd->msix_count)
vmd 272 drivers/pci/controller/vmd.c return vmd->msix_count;
vmd 306 drivers/pci/controller/vmd.c struct vmd_dev *vmd = vmd_from_bus(pdev->bus);
vmd 308 drivers/pci/controller/vmd.c return &vmd->dev->dev;
vmd 400 drivers/pci/controller/vmd.c static void vmd_teardown_dma_ops(struct vmd_dev *vmd)
vmd 402 drivers/pci/controller/vmd.c struct dma_domain *domain = &vmd->dma_domain;
vmd 404 drivers/pci/controller/vmd.c if (get_dma_ops(&vmd->dev->dev))
vmd 414 drivers/pci/controller/vmd.c static void vmd_setup_dma_ops(struct vmd_dev *vmd)
vmd 416 drivers/pci/controller/vmd.c const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev);
vmd 417 drivers/pci/controller/vmd.c struct dma_map_ops *dest = &vmd->dma_ops;
vmd 418 drivers/pci/controller/vmd.c struct dma_domain *domain = &vmd->dma_domain;
vmd 420 drivers/pci/controller/vmd.c domain->domain_nr = vmd->sysdata.domain;
vmd 443 drivers/pci/controller/vmd.c static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
vmd 446 drivers/pci/controller/vmd.c char __iomem *addr = vmd->cfgbar +
vmd 447 drivers/pci/controller/vmd.c ((bus->number - vmd->busn_start) << 20) +
vmd 450 drivers/pci/controller/vmd.c if ((addr - vmd->cfgbar) + len >=
vmd 451 drivers/pci/controller/vmd.c resource_size(&vmd->dev->resource[VMD_CFGBAR]))
vmd 464 drivers/pci/controller/vmd.c struct vmd_dev *vmd = vmd_from_bus(bus);
vmd 465 drivers/pci/controller/vmd.c char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
vmd 472 drivers/pci/controller/vmd.c spin_lock_irqsave(&vmd->cfg_lock, flags);
vmd 487 drivers/pci/controller/vmd.c spin_unlock_irqrestore(&vmd->cfg_lock, flags);
vmd 499 drivers/pci/controller/vmd.c struct vmd_dev *vmd = vmd_from_bus(bus);
vmd 500 drivers/pci/controller/vmd.c char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
vmd 507 drivers/pci/controller/vmd.c spin_lock_irqsave(&vmd->cfg_lock, flags);
vmd 525 drivers/pci/controller/vmd.c spin_unlock_irqrestore(&vmd->cfg_lock, flags);
vmd 534 drivers/pci/controller/vmd.c static void vmd_attach_resources(struct vmd_dev *vmd)
vmd 536 drivers/pci/controller/vmd.c vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
vmd 537 drivers/pci/controller/vmd.c vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
vmd 540 drivers/pci/controller/vmd.c static void vmd_detach_resources(struct vmd_dev *vmd)
vmd 542 drivers/pci/controller/vmd.c vmd->dev->resource[VMD_MEMBAR1].child = NULL;
vmd 543 drivers/pci/controller/vmd.c vmd->dev->resource[VMD_MEMBAR2].child = NULL;
vmd 562 drivers/pci/controller/vmd.c static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
vmd 564 drivers/pci/controller/vmd.c struct pci_sysdata *sd = &vmd->sysdata;
vmd 585 drivers/pci/controller/vmd.c ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
vmd 592 drivers/pci/controller/vmd.c membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
vmd 595 drivers/pci/controller/vmd.c offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
vmd 597 drivers/pci/controller/vmd.c offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
vmd 599 drivers/pci/controller/vmd.c pci_iounmap(vmd->dev, membar2);
vmd 610 drivers/pci/controller/vmd.c pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap);
vmd 611 drivers/pci/controller/vmd.c pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig);
vmd 614 drivers/pci/controller/vmd.c vmd->busn_start = 128;
vmd 617 drivers/pci/controller/vmd.c res = &vmd->dev->resource[VMD_CFGBAR];
vmd 618 drivers/pci/controller/vmd.c vmd->resources[0] = (struct resource) {
vmd 620 drivers/pci/controller/vmd.c .start = vmd->busn_start,
vmd 621 drivers/pci/controller/vmd.c .end = vmd->busn_start + (resource_size(res) >> 20) - 1,
vmd 642 drivers/pci/controller/vmd.c res = &vmd->dev->resource[VMD_MEMBAR1];
vmd 647 drivers/pci/controller/vmd.c vmd->resources[1] = (struct resource) {
vmd 655 drivers/pci/controller/vmd.c res = &vmd->dev->resource[VMD_MEMBAR2];
vmd 660 drivers/pci/controller/vmd.c vmd->resources[2] = (struct resource) {
vmd 673 drivers/pci/controller/vmd.c sd->node = pcibus_to_node(vmd->dev->bus);
vmd 675 drivers/pci/controller/vmd.c fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
vmd 679 drivers/pci/controller/vmd.c vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
vmd 682 drivers/pci/controller/vmd.c if (!vmd->irq_domain)
vmd 685 drivers/pci/controller/vmd.c pci_add_resource(&resources, &vmd->resources[0]);
vmd 686 drivers/pci/controller/vmd.c pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
vmd 687 drivers/pci/controller/vmd.c pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);
vmd 689 drivers/pci/controller/vmd.c vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
vmd 691 drivers/pci/controller/vmd.c if (!vmd->bus) {
vmd 693 drivers/pci/controller/vmd.c irq_domain_remove(vmd->irq_domain);
vmd 697 drivers/pci/controller/vmd.c vmd_attach_resources(vmd);
vmd 698 drivers/pci/controller/vmd.c vmd_setup_dma_ops(vmd);
vmd 699 drivers/pci/controller/vmd.c dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
vmd 701 drivers/pci/controller/vmd.c pci_scan_child_bus(vmd->bus);
vmd 702 drivers/pci/controller/vmd.c pci_assign_unassigned_bus_resources(vmd->bus);
vmd 709 drivers/pci/controller/vmd.c list_for_each_entry(child, &vmd->bus->children, node)
vmd 712 drivers/pci/controller/vmd.c pci_bus_add_devices(vmd->bus);
vmd 714 drivers/pci/controller/vmd.c WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
vmd 735 drivers/pci/controller/vmd.c struct vmd_dev *vmd;
vmd 741 drivers/pci/controller/vmd.c vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
vmd 742 drivers/pci/controller/vmd.c if (!vmd)
vmd 745 drivers/pci/controller/vmd.c vmd->dev = dev;
vmd 750 drivers/pci/controller/vmd.c vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
vmd 751 drivers/pci/controller/vmd.c if (!vmd->cfgbar)
vmd 759 drivers/pci/controller/vmd.c vmd->msix_count = pci_msix_vec_count(dev);
vmd 760 drivers/pci/controller/vmd.c if (vmd->msix_count < 0)
vmd 763 drivers/pci/controller/vmd.c vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
vmd 765 drivers/pci/controller/vmd.c if (vmd->msix_count < 0)
vmd 766 drivers/pci/controller/vmd.c return vmd->msix_count;
vmd 768 drivers/pci/controller/vmd.c vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
vmd 770 drivers/pci/controller/vmd.c if (!vmd->irqs)
vmd 773 drivers/pci/controller/vmd.c for (i = 0; i < vmd->msix_count; i++) {
vmd 774 drivers/pci/controller/vmd.c err = init_srcu_struct(&vmd->irqs[i].srcu);
vmd 778 drivers/pci/controller/vmd.c INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
vmd 781 drivers/pci/controller/vmd.c "vmd", &vmd->irqs[i]);
vmd 786 drivers/pci/controller/vmd.c spin_lock_init(&vmd->cfg_lock);
vmd 787 drivers/pci/controller/vmd.c pci_set_drvdata(dev, vmd);
vmd 788 drivers/pci/controller/vmd.c err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
vmd 792 drivers/pci/controller/vmd.c dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
vmd 793 drivers/pci/controller/vmd.c vmd->sysdata.domain);
vmd 797 drivers/pci/controller/vmd.c static void vmd_cleanup_srcu(struct vmd_dev *vmd)
vmd 801 drivers/pci/controller/vmd.c for (i = 0; i < vmd->msix_count; i++)
vmd 802 drivers/pci/controller/vmd.c cleanup_srcu_struct(&vmd->irqs[i].srcu);
vmd 807 drivers/pci/controller/vmd.c struct vmd_dev *vmd = pci_get_drvdata(dev);
vmd 809 drivers/pci/controller/vmd.c sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
vmd 810 drivers/pci/controller/vmd.c pci_stop_root_bus(vmd->bus);
vmd 811 drivers/pci/controller/vmd.c pci_remove_root_bus(vmd->bus);
vmd 812 drivers/pci/controller/vmd.c vmd_cleanup_srcu(vmd);
vmd 813 drivers/pci/controller/vmd.c vmd_teardown_dma_ops(vmd);
vmd 814 drivers/pci/controller/vmd.c vmd_detach_resources(vmd);
vmd 815 drivers/pci/controller/vmd.c irq_domain_remove(vmd->irq_domain);
vmd 822 drivers/pci/controller/vmd.c struct vmd_dev *vmd = pci_get_drvdata(pdev);
vmd 825 drivers/pci/controller/vmd.c for (i = 0; i < vmd->msix_count; i++)
vmd 826 drivers/pci/controller/vmd.c devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);
vmd 835 drivers/pci/controller/vmd.c struct vmd_dev *vmd = pci_get_drvdata(pdev);
vmd 838 drivers/pci/controller/vmd.c for (i = 0; i < vmd->msix_count; i++) {
vmd 841 drivers/pci/controller/vmd.c "vmd", &vmd->irqs[i]);
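
The entries at lines 111 and 114 show how the driver turns a pointer into the vmd->irqs array back into an MSI-X vector index with plain pointer subtraction. The following is a minimal standalone sketch of that pattern; the demo_* types are simplified stand-ins, not the driver's real vmd_dev or vmd_irq_list definitions.

#include <stdio.h>

/* Simplified stand-ins for the driver's vmd_irq_list / vmd_dev types. */
struct demo_irq_list {
	unsigned int count;
};

struct demo_vmd_dev {
	struct demo_irq_list irqs[4];	/* plays the role of vmd->irqs[] */
};

/* Same idea as index_from_irqs(): pointer subtraction yields the index. */
static unsigned int index_from_irqs(struct demo_vmd_dev *vmd,
				    struct demo_irq_list *irqs)
{
	return irqs - vmd->irqs;
}

int main(void)
{
	struct demo_vmd_dev vmd = { 0 };

	/* A pointer to the third element maps back to index 2. */
	printf("index = %u\n", index_from_irqs(&vmd, &vmd.irqs[2]));
	return 0;
}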
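
The entries at lines 202-223 outline vmd_next_irq()'s strategy for spreading child-device MSIs across the VMD MSI-X vectors: with more than one vector available it scans vmd->irqs[] for the least-loaded entry, bumps its count, and returns that list. Below is a compact sketch of just that selection loop, again with hypothetical demo types rather than the driver's own.

#include <stdio.h>

struct demo_irq_list {
	unsigned int count;	/* child MSIs already sharing this vector */
};

/*
 * Pick the least-loaded entry, as the loop at lines 217-220 does.
 * Index 0 is skipped, mirroring the driver starting its scan at 1.
 */
static struct demo_irq_list *pick_least_loaded(struct demo_irq_list *irqs,
					       int msix_count)
{
	int i, best = 1;

	if (msix_count == 1)
		return &irqs[0];

	for (i = 1; i < msix_count; i++)
		if (irqs[i].count < irqs[best].count)
			best = i;
	irqs[best].count++;
	return &irqs[best];
}

int main(void)
{
	struct demo_irq_list irqs[4] = { {0}, {3}, {1}, {2} };
	struct demo_irq_list *picked = pick_least_loaded(irqs, 4);

	/* Index 2 has the lowest count among indexes 1..3, so it is chosen. */
	printf("picked vector %ld (count now %u)\n",
	       (long)(picked - irqs), picked->count);
	return 0;
}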