Lines matching refs: pe
59 static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level, in pe_level_printk() argument
71 if (pe->flags & PNV_IODA_PE_DEV) in pe_level_printk()
72 strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix)); in pe_level_printk()
73 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) in pe_level_printk()
75 pci_domain_nr(pe->pbus), pe->pbus->number); in pe_level_printk()
77 else if (pe->flags & PNV_IODA_PE_VF) in pe_level_printk()
79 pci_domain_nr(pe->parent_dev->bus), in pe_level_printk()
80 (pe->rid & 0xff00) >> 8, in pe_level_printk()
81 PCI_SLOT(pe->rid), PCI_FUNC(pe->rid)); in pe_level_printk()
85 level, pfix, pe->pe_number, &vaf); in pe_level_printk()
90 #define pe_err(pe, fmt, ...) \ argument
91 pe_level_printk(pe, KERN_ERR, fmt, ##__VA_ARGS__)
92 #define pe_warn(pe, fmt, ...) \ argument
93 pe_level_printk(pe, KERN_WARNING, fmt, ##__VA_ARGS__)
94 #define pe_info(pe, fmt, ...) \ argument
95 pe_level_printk(pe, KERN_INFO, fmt, ##__VA_ARGS__)
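
The pe_err/pe_warn/pe_info macros above all funnel into pe_level_printk(), which builds a per-PE text prefix from the PE's flags and RID before printing. The following is a minimal userspace sketch of that pattern; struct my_pe, SLOT()/FUNC() and the prefix formats are simplifications invented for the sketch, not the kernel's definitions.

/*
 * Userspace sketch of the pe_level_printk()/pe_err()/pe_warn()/pe_info()
 * pattern: a per-object printf wrapper that builds a textual prefix from
 * the object's type before printing the caller's format string.
 * Names and the simplified struct my_pe are illustrative only.
 */
#include <stdarg.h>
#include <stdio.h>

#define MY_PE_DEV  0x1
#define MY_PE_BUS  0x2
#define MY_PE_VF   0x4

struct my_pe {
	unsigned int flags;
	int pe_number;
	unsigned int rid;	/* bus << 8 | devfn, as in the listed fragments */
};

/* PCI_SLOT()/PCI_FUNC() equivalents: devfn = slot << 3 | func */
#define SLOT(rid) (((rid) >> 3) & 0x1f)
#define FUNC(rid) ((rid) & 0x07)

static void pe_level_print(const struct my_pe *pe, const char *level,
			   const char *fmt, ...)
{
	char pfix[32];
	va_list args;

	if (pe->flags & MY_PE_VF)
		snprintf(pfix, sizeof(pfix), "%02x:%02x.%u",
			 (pe->rid & 0xff00) >> 8, SLOT(pe->rid), FUNC(pe->rid));
	else if (pe->flags & MY_PE_BUS)
		snprintf(pfix, sizeof(pfix), "bus %02x", (pe->rid & 0xff00) >> 8);
	else
		snprintf(pfix, sizeof(pfix), "dev %02x", pe->rid & 0xff);

	va_start(args, fmt);
	printf("%s%s: [PE# %d] ", level, pfix, pe->pe_number);
	vprintf(fmt, args);
	va_end(args);
}

#define pe_err(pe, fmt, ...)  pe_level_print(pe, "ERR ",  fmt, ##__VA_ARGS__)
#define pe_warn(pe, fmt, ...) pe_level_print(pe, "WARN ", fmt, ##__VA_ARGS__)
#define pe_info(pe, fmt, ...) pe_level_print(pe, "INFO ", fmt, ##__VA_ARGS__)

int main(void)
{
	struct my_pe pe = { .flags = MY_PE_VF, .pe_number = 5, .rid = 0x0310 };

	pe_info(&pe, "Associated device to PE\n");
	return 0;
}
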
153 unsigned long pe; in pnv_ioda_alloc_pe() local
156 pe = find_next_zero_bit(phb->ioda.pe_alloc, in pnv_ioda_alloc_pe()
158 if (pe >= phb->ioda.total_pe) in pnv_ioda_alloc_pe()
160 } while(test_and_set_bit(pe, phb->ioda.pe_alloc)); in pnv_ioda_alloc_pe()
162 phb->ioda.pe_array[pe].phb = phb; in pnv_ioda_alloc_pe()
163 phb->ioda.pe_array[pe].pe_number = pe; in pnv_ioda_alloc_pe()
164 return pe; in pnv_ioda_alloc_pe()
167 static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe) in pnv_ioda_free_pe() argument
169 WARN_ON(phb->ioda.pe_array[pe].pdev); in pnv_ioda_free_pe()
171 memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe)); in pnv_ioda_free_pe()
172 clear_bit(pe, phb->ioda.pe_alloc); in pnv_ioda_free_pe()
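
pnv_ioda_alloc_pe() scans the PHB's pe_alloc bitmap for a clear bit and claims it with test_and_set_bit(); pnv_ioda_free_pe() zeroes the pe_array slot and clears the bit again. Below is a single-threaded userspace sketch of the same allocate/free pattern; the kernel does the claim atomically, and TOTAL_PE here is an arbitrary example value.

/*
 * Userspace sketch of the pe_alloc bitmap pattern used by
 * pnv_ioda_alloc_pe()/pnv_ioda_free_pe(): scan for a clear bit, set it to
 * claim the PE number, clear it again to free.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define TOTAL_PE 64

struct toy_pe {
	int pe_number;
	int in_use;
};

static uint64_t pe_alloc[(TOTAL_PE + 63) / 64];
static struct toy_pe pe_array[TOTAL_PE];

static int alloc_pe(void)
{
	for (int pe = 0; pe < TOTAL_PE; pe++) {
		uint64_t mask = 1ULL << (pe % 64);

		if (!(pe_alloc[pe / 64] & mask)) {
			pe_alloc[pe / 64] |= mask;	/* claim it */
			pe_array[pe].pe_number = pe;
			pe_array[pe].in_use = 1;
			return pe;
		}
	}
	return -1;				/* no free PE */
}

static void free_pe(int pe)
{
	memset(&pe_array[pe], 0, sizeof(pe_array[pe]));
	pe_alloc[pe / 64] &= ~(1ULL << (pe % 64));
}

int main(void)
{
	int a = alloc_pe(), b = alloc_pe();

	printf("allocated PE#%d and PE#%d\n", a, b);
	free_pe(a);
	printf("after free, next allocation reuses PE#%d\n", alloc_pe());
	return 0;
}
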
278 struct pnv_ioda_pe *master_pe, *pe; in pnv_ioda2_pick_m64_pe() local
316 pe = &phb->ioda.pe_array[i]; in pnv_ioda2_pick_m64_pe()
319 pe->flags |= PNV_IODA_PE_MASTER; in pnv_ioda2_pick_m64_pe()
320 INIT_LIST_HEAD(&pe->slaves); in pnv_ioda2_pick_m64_pe()
321 master_pe = pe; in pnv_ioda2_pick_m64_pe()
323 pe->flags |= PNV_IODA_PE_SLAVE; in pnv_ioda2_pick_m64_pe()
324 pe->master = master_pe; in pnv_ioda2_pick_m64_pe()
325 list_add_tail(&pe->list, &master_pe->slaves); in pnv_ioda2_pick_m64_pe()
382 struct pnv_ioda_pe *pe = &phb->ioda.pe_array[pe_no]; in pnv_ioda_freeze_pe() local
387 if (pe->flags & PNV_IODA_PE_SLAVE) { in pnv_ioda_freeze_pe()
388 pe = pe->master; in pnv_ioda_freeze_pe()
389 if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER))) in pnv_ioda_freeze_pe()
392 pe_no = pe->pe_number; in pnv_ioda_freeze_pe()
406 if (!(pe->flags & PNV_IODA_PE_MASTER)) in pnv_ioda_freeze_pe()
409 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_freeze_pe()
422 struct pnv_ioda_pe *pe, *slave; in pnv_ioda_unfreeze_pe() local
426 pe = &phb->ioda.pe_array[pe_no]; in pnv_ioda_unfreeze_pe()
427 if (pe->flags & PNV_IODA_PE_SLAVE) { in pnv_ioda_unfreeze_pe()
428 pe = pe->master; in pnv_ioda_unfreeze_pe()
429 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)); in pnv_ioda_unfreeze_pe()
430 pe_no = pe->pe_number; in pnv_ioda_unfreeze_pe()
441 if (!(pe->flags & PNV_IODA_PE_MASTER)) in pnv_ioda_unfreeze_pe()
445 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_unfreeze_pe()
462 struct pnv_ioda_pe *slave, *pe; in pnv_ioda_get_pe_state() local
475 pe = &phb->ioda.pe_array[pe_no]; in pnv_ioda_get_pe_state()
476 if (pe->flags & PNV_IODA_PE_SLAVE) { in pnv_ioda_get_pe_state()
477 pe = pe->master; in pnv_ioda_get_pe_state()
478 WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)); in pnv_ioda_get_pe_state()
479 pe_no = pe->pe_number; in pnv_ioda_get_pe_state()
494 if (!(pe->flags & PNV_IODA_PE_MASTER)) in pnv_ioda_get_pe_state()
497 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_get_pe_state()
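
The freeze/unfreeze/get-state fragments share one pattern: if the target PE is a slave, redirect to its master and use the master's PE number, then fan the operation out to every PE on the master's slaves list. A compact sketch of that control flow, using a plain array instead of the kernel's list_head and invented toy_pe/freeze_one names:

/*
 * Sketch of the master/slave redirection seen in pnv_ioda_freeze_pe(),
 * pnv_ioda_unfreeze_pe() and pnv_ioda_get_pe_state().
 */
#include <stdio.h>

#define PE_MASTER 0x1
#define PE_SLAVE  0x2
#define MAX_SLAVES 4

struct toy_pe {
	int pe_number;
	unsigned int flags;
	struct toy_pe *master;
	struct toy_pe *slaves[MAX_SLAVES];
	int nr_slaves;
	int frozen;
};

static void freeze_one(struct toy_pe *pe)
{
	pe->frozen = 1;
	printf("freeze PE#%d\n", pe->pe_number);
}

static void freeze_pe(struct toy_pe *pe)
{
	/* A slave PE is handled through its master. */
	if (pe->flags & PE_SLAVE)
		pe = pe->master;

	freeze_one(pe);

	/* If this is a compound (master) PE, apply to the slaves too. */
	if (pe->flags & PE_MASTER)
		for (int i = 0; i < pe->nr_slaves; i++)
			freeze_one(pe->slaves[i]);
}

int main(void)
{
	struct toy_pe master = { .pe_number = 1, .flags = PE_MASTER };
	struct toy_pe slave  = { .pe_number = 2, .flags = PE_SLAVE, .master = &master };

	master.slaves[master.nr_slaves++] = &slave;
	freeze_pe(&slave);	/* redirects to PE#1, then fans out to PE#2 */
	return 0;
}
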
578 struct pnv_ioda_pe *pe, in pnv_ioda_set_peltv() argument
590 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, in pnv_ioda_set_peltv()
592 if (pe->flags & PNV_IODA_PE_MASTER) { in pnv_ioda_set_peltv()
593 list_for_each_entry(slave, &pe->slaves, list) in pnv_ioda_set_peltv()
606 ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add); in pnv_ioda_set_peltv()
611 if (pe->flags & PNV_IODA_PE_MASTER) { in pnv_ioda_set_peltv()
612 list_for_each_entry(slave, &pe->slaves, list) { in pnv_ioda_set_peltv()
613 ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add); in pnv_ioda_set_peltv()
619 if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS)) in pnv_ioda_set_peltv()
620 pdev = pe->pbus->self; in pnv_ioda_set_peltv()
621 else if (pe->flags & PNV_IODA_PE_DEV) in pnv_ioda_set_peltv()
622 pdev = pe->pdev->bus->self; in pnv_ioda_set_peltv()
624 else if (pe->flags & PNV_IODA_PE_VF) in pnv_ioda_set_peltv()
625 pdev = pe->parent_dev; in pnv_ioda_set_peltv()
633 ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add); in pnv_ioda_set_peltv()
645 static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) in pnv_ioda_deconfigure_pe() argument
653 if (pe->pbus) { in pnv_ioda_deconfigure_pe()
658 parent = pe->pbus->self; in pnv_ioda_deconfigure_pe()
659 if (pe->flags & PNV_IODA_PE_BUS_ALL) in pnv_ioda_deconfigure_pe()
660 count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1; in pnv_ioda_deconfigure_pe()
672 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n", in pnv_ioda_deconfigure_pe()
677 rid_end = pe->rid + (count << 8); in pnv_ioda_deconfigure_pe()
679 if (pe->flags & PNV_IODA_PE_VF) in pnv_ioda_deconfigure_pe()
680 parent = pe->parent_dev; in pnv_ioda_deconfigure_pe()
682 parent = pe->pdev->bus->self; in pnv_ioda_deconfigure_pe()
686 rid_end = pe->rid + 1; in pnv_ioda_deconfigure_pe()
690 for (rid = pe->rid; rid < rid_end; rid++) in pnv_ioda_deconfigure_pe()
698 pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN); in pnv_ioda_deconfigure_pe()
704 opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number, in pnv_ioda_deconfigure_pe()
708 rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number, in pnv_ioda_deconfigure_pe()
709 pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN); in pnv_ioda_deconfigure_pe()
711 pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc); in pnv_ioda_deconfigure_pe()
712 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, in pnv_ioda_deconfigure_pe()
715 pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc); in pnv_ioda_deconfigure_pe()
717 pe->pbus = NULL; in pnv_ioda_deconfigure_pe()
718 pe->pdev = NULL; in pnv_ioda_deconfigure_pe()
719 pe->parent_dev = NULL; in pnv_ioda_deconfigure_pe()
725 static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) in pnv_ioda_configure_pe() argument
732 if (pe->pbus) { in pnv_ioda_configure_pe()
737 parent = pe->pbus->self; in pnv_ioda_configure_pe()
738 if (pe->flags & PNV_IODA_PE_BUS_ALL) in pnv_ioda_configure_pe()
739 count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1; in pnv_ioda_configure_pe()
751 dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n", in pnv_ioda_configure_pe()
756 rid_end = pe->rid + (count << 8); in pnv_ioda_configure_pe()
759 if (pe->flags & PNV_IODA_PE_VF) in pnv_ioda_configure_pe()
760 parent = pe->parent_dev; in pnv_ioda_configure_pe()
763 parent = pe->pdev->bus->self; in pnv_ioda_configure_pe()
767 rid_end = pe->rid + 1; in pnv_ioda_configure_pe()
776 rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid, in pnv_ioda_configure_pe()
779 pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc); in pnv_ioda_configure_pe()
784 pnv_ioda_set_peltv(phb, pe, true); in pnv_ioda_configure_pe()
787 for (rid = pe->rid; rid < rid_end; rid++) in pnv_ioda_configure_pe()
788 phb->ioda.pe_rmap[rid] = pe->pe_number; in pnv_ioda_configure_pe()
792 pe->mve_number = 0; in pnv_ioda_configure_pe()
796 pe->mve_number = pe->pe_number; in pnv_ioda_configure_pe()
797 rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number); in pnv_ioda_configure_pe()
799 pe_err(pe, "OPAL error %ld setting up MVE %d\n", in pnv_ioda_configure_pe()
800 rc, pe->mve_number); in pnv_ioda_configure_pe()
801 pe->mve_number = -1; in pnv_ioda_configure_pe()
804 pe->mve_number, OPAL_ENABLE_MVE); in pnv_ioda_configure_pe()
806 pe_err(pe, "OPAL error %ld enabling MVE %d\n", in pnv_ioda_configure_pe()
807 rc, pe->mve_number); in pnv_ioda_configure_pe()
808 pe->mve_number = -1; in pnv_ioda_configure_pe()
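
pnv_ioda_configure_pe() and pnv_ioda_deconfigure_pe() both compute the range of RIDs a PE owns: a bus PE covers count << 8 RIDs (count being 1 or the number of subordinate buses), while a device or VF PE covers exactly one (rid_end = rid + 1). Every RID in that range is then entered into pe_rmap. A small standalone sketch of the arithmetic, with example bus numbers invented for the demo:

/*
 * Sketch of the RID-range arithmetic in pnv_ioda_configure_pe() /
 * pnv_ioda_deconfigure_pe().  A RID is bus << 8 | devfn, so covering
 * `count` bus numbers spans count << 8 RIDs.
 */
#include <stdio.h>

static void show_range(const char *what, unsigned int rid, unsigned int count)
{
	unsigned int rid_end = rid + (count << 8);

	printf("%s: RIDs 0x%04x..0x%04x map to this PE\n",
	       what, rid, rid_end - 1);
}

int main(void)
{
	/* Bus PE for bus 0x02 only: rid = 0x02 << 8, one bus. */
	show_range("bus PE (single bus)", 0x02 << 8, 1);

	/* BUS_ALL PE covering buses 0x02..0x05: count = end - start + 1. */
	show_range("bus PE (buses 02..05)", 0x02 << 8, 0x05 - 0x02 + 1);

	/* Device/VF PE: rid_end = rid + 1, i.e. exactly one RID. */
	unsigned int dev_rid = (0x02 << 8) | 0x10;	/* 02:02.0 */
	printf("device PE: RID 0x%04x only (rid_end = rid + 1)\n", dev_rid);
	return 0;
}
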
817 struct pnv_ioda_pe *pe) in pnv_ioda_link_pe_by_weight() argument
822 if (lpe->dma_weight < pe->dma_weight) { in pnv_ioda_link_pe_by_weight()
823 list_add_tail(&pe->dma_link, &lpe->dma_link); in pnv_ioda_link_pe_by_weight()
827 list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list); in pnv_ioda_link_pe_by_weight()
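
pnv_ioda_link_pe_by_weight() keeps the per-PHB DMA list sorted heaviest-first: it inserts the new PE in front of the first entry whose dma_weight is smaller, or at the tail if none is. A sketch of that ordered insert using a plain singly linked list instead of the kernel's list_head:

/*
 * Sketch of the weight-ordered insert in pnv_ioda_link_pe_by_weight().
 */
#include <stdio.h>

struct toy_pe {
	int pe_number;
	unsigned int dma_weight;
	struct toy_pe *next;
};

static void link_pe_by_weight(struct toy_pe **head, struct toy_pe *pe)
{
	struct toy_pe **pos = head;

	/* Walk past entries at least as heavy, insert before the first lighter one. */
	while (*pos && (*pos)->dma_weight >= pe->dma_weight)
		pos = &(*pos)->next;
	pe->next = *pos;
	*pos = pe;
}

int main(void)
{
	struct toy_pe a = { 1, 10 }, b = { 2, 30 }, c = { 3, 20 };
	struct toy_pe *head = NULL;

	link_pe_by_weight(&head, &a);
	link_pe_by_weight(&head, &b);
	link_pe_by_weight(&head, &c);

	for (struct toy_pe *p = head; p; p = p->next)
		printf("PE#%d weight %u\n", p->pe_number, p->dma_weight);
	return 0;
}
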
933 struct pnv_ioda_pe *pe;
962 pe = &phb->ioda.pe_array[pe_num];
966 pe->pdev = dev;
967 pe->pbus = NULL;
968 pe->tce32_seg = -1;
969 pe->mve_number = -1;
970 pe->rid = dev->bus->number << 8 | pdn->devfn;
972 pe_info(pe, "Associated device to PE\n");
974 if (pnv_ioda_configure_pe(phb, pe)) {
979 pe->pdev = NULL;
985 pe->dma_weight = pnv_ioda_dma_weight(dev);
986 if (pe->dma_weight != 0) {
987 phb->ioda.dma_weight += pe->dma_weight;
992 pnv_ioda_link_pe_by_weight(phb, pe);
994 return pe;
998 static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) in pnv_ioda_setup_same_PE() argument
1010 pdn->pe_number = pe->pe_number; in pnv_ioda_setup_same_PE()
1011 pe->dma_weight += pnv_ioda_dma_weight(dev); in pnv_ioda_setup_same_PE()
1012 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) in pnv_ioda_setup_same_PE()
1013 pnv_ioda_setup_same_PE(dev->subordinate, pe); in pnv_ioda_setup_same_PE()
1027 struct pnv_ioda_pe *pe; in pnv_ioda_setup_bus_PE() local
1044 pe = &phb->ioda.pe_array[pe_num]; in pnv_ioda_setup_bus_PE()
1045 pe->flags |= (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS); in pnv_ioda_setup_bus_PE()
1046 pe->pbus = bus; in pnv_ioda_setup_bus_PE()
1047 pe->pdev = NULL; in pnv_ioda_setup_bus_PE()
1048 pe->tce32_seg = -1; in pnv_ioda_setup_bus_PE()
1049 pe->mve_number = -1; in pnv_ioda_setup_bus_PE()
1050 pe->rid = bus->busn_res.start << 8; in pnv_ioda_setup_bus_PE()
1051 pe->dma_weight = 0; in pnv_ioda_setup_bus_PE()
1054 pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n", in pnv_ioda_setup_bus_PE()
1057 pe_info(pe, "Secondary bus %d associated with PE#%d\n", in pnv_ioda_setup_bus_PE()
1060 if (pnv_ioda_configure_pe(phb, pe)) { in pnv_ioda_setup_bus_PE()
1064 pe->pbus = NULL; in pnv_ioda_setup_bus_PE()
1069 pnv_ioda_setup_same_PE(bus, pe); in pnv_ioda_setup_bus_PE()
1072 list_add_tail(&pe->list, &phb->ioda.pe_list); in pnv_ioda_setup_bus_PE()
1077 if (pe->dma_weight != 0) { in pnv_ioda_setup_bus_PE()
1078 phb->ioda.dma_weight += pe->dma_weight; in pnv_ioda_setup_bus_PE()
1083 pnv_ioda_link_pe_by_weight(phb, pe); in pnv_ioda_setup_bus_PE()
1263 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable);
1265 static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe) in pnv_pci_ioda2_release_dma_pe() argument
1270 tbl = pe->table_group.tables[0]; in pnv_pci_ioda2_release_dma_pe()
1271 rc = pnv_pci_ioda2_unset_window(&pe->table_group, 0); in pnv_pci_ioda2_release_dma_pe()
1273 pe_warn(pe, "OPAL error %ld release DMA window\n", rc); in pnv_pci_ioda2_release_dma_pe()
1275 pnv_pci_ioda2_set_bypass(pe, false); in pnv_pci_ioda2_release_dma_pe()
1276 if (pe->table_group.group) { in pnv_pci_ioda2_release_dma_pe()
1277 iommu_group_put(pe->table_group.group); in pnv_pci_ioda2_release_dma_pe()
1278 BUG_ON(pe->table_group.group); in pnv_pci_ioda2_release_dma_pe()
1289 struct pnv_ioda_pe *pe, *pe_n; in pnv_ioda_release_vf_PE() local
1331 list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) { in pnv_ioda_release_vf_PE()
1332 if (pe->parent_dev != pdev) in pnv_ioda_release_vf_PE()
1335 pnv_pci_ioda2_release_dma_pe(pdev, pe); in pnv_ioda_release_vf_PE()
1339 list_del(&pe->list); in pnv_ioda_release_vf_PE()
1342 pnv_ioda_deconfigure_pe(phb, pe); in pnv_ioda_release_vf_PE()
1344 pnv_ioda_free_pe(phb, pe->pe_number); in pnv_ioda_release_vf_PE()
1381 struct pnv_ioda_pe *pe);
1387 struct pnv_ioda_pe *pe; in pnv_ioda_setup_vf_PE() local
1405 pe = &phb->ioda.pe_array[pe_num]; in pnv_ioda_setup_vf_PE()
1406 pe->pe_number = pe_num; in pnv_ioda_setup_vf_PE()
1407 pe->phb = phb; in pnv_ioda_setup_vf_PE()
1408 pe->flags = PNV_IODA_PE_VF; in pnv_ioda_setup_vf_PE()
1409 pe->pbus = NULL; in pnv_ioda_setup_vf_PE()
1410 pe->parent_dev = pdev; in pnv_ioda_setup_vf_PE()
1411 pe->tce32_seg = -1; in pnv_ioda_setup_vf_PE()
1412 pe->mve_number = -1; in pnv_ioda_setup_vf_PE()
1413 pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) | in pnv_ioda_setup_vf_PE()
1416 pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n", in pnv_ioda_setup_vf_PE()
1421 if (pnv_ioda_configure_pe(phb, pe)) { in pnv_ioda_setup_vf_PE()
1425 pe->pdev = NULL; in pnv_ioda_setup_vf_PE()
1431 list_add_tail(&pe->list, &phb->ioda.pe_list); in pnv_ioda_setup_vf_PE()
1434 pnv_pci_ioda2_setup_dma_pe(phb, pe); in pnv_ioda_setup_vf_PE()
1551 struct pnv_ioda_pe *pe; in pnv_pci_ioda_dma_dev_setup() local
1561 pe = &phb->ioda.pe_array[pdn->pe_number]; in pnv_pci_ioda_dma_dev_setup()
1563 set_dma_offset(&pdev->dev, pe->tce_bypass_base); in pnv_pci_ioda_dma_dev_setup()
1564 set_iommu_table_base(&pdev->dev, pe->table_group.tables[0]); in pnv_pci_ioda_dma_dev_setup()
1578 struct pnv_ioda_pe *pe; in pnv_pci_ioda_dma_set_mask() local
1585 pe = &phb->ioda.pe_array[pdn->pe_number]; in pnv_pci_ioda_dma_set_mask()
1586 if (pe->tce_bypass_enabled) { in pnv_pci_ioda_dma_set_mask()
1587 top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1; in pnv_pci_ioda_dma_set_mask()
1607 struct pnv_ioda_pe *pe; in pnv_pci_ioda_dma_get_required_mask() local
1613 pe = &phb->ioda.pe_array[pdn->pe_number]; in pnv_pci_ioda_dma_get_required_mask()
1614 if (!pe->tce_bypass_enabled) in pnv_pci_ioda_dma_get_required_mask()
1618 end = pe->tce_bypass_base + memblock_end_of_DRAM(); in pnv_pci_ioda_dma_get_required_mask()
1625 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, in pnv_ioda_setup_bus_dma() argument
1631 set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); in pnv_ioda_setup_bus_dma()
1632 set_dma_offset(&dev->dev, pe->tce_bypass_base); in pnv_ioda_setup_bus_dma()
1635 if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) in pnv_ioda_setup_bus_dma()
1636 pnv_ioda_setup_bus_dma(pe, dev->subordinate); in pnv_ioda_setup_bus_dma()
1646 struct pnv_ioda_pe *pe = container_of(tgl->table_group, in pnv_pci_ioda1_tce_invalidate() local
1649 (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys : in pnv_pci_ioda1_tce_invalidate()
1650 pe->phb->ioda.tce_inval_reg; in pnv_pci_ioda1_tce_invalidate()
1738 static inline void pnv_pci_ioda2_tce_invalidate_entire(struct pnv_ioda_pe *pe) in pnv_pci_ioda2_tce_invalidate_entire() argument
1741 unsigned long val = (0x4ull << 60) | (pe->pe_number & 0xFF); in pnv_pci_ioda2_tce_invalidate_entire()
1742 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_tce_invalidate_entire()
1783 struct pnv_ioda_pe *pe = container_of(tgl->table_group, in pnv_pci_ioda2_tce_invalidate() local
1786 (__be64 __iomem *)pe->phb->ioda.tce_inval_reg_phys : in pnv_pci_ioda2_tce_invalidate()
1787 pe->phb->ioda.tce_inval_reg; in pnv_pci_ioda2_tce_invalidate()
1789 pnv_pci_ioda2_do_tce_invalidate(pe->pe_number, rm, in pnv_pci_ioda2_tce_invalidate()
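
pnv_pci_ioda2_tce_invalidate_entire() above assembles the IODA2 "invalidate everything for this PE" value as (0x4ull << 60) | (pe_number & 0xFF) and the invalidate paths write such values to the PHB's TCE invalidation register. A trivial sketch that only builds and prints the value (no register access):

/*
 * Sketch of the command word built in pnv_pci_ioda2_tce_invalidate_entire().
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t tce_invalidate_entire_val(unsigned int pe_number)
{
	return (0x4ull << 60) | (pe_number & 0xFF);
}

int main(void)
{
	printf("PE#5   -> 0x%016llx\n",
	       (unsigned long long)tce_invalidate_entire_val(5));
	printf("PE#255 -> 0x%016llx\n",
	       (unsigned long long)tce_invalidate_entire_val(255));
	return 0;
}
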
1849 struct pnv_ioda_pe *pe, unsigned int base, in pnv_pci_ioda_setup_dma_pe() argument
1864 if (WARN_ON(pe->tce32_seg >= 0)) in pnv_pci_ioda_setup_dma_pe()
1868 iommu_register_group(&pe->table_group, phb->hose->global_number, in pnv_pci_ioda_setup_dma_pe()
1869 pe->pe_number); in pnv_pci_ioda_setup_dma_pe()
1870 pnv_pci_link_table_and_group(phb->hose->node, 0, tbl, &pe->table_group); in pnv_pci_ioda_setup_dma_pe()
1873 pe->tce32_seg = base; in pnv_pci_ioda_setup_dma_pe()
1874 pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n", in pnv_pci_ioda_setup_dma_pe()
1885 pe_err(pe, " Failed to allocate a 32-bit TCE memory\n"); in pnv_pci_ioda_setup_dma_pe()
1894 pe->pe_number, in pnv_pci_ioda_setup_dma_pe()
1899 pe_err(pe, " Failed to configure 32-bit TCE table," in pnv_pci_ioda_setup_dma_pe()
1916 pe->table_group.tce32_start = tbl->it_offset << tbl->it_page_shift; in pnv_pci_ioda_setup_dma_pe()
1917 pe->table_group.tce32_size = tbl->it_size << tbl->it_page_shift; in pnv_pci_ioda_setup_dma_pe()
1920 if (pe->flags & PNV_IODA_PE_DEV) { in pnv_pci_ioda_setup_dma_pe()
1926 set_iommu_table_base(&pe->pdev->dev, tbl); in pnv_pci_ioda_setup_dma_pe()
1927 iommu_add_device(&pe->pdev->dev); in pnv_pci_ioda_setup_dma_pe()
1928 } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) in pnv_pci_ioda_setup_dma_pe()
1929 pnv_ioda_setup_bus_dma(pe, pe->pbus); in pnv_pci_ioda_setup_dma_pe()
1934 if (pe->tce32_seg >= 0) in pnv_pci_ioda_setup_dma_pe()
1935 pe->tce32_seg = -1; in pnv_pci_ioda_setup_dma_pe()
1939 pnv_pci_unlink_table_and_group(tbl, &pe->table_group); in pnv_pci_ioda_setup_dma_pe()
1947 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_pci_ioda2_set_window() local
1949 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_set_window()
1956 pe_info(pe, "Setting up window#%d %llx..%llx pg=%x\n", num, in pnv_pci_ioda2_set_window()
1965 pe->pe_number, in pnv_pci_ioda2_set_window()
1966 (pe->pe_number << 1) + num, in pnv_pci_ioda2_set_window()
1972 pe_err(pe, "Failed to configure TCE table, err %ld\n", rc); in pnv_pci_ioda2_set_window()
1977 tbl, &pe->table_group); in pnv_pci_ioda2_set_window()
1978 pnv_pci_ioda2_tce_invalidate_entire(pe); in pnv_pci_ioda2_set_window()
1983 static void pnv_pci_ioda2_set_bypass(struct pnv_ioda_pe *pe, bool enable) in pnv_pci_ioda2_set_bypass() argument
1985 uint16_t window_id = (pe->pe_number << 1 ) + 1; in pnv_pci_ioda2_set_bypass()
1988 pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis"); in pnv_pci_ioda2_set_bypass()
1993 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, in pnv_pci_ioda2_set_bypass()
1994 pe->pe_number, in pnv_pci_ioda2_set_bypass()
1996 pe->tce_bypass_base, in pnv_pci_ioda2_set_bypass()
1999 rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id, in pnv_pci_ioda2_set_bypass()
2000 pe->pe_number, in pnv_pci_ioda2_set_bypass()
2002 pe->tce_bypass_base, in pnv_pci_ioda2_set_bypass()
2006 pe_err(pe, "OPAL error %lld configuring bypass window\n", rc); in pnv_pci_ioda2_set_bypass()
2008 pe->tce_bypass_enabled = enable; in pnv_pci_ioda2_set_bypass()
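
The IODA2 fragments show a simple per-PE DMA window numbering: (pe_number << 1) + num for TCE windows in set_window()/unset_window(), and (pe_number << 1) + 1 for the 64-bit bypass window in set_bypass(). A sketch of that mapping, with an example PE number:

/*
 * Sketch of the per-PE DMA window ids used by pnv_pci_ioda2_set_window(),
 * pnv_pci_ioda2_unset_window() and pnv_pci_ioda2_set_bypass().
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t dma_window_id(unsigned int pe_number, unsigned int num)
{
	return (pe_number << 1) + num;
}

int main(void)
{
	unsigned int pe_number = 7;	/* example PE */

	printf("PE#%u 32-bit TCE window id: %u\n",
	       pe_number, (unsigned int)dma_window_id(pe_number, 0));
	printf("PE#%u bypass window id:     %u\n",
	       pe_number, (unsigned int)dma_window_id(pe_number, 1));
	return 0;
}
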
2019 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_pci_ioda2_create_table() local
2021 int nid = pe->phb->hose->node; in pnv_pci_ioda2_create_table()
2022 __u64 bus_offset = num ? pe->tce_bypass_base : table_group->tce32_start; in pnv_pci_ioda2_create_table()
2039 if (pe->phb->ioda.tce_inval_reg) in pnv_pci_ioda2_create_table()
2047 static long pnv_pci_ioda2_setup_default_config(struct pnv_ioda_pe *pe) in pnv_pci_ioda2_setup_default_config() argument
2064 const u64 window_size = min((u64)pe->table_group.tce32_size, max_memory); in pnv_pci_ioda2_setup_default_config()
2066 rc = pnv_pci_ioda2_create_table(&pe->table_group, 0, in pnv_pci_ioda2_setup_default_config()
2071 pe_err(pe, "Failed to create 32-bit TCE table, err %ld", in pnv_pci_ioda2_setup_default_config()
2076 iommu_init_table(tbl, pe->phb->hose->node); in pnv_pci_ioda2_setup_default_config()
2078 rc = pnv_pci_ioda2_set_window(&pe->table_group, 0, tbl); in pnv_pci_ioda2_setup_default_config()
2080 pe_err(pe, "Failed to configure 32-bit TCE table, err %ld\n", in pnv_pci_ioda2_setup_default_config()
2087 pnv_pci_ioda2_set_bypass(pe, true); in pnv_pci_ioda2_setup_default_config()
2090 if (pe->phb->ioda.tce_inval_reg) in pnv_pci_ioda2_setup_default_config()
2098 if (pe->flags & PNV_IODA_PE_DEV) in pnv_pci_ioda2_setup_default_config()
2099 set_iommu_table_base(&pe->pdev->dev, tbl); in pnv_pci_ioda2_setup_default_config()
2108 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_pci_ioda2_unset_window() local
2110 struct pnv_phb *phb = pe->phb; in pnv_pci_ioda2_unset_window()
2113 pe_info(pe, "Removing DMA window #%d\n", num); in pnv_pci_ioda2_unset_window()
2115 ret = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number, in pnv_pci_ioda2_unset_window()
2116 (pe->pe_number << 1) + num, in pnv_pci_ioda2_unset_window()
2120 pe_warn(pe, "Unmapping failed, ret = %ld\n", ret); in pnv_pci_ioda2_unset_window()
2122 pnv_pci_ioda2_tce_invalidate_entire(pe); in pnv_pci_ioda2_unset_window()
2165 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_ioda2_take_ownership() local
2168 struct iommu_table *tbl = pe->table_group.tables[0]; in pnv_ioda2_take_ownership()
2170 pnv_pci_ioda2_set_bypass(pe, false); in pnv_ioda2_take_ownership()
2171 pnv_pci_ioda2_unset_window(&pe->table_group, 0); in pnv_ioda2_take_ownership()
2177 struct pnv_ioda_pe *pe = container_of(table_group, struct pnv_ioda_pe, in pnv_ioda2_release_ownership() local
2180 pnv_pci_ioda2_setup_default_config(pe); in pnv_ioda2_release_ownership()
2342 struct pnv_ioda_pe *pe) in pnv_pci_ioda2_setup_dma_pe() argument
2347 if (WARN_ON(pe->tce32_seg >= 0)) in pnv_pci_ioda2_setup_dma_pe()
2351 pe->tce_bypass_base = 1ull << 59; in pnv_pci_ioda2_setup_dma_pe()
2353 iommu_register_group(&pe->table_group, phb->hose->global_number, in pnv_pci_ioda2_setup_dma_pe()
2354 pe->pe_number); in pnv_pci_ioda2_setup_dma_pe()
2357 pe->tce32_seg = 0; in pnv_pci_ioda2_setup_dma_pe()
2358 pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n", in pnv_pci_ioda2_setup_dma_pe()
2362 pe->table_group.tce32_start = 0; in pnv_pci_ioda2_setup_dma_pe()
2363 pe->table_group.tce32_size = phb->ioda.m32_pci_base; in pnv_pci_ioda2_setup_dma_pe()
2364 pe->table_group.max_dynamic_windows_supported = in pnv_pci_ioda2_setup_dma_pe()
2366 pe->table_group.max_levels = POWERNV_IOMMU_MAX_LEVELS; in pnv_pci_ioda2_setup_dma_pe()
2367 pe->table_group.pgsizes = SZ_4K | SZ_64K | SZ_16M; in pnv_pci_ioda2_setup_dma_pe()
2369 pe->table_group.ops = &pnv_pci_ioda2_ops; in pnv_pci_ioda2_setup_dma_pe()
2372 rc = pnv_pci_ioda2_setup_default_config(pe); in pnv_pci_ioda2_setup_dma_pe()
2374 if (pe->tce32_seg >= 0) in pnv_pci_ioda2_setup_dma_pe()
2375 pe->tce32_seg = -1; in pnv_pci_ioda2_setup_dma_pe()
2379 if (pe->flags & PNV_IODA_PE_DEV) in pnv_pci_ioda2_setup_dma_pe()
2380 iommu_add_device(&pe->pdev->dev); in pnv_pci_ioda2_setup_dma_pe()
2381 else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) in pnv_pci_ioda2_setup_dma_pe()
2382 pnv_ioda_setup_bus_dma(pe, pe->pbus); in pnv_pci_ioda2_setup_dma_pe()
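
pnv_pci_ioda2_setup_dma_pe() places the 64-bit bypass window at tce_bypass_base = 1ull << 59, and the dma_set_mask/get_required_mask fragments compute its top as tce_bypass_base plus the end of DRAM. A sketch of that address range; the DRAM size below is an example value, not taken from the listing:

/*
 * Sketch of the 64-bit bypass addressing seen in pnv_pci_ioda2_setup_dma_pe()
 * and pnv_pci_ioda_dma_set_mask().
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tce_bypass_base = 1ull << 59;
	uint64_t end_of_dram = 64ull << 30;		/* e.g. 64 GB, illustrative */
	uint64_t top = tce_bypass_base + end_of_dram - 1;

	printf("bypass window: 0x%016llx .. 0x%016llx\n",
	       (unsigned long long)tce_bypass_base, (unsigned long long)top);
	/* A device's DMA mask must cover `top` before the bypass is usable. */
	return 0;
}
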
2389 struct pnv_ioda_pe *pe; in pnv_ioda_setup_dma() local
2416 list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) { in pnv_ioda_setup_dma()
2417 if (!pe->dma_weight) in pnv_ioda_setup_dma()
2420 pe_warn(pe, "No DMA32 resources available\n"); in pnv_ioda_setup_dma()
2425 segs += ((pe->dma_weight * residual) + (tw / 2)) / tw; in pnv_ioda_setup_dma()
2436 pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n", in pnv_ioda_setup_dma()
2437 pe->dma_weight, segs); in pnv_ioda_setup_dma()
2438 pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs); in pnv_ioda_setup_dma()
2440 pe_info(pe, "Assign DMA32 space\n"); in pnv_ioda_setup_dma()
2442 pnv_pci_ioda2_setup_dma_pe(phb, pe); in pnv_ioda_setup_dma()
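
pnv_ioda_setup_dma() hands out the remaining IODA1 TCE32 segments in proportion to each PE's dma_weight, rounding to nearest with (weight * residual + tw/2) / tw. The sketch below computes only that rounded share; the listing adds it onto a per-PE base allocation (the += line) that is not shown here, and the weights and segment count are invented for the demo:

/*
 * Sketch of the weighted DMA32 segment split in pnv_ioda_setup_dma().
 */
#include <stdio.h>

int main(void)
{
	unsigned int weights[] = { 10, 3, 7 };	/* per-PE DMA weights */
	unsigned int tw = 10 + 3 + 7;		/* total weight */
	unsigned int residual = 14;		/* TCE32 segments left to hand out */

	for (unsigned int i = 0; i < 3; i++) {
		/* rounded proportional share of the residual segments */
		unsigned int share = (weights[i] * residual + tw / 2) / tw;

		printf("PE %u: weight %u -> +%u residual segments\n",
		       i, weights[i], share);
	}
	return 0;
}
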
2502 struct pnv_ioda_pe *pe; in pnv_phb_to_cxl_mode() local
2505 pe = pnv_ioda_get_pe(dev); in pnv_phb_to_cxl_mode()
2506 if (!pe) in pnv_phb_to_cxl_mode()
2509 pe_info(pe, "Switching PHB to CXL\n"); in pnv_phb_to_cxl_mode()
2511 rc = opal_pci_set_phb_cxl_mode(phb->opal_id, mode, pe->pe_number); in pnv_phb_to_cxl_mode()
2618 struct pnv_ioda_pe *pe; in pnv_cxl_ioda_msi_setup() local
2621 if (!(pe = pnv_ioda_get_pe(dev))) in pnv_cxl_ioda_msi_setup()
2625 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); in pnv_cxl_ioda_msi_setup()
2627 pe_warn(pe, "%s: OPAL error %d setting msi_base 0x%x " in pnv_cxl_ioda_msi_setup()
2643 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); in pnv_pci_ioda_msi_setup() local
2649 if (pe == NULL) in pnv_pci_ioda_msi_setup()
2653 if (pe->mve_number < 0) in pnv_pci_ioda_msi_setup()
2661 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); in pnv_pci_ioda_msi_setup()
2671 rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1, in pnv_pci_ioda_msi_setup()
2683 rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1, in pnv_pci_ioda_msi_setup()
2700 msg->address_hi, msg->address_lo, data, pe->pe_number); in pnv_pci_ioda_msi_setup()
2807 struct pnv_ioda_pe *pe) in pnv_ioda_setup_pe_seg() argument
2820 BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))); in pnv_ioda_setup_pe_seg()
2822 pci_bus_for_each_resource(pe->pbus, res, i) { in pnv_ioda_setup_pe_seg()
2834 phb->ioda.io_segmap[index] = pe->pe_number; in pnv_ioda_setup_pe_seg()
2836 pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index); in pnv_ioda_setup_pe_seg()
2840 __func__, rc, index, pe->pe_number); in pnv_ioda_setup_pe_seg()
2859 phb->ioda.m32_segmap[index] = pe->pe_number; in pnv_ioda_setup_pe_seg()
2861 pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index); in pnv_ioda_setup_pe_seg()
2865 __func__, rc, index, pe->pe_number); in pnv_ioda_setup_pe_seg()
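
pnv_ioda_setup_pe_seg() records, for every IO and M32 segment covered by one of the PE's bus resources, that the segment belongs to this PE (io_segmap/m32_segmap) and maps it via OPAL. The sketch below assumes a window split into equal-size segments; the segment-size and index arithmetic is a simplification, since the listing only shows the map being filled and the per-segment OPAL call:

/*
 * Sketch of the per-segment ownership recorded in pnv_ioda_setup_pe_seg().
 */
#include <stdio.h>

#define NR_SEGS 16

static int m32_segmap[NR_SEGS];

static void assign_segments(unsigned long win_base, unsigned long seg_size,
			    unsigned long res_start, unsigned long res_end,
			    int pe_number)
{
	unsigned long idx = (res_start - win_base) / seg_size;
	unsigned long last = (res_end - win_base) / seg_size;

	for (; idx <= last && idx < NR_SEGS; idx++) {
		m32_segmap[idx] = pe_number;
		/* the kernel maps each segment with opal_pci_map_pe_mmio_window() */
		printf("segment %lu -> PE#%d\n", idx, pe_number);
	}
}

int main(void)
{
	/* A window at 0x80000000 split into 16 MB segments (illustrative). */
	assign_segments(0x80000000UL, 16UL << 20,
			0x82000000UL, 0x83ffffffUL, 3);
	return 0;
}
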
2880 struct pnv_ioda_pe *pe; in pnv_pci_ioda_setup_seg() local
2884 list_for_each_entry(pe, &phb->ioda.pe_list, list) { in pnv_pci_ioda_setup_seg()
2885 pnv_ioda_setup_pe_seg(hose, pe); in pnv_pci_ioda_setup_seg()