Lines matching refs:ioc in arch/ia64/hp/common/sba_iommu.c (HP zx1 SBA IOMMU driver)
202 struct ioc { struct
235 struct ioc *next; /* list of IOC's in system */ member
245 static struct ioc *ioc_list, *ioc_found; variable
248 static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
249 static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
259 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
317 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide) in sba_dump_pdir_entry() argument
320 u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]; in sba_dump_pdir_entry()
321 unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)]; in sba_dump_pdir_entry()
348 sba_check_pdir(struct ioc *ioc, char *msg) in sba_check_pdir() argument
350 u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]); in sba_check_pdir()
351 u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */ in sba_check_pdir()
352 u64 *pptr = ioc->pdir_base; /* pdir ptr */ in sba_check_pdir()
371 sba_dump_pdir_entry(ioc, msg, pide); in sba_check_pdir()
395 sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) in sba_dump_sg() argument
406 sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) in sba_check_sg() argument
435 #define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset)) argument
436 #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase)) argument
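The SBA_IOVA()/SBA_IOVP() macros above compose and strip the IOC's IOVA window base. A minimal standalone sketch of the round trip, using made-up values for ibase, iovp, and offset (the real base comes from the IOC_IBASE register, and SBA_IOVP intentionally leaves the page offset in place):

    #include <stdio.h>
    #include <stdint.h>

    #define SBA_IOVA(ibase, iovp, offset)  ((ibase) | (iovp) | (offset))
    #define SBA_IOVP(ibase, iova)          ((iova) & ~(ibase))

    int main(void)
    {
        uint64_t ibase = 0x40000000ULL;   /* hypothetical window base */
        uint64_t iovp  = 0x3000ULL;       /* page-aligned offset inside the window */
        uint64_t off   = 0x123;           /* byte offset within the page */

        uint64_t iova = SBA_IOVA(ibase, iovp, off);
        printf("iova = 0x%llx\n", (unsigned long long)iova);                  /* 0x40003123 */
        printf("iovp = 0x%llx\n", (unsigned long long)SBA_IOVP(ibase, iova)); /* 0x3123 */
        return 0;
    }

The round trip relies on ibase not overlapping the low iovp/offset bits, i.e. on the window base being suitably aligned.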
465 static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr, in ptr_to_pide() argument
468 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3) in ptr_to_pide()
483 sba_search_bitmap(struct ioc *ioc, struct device *dev, in sba_search_bitmap() argument
487 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); in sba_search_bitmap()
493 ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0); in sba_search_bitmap()
499 BUG_ON(ioc->ibase & ~iovp_mask); in sba_search_bitmap()
500 shift = ioc->ibase >> iovp_shift; in sba_search_bitmap()
502 spin_lock_irqsave(&ioc->res_lock, flags); in sba_search_bitmap()
506 res_ptr = ioc->res_hint; in sba_search_bitmap()
508 res_ptr = (ulong *)ioc->res_map; in sba_search_bitmap()
509 ioc->res_bitshift = 0; in sba_search_bitmap()
526 pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt); in sba_search_bitmap()
527 ioc->res_bitshift = bitshiftcnt + bits_wanted; in sba_search_bitmap()
543 uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o); in sba_search_bitmap()
555 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt); in sba_search_bitmap()
562 ioc->res_bitshift = bitshiftcnt + bits_wanted; in sba_search_bitmap()
582 tpide = ptr_to_pide(ioc, res_ptr, 0); in sba_search_bitmap()
601 ioc->res_bitshift = bits; in sba_search_bitmap()
609 prefetch(ioc->res_map); in sba_search_bitmap()
610 ioc->res_hint = (unsigned long *) ioc->res_map; in sba_search_bitmap()
611 ioc->res_bitshift = 0; in sba_search_bitmap()
612 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_search_bitmap()
616 ioc->res_hint = res_ptr; in sba_search_bitmap()
617 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_search_bitmap()
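The sba_search_bitmap() lines above are the heart of the allocator: a first-fit scan of res_map that starts at ioc->res_hint, wraps back to the start of the map for one more pass, and converts a word pointer plus bit offset into a pdir page index ("pide") via ptr_to_pide(). A much-simplified, single-bit-granularity model of that search (the real code scans 64-bit words at a time, honors power-of-two alignment for large requests, and holds res_lock throughout):

    #include <stdio.h>
    #include <string.h>

    #define MAP_BYTES 16                      /* toy resource map: 128 pages */
    static unsigned char res_map[MAP_BYTES];
    static unsigned int  res_hint;            /* next bit index to try */

    /* Find 'want' contiguous clear bits, first-fit from the hint, one wrap. */
    static int toy_search_bitmap(unsigned int want)
    {
        unsigned int total = MAP_BYTES * 8;
        for (int pass = 0; pass < 2; pass++) {
            unsigned int start = pass ? 0 : res_hint;
            for (unsigned int i = start; i + want <= total; i++) {
                unsigned int run = 0;
                while (run < want &&
                       !(res_map[(i + run) >> 3] & (1u << ((i + run) & 7))))
                    run++;
                if (run == want) {
                    for (unsigned int b = 0; b < want; b++)
                        res_map[(i + b) >> 3] |= 1u << ((i + b) & 7);
                    res_hint = i + want;      /* remember where we stopped */
                    return (int)i;            /* the "pide" */
                }
                i += run;                     /* skip past the blocked run */
            }
        }
        return -1;                            /* both passes failed */
    }

    int main(void)
    {
        memset(res_map, 0, sizeof res_map);
        printf("pide=%d\n", toy_search_bitmap(4));   /* 0 */
        printf("pide=%d\n", toy_search_bitmap(2));   /* 4 */
        return 0;
    }

A bit index in this toy corresponds to what ptr_to_pide() computes above: (byte offset of the word into res_map << 3) plus the bit position within the word.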
631 sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size) in sba_alloc_range() argument
648 pide = sba_search_bitmap(ioc, dev, pages_needed, 1); in sba_alloc_range()
649 if (unlikely(pide >= (ioc->res_size << 3))) { in sba_alloc_range()
650 pide = sba_search_bitmap(ioc, dev, pages_needed, 0); in sba_alloc_range()
651 if (unlikely(pide >= (ioc->res_size << 3))) { in sba_alloc_range()
660 spin_lock_irqsave(&ioc->saved_lock, flags); in sba_alloc_range()
661 if (ioc->saved_cnt > 0) { in sba_alloc_range()
663 int cnt = ioc->saved_cnt; in sba_alloc_range()
665 d = &(ioc->saved[ioc->saved_cnt - 1]); in sba_alloc_range()
667 spin_lock(&ioc->res_lock); in sba_alloc_range()
669 sba_mark_invalid(ioc, d->iova, d->size); in sba_alloc_range()
670 sba_free_range(ioc, d->iova, d->size); in sba_alloc_range()
673 ioc->saved_cnt = 0; in sba_alloc_range()
674 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ in sba_alloc_range()
675 spin_unlock(&ioc->res_lock); in sba_alloc_range()
677 spin_unlock_irqrestore(&ioc->saved_lock, flags); in sba_alloc_range()
679 pide = sba_search_bitmap(ioc, dev, pages_needed, 0); in sba_alloc_range()
680 if (unlikely(pide >= (ioc->res_size << 3))) { in sba_alloc_range()
683 __func__, ioc->ioc_hpa, ioc->res_size, in sba_alloc_range()
690 __func__, ioc->ioc_hpa, ioc->res_size, in sba_alloc_range()
698 ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed; in sba_alloc_range()
699 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1; in sba_alloc_range()
702 prefetchw(&(ioc->pdir_base[pide])); in sba_alloc_range()
706 if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) { in sba_alloc_range()
707 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide); in sba_alloc_range()
713 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map), in sba_alloc_range()
714 ioc->res_bitshift ); in sba_alloc_range()
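sba_alloc_range() wraps the bitmap search in the escalating retry visible above: a hinted pass, a pass from the start of the map, and, if both fail, a flush of the delayed-free queue followed by one last pass. A hedged skeleton of just that control flow, with stubs standing in for sba_search_bitmap() and the saved-buffer purge:

    #include <stdio.h>

    #define RES_BITS 128    /* toy capacity, stands in for ioc->res_size << 3 */

    /* Stubs for the driver's helpers (hypothetical signatures). */
    static int  search_bitmap(int pages, int use_hint) { (void)pages; (void)use_hint; return RES_BITS; }
    static void flush_delayed_frees(void) { /* sba_mark_invalid + sba_free_range + PCOM read */ }

    static int alloc_range(int pages)
    {
        int pide = search_bitmap(pages, 1);          /* pass 1: from res_hint */
        if (pide >= RES_BITS) {
            pide = search_bitmap(pages, 0);          /* pass 2: from map start */
            if (pide >= RES_BITS) {
                flush_delayed_frees();               /* reclaim deferred entries */
                pide = search_bitmap(pages, 0);      /* pass 3: last chance */
                if (pide >= RES_BITS)
                    return -1;                       /* out of mapping resources */
            }
        }
        return pide;
    }

    int main(void)
    {
        printf("%d\n", alloc_range(4));   /* -1 with these empty stubs */
        return 0;
    }

The failure sentinel mirrors the driver's pide >= (ioc->res_size << 3) test: a pide one past the last valid bit index means the search found nothing.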
729 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) in sba_free_range() argument
731 unsigned long iovp = SBA_IOVP(ioc, iova); in sba_free_range()
734 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]); in sba_free_range()
847 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) in sba_mark_invalid() argument
849 u32 iovp = (u32) SBA_IOVP(ioc,iova); in sba_mark_invalid()
859 if (!(ioc->pdir_base[off] >> 60)) { in sba_mark_invalid()
860 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp)); in sba_mark_invalid()
866 ASSERT(off < ioc->pdir_size); in sba_mark_invalid()
877 ioc->pdir_base[off] &= ~(0x80000000000000FFULL); in sba_mark_invalid()
884 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page); in sba_mark_invalid()
894 ASSERT(ioc->pdir_base[off] >> 63); in sba_mark_invalid()
897 ioc->pdir_base[off] &= ~(0x80000000000000FFULL); in sba_mark_invalid()
899 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page); in sba_mark_invalid()
906 WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM); in sba_mark_invalid()
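sba_mark_invalid() shows the pdir entry convention: bit 63 is the valid bit and the low byte is 0xFF, and an entry is retired either by clearing those bits or, when prefetch spill pages are in use, by pointing it at the spill page; the IOVA is then written to IOC_PCOM to purge the IO TLB. A toy model of the two invalidation styles (register I/O is elided and the spill-page address is hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    #define PDIR_VALID_MASK  0x80000000000000FFULL   /* bit 63 + low byte */

    static uint64_t pdir[8];
    static uint64_t prefetch_spill_page = 0x1000;    /* hypothetical spill page */

    static void mark_invalid(int idx, int use_spill)
    {
        if (use_spill)   /* keep speculative prefetches pointed somewhere safe */
            pdir[idx] = PDIR_VALID_MASK | prefetch_spill_page;
        else
            pdir[idx] &= ~PDIR_VALID_MASK;
        /* driver follows up with WRITE_REG(iovp | ibase, ioc_hpa + IOC_PCOM) */
    }

    int main(void)
    {
        pdir[3] = 0x8000000000000000ULL | 0x12345000ULL;  /* pretend-valid entry */
        mark_invalid(3, 0);
        printf("cleared: 0x%016llx\n", (unsigned long long)pdir[3]);
        pdir[4] = 0x8000000000000000ULL | 0x67890000ULL;
        mark_invalid(4, 1);
        printf("spilled: 0x%016llx\n", (unsigned long long)pdir[4]);
        return 0;
    }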
924 struct ioc *ioc; in sba_map_page() local
953 ioc = GET_IOC(dev); in sba_map_page()
954 ASSERT(ioc); in sba_map_page()
956 prefetch(ioc->res_hint); in sba_map_page()
968 spin_lock_irqsave(&ioc->res_lock, flags); in sba_map_page()
969 if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()")) in sba_map_page()
971 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_map_page()
974 pide = sba_alloc_range(ioc, dev, size); in sba_map_page()
982 pdir_start = &(ioc->pdir_base[pide]); in sba_map_page()
999 spin_lock_irqsave(&ioc->res_lock, flags); in sba_map_page()
1000 sba_check_pdir(ioc,"Check after sba_map_single_attrs()"); in sba_map_page()
1001 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_map_page()
1003 return SBA_IOVA(ioc, iovp, offset); in sba_map_page()
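Taken together, the sba_map_page() lines show the single-buffer map path: look up the device's IOC, allocate a run of IOVA pages with sba_alloc_range(), point the corresponding pdir entries at the buffer, and return SBA_IOVA(ioc, iovp, offset). A condensed sketch of just the address arithmetic (the pide value and both constants are made up; the pdir fill is elided):

    #include <stdio.h>
    #include <stdint.h>

    #define IOVP_SHIFT 12                  /* assumed 4 KB IOMMU page */
    #define IBASE      0x40000000ULL       /* hypothetical window base */

    static int alloc_range_pages(int n) { (void)n; return 7; }  /* stub pide */

    static uint64_t map_single(uint64_t phys, uint64_t len)
    {
        uint64_t offset = phys & ((1ULL << IOVP_SHIFT) - 1);
        int pages = (int)((offset + len + (1ULL << IOVP_SHIFT) - 1) >> IOVP_SHIFT);
        int pide  = alloc_range_pages(pages);        /* bitmap allocation */
        /* the driver fills pdir_base[pide .. pide+pages-1] here */
        uint64_t iovp = (uint64_t)pide << IOVP_SHIFT;
        return IBASE | iovp | offset;                /* SBA_IOVA(ioc, iovp, offset) */
    }

    int main(void)
    {
        printf("dma addr = 0x%llx\n",
               (unsigned long long)map_single(0x12345678ULL, 100));  /* 0x40007678 */
        return 0;
    }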
1016 sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) in sba_mark_clean() argument
1018 u32 iovp = (u32) SBA_IOVP(ioc,iova); in sba_mark_clean()
1023 addr = phys_to_virt(ioc->pdir_base[off] & in sba_mark_clean()
1028 addr = phys_to_virt(ioc->pdir_base[off] & in sba_mark_clean()
1051 struct ioc *ioc; in sba_unmap_page() local
1058 ioc = GET_IOC(dev); in sba_unmap_page()
1059 ASSERT(ioc); in sba_unmap_page()
1062 if (likely((iova & ioc->imask) != ioc->ibase)) { in sba_unmap_page()
1087 sba_mark_clean(ioc, iova, size); in sba_unmap_page()
1091 spin_lock_irqsave(&ioc->saved_lock, flags); in sba_unmap_page()
1092 d = &(ioc->saved[ioc->saved_cnt]); in sba_unmap_page()
1095 if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) { in sba_unmap_page()
1096 int cnt = ioc->saved_cnt; in sba_unmap_page()
1097 spin_lock(&ioc->res_lock); in sba_unmap_page()
1099 sba_mark_invalid(ioc, d->iova, d->size); in sba_unmap_page()
1100 sba_free_range(ioc, d->iova, d->size); in sba_unmap_page()
1103 ioc->saved_cnt = 0; in sba_unmap_page()
1104 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ in sba_unmap_page()
1105 spin_unlock(&ioc->res_lock); in sba_unmap_page()
1107 spin_unlock_irqrestore(&ioc->saved_lock, flags); in sba_unmap_page()
1109 spin_lock_irqsave(&ioc->res_lock, flags); in sba_unmap_page()
1110 sba_mark_invalid(ioc, iova, size); in sba_unmap_page()
1111 sba_free_range(ioc, iova, size); in sba_unmap_page()
1112 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ in sba_unmap_page()
1113 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_unmap_page()
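sba_unmap_page() defers the expensive invalidate/purge work: unmapped ranges are queued in ioc->saved[] under saved_lock, and only when the queue reaches DELAYED_RESOURCE_CNT is each one marked invalid and returned to the bitmap, with a single read of IOC_PCOM to flush the purges. A simplified model of that batching (locking and register I/O elided; the batch size here is arbitrary):

    #include <stdio.h>

    #define DELAYED_RESOURCE_CNT 8       /* toy batch size */

    struct saved { unsigned long iova; unsigned long size; };
    static struct saved saved[DELAYED_RESOURCE_CNT];
    static int saved_cnt;

    static void purge_one(unsigned long iova, unsigned long size)
    {
        /* stands in for sba_mark_invalid() + sba_free_range() */
        printf("purge iova=0x%lx size=%lu\n", iova, size);
    }

    static void unmap_deferred(unsigned long iova, unsigned long size)
    {
        saved[saved_cnt].iova = iova;
        saved[saved_cnt].size = size;
        if (++saved_cnt >= DELAYED_RESOURCE_CNT) {
            for (int i = 0; i < saved_cnt; i++)
                purge_one(saved[i].iova, saved[i].size);
            saved_cnt = 0;
            /* driver: READ_REG(ioc_hpa + IOC_PCOM) flushes the purges here */
        }
    }

    int main(void)
    {
        for (int i = 0; i < 10; i++)
            unmap_deferred(0x40000000UL + i * 0x1000, 0x1000);
        return 0;                /* first 8 flush together; 2 stay queued */
    }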
1135 struct ioc *ioc; in sba_alloc_coherent() local
1138 ioc = GET_IOC(dev); in sba_alloc_coherent()
1139 ASSERT(ioc); in sba_alloc_coherent()
1145 page = alloc_pages_node(ioc->node, flags, get_order(size)); in sba_alloc_coherent()
1177 *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr, in sba_alloc_coherent()
1225 struct ioc *ioc, in sba_fill_pdir() argument
1257 dma_sg->dma_address = pide | ioc->ibase; in sba_fill_pdir()
1258 pdirp = &(ioc->pdir_base[pide >> iovp_shift]); in sba_fill_pdir()
1320 sba_coalesce_chunks(struct ioc *ioc, struct device *dev, in sba_coalesce_chunks() argument
1430 idx = sba_alloc_range(ioc, dev, dma_len); in sba_coalesce_chunks()
1460 struct ioc *ioc; in sba_map_sg_attrs() local
1470 ioc = GET_IOC(dev); in sba_map_sg_attrs()
1471 ASSERT(ioc); in sba_map_sg_attrs()
1475 if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) { in sba_map_sg_attrs()
1491 spin_lock_irqsave(&ioc->res_lock, flags); in sba_map_sg_attrs()
1492 if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()")) in sba_map_sg_attrs()
1494 sba_dump_sg(ioc, sglist, nents); in sba_map_sg_attrs()
1497 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_map_sg_attrs()
1500 prefetch(ioc->res_hint); in sba_map_sg_attrs()
1510 coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents); in sba_map_sg_attrs()
1524 filled = sba_fill_pdir(ioc, sglist, nents); in sba_map_sg_attrs()
1527 spin_lock_irqsave(&ioc->res_lock, flags); in sba_map_sg_attrs()
1528 if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()")) in sba_map_sg_attrs()
1530 sba_dump_sg(ioc, sglist, nents); in sba_map_sg_attrs()
1533 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_map_sg_attrs()
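The scatter-gather map path runs in two passes, visible above: sba_coalesce_chunks() first merges entries into as few DMA ranges as possible, then sba_fill_pdir() programs the pdir from the result. A toy coalescing pass over (addr, len) pairs that merges only adjacent segments (the real code works on virtual contiguity and also respects the device's segment-length and boundary limits):

    #include <stdio.h>

    struct seg { unsigned long addr, len; };

    static int coalesce(struct seg *sg, int n)   /* returns the new count */
    {
        if (n <= 1)
            return n;
        int out = 0;
        for (int i = 1; i < n; i++) {
            if (sg[out].addr + sg[out].len == sg[i].addr)
                sg[out].len += sg[i].len;        /* contiguous: grow the chunk */
            else
                sg[++out] = sg[i];               /* gap: start a new chunk */
        }
        return out + 1;
    }

    int main(void)
    {
        struct seg sg[] = { {0x1000, 0x1000}, {0x2000, 0x800},
                            {0x2800, 0x800},  {0x5000, 0x1000} };
        int n = coalesce(sg, 4);
        for (int i = 0; i < n; i++)
            printf("chunk %d: 0x%lx len 0x%lx\n", i, sg[i].addr, sg[i].len);
        return 0;    /* two chunks: 0x1000 len 0x2000, 0x5000 len 0x1000 */
    }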
1557 struct ioc *ioc; in sba_unmap_sg_attrs() local
1565 ioc = GET_IOC(dev); in sba_unmap_sg_attrs()
1566 ASSERT(ioc); in sba_unmap_sg_attrs()
1568 spin_lock_irqsave(&ioc->res_lock, flags); in sba_unmap_sg_attrs()
1569 sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()"); in sba_unmap_sg_attrs()
1570 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_unmap_sg_attrs()
1584 spin_lock_irqsave(&ioc->res_lock, flags); in sba_unmap_sg_attrs()
1585 sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()"); in sba_unmap_sg_attrs()
1586 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_unmap_sg_attrs()
1598 ioc_iova_init(struct ioc *ioc) in ioc_iova_init() argument
1612 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL; in ioc_iova_init()
1613 ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL; in ioc_iova_init()
1615 ioc->iov_size = ~ioc->imask + 1; in ioc_iova_init()
1618 __func__, ioc->ioc_hpa, ioc->ibase, ioc->imask, in ioc_iova_init()
1619 ioc->iov_size >> 20); in ioc_iova_init()
1631 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG); in ioc_iova_init()
1633 ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE; in ioc_iova_init()
1634 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL, in ioc_iova_init()
1635 get_order(ioc->pdir_size)); in ioc_iova_init()
1636 if (!ioc->pdir_base) in ioc_iova_init()
1639 memset(ioc->pdir_base, 0, ioc->pdir_size); in ioc_iova_init()
1642 iovp_size >> 10, ioc->pdir_base, ioc->pdir_size); in ioc_iova_init()
1644 ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base); in ioc_iova_init()
1645 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); in ioc_iova_init()
1660 ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2); in ioc_iova_init()
1661 ioc->pdir_size /= 2; in ioc_iova_init()
1662 ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE; in ioc_iova_init()
1689 for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++) in ioc_iova_init()
1690 ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page); in ioc_iova_init()
1694 WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM); in ioc_iova_init()
1695 READ_REG(ioc->ioc_hpa + IOC_PCOM); in ioc_iova_init()
1698 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE); in ioc_iova_init()
1699 READ_REG(ioc->ioc_hpa + IOC_IBASE); in ioc_iova_init()
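ioc_iova_init() derives the whole window geometry from two registers, as the lines above show: iov_size = ~imask + 1 once the high half of IMASK is forced on, and pdir_size = (iov_size / iovp_size) * PDIR_ENTRY_SIZE. A worked example, assuming 8-byte pdir entries (the pdir is walked as u64 * above) and 4 KB IOMMU pages:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* hypothetical register values for a 1 GB window at 0x40000000 */
        uint64_t ibase = 0x40000000ULL;
        uint64_t imask = 0xFFFFFFFFC0000000ULL;   /* after OR with 0xFFFFFFFF00000000 */
        uint64_t iovp_size = 4096, pdir_entry_size = 8;

        uint64_t iov_size  = ~imask + 1;          /* 0x40000000 = 1 GB */
        uint64_t pdir_size = (iov_size / iovp_size) * pdir_entry_size;

        printf("window: base 0x%llx, %llu MB\n",
               (unsigned long long)ibase, (unsigned long long)(iov_size >> 20));
        printf("pdir:   %llu entries, %llu KB\n",
               (unsigned long long)(pdir_size / pdir_entry_size),
               (unsigned long long)(pdir_size >> 10));   /* 262144 entries, 2048 KB */
        return 0;
    }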
1703 ioc_resource_init(struct ioc *ioc) in ioc_resource_init() argument
1705 spin_lock_init(&ioc->res_lock); in ioc_resource_init()
1707 spin_lock_init(&ioc->saved_lock); in ioc_resource_init()
1711 ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */ in ioc_resource_init()
1712 ioc->res_size >>= 3; /* convert bit count to byte count */ in ioc_resource_init()
1713 DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size); in ioc_resource_init()
1715 ioc->res_map = (char *) __get_free_pages(GFP_KERNEL, in ioc_resource_init()
1716 get_order(ioc->res_size)); in ioc_resource_init()
1717 if (!ioc->res_map) in ioc_resource_init()
1720 memset(ioc->res_map, 0, ioc->res_size); in ioc_resource_init()
1722 ioc->res_hint = (unsigned long *) ioc->res_map; in ioc_resource_init()
1726 ioc->res_map[0] = 0x1; in ioc_resource_init()
1727 ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE; in ioc_resource_init()
1731 ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */ in ioc_resource_init()
1732 ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF in ioc_resource_init()
1737 ioc->res_size, (void *) ioc->res_map); in ioc_resource_init()
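ioc_resource_init() sizes the bitmap from the pdir: one bit per pdir entry, so res_size = (pdir_size / PDIR_ENTRY_SIZE) >> 3 bytes, with the first and last bytes pre-marking guard entries (pide 0 carries the zx1 cookie, and the final pide is kept reserved). Continuing the 1 GB example above:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pdir_entries = 262144;          /* 1 GB window / 4 KB pages */
        unsigned long res_size = pdir_entries >> 3;   /* one bit per entry -> bytes */
        printf("res_map: %lu KB\n", res_size >> 10);  /* 32 KB */

        /* guard reservations, as in the res_map[0] and res_map[res_size-1] lines */
        unsigned char first = 0x1;                    /* pide 0 reserved */
        unsigned char last  = 0x80;                   /* final pide reserved */
        printf("first byte 0x%02x, last byte 0x%02x\n", first, last);
        return 0;
    }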
1741 ioc_sac_init(struct ioc *ioc) in ioc_sac_init() argument
1759 controller->iommu = ioc; in ioc_sac_init()
1765 ioc->sac_only_dev = sac; in ioc_sac_init()
1769 ioc_zx1_init(struct ioc *ioc) in ioc_zx1_init() argument
1774 if (ioc->rev < 0x20) in ioc_zx1_init()
1778 ioc->dma_mask = (0x1UL << 39) - 1; in ioc_zx1_init()
1787 rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i); in ioc_zx1_init()
1789 WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i); in ioc_zx1_init()
1793 typedef void (initfunc)(struct ioc *);
1808 static void ioc_init(unsigned long hpa, struct ioc *ioc) in ioc_init() argument
1812 ioc->next = ioc_list; in ioc_init()
1813 ioc_list = ioc; in ioc_init()
1815 ioc->ioc_hpa = ioremap(hpa, 0x1000); in ioc_init()
1817 ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID); in ioc_init()
1818 ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL; in ioc_init()
1819 ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */ in ioc_init()
1822 if (ioc->func_id == info->func_id) { in ioc_init()
1823 ioc->name = info->name; in ioc_init()
1825 (info->init)(ioc); in ioc_init()
1835 if (!ioc->name) { in ioc_init()
1836 ioc->name = kmalloc(24, GFP_KERNEL); in ioc_init()
1837 if (ioc->name) in ioc_init()
1838 sprintf((char *) ioc->name, "Unknown (%04x:%04x)", in ioc_init()
1839 ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF); in ioc_init()
1841 ioc->name = "Unknown"; in ioc_init()
1844 ioc_iova_init(ioc); in ioc_init()
1845 ioc_resource_init(ioc); in ioc_init()
1846 ioc_sac_init(ioc); in ioc_init()
1853 ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF, in ioc_init()
1854 hpa, ioc->iov_size >> 20, ioc->ibase); in ioc_init()
1872 struct ioc *ioc; in ioc_start() local
1875 for (ioc = ioc_list; ioc; ioc = ioc->next) in ioc_start()
1877 return ioc; in ioc_start()
1885 struct ioc *ioc = v; in ioc_next() local
1888 return ioc->next; in ioc_next()
1899 struct ioc *ioc = v; in ioc_show() local
1900 unsigned long *res_ptr = (unsigned long *)ioc->res_map; in ioc_show()
1904 ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF)); in ioc_show()
1906 if (ioc->node != NUMA_NO_NODE) in ioc_show()
1907 seq_printf(s, "NUMA node : %d\n", ioc->node); in ioc_show()
1909 seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024)); in ioc_show()
1912 for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr) in ioc_show()
1915 seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3); in ioc_show()
1921 min = max = ioc->avg_search[0]; in ioc_show()
1923 avg += ioc->avg_search[i]; in ioc_show()
1924 if (ioc->avg_search[i] > max) max = ioc->avg_search[i]; in ioc_show()
1925 if (ioc->avg_search[i] < min) min = ioc->avg_search[i]; in ioc_show()
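The avg_search[] reads in ioc_show() close the loop on the timing samples collected in sba_alloc_range() above (an itc delta per page, stored at avg_idx and wrapped with a power-of-two mask). A self-contained version of the ring buffer and the min/avg/max reduction (the sample count here is arbitrary; SBA_SEARCH_SAMPLE just has to be a power of two for the masking to work):

    #include <stdio.h>

    #define SBA_SEARCH_SAMPLE 64          /* assumed power-of-two ring size */

    static unsigned long avg_search[SBA_SEARCH_SAMPLE];
    static int avg_idx;

    static void record_search(unsigned long cycles, unsigned long pages)
    {
        avg_search[avg_idx++] = cycles / pages;     /* per-page search cost */
        avg_idx &= SBA_SEARCH_SAMPLE - 1;           /* cheap ring wrap */
    }

    int main(void)
    {
        for (unsigned long i = 1; i <= 100; i++)
            record_search(i * 10, 1);

        unsigned long min, max, avg = 0;
        min = max = avg_search[0];
        for (int i = 0; i < SBA_SEARCH_SAMPLE; i++) {
            avg += avg_search[i];
            if (avg_search[i] > max) max = avg_search[i];
            if (avg_search[i] < min) min = avg_search[i];
        }
        avg /= SBA_SEARCH_SAMPLE;
        printf("min %lu avg %lu max %lu\n", min, avg, max);
        return 0;
    }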
1976 struct ioc *ioc; in sba_connect_bus() local
1994 for (ioc = ioc_list; ioc; ioc = ioc->next) in sba_connect_bus()
1995 if (ioc->handle == handle) { in sba_connect_bus()
1996 PCI_CONTROLLER(bus)->iommu = ioc; in sba_connect_bus()
2008 sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle) in sba_map_ioc_to_node() argument
2017 ioc->node = node; in sba_map_ioc_to_node()
2021 static void acpi_sba_ioc_add(struct ioc *ioc) in acpi_sba_ioc_add() argument
2023 acpi_handle handle = ioc->handle; in acpi_sba_ioc_add()
2028 ioc_found = ioc->next; in acpi_sba_ioc_add()
2056 ioc_init(hpa, ioc); in acpi_sba_ioc_add()
2058 sba_map_ioc_to_node(ioc, handle); in acpi_sba_ioc_add()
2062 kfree(ioc); in acpi_sba_ioc_add()
2074 struct ioc *ioc; in acpi_sba_ioc_attach() local
2076 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); in acpi_sba_ioc_attach()
2077 if (!ioc) in acpi_sba_ioc_attach()
2080 ioc->next = ioc_found; in acpi_sba_ioc_attach()
2081 ioc_found = ioc; in acpi_sba_ioc_attach()
2082 ioc->handle = device->handle; in acpi_sba_ioc_attach()
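Finally, the IOC discovery path uses the same head-insert list idiom twice: ioc_init() pushes onto ioc_list, and acpi_sba_ioc_attach() pushes onto ioc_found, which acpi_sba_ioc_add() later pops. A minimal illustration of that pattern:

    #include <stdio.h>
    #include <stdlib.h>

    struct ioc { int id; struct ioc *next; };
    static struct ioc *ioc_list;

    static void ioc_push(struct ioc *ioc)
    {
        ioc->next = ioc_list;     /* head insertion, as in ioc_init() */
        ioc_list = ioc;
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct ioc *ioc = calloc(1, sizeof(*ioc));
            ioc->id = i;
            ioc_push(ioc);
        }
        for (struct ioc *ioc = ioc_list; ioc; ioc = ioc->next)
            printf("ioc %d\n", ioc->id);   /* 2 1 0: LIFO order */
        return 0;
    }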