Lines matching refs:ioc
198 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide) in sba_dump_pdir_entry() argument
201 u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]); in sba_dump_pdir_entry()
202 unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]); in sba_dump_pdir_entry()
230 sba_check_pdir(struct ioc *ioc, char *msg) in sba_check_pdir() argument
232 u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]); in sba_check_pdir()
233 u32 *rptr = (u32 *) ioc->res_map; /* resource map ptr */ in sba_check_pdir()
234 u64 *pptr = ioc->pdir_base; /* pdir ptr */ in sba_check_pdir()
250 sba_dump_pdir_entry(ioc, msg, pide); in sba_check_pdir()
274 sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) in sba_dump_sg() argument
306 #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset)) argument
307 #define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask) argument
310 #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset)) argument
311 #define SBA_IOVP(ioc,iova) (iova) argument
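
The two SBA_IOVA()/SBA_IOVP() build variants above are pure address composition: one OR's the IOC base into the page-aligned IO virtual page plus the byte offset and later masks the base back off with iovp_mask, the other passes the address through untouched. A minimal user-space sketch of that math follows; every constant is illustrative, not a real IOC configuration.

/* Stand-alone sketch of the SBA_IOVA()/SBA_IOVP() address math from the
 * excerpt above.  The base, mask and offsets are hypothetical values.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ibase     = 0xc0000000ULL;      /* hypothetical window base      */
	uint64_t iovp_mask = 0x03fff000ULL;      /* page bits of a 64 MB window   */
	uint64_t iovp      = 0x00042000ULL;      /* page-aligned IO virtual page  */
	uint64_t offset    = 0x123ULL;           /* byte offset within the page   */

	/* compose: what SBA_IOVA(ioc, iovp, offset, hint) hands back */
	uint64_t iova = ibase | iovp | offset;

	/* decompose: what SBA_IOVP(ioc, iova) recovers for pdir lookups */
	uint64_t back = iova & iovp_mask;

	printf("iova = 0x%llx, iovp recovered = 0x%llx\n",
	       (unsigned long long)iova, (unsigned long long)back);
	return 0;
}
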
319 static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr, in ptr_to_pide() argument
322 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3) in ptr_to_pide()
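
ptr_to_pide() turns a position in the resource bitmap back into a pdir index: each bit in res_map tracks one IO page, so the byte offset of the word being scanned, times eight, plus the bit offset inside it, is the pide. A stand-alone sketch of the same arithmetic (the ex_* names are hypothetical):

/* Sketch of the res_map-pointer -> pdir-index math from ptr_to_pide(). */
#include <stdint.h>
#include <stdio.h>

static unsigned long ex_ptr_to_pide(const unsigned char *res_map,
				    const unsigned long *res_ptr,
				    unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)res_map) << 3)
		+ bitshiftcnt;
}

int main(void)
{
	unsigned long res_map[16] = { 0 };	/* 16 longs = 1024 tracked pages on LP64 */

	/* word 3 of the map, bit 5 inside it */
	unsigned long pide = ex_ptr_to_pide((unsigned char *)res_map,
					    &res_map[3], 5);

	printf("pide = %lu\n", pide);	/* 3 * sizeof(long) * 8 + 5 = 197 on LP64 */
	return 0;
}
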
336 sba_search_bitmap(struct ioc *ioc, struct device *dev, in sba_search_bitmap() argument
339 unsigned long *res_ptr = ioc->res_hint; in sba_search_bitmap()
340 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); in sba_search_bitmap()
350 BUG_ON(ioc->ibase & ~IOVP_MASK); in sba_search_bitmap()
351 shift = ioc->ibase >> IOVP_SHIFT; in sba_search_bitmap()
359 tpide = ptr_to_pide(ioc, res_ptr, 0); in sba_search_bitmap()
371 ioc->res_bitshift = 0; in sba_search_bitmap()
380 uint bitshiftcnt = ALIGN(ioc->res_bitshift, o); in sba_search_bitmap()
394 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt); in sba_search_bitmap()
412 ioc->res_bitshift = bitshiftcnt + bits_wanted; in sba_search_bitmap()
417 ioc->res_hint = (unsigned long *) ioc->res_map; in sba_search_bitmap()
418 ioc->res_bitshift = 0; in sba_search_bitmap()
420 ioc->res_hint = res_ptr; in sba_search_bitmap()
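
sba_search_bitmap() is a first-fit scan over that bitmap: it restarts from res_hint and only considers starting bits aligned to the allocation size (the ALIGN(ioc->res_bitshift, o) bookkeeping). The sketch below models the idea in user space with a byte-granular map; it is deliberately simplified and skips the driver's whole-word fast paths, hint handling and wrap-around.

/* Simplified aligned first-fit bitmap search in the spirit of
 * sba_search_bitmap(): find a run of bits_wanted clear bits, starting
 * only at offsets aligned to that size, mark it used and return the
 * bit index (the "pide").  Assumes bits_wanted is a power of two.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EX_MAP_BITS 64

static int ex_test_bit(const uint8_t *map, unsigned int bit)
{
	return (map[bit >> 3] >> (bit & 7)) & 1;
}

static void ex_set_bit(uint8_t *map, unsigned int bit)
{
	map[bit >> 3] |= (uint8_t)(1u << (bit & 7));
}

static long ex_search_bitmap(uint8_t *map, unsigned int bits_wanted)
{
	for (unsigned int start = 0; start + bits_wanted <= EX_MAP_BITS;
	     start += bits_wanted) {			/* aligned starts only */
		unsigned int i;

		for (i = 0; i < bits_wanted; i++)
			if (ex_test_bit(map, start + i))
				break;
		if (i == bits_wanted) {			/* found a free run */
			for (i = 0; i < bits_wanted; i++)
				ex_set_bit(map, start + i);
			return start;
		}
	}
	return -1;					/* nothing free */
}

int main(void)
{
	uint8_t map[EX_MAP_BITS / 8];

	memset(map, 0, sizeof(map));
	map[0] = 0x0f;					/* pages 0-3 already in use */

	printf("pide for 4 pages: %ld\n", ex_search_bitmap(map, 4));	/* 4 */
	printf("pide for 8 pages: %ld\n", ex_search_bitmap(map, 8));	/* 8 */
	return 0;
}
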
435 sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size) in sba_alloc_range() argument
443 pide = sba_search_bitmap(ioc, dev, pages_needed); in sba_alloc_range()
444 if (pide >= (ioc->res_size << 3)) { in sba_alloc_range()
445 pide = sba_search_bitmap(ioc, dev, pages_needed); in sba_alloc_range()
446 if (pide >= (ioc->res_size << 3)) in sba_alloc_range()
448 __FILE__, ioc->ioc_hpa); in sba_alloc_range()
453 if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) { in sba_alloc_range()
454 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide); in sba_alloc_range()
460 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map), in sba_alloc_range()
461 ioc->res_bitshift ); in sba_alloc_range()
470 ioc->avg_search[ioc->avg_idx++] = cr_start; in sba_alloc_range()
471 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1; in sba_alloc_range()
473 ioc->used_pages += pages_needed; in sba_alloc_range()
489 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) in sba_free_range() argument
491 unsigned long iovp = SBA_IOVP(ioc, iova); in sba_free_range()
494 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]); in sba_free_range()
506 ioc->used_pages -= bits_not_wanted; in sba_free_range()
520 #define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir) argument
610 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) in sba_mark_invalid() argument
612 u32 iovp = (u32) SBA_IOVP(ioc,iova); in sba_mark_invalid()
613 u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)]; in sba_mark_invalid()
623 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp)); in sba_mark_invalid()
665 WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM); in sba_mark_invalid()
677 struct ioc *ioc; in sba_dma_supported() local
693 ioc = GET_IOC(dev); in sba_dma_supported()
699 return((int)(mask >= (ioc->ibase - 1 + in sba_dma_supported()
700 (ioc->pdir_size / sizeof(u64) * IOVP_SIZE) ))); in sba_dma_supported()
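
sba_dma_supported() accepts a device mask only if it reaches the top of the IOVA window: ibase plus (number of pdir entries × IO page size), minus one. A small worked example with illustrative numbers:

/* Sketch of the sba_dma_supported() window check.  The window base,
 * pdir size and IO page size below are assumptions for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ibase     = 0xc0000000ULL;	/* hypothetical window base   */
	uint64_t pdir_size = 256 * 1024 * 8;	/* 256K entries of 8 bytes    */
	uint64_t iovp_size = 4096;		/* assume 4 KB IO pages       */

	/* highest bus address the IOC can hand out */
	uint64_t top = ibase - 1 + (pdir_size / 8) * iovp_size;

	uint64_t mask32 = 0xffffffffULL;	/* a 32-bit capable device    */
	printf("window top = 0x%llx, 32-bit mask ok = %d\n",
	       (unsigned long long)top, mask32 >= top);	/* 0xffffffff, ok = 1 */
	return 0;
}
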
717 struct ioc *ioc; in sba_map_single() local
724 ioc = GET_IOC(dev); in sba_map_single()
732 spin_lock_irqsave(&ioc->res_lock, flags); in sba_map_single()
734 sba_check_pdir(ioc,"Check before sba_map_single()"); in sba_map_single()
738 ioc->msingle_calls++; in sba_map_single()
739 ioc->msingle_pages += size >> IOVP_SHIFT; in sba_map_single()
741 pide = sba_alloc_range(ioc, dev, size); in sba_map_single()
747 pdir_start = &(ioc->pdir_base[pide]); in sba_map_single()
774 sba_check_pdir(ioc,"Check after sba_map_single()"); in sba_map_single()
776 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_map_single()
779 return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG); in sba_map_single()
796 struct ioc *ioc; in sba_unmap_single() local
805 ioc = GET_IOC(dev); in sba_unmap_single()
811 spin_lock_irqsave(&ioc->res_lock, flags); in sba_unmap_single()
814 ioc->usingle_calls++; in sba_unmap_single()
815 ioc->usingle_pages += size >> IOVP_SHIFT; in sba_unmap_single()
818 sba_mark_invalid(ioc, iova, size); in sba_unmap_single()
824 d = &(ioc->saved[ioc->saved_cnt]); in sba_unmap_single()
827 if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) { in sba_unmap_single()
828 int cnt = ioc->saved_cnt; in sba_unmap_single()
830 sba_free_range(ioc, d->iova, d->size); in sba_unmap_single()
833 ioc->saved_cnt = 0; in sba_unmap_single()
835 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ in sba_unmap_single()
838 sba_free_range(ioc, iova, size); in sba_unmap_single()
844 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ in sba_unmap_single()
847 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_unmap_single()
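
When DELAYED_RESOURCE_CNT is configured, sba_unmap_single() does not return ranges to the bitmap immediately: it parks (iova, size) pairs in ioc->saved[] and only calls sba_free_range() for the whole batch once the array fills, then reads IOC_PCOM to flush the queued purges. A user-space sketch of that batching pattern (all ex_* names and the batch size are made up for illustration):

/* Sketch of the delayed-free batching used in sba_unmap_single(). */
#include <stddef.h>
#include <stdio.h>

#define EX_DELAYED_CNT 8			/* stand-in for DELAYED_RESOURCE_CNT */

struct ex_saved { unsigned long iova; size_t size; };

static struct ex_saved ex_queue[EX_DELAYED_CNT];
static int ex_saved_cnt;

static void ex_free_range(unsigned long iova, size_t size)
{
	/* the driver would clear resource-map bits here */
	printf("free iova=0x%lx size=%zu\n", iova, size);
}

static void ex_delayed_free(unsigned long iova, size_t size)
{
	ex_queue[ex_saved_cnt].iova = iova;
	ex_queue[ex_saved_cnt].size = size;

	if (++ex_saved_cnt >= EX_DELAYED_CNT) {
		/* drain the whole batch, then reset the queue */
		for (int i = 0; i < ex_saved_cnt; i++)
			ex_free_range(ex_queue[i].iova, ex_queue[i].size);
		ex_saved_cnt = 0;
		/* the driver reads IOC_PCOM here to flush queued purges */
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		ex_delayed_free(0x1000UL * (unsigned long)i, 4096);
	return 0;
}
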
938 struct ioc *ioc; in sba_map_sg() local
944 ioc = GET_IOC(dev); in sba_map_sg()
954 spin_lock_irqsave(&ioc->res_lock, flags); in sba_map_sg()
957 if (sba_check_pdir(ioc,"Check before sba_map_sg()")) in sba_map_sg()
959 sba_dump_sg(ioc, sglist, nents); in sba_map_sg()
965 ioc->msg_calls++; in sba_map_sg()
976 coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range); in sba_map_sg()
986 filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry); in sba_map_sg()
993 if (sba_check_pdir(ioc,"Check after sba_map_sg()")) in sba_map_sg()
995 sba_dump_sg(ioc, sglist, nents); in sba_map_sg()
1000 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_map_sg()
1021 struct ioc *ioc; in sba_unmap_sg() local
1029 ioc = GET_IOC(dev); in sba_unmap_sg()
1032 ioc->usg_calls++; in sba_unmap_sg()
1036 spin_lock_irqsave(&ioc->res_lock, flags); in sba_unmap_sg()
1037 sba_check_pdir(ioc,"Check before sba_unmap_sg()"); in sba_unmap_sg()
1038 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_unmap_sg()
1045 …ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> … in sba_unmap_sg()
1046 ioc->usingle_calls--; /* kluge since call is unmap_sg() */ in sba_unmap_sg()
1054 spin_lock_irqsave(&ioc->res_lock, flags); in sba_unmap_sg()
1055 sba_check_pdir(ioc,"Check after sba_unmap_sg()"); in sba_unmap_sg()
1056 spin_unlock_irqrestore(&ioc->res_lock, flags); in sba_unmap_sg()
1100 PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp); in sba_get_pat_resources()
1211 struct ioc *ioc; member
1223 lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask); in setup_ibase_imask_callback()
1229 setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num) in setup_ibase_imask() argument
1232 .ioc = ioc, in setup_ibase_imask()
1254 sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num) in sba_ioc_init_pluto() argument
1267 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE); in sba_ioc_init_pluto()
1268 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; in sba_ioc_init_pluto()
1270 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) { in sba_ioc_init_pluto()
1280 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64); in sba_ioc_init_pluto()
1283 __func__, ioc->ioc_hpa, iova_space_size >> 20, in sba_ioc_init_pluto()
1286 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL, in sba_ioc_init_pluto()
1287 get_order(ioc->pdir_size)); in sba_ioc_init_pluto()
1288 if (!ioc->pdir_base) in sba_ioc_init_pluto()
1291 memset(ioc->pdir_base, 0, ioc->pdir_size); in sba_ioc_init_pluto()
1294 __func__, ioc->pdir_base, ioc->pdir_size); in sba_ioc_init_pluto()
1297 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT; in sba_ioc_init_pluto()
1298 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT)); in sba_ioc_init_pluto()
1301 ioc->hint_shift_pdir, ioc->hint_mask_pdir); in sba_ioc_init_pluto()
1304 WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base); in sba_ioc_init_pluto()
1305 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); in sba_ioc_init_pluto()
1310 ioc->imask = iova_space_mask; in sba_ioc_init_pluto()
1312 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1); in sba_ioc_init_pluto()
1314 sba_dump_tlb(ioc->ioc_hpa); in sba_ioc_init_pluto()
1316 setup_ibase_imask(sba, ioc, ioc_num); in sba_ioc_init_pluto()
1318 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK); in sba_ioc_init_pluto()
1325 ioc->imask |= 0xFFFFFFFF00000000UL; in sba_ioc_init_pluto()
1339 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG); in sba_ioc_init_pluto()
1345 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE); in sba_ioc_init_pluto()
1351 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM); in sba_ioc_init_pluto()
1368 ioc->pdir_size /= 2; in sba_ioc_init_pluto()
1369 ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE; in sba_ioc_init_pluto()
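
sba_ioc_init_pluto() sizes the pdir as one 64-bit entry per IO page in the window: pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64). With an assumed 1 GB window and 4 KB IO pages that works out to 256K entries and a 2 MB pdir, as the sketch below computes; the numbers are illustrative.

/* Worked example of the pdir sizing arithmetic. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t iova_space_size = 1ULL << 30;	/* assume a 1 GB window  */
	uint64_t iovp_size       = 4096;	/* assume 4 KB IO pages  */

	uint64_t entries   = iova_space_size / iovp_size;	/* 262144 */
	uint64_t pdir_size = entries * sizeof(uint64_t);	/* 2 MB   */

	printf("%llu entries, pdir_size = %llu KB\n",
	       (unsigned long long)entries,
	       (unsigned long long)(pdir_size >> 10));
	return 0;
}
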
1375 sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num) in sba_ioc_init() argument
1414 ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64); in sba_ioc_init()
1418 ioc->ioc_hpa, in sba_ioc_init()
1423 ioc->pdir_base = sba_alloc_pdir(pdir_size); in sba_ioc_init()
1426 __func__, ioc->pdir_base, pdir_size); in sba_ioc_init()
1430 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT; in sba_ioc_init()
1431 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT)); in sba_ioc_init()
1434 ioc->hint_shift_pdir, ioc->hint_mask_pdir); in sba_ioc_init()
1437 WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); in sba_ioc_init()
1447 ioc->ibase = 0; in sba_ioc_init()
1448 ioc->imask = iova_space_mask; /* save it */ in sba_ioc_init()
1450 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1); in sba_ioc_init()
1454 __func__, ioc->ibase, ioc->imask); in sba_ioc_init()
1462 setup_ibase_imask(sba, ioc, ioc_num); in sba_ioc_init()
1467 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE); in sba_ioc_init()
1468 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK); in sba_ioc_init()
1482 WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG); in sba_ioc_init()
1488 WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM); in sba_ioc_init()
1490 ioc->ibase = 0; /* used by SBA_IOVA and related macros */ in sba_ioc_init()
1582 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET); in sba_hw_init()
1594 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET); in sba_hw_init()
1610 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0)); in sba_hw_init()
1611 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1)); in sba_hw_init()
1620 void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa; in sba_hw_init()
1648 READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL); in sba_hw_init()
1652 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40), in sba_hw_init()
1653 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50) in sba_hw_init()
1656 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108), in sba_hw_init()
1657 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400) in sba_hw_init()
1661 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i); in sba_hw_init()
1663 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i); in sba_hw_init()
1688 res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */ in sba_common_init()
1699 sba_dev->ioc[i].res_size = res_size; in sba_common_init()
1700 sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size)); in sba_common_init()
1703 iterate_pages( sba_dev->ioc[i].res_map, res_size, in sba_common_init()
1707 if (NULL == sba_dev->ioc[i].res_map) in sba_common_init()
1713 memset(sba_dev->ioc[i].res_map, 0, res_size); in sba_common_init()
1715 sba_dev->ioc[i].res_hint = (unsigned long *) in sba_common_init()
1716 &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]); in sba_common_init()
1720 sba_dev->ioc[i].res_map[0] = 0x80; in sba_common_init()
1721 sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL; in sba_common_init()
1730 long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]); in sba_common_init()
1731 long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]); in sba_common_init()
1740 iterate_pages( sba_dev->ioc[i].res_map, res_size, in sba_common_init()
1742 iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size, in sba_common_init()
1747 __func__, i, res_size, sba_dev->ioc[i].res_map); in sba_common_init()
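
sba_common_init() derives the resource map from the pdir: one bit per pdir entry, so the entry count computed at line 1688 is scaled down to one bit per entry before the map is allocated (the "8 bits per byte" comments in sba_proc_info() rely on the same relationship, recovering the page count as res_size << 3). Worked with assumed sizes:

/* Worked example of the resource-map sizing implied above. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pdir_size = 2 * 1024 * 1024;		/* assume a 2 MB pdir */
	uint64_t entries   = pdir_size / sizeof(uint64_t);	/* 262144 pages   */
	uint64_t res_bytes = entries >> 3;			/* 32 KB bitmap   */

	printf("pdir entries = %llu, res_map = %llu KB, pages tracked = %llu\n",
	       (unsigned long long)entries,
	       (unsigned long long)(res_bytes >> 10),
	       (unsigned long long)(res_bytes << 3));
	return 0;
}
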
1771 struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */ in sba_proc_info() local
1772 int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */ in sba_proc_info()
1783 (int)((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */ in sba_proc_info()
1787 ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */ in sba_proc_info()
1803 total_pages - ioc->used_pages, ioc->used_pages, in sba_proc_info()
1804 (int)(ioc->used_pages * 100 / total_pages)); in sba_proc_info()
1806 min = max = ioc->avg_search[0]; in sba_proc_info()
1808 avg += ioc->avg_search[i]; in sba_proc_info()
1809 if (ioc->avg_search[i] > max) max = ioc->avg_search[i]; in sba_proc_info()
1810 if (ioc->avg_search[i] < min) min = ioc->avg_search[i]; in sba_proc_info()
1817 ioc->msingle_calls, ioc->msingle_pages, in sba_proc_info()
1818 (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls)); in sba_proc_info()
1821 min = ioc->usingle_calls; in sba_proc_info()
1822 max = ioc->usingle_pages - ioc->usg_pages; in sba_proc_info()
1827 ioc->msg_calls, ioc->msg_pages, in sba_proc_info()
1828 (int)((ioc->msg_pages * 1000)/ioc->msg_calls)); in sba_proc_info()
1831 ioc->usg_calls, ioc->usg_pages, in sba_proc_info()
1832 (int)((ioc->usg_pages * 1000)/ioc->usg_calls)); in sba_proc_info()
1856 struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */ in sba_proc_bitmap_info() local
1858 seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map, in sba_proc_bitmap_info()
1859 ioc->res_size, false); in sba_proc_bitmap_info()
1964 spin_lock_init(&(sba_dev->ioc[i].res_lock)); in sba_driver_callback()
2024 return &(sba->ioc[iocnum]); in sba_get_iommu()