Lines matching references to 'tbl' in the powerpc IOMMU support code (arch/powerpc/kernel/iommu.c); each entry shows the source line number, the matching line, and the enclosing function.
177 struct iommu_table *tbl, in iommu_range_alloc() argument
211 pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc()
214 pool = &(tbl->large_pool); in iommu_range_alloc()
216 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
236 if (limit + tbl->it_offset > mask) { in iommu_range_alloc()
237 limit = mask - tbl->it_offset + 1; in iommu_range_alloc()
244 pool = &(tbl->pools[0]); in iommu_range_alloc()
254 1 << tbl->it_page_shift); in iommu_range_alloc()
256 boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift); in iommu_range_alloc()
259 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
260 boundary_size >> tbl->it_page_shift, align_mask); in iommu_range_alloc()
268 } else if (pass <= tbl->nr_pools) { in iommu_range_alloc()
271 pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1); in iommu_range_alloc()
272 pool = &tbl->pools[pool_nr]; in iommu_range_alloc()
293 pool->hint = (end + tbl->it_blocksize - 1) & in iommu_range_alloc()
294 ~(tbl->it_blocksize - 1); in iommu_range_alloc()
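
The iommu_range_alloc() fragments above show the core of the DMA-window allocator: a pool is picked from a per-CPU hash (or the large pool for big requests), the search limit is clamped against the device's DMA mask relative to tbl->it_offset, iommu_area_alloc() searches the it_map bitmap for a free run, failed passes fall through to neighbouring pools, and the pool hint is finally rounded up to it_blocksize. Below is a minimal userspace sketch of that pool-and-bitmap scheme, assuming a plain bool array instead of it_map and invented sizes; none of these names are the kernel API.

    /* Userspace sketch of pool-based range allocation; illustrative only. */
    #include <stdbool.h>
    #include <stdio.h>

    #define TABLE_SIZE 256              /* stands in for tbl->it_size  */
    #define NR_POOLS   4                /* stands in for tbl->nr_pools */

    struct pool { unsigned long start, end, hint; };

    static bool map[TABLE_SIZE];        /* stands in for tbl->it_map   */
    static struct pool pools[NR_POOLS];

    /* Find a free run of npages inside one pool, starting at its hint. */
    static long pool_alloc(struct pool *p, unsigned long npages)
    {
        for (unsigned long n = p->hint; n + npages <= p->end; n++) {
            unsigned long i;

            for (i = 0; i < npages && !map[n + i]; i++)
                ;
            if (i == npages) {
                for (i = 0; i < npages; i++)
                    map[n + i] = true;
                p->hint = n + npages;   /* remember where we stopped */
                return (long)n;
            }
        }
        return -1;
    }

    /* Try the "home" pool first, then the neighbours, like the pass loop above. */
    static long range_alloc(unsigned int cpu, unsigned long npages)
    {
        unsigned int pool_nr = cpu & (NR_POOLS - 1);

        for (unsigned int pass = 0; pass <= NR_POOLS; pass++) {
            struct pool *p = &pools[pool_nr];
            long n;

            if (pass)                   /* on retry, search the whole pool */
                p->hint = p->start;
            n = pool_alloc(p, npages);
            if (n >= 0)
                return n;
            pool_nr = (pool_nr + 1) & (NR_POOLS - 1);
        }
        return -1;                      /* window is full */
    }

    int main(void)
    {
        unsigned long poolsize = TABLE_SIZE / NR_POOLS;

        for (unsigned int i = 0; i < NR_POOLS; i++)
            pools[i] = (struct pool){ i * poolsize, (i + 1) * poolsize, i * poolsize };

        printf("entry %ld\n", range_alloc(1, 8));   /* prints "entry 64" */
        return 0;
    }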
306 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, in iommu_alloc() argument
316 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
321 entry += tbl->it_offset; /* Offset into real TCE table */ in iommu_alloc()
322 ret = entry << tbl->it_page_shift; /* Set the return dma address */ in iommu_alloc()
325 build_fail = tbl->it_ops->set(tbl, entry, npages, in iommu_alloc()
327 IOMMU_PAGE_MASK(tbl), direction, attrs); in iommu_alloc()
335 __iommu_free(tbl, ret, npages); in iommu_alloc()
340 if (tbl->it_ops->flush) in iommu_alloc()
341 tbl->it_ops->flush(tbl); in iommu_alloc()
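
iommu_alloc() then converts the bitmap index returned by iommu_range_alloc() into a DMA address: it adds tbl->it_offset (the first entry of this table's window in TCE space), shifts left by tbl->it_page_shift, programs the TCEs through it_ops->set(), and on failure frees the range again before flushing. A two-line illustration of the address arithmetic, with all values invented:

    #include <stdio.h>

    int main(void)
    {
        unsigned long entry = 42;            /* index returned by the allocator */
        unsigned long it_offset = 0x100;     /* window start, in TCE entries    */
        unsigned long it_page_shift = 12;    /* 4K IOMMU pages                  */

        entry += it_offset;                  /* offset into the real TCE table  */
        printf("dma addr = 0x%lx\n", entry << it_page_shift);   /* 0x12a000    */
        return 0;
    }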
349 static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free_check() argument
354 entry = dma_addr >> tbl->it_page_shift; in iommu_free_check()
355 free_entry = entry - tbl->it_offset; in iommu_free_check()
357 if (((free_entry + npages) > tbl->it_size) || in iommu_free_check()
358 (entry < tbl->it_offset)) { in iommu_free_check()
363 printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl); in iommu_free_check()
364 printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno); in iommu_free_check()
365 printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size); in iommu_free_check()
366 printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset); in iommu_free_check()
367 printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index); in iommu_free_check()
377 static struct iommu_pool *get_pool(struct iommu_table *tbl, in get_pool() argument
381 unsigned long largepool_start = tbl->large_pool.start; in get_pool()
385 p = &tbl->large_pool; in get_pool()
387 unsigned int pool_nr = entry / tbl->poolsize; in get_pool()
389 BUG_ON(pool_nr > tbl->nr_pools); in get_pool()
390 p = &tbl->pools[pool_nr]; in get_pool()
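
get_pool() is the inverse lookup used on the free path: an entry at or beyond the large pool's start belongs to tbl->large_pool, anything else lands in pools[entry / poolsize]. A small standalone illustration of that mapping, with the structure and sizes invented for the example:

    #include <stdio.h>

    struct pool { unsigned long start, end; };

    /* Entries past the large pool's start belong to it; everything else is
     * in one of the small pools, indexed by entry / poolsize. */
    static const struct pool *get_pool(const struct pool *pools,
                                       const struct pool *large_pool,
                                       unsigned long poolsize,
                                       unsigned long entry)
    {
        if (entry >= large_pool->start)
            return large_pool;
        return &pools[entry / poolsize];
    }

    int main(void)
    {
        struct pool pools[4];
        struct pool large = { .start = 192, .end = 256 };
        unsigned long poolsize = 48;    /* 3/4 of a 256-entry table over 4 pools */

        for (int i = 0; i < 4; i++)
            pools[i] = (struct pool){ i * poolsize, (i + 1) * poolsize };

        printf("entry 100 -> pool %ld\n",
               (long)(get_pool(pools, &large, poolsize, 100) - pools));  /* 2 */
        return 0;
    }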
396 static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in __iommu_free() argument
403 entry = dma_addr >> tbl->it_page_shift; in __iommu_free()
404 free_entry = entry - tbl->it_offset; in __iommu_free()
406 pool = get_pool(tbl, free_entry); in __iommu_free()
408 if (!iommu_free_check(tbl, dma_addr, npages)) in __iommu_free()
411 tbl->it_ops->clear(tbl, entry, npages); in __iommu_free()
414 bitmap_clear(tbl->it_map, free_entry, npages); in __iommu_free()
418 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free() argument
421 __iommu_free(tbl, dma_addr, npages); in iommu_free()
427 if (tbl->it_ops->flush) in iommu_free()
428 tbl->it_ops->flush(tbl); in iommu_free()
431 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, in ppc_iommu_map_sg() argument
445 if ((nelems == 0) || !tbl) in ppc_iommu_map_sg()
470 npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
472 if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE && in ppc_iommu_map_sg()
474 align = PAGE_SHIFT - tbl->it_page_shift; in ppc_iommu_map_sg()
475 entry = iommu_range_alloc(dev, tbl, npages, &handle, in ppc_iommu_map_sg()
476 mask >> tbl->it_page_shift, align); in ppc_iommu_map_sg()
484 "vaddr %lx npages %lu\n", tbl, vaddr, in ppc_iommu_map_sg()
490 entry += tbl->it_offset; in ppc_iommu_map_sg()
491 dma_addr = entry << tbl->it_page_shift; in ppc_iommu_map_sg()
492 dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl)); in ppc_iommu_map_sg()
498 build_fail = tbl->it_ops->set(tbl, entry, npages, in ppc_iommu_map_sg()
499 vaddr & IOMMU_PAGE_MASK(tbl), in ppc_iommu_map_sg()
537 if (tbl->it_ops->flush) in ppc_iommu_map_sg()
538 tbl->it_ops->flush(tbl); in ppc_iommu_map_sg()
561 vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl); in ppc_iommu_map_sg()
563 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
564 __iommu_free(tbl, vaddr, npages); in ppc_iommu_map_sg()
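
One detail worth calling out in the ppc_iommu_map_sg() fragments: when the IOMMU page size is smaller than the kernel page size (tbl->it_page_shift < PAGE_SHIFT) and a segment is at least PAGE_SIZE long and page aligned, the allocation is requested with align = PAGE_SHIFT - tbl->it_page_shift so the mapping stays kernel-page aligned. Below is a hedged sketch of the page-count and alignment arithmetic; num_io_pages() only mirrors what iommu_num_pages() computes and is not the kernel helper:

    #include <stdio.h>

    #define PAGE_SHIFT 16UL                 /* e.g. 64K kernel pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* IOMMU pages covering [vaddr, vaddr + slen); same idea as iommu_num_pages(). */
    static unsigned long num_io_pages(unsigned long vaddr, unsigned long slen,
                                      unsigned long io_page_size)
    {
        unsigned long first = vaddr & ~(io_page_size - 1);
        unsigned long last  = (vaddr + slen - 1) & ~(io_page_size - 1);

        return (last - first) / io_page_size + 1;
    }

    int main(void)
    {
        unsigned long it_page_shift = 12;   /* 4K IOMMU pages behind 64K kernel pages */
        unsigned long vaddr = 0x10000, slen = PAGE_SIZE;
        unsigned long align = 0;

        /* Ask for kernel-page alignment when mapping whole, aligned kernel pages. */
        if (it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE && vaddr % PAGE_SIZE == 0)
            align = PAGE_SHIFT - it_page_shift;

        printf("npages=%lu align=%lu\n",
               num_io_pages(vaddr, slen, 1UL << it_page_shift), align);  /* 16, 4 */
        return 0;
    }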
575 void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, in ppc_iommu_unmap_sg() argument
583 if (!tbl) in ppc_iommu_unmap_sg()
594 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_unmap_sg()
595 __iommu_free(tbl, dma_handle, npages); in ppc_iommu_unmap_sg()
603 if (tbl->it_ops->flush) in ppc_iommu_unmap_sg()
604 tbl->it_ops->flush(tbl); in ppc_iommu_unmap_sg()
607 static void iommu_table_clear(struct iommu_table *tbl) in iommu_table_clear() argument
616 tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size); in iommu_table_clear()
621 if (tbl->it_ops->get) { in iommu_table_clear()
625 for (index = 0; index < tbl->it_size; index++) { in iommu_table_clear()
626 tceval = tbl->it_ops->get(tbl, index + tbl->it_offset); in iommu_table_clear()
631 __set_bit(index, tbl->it_map); in iommu_table_clear()
636 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) { in iommu_table_clear()
640 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES; in iommu_table_clear()
641 index < tbl->it_size; index++) in iommu_table_clear()
642 __clear_bit(index, tbl->it_map); in iommu_table_clear()
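
iommu_table_clear() behaves differently in a kdump kernel: rather than wiping the table with it_ops->clear(), it walks the existing TCEs through it_ops->get(), marks every still-live entry as used in it_map so the crash kernel cannot reuse it, and if that leaves fewer than KDUMP_MIN_TCE_ENTRIES free slots it forcibly releases the last KDUMP_MIN_TCE_ENTRIES entries. A small userspace model of that reservation logic, with an array standing in for the bitmap and all sizes invented:

    #include <stdbool.h>
    #include <stdio.h>

    #define IT_SIZE                32
    #define KDUMP_MIN_TCE_ENTRIES   4

    int main(void)
    {
        unsigned long tce[IT_SIZE] = { 0 };
        bool map[IT_SIZE] = { false };
        unsigned long used = 0;

        for (int i = 0; i < 30; i++)    /* pretend most entries are still live */
            tce[i] = 0x1000;

        /* Reserve every entry that still has a valid mapping. */
        for (int i = 0; i < IT_SIZE; i++)
            if (tce[i]) {
                map[i] = true;
                used++;
            }

        /* Guarantee a minimum number of free entries for the kdump kernel. */
        if (IT_SIZE - used < KDUMP_MIN_TCE_ENTRIES)
            for (int i = IT_SIZE - KDUMP_MIN_TCE_ENTRIES; i < IT_SIZE; i++)
                map[i] = false;

        for (int i = 0; i < IT_SIZE; i++)
            putchar(map[i] ? 'X' : '.');
        putchar('\n');
        return 0;
    }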
652 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) in iommu_init_table() argument
660 BUG_ON(!tbl->it_ops); in iommu_init_table()
663 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_init_table()
668 tbl->it_map = page_address(page); in iommu_init_table()
669 memset(tbl->it_map, 0, sz); in iommu_init_table()
676 if (tbl->it_offset == 0) in iommu_init_table()
677 set_bit(0, tbl->it_map); in iommu_init_table()
680 if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024)) in iommu_init_table()
681 tbl->nr_pools = IOMMU_NR_POOLS; in iommu_init_table()
683 tbl->nr_pools = 1; in iommu_init_table()
686 tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools; in iommu_init_table()
688 for (i = 0; i < tbl->nr_pools; i++) { in iommu_init_table()
689 p = &tbl->pools[i]; in iommu_init_table()
691 p->start = tbl->poolsize * i; in iommu_init_table()
693 p->end = p->start + tbl->poolsize; in iommu_init_table()
696 p = &tbl->large_pool; in iommu_init_table()
698 p->start = tbl->poolsize * i; in iommu_init_table()
700 p->end = tbl->it_size; in iommu_init_table()
702 iommu_table_clear(tbl); in iommu_init_table()
710 return tbl; in iommu_init_table()
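
The iommu_init_table() fragments also fix the pool layout: tables covering at least 1GB of DMA space are split into IOMMU_NR_POOLS small pools of poolsize = (it_size * 3 / 4) / nr_pools entries each, and the remaining quarter of the table becomes the large pool. A quick worked illustration of that split (sizes invented; IOMMU_NR_POOLS is assumed to be 4 here):

    #include <stdio.h>

    #define IOMMU_NR_POOLS 4            /* assumed value; see the kernel header */

    int main(void)
    {
        unsigned long it_size = 1UL << 21;   /* 2M entries of 4K = an 8GB window */
        unsigned long nr_pools = IOMMU_NR_POOLS;
        unsigned long poolsize = (it_size * 3 / 4) / nr_pools;
        unsigned long i;

        for (i = 0; i < nr_pools; i++)
            printf("pool %lu: [%lu, %lu)\n", i, poolsize * i, poolsize * (i + 1));

        /* Everything after the small pools belongs to the large pool. */
        printf("large pool: [%lu, %lu)\n", poolsize * i, it_size);
        return 0;
    }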
713 void iommu_free_table(struct iommu_table *tbl, const char *node_name) in iommu_free_table() argument
718 if (!tbl) in iommu_free_table()
721 if (!tbl->it_map) { in iommu_free_table()
722 kfree(tbl); in iommu_free_table()
730 if (tbl->it_offset == 0) in iommu_free_table()
731 clear_bit(0, tbl->it_map); in iommu_free_table()
734 if (!bitmap_empty(tbl->it_map, tbl->it_size)) in iommu_free_table()
738 bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_free_table()
742 free_pages((unsigned long) tbl->it_map, order); in iommu_free_table()
745 kfree(tbl); in iommu_free_table()
753 dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, in iommu_map_page() argument
767 npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); in iommu_map_page()
769 if (tbl) { in iommu_map_page()
771 if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE && in iommu_map_page()
773 align = PAGE_SHIFT - tbl->it_page_shift; in iommu_map_page()
775 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, in iommu_map_page()
776 mask >> tbl->it_page_shift, align, in iommu_map_page()
781 "vaddr %p npages %d\n", tbl, vaddr, in iommu_map_page()
785 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl)); in iommu_map_page()
791 void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, in iommu_unmap_page() argument
799 if (tbl) { in iommu_unmap_page()
801 IOMMU_PAGE_SIZE(tbl)); in iommu_unmap_page()
802 iommu_free(tbl, dma_handle, npages); in iommu_unmap_page()
810 void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, in iommu_alloc_coherent() argument
834 if (!tbl) in iommu_alloc_coherent()
845 nio_pages = size >> tbl->it_page_shift; in iommu_alloc_coherent()
846 io_order = get_iommu_order(size, tbl); in iommu_alloc_coherent()
847 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, in iommu_alloc_coherent()
848 mask >> tbl->it_page_shift, io_order, NULL); in iommu_alloc_coherent()
857 void iommu_free_coherent(struct iommu_table *tbl, size_t size, in iommu_free_coherent() argument
860 if (tbl) { in iommu_free_coherent()
864 nio_pages = size >> tbl->it_page_shift; in iommu_free_coherent()
865 iommu_free(tbl, dma_handle, nio_pages); in iommu_free_coherent()
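
iommu_alloc_coherent() sizes its request in IOMMU pages: nio_pages = size >> tbl->it_page_shift and io_order = get_iommu_order(size, tbl), roughly the log2 of the number of IOMMU pages, which iommu_alloc() uses as the natural-alignment order. The sketch below only approximates get_iommu_order(); it is not the kernel helper and the values are invented:

    #include <stdio.h>

    /* Rough stand-in for get_iommu_order(): the log2 number of IOMMU pages
     * needed to cover size, used as the allocation's alignment order. */
    static unsigned int io_order(unsigned long size, unsigned int it_page_shift)
    {
        unsigned long pages = (size + (1UL << it_page_shift) - 1) >> it_page_shift;
        unsigned int order = 0;

        while ((1UL << order) < pages)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned int it_page_shift = 12;    /* 4K IOMMU pages */
        unsigned long size = 24 * 1024;     /* already rounded to IOMMU pages */

        printf("nio_pages=%lu io_order=%u\n",
               size >> it_page_shift, io_order(size, it_page_shift));  /* 6, 3 */
        return 0;
    }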
932 void iommu_flush_tce(struct iommu_table *tbl) in iommu_flush_tce() argument
935 if (tbl->it_ops->flush) in iommu_flush_tce()
936 tbl->it_ops->flush(tbl); in iommu_flush_tce()
943 int iommu_tce_clear_param_check(struct iommu_table *tbl, in iommu_tce_clear_param_check() argument
951 if (ioba & ~IOMMU_PAGE_MASK(tbl)) in iommu_tce_clear_param_check()
954 ioba >>= tbl->it_page_shift; in iommu_tce_clear_param_check()
955 if (ioba < tbl->it_offset) in iommu_tce_clear_param_check()
958 if ((ioba + npages) > (tbl->it_offset + tbl->it_size)) in iommu_tce_clear_param_check()
965 int iommu_tce_put_param_check(struct iommu_table *tbl, in iommu_tce_put_param_check() argument
968 if (tce & ~IOMMU_PAGE_MASK(tbl)) in iommu_tce_put_param_check()
971 if (ioba & ~IOMMU_PAGE_MASK(tbl)) in iommu_tce_put_param_check()
974 ioba >>= tbl->it_page_shift; in iommu_tce_put_param_check()
975 if (ioba < tbl->it_offset) in iommu_tce_put_param_check()
978 if ((ioba + 1) > (tbl->it_offset + tbl->it_size)) in iommu_tce_put_param_check()
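
Both parameter checks test the same conditions: the I/O address (and, for puts, the TCE itself) must be aligned to the IOMMU page size, i.e. x & ~IOMMU_PAGE_MASK(tbl) must be zero, and once shifted down to an entry number the access must fall inside [it_offset, it_offset + it_size). A compact standalone version of the clear-side validation, with the macros and window values invented:

    #include <stdio.h>

    #define IT_PAGE_SHIFT 12UL
    #define IT_OFFSET     0x100UL
    #define IT_SIZE       0x1000UL
    #define IO_PAGE_MASK  (~((1UL << IT_PAGE_SHIFT) - 1))

    /* Returns 0 if ioba names npages valid, page-aligned entries in the window. */
    static int clear_param_check(unsigned long ioba, unsigned long npages)
    {
        if (ioba & ~IO_PAGE_MASK)                      /* not page aligned   */
            return -1;
        ioba >>= IT_PAGE_SHIFT;
        if (ioba < IT_OFFSET)                          /* below the window   */
            return -1;
        if (ioba + npages > IT_OFFSET + IT_SIZE)       /* runs past the end  */
            return -1;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", clear_param_check(0x100000, 16));   /* 0: inside window */
        printf("%d\n", clear_param_check(0x100123, 1));    /* -1: misaligned   */
        return 0;
    }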
985 long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry, in iommu_tce_xchg() argument
990 ret = tbl->it_ops->exchange(tbl, entry, hpa, direction); in iommu_tce_xchg()
1005 int iommu_take_ownership(struct iommu_table *tbl) in iommu_take_ownership() argument
1007 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; in iommu_take_ownership()
1017 if (!tbl->it_ops->exchange) in iommu_take_ownership()
1020 spin_lock_irqsave(&tbl->large_pool.lock, flags); in iommu_take_ownership()
1021 for (i = 0; i < tbl->nr_pools; i++) in iommu_take_ownership()
1022 spin_lock(&tbl->pools[i].lock); in iommu_take_ownership()
1024 if (tbl->it_offset == 0) in iommu_take_ownership()
1025 clear_bit(0, tbl->it_map); in iommu_take_ownership()
1027 if (!bitmap_empty(tbl->it_map, tbl->it_size)) { in iommu_take_ownership()
1031 if (tbl->it_offset == 0) in iommu_take_ownership()
1032 set_bit(0, tbl->it_map); in iommu_take_ownership()
1034 memset(tbl->it_map, 0xff, sz); in iommu_take_ownership()
1037 for (i = 0; i < tbl->nr_pools; i++) in iommu_take_ownership()
1038 spin_unlock(&tbl->pools[i].lock); in iommu_take_ownership()
1039 spin_unlock_irqrestore(&tbl->large_pool.lock, flags); in iommu_take_ownership()
1045 void iommu_release_ownership(struct iommu_table *tbl) in iommu_release_ownership() argument
1047 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; in iommu_release_ownership()
1049 spin_lock_irqsave(&tbl->large_pool.lock, flags); in iommu_release_ownership()
1050 for (i = 0; i < tbl->nr_pools; i++) in iommu_release_ownership()
1051 spin_lock(&tbl->pools[i].lock); in iommu_release_ownership()
1053 memset(tbl->it_map, 0, sz); in iommu_release_ownership()
1056 if (tbl->it_offset == 0) in iommu_release_ownership()
1057 set_bit(0, tbl->it_map); in iommu_release_ownership()
1059 for (i = 0; i < tbl->nr_pools; i++) in iommu_release_ownership()
1060 spin_unlock(&tbl->pools[i].lock); in iommu_release_ownership()
1061 spin_unlock_irqrestore(&tbl->large_pool.lock, flags); in iommu_release_ownership()
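
The ownership fragments implement a simple handover around it_map: to take the table (e.g. for VFIO) the code checks, under all pool locks, that nothing other than the reserved entry 0 is still mapped, then sets every bit so the kernel's own DMA allocator can no longer hand out entries; releasing ownership clears the map and re-reserves entry 0. A minimal model of that state flip, with plain bytes instead of a bitmap and no locking; illustrative only:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define IT_SIZE 16

    static bool it_map[IT_SIZE];

    /* Succeeds only if no entry (apart from the reserved entry 0) is in use. */
    static int take_ownership(void)
    {
        bool busy = false;

        it_map[0] = false;                    /* ignore the reserved entry */
        for (int i = 0; i < IT_SIZE; i++)
            busy |= it_map[i];

        if (busy) {
            it_map[0] = true;                 /* put the reservation back  */
            return -1;
        }
        memset(it_map, 1, sizeof(it_map));    /* block the kernel allocator */
        return 0;
    }

    static void release_ownership(void)
    {
        memset(it_map, 0, sizeof(it_map));
        it_map[0] = true;                     /* re-reserve entry 0 */
    }

    int main(void)
    {
        it_map[0] = true;                     /* as after iommu_init_table() */
        printf("take: %d\n", take_ownership());   /* 0  */
        release_ownership();
        it_map[5] = true;                     /* a live mapping now exists   */
        printf("take: %d\n", take_ownership());   /* -1 */
        return 0;
    }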
1067 struct iommu_table *tbl; in iommu_add_device() local
1085 tbl = get_iommu_table_base(dev); in iommu_add_device()
1086 if (!tbl) { in iommu_add_device()
1092 tgl = list_first_entry_or_null(&tbl->it_group_list, in iommu_add_device()
1103 if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) { in iommu_add_device()
1105 __func__, IOMMU_PAGE_SIZE(tbl), in iommu_add_device()