Lines Matching refs:tbl (arch/powerpc/kernel/iommu.c)

177 				       struct iommu_table *tbl,  in iommu_range_alloc()  argument
211 pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc()
214 pool = &(tbl->large_pool); in iommu_range_alloc()
216 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
236 if (limit + tbl->it_offset > mask) { in iommu_range_alloc()
237 limit = mask - tbl->it_offset + 1; in iommu_range_alloc()
244 pool = &(tbl->pools[0]); in iommu_range_alloc()
254 1 << tbl->it_page_shift); in iommu_range_alloc()
256 boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift); in iommu_range_alloc()
259 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
260 boundary_size >> tbl->it_page_shift, align_mask); in iommu_range_alloc()
268 } else if (pass <= tbl->nr_pools) { in iommu_range_alloc()
271 pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1); in iommu_range_alloc()
272 pool = &tbl->pools[pool_nr]; in iommu_range_alloc()
293 pool->hint = (end + tbl->it_blocksize - 1) & in iommu_range_alloc()
294 ~(tbl->it_blocksize - 1); in iommu_range_alloc()
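The iommu_range_alloc() fragments above are the heart of the powerpc IOMMU space allocator: the allocating CPU's hash selects one of nr_pools small pools (nr_pools is a power of two, so the & mask is a cheap modulo), a large request is steered to the dedicated large pool, and after a successful search the pool hint is rounded up to the next it_blocksize boundary. Below is a minimal stand-alone sketch of just that arithmetic; struct table, pick_pool() and next_hint() are illustrative names for this sketch, not the kernel's struct iommu_table or its helpers.

	#include <stdio.h>

	#define NR_POOLS 4			/* power of two, as the mask requires */

	struct pool { unsigned long start, end, hint; };

	struct table {
		unsigned long it_blocksize;	/* allocation granule, power of two */
		unsigned int nr_pools;
		struct pool pools[NR_POOLS];
		struct pool large_pool;
	};

	/* Pool selection as in iommu_range_alloc(): large requests use the
	 * large pool, everything else hashes onto one of the small pools. */
	static struct pool *pick_pool(struct table *tbl, unsigned int cpu_hash,
				      int largealloc)
	{
		if (largealloc)
			return &tbl->large_pool;
		return &tbl->pools[cpu_hash & (tbl->nr_pools - 1)];
	}

	/* Hint update from lines 293-294: round the end of the allocation up
	 * to the next it_blocksize boundary so later searches start there. */
	static unsigned long next_hint(struct table *tbl, unsigned long end)
	{
		return (end + tbl->it_blocksize - 1) & ~(tbl->it_blocksize - 1);
	}

	int main(void)
	{
		struct table t = { .it_blocksize = 16, .nr_pools = NR_POOLS };
		struct pool *p = pick_pool(&t, 7, 0);

		p->hint = next_hint(&t, 21);
		printf("small pool #%ld, hint 0x%lx\n", (long)(p - t.pools), p->hint);
		return 0;
	}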
306 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, in iommu_alloc() argument
316 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
321 entry += tbl->it_offset; /* Offset into real TCE table */ in iommu_alloc()
322 ret = entry << tbl->it_page_shift; /* Set the return dma address */ in iommu_alloc()
325 build_fail = ppc_md.tce_build(tbl, entry, npages, in iommu_alloc()
327 IOMMU_PAGE_MASK(tbl), direction, attrs); in iommu_alloc()
335 __iommu_free(tbl, ret, npages); in iommu_alloc()
341 ppc_md.tce_flush(tbl); in iommu_alloc()
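iommu_alloc() turns the bitmap slot returned by iommu_range_alloc() into a bus address by adding it_offset (the first TCE entry of the table's window) and shifting by it_page_shift (lines 321-322); the sub-page offset is ORed back in later by the callers. A short sketch of that address construction, again with an illustrative struct rather than the real iommu_table:

	#include <stdio.h>

	struct table {
		unsigned long it_offset;	/* first TCE entry of this window */
		unsigned int it_page_shift;	/* IOMMU page size, as a shift */
	};

	/* entry += it_offset; ret = entry << it_page_shift;  (lines 321-322) */
	static unsigned long entry_to_dma(const struct table *tbl, unsigned long slot)
	{
		unsigned long entry = slot + tbl->it_offset;
		return entry << tbl->it_page_shift;
	}

	int main(void)
	{
		struct table t = { .it_offset = 0x100, .it_page_shift = 12 };

		/* slot 5 in a 4K-page window starting at entry 0x100 -> 0x105000 */
		printf("dma = 0x%lx\n", entry_to_dma(&t, 5));
		return 0;
	}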
349 static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free_check() argument
354 entry = dma_addr >> tbl->it_page_shift; in iommu_free_check()
355 free_entry = entry - tbl->it_offset; in iommu_free_check()
357 if (((free_entry + npages) > tbl->it_size) || in iommu_free_check()
358 (entry < tbl->it_offset)) { in iommu_free_check()
363 printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl); in iommu_free_check()
364 printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno); in iommu_free_check()
365 printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size); in iommu_free_check()
366 printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset); in iommu_free_check()
367 printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index); in iommu_free_check()
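iommu_free_check() reverses that mapping and refuses to free ranges that fall outside the window: the tests on lines 357-358 amount to requiring it_offset <= entry and entry + npages <= it_offset + it_size. A hedged stand-alone version of the same check, with the same simplified table fields as above:

	#include <stdio.h>
	#include <stdbool.h>

	struct table {
		unsigned long it_offset, it_size;
		unsigned int it_page_shift;
	};

	/* Mirrors lines 354-358: convert the bus address back to a TCE entry
	 * and require the whole range to lie in [it_offset, it_offset + it_size). */
	static bool free_in_range(const struct table *tbl, unsigned long dma_addr,
				  unsigned int npages)
	{
		unsigned long entry = dma_addr >> tbl->it_page_shift;
		unsigned long free_entry = entry - tbl->it_offset;

		if (free_entry + npages > tbl->it_size || entry < tbl->it_offset)
			return false;
		return true;
	}

	int main(void)
	{
		struct table t = { .it_offset = 0x100, .it_size = 0x1000,
				   .it_page_shift = 12 };

		printf("%d %d\n", free_in_range(&t, 0x105000, 1),	/* inside */
				  free_in_range(&t, 0x0ff000, 1));	/* before the window */
		return 0;
	}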
377 static struct iommu_pool *get_pool(struct iommu_table *tbl, in get_pool() argument
381 unsigned long largepool_start = tbl->large_pool.start; in get_pool()
385 p = &tbl->large_pool; in get_pool()
387 unsigned int pool_nr = entry / tbl->poolsize; in get_pool()
389 BUG_ON(pool_nr > tbl->nr_pools); in get_pool()
390 p = &tbl->pools[pool_nr]; in get_pool()
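get_pool() maps a freed entry back to the pool that owns it: anything at or past large_pool.start belongs to the large pool, and the rest of the window is split into equal small pools, so entry / poolsize is the pool index. A sketch under the same simplified layout (struct and function names are illustrative):

	#include <stdio.h>

	#define NR_POOLS 4

	struct pool { unsigned long start, end, hint; };

	struct table {
		unsigned long poolsize;
		unsigned int nr_pools;
		struct pool pools[NR_POOLS];
		struct pool large_pool;
	};

	/* Mirrors get_pool(): large-pool entries by range, small pools by index. */
	static struct pool *get_pool_sketch(struct table *tbl, unsigned long entry)
	{
		if (entry >= tbl->large_pool.start)
			return &tbl->large_pool;
		return &tbl->pools[entry / tbl->poolsize];
	}

	int main(void)
	{
		struct table t = { .poolsize = 0x300, .nr_pools = NR_POOLS,
				   .large_pool = { .start = 0xc00 } };

		printf("entry 0x400 -> small pool #%ld\n",
		       (long)(get_pool_sketch(&t, 0x400) - t.pools));
		printf("entry 0xd00 -> large pool? %d\n",
		       get_pool_sketch(&t, 0xd00) == &t.large_pool);
		return 0;
	}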
396 static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in __iommu_free() argument
403 entry = dma_addr >> tbl->it_page_shift; in __iommu_free()
404 free_entry = entry - tbl->it_offset; in __iommu_free()
406 pool = get_pool(tbl, free_entry); in __iommu_free()
408 if (!iommu_free_check(tbl, dma_addr, npages)) in __iommu_free()
411 ppc_md.tce_free(tbl, entry, npages); in __iommu_free()
414 bitmap_clear(tbl->it_map, free_entry, npages); in __iommu_free()
418 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free() argument
421 __iommu_free(tbl, dma_addr, npages); in iommu_free()
428 ppc_md.tce_flush(tbl); in iommu_free()
431 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, in ppc_iommu_map_sg() argument
445 if ((nelems == 0) || !tbl) in ppc_iommu_map_sg()
470 npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
472 if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE && in ppc_iommu_map_sg()
474 align = PAGE_SHIFT - tbl->it_page_shift; in ppc_iommu_map_sg()
475 entry = iommu_range_alloc(dev, tbl, npages, &handle, in ppc_iommu_map_sg()
476 mask >> tbl->it_page_shift, align); in ppc_iommu_map_sg()
484 "vaddr %lx npages %lu\n", tbl, vaddr, in ppc_iommu_map_sg()
490 entry += tbl->it_offset; in ppc_iommu_map_sg()
491 dma_addr = entry << tbl->it_page_shift; in ppc_iommu_map_sg()
492 dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl)); in ppc_iommu_map_sg()
498 build_fail = ppc_md.tce_build(tbl, entry, npages, in ppc_iommu_map_sg()
499 vaddr & IOMMU_PAGE_MASK(tbl), in ppc_iommu_map_sg()
538 ppc_md.tce_flush(tbl); in ppc_iommu_map_sg()
561 vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl); in ppc_iommu_map_sg()
563 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
564 __iommu_free(tbl, vaddr, npages); in ppc_iommu_map_sg()
575 void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, in ppc_iommu_unmap_sg() argument
583 if (!tbl) in ppc_iommu_unmap_sg()
594 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_unmap_sg()
595 __iommu_free(tbl, dma_handle, npages); in ppc_iommu_unmap_sg()
604 ppc_md.tce_flush(tbl); in ppc_iommu_unmap_sg()
607 static void iommu_table_clear(struct iommu_table *tbl) in iommu_table_clear() argument
616 ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size); in iommu_table_clear()
625 for (index = 0; index < tbl->it_size; index++) { in iommu_table_clear()
626 tceval = ppc_md.tce_get(tbl, index + tbl->it_offset); in iommu_table_clear()
631 __set_bit(index, tbl->it_map); in iommu_table_clear()
636 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) { in iommu_table_clear()
640 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES; in iommu_table_clear()
641 index < tbl->it_size; index++) in iommu_table_clear()
642 __clear_bit(index, tbl->it_map); in iommu_table_clear()
652 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) in iommu_init_table() argument
661 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_init_table()
666 tbl->it_map = page_address(page); in iommu_init_table()
667 memset(tbl->it_map, 0, sz); in iommu_init_table()
674 if (tbl->it_offset == 0) in iommu_init_table()
675 set_bit(0, tbl->it_map); in iommu_init_table()
678 if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024)) in iommu_init_table()
679 tbl->nr_pools = IOMMU_NR_POOLS; in iommu_init_table()
681 tbl->nr_pools = 1; in iommu_init_table()
684 tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools; in iommu_init_table()
686 for (i = 0; i < tbl->nr_pools; i++) { in iommu_init_table()
687 p = &tbl->pools[i]; in iommu_init_table()
689 p->start = tbl->poolsize * i; in iommu_init_table()
691 p->end = p->start + tbl->poolsize; in iommu_init_table()
694 p = &tbl->large_pool; in iommu_init_table()
696 p->start = tbl->poolsize * i; in iommu_init_table()
698 p->end = tbl->it_size; in iommu_init_table()
700 iommu_table_clear(tbl); in iommu_init_table()
708 return tbl; in iommu_init_table()
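iommu_init_table() only enables multiple pools when the DMA window spans at least 1 GiB; three quarters of the entries are divided evenly among the small pools and the remaining quarter becomes the large pool (lines 678-698). The sizing arithmetic stand-alone; IOMMU_NR_POOLS is assumed to be 4 for this sketch, and struct layout is an illustrative container, not a kernel type:

	#include <stdio.h>

	#define IOMMU_NR_POOLS 4		/* value assumed for this sketch */

	struct layout {
		unsigned int nr_pools;
		unsigned long poolsize;		/* entries per small pool */
		unsigned long large_start;	/* first entry of the large pool */
	};

	/* Pool sizing as on lines 678-698: windows of 1 GiB or more get
	 * IOMMU_NR_POOLS small pools over 3/4 of the entries, the rest is
	 * the large pool. */
	static struct layout size_pools(unsigned long it_size, unsigned int page_shift)
	{
		struct layout l;

		if ((it_size << page_shift) >= (1UL * 1024 * 1024 * 1024))
			l.nr_pools = IOMMU_NR_POOLS;
		else
			l.nr_pools = 1;

		l.poolsize = (it_size * 3 / 4) / l.nr_pools;
		l.large_start = l.poolsize * l.nr_pools;
		return l;
	}

	int main(void)
	{
		/* a 1 GiB window of 64K IOMMU pages: 16384 entries */
		struct layout l = size_pools(1UL << 14, 16);

		printf("%u small pools of %lu entries, large pool from %lu\n",
		       l.nr_pools, l.poolsize, l.large_start);
		return 0;
	}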
711 void iommu_free_table(struct iommu_table *tbl, const char *node_name) in iommu_free_table() argument
716 if (!tbl || !tbl->it_map) { in iommu_free_table()
726 if (tbl->it_offset == 0) in iommu_free_table()
727 clear_bit(0, tbl->it_map); in iommu_free_table()
730 if (tbl->it_group) { in iommu_free_table()
731 iommu_group_put(tbl->it_group); in iommu_free_table()
732 BUG_ON(tbl->it_group); in iommu_free_table()
737 if (!bitmap_empty(tbl->it_map, tbl->it_size)) in iommu_free_table()
741 bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_free_table()
745 free_pages((unsigned long) tbl->it_map, order); in iommu_free_table()
748 kfree(tbl); in iommu_free_table()
756 dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, in iommu_map_page() argument
770 npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); in iommu_map_page()
772 if (tbl) { in iommu_map_page()
774 if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE && in iommu_map_page()
776 align = PAGE_SHIFT - tbl->it_page_shift; in iommu_map_page()
778 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, in iommu_map_page()
779 mask >> tbl->it_page_shift, align, in iommu_map_page()
784 "vaddr %p npages %d\n", tbl, vaddr, in iommu_map_page()
788 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl)); in iommu_map_page()
794 void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, in iommu_unmap_page() argument
802 if (tbl) { in iommu_unmap_page()
804 IOMMU_PAGE_SIZE(tbl)); in iommu_unmap_page()
805 iommu_free(tbl, dma_handle, npages); in iommu_unmap_page()
813 void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, in iommu_alloc_coherent() argument
837 if (!tbl) in iommu_alloc_coherent()
848 nio_pages = size >> tbl->it_page_shift; in iommu_alloc_coherent()
849 io_order = get_iommu_order(size, tbl); in iommu_alloc_coherent()
850 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, in iommu_alloc_coherent()
851 mask >> tbl->it_page_shift, io_order, NULL); in iommu_alloc_coherent()
860 void iommu_free_coherent(struct iommu_table *tbl, size_t size, in iommu_free_coherent() argument
863 if (tbl) { in iommu_free_coherent()
867 nio_pages = size >> tbl->it_page_shift; in iommu_free_coherent()
868 iommu_free(tbl, dma_handle, nio_pages); in iommu_free_coherent()
880 struct iommu_table *tbl = iommu_data; in group_release() local
881 tbl->it_group = NULL; in group_release()
884 void iommu_register_group(struct iommu_table *tbl, in iommu_register_group() argument
896 tbl->it_group = grp; in iommu_register_group()
897 iommu_group_set_iommudata(grp, tbl, group_release); in iommu_register_group()
919 void iommu_flush_tce(struct iommu_table *tbl) in iommu_flush_tce() argument
923 ppc_md.tce_flush(tbl); in iommu_flush_tce()
930 int iommu_tce_clear_param_check(struct iommu_table *tbl, in iommu_tce_clear_param_check() argument
938 if (ioba & ~IOMMU_PAGE_MASK(tbl)) in iommu_tce_clear_param_check()
941 ioba >>= tbl->it_page_shift; in iommu_tce_clear_param_check()
942 if (ioba < tbl->it_offset) in iommu_tce_clear_param_check()
945 if ((ioba + npages) > (tbl->it_offset + tbl->it_size)) in iommu_tce_clear_param_check()
952 int iommu_tce_put_param_check(struct iommu_table *tbl, in iommu_tce_put_param_check() argument
958 if (tce & ~(IOMMU_PAGE_MASK(tbl) | TCE_PCI_WRITE | TCE_PCI_READ)) in iommu_tce_put_param_check()
961 if (ioba & ~IOMMU_PAGE_MASK(tbl)) in iommu_tce_put_param_check()
964 ioba >>= tbl->it_page_shift; in iommu_tce_put_param_check()
965 if (ioba < tbl->it_offset) in iommu_tce_put_param_check()
968 if ((ioba + 1) > (tbl->it_offset + tbl->it_size)) in iommu_tce_put_param_check()
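The two *_param_check() helpers validate caller-supplied values before the table is touched: a TCE may only carry an IOMMU-page-aligned address plus the read/write permission bits, and the I/O bus address (ioba), once shifted down to an entry number, has to stay inside [it_offset, it_offset + it_size). A hedged stand-alone rendering of those checks; the TCE_PCI_* bit values, page_mask() and the other names are assumptions of the sketch:

	#include <stdio.h>
	#include <stdbool.h>

	#define TCE_PCI_WRITE	0x2		/* permission bits, values illustrative */
	#define TCE_PCI_READ	0x1

	struct table {
		unsigned long it_offset, it_size;
		unsigned int it_page_shift;
	};

	static unsigned long page_mask(const struct table *tbl)
	{
		return ~((1UL << tbl->it_page_shift) - 1);	/* IOMMU_PAGE_MASK(tbl) */
	}

	/* A tce may only contain an aligned address plus the permission bits. */
	static bool tce_is_valid(const struct table *tbl, unsigned long tce)
	{
		return !(tce & ~(page_mask(tbl) | TCE_PCI_WRITE | TCE_PCI_READ));
	}

	/* ioba must be page aligned and the npages entries must fit the window. */
	static bool ioba_is_valid(const struct table *tbl, unsigned long ioba,
				  unsigned long npages)
	{
		if (ioba & ~page_mask(tbl))
			return false;
		ioba >>= tbl->it_page_shift;
		return ioba >= tbl->it_offset &&
		       ioba + npages <= tbl->it_offset + tbl->it_size;
	}

	int main(void)
	{
		struct table t = { .it_offset = 0, .it_size = 0x1000,
				   .it_page_shift = 12 };

		printf("%d %d\n", tce_is_valid(&t, 0x2000 | TCE_PCI_READ),
				  ioba_is_valid(&t, 0xfff000, 2));
		return 0;
	}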
975 unsigned long iommu_clear_tce(struct iommu_table *tbl, unsigned long entry) in iommu_clear_tce() argument
978 struct iommu_pool *pool = get_pool(tbl, entry); in iommu_clear_tce()
982 oldtce = ppc_md.tce_get(tbl, entry); in iommu_clear_tce()
984 ppc_md.tce_free(tbl, entry, 1); in iommu_clear_tce()
994 int iommu_clear_tces_and_put_pages(struct iommu_table *tbl, in iommu_clear_tces_and_put_pages() argument
1001 oldtce = iommu_clear_tce(tbl, entry); in iommu_clear_tces_and_put_pages()
1022 int iommu_tce_build(struct iommu_table *tbl, unsigned long entry, in iommu_tce_build() argument
1027 struct iommu_pool *pool = get_pool(tbl, entry); in iommu_tce_build()
1031 oldtce = ppc_md.tce_get(tbl, entry); in iommu_tce_build()
1034 ret = ppc_md.tce_build(tbl, entry, 1, hwaddr, direction, NULL); in iommu_tce_build()
1047 int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry, in iommu_put_tce_user_mode() argument
1052 unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK; in iommu_put_tce_user_mode()
1064 ret = iommu_tce_build(tbl, entry, hwaddr, direction); in iommu_put_tce_user_mode()
1070 __func__, entry << tbl->it_page_shift, tce, ret); in iommu_put_tce_user_mode()
1076 int iommu_take_ownership(struct iommu_table *tbl) in iommu_take_ownership() argument
1078 unsigned long sz = (tbl->it_size + 7) >> 3; in iommu_take_ownership()
1080 if (tbl->it_offset == 0) in iommu_take_ownership()
1081 clear_bit(0, tbl->it_map); in iommu_take_ownership()
1083 if (!bitmap_empty(tbl->it_map, tbl->it_size)) { in iommu_take_ownership()
1088 memset(tbl->it_map, 0xff, sz); in iommu_take_ownership()
1089 iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size); in iommu_take_ownership()
1096 if (tbl->set_bypass) in iommu_take_ownership()
1097 tbl->set_bypass(tbl, false); in iommu_take_ownership()
1103 void iommu_release_ownership(struct iommu_table *tbl) in iommu_release_ownership() argument
1105 unsigned long sz = (tbl->it_size + 7) >> 3; in iommu_release_ownership()
1107 iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size); in iommu_release_ownership()
1108 memset(tbl->it_map, 0, sz); in iommu_release_ownership()
1111 if (tbl->it_offset == 0) in iommu_release_ownership()
1112 set_bit(0, tbl->it_map); in iommu_release_ownership()
1115 if (tbl->set_bypass) in iommu_release_ownership()
1116 tbl->set_bypass(tbl, true); in iommu_release_ownership()
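iommu_take_ownership() and iommu_release_ownership() flip the table between kernel DMA use and exclusive user (VFIO) control: ownership can only be taken when the allocation bitmap is empty apart from the reserved entry 0 (when it_offset == 0); the whole bitmap is then marked used so the DMA API stays away, existing TCEs are cleared, and the bypass window is switched off via set_bypass(); releasing ownership undoes all of that. A compact sketch of the bitmap side of the handshake, using a byte-per-entry map instead of the kernel bitmap API and with deliberately simplified failure handling:

	#include <stdio.h>
	#include <string.h>
	#include <stdbool.h>

	/* Simplified stand-in for the it_map handling in iommu_take_ownership()
	 * and iommu_release_ownership(); not the kernel data structure. */
	struct table {
		unsigned long it_offset, it_size;
		unsigned char *it_map;		/* one byte per entry, 0 = free */
	};

	static bool map_empty(const struct table *tbl)
	{
		for (unsigned long i = 0; i < tbl->it_size; i++)
			if (tbl->it_map[i])
				return false;
		return true;
	}

	/* Fail if anything is still mapped, then claim every entry for the user.
	 * (Error handling here is a simplification of the sketch.) */
	static int take_ownership(struct table *tbl)
	{
		if (tbl->it_offset == 0)
			tbl->it_map[0] = 0;		/* drop the reserved entry 0 */

		if (!map_empty(tbl)) {
			if (tbl->it_offset == 0)
				tbl->it_map[0] = 1;	/* restore the reservation */
			return -1;			/* still in use by the kernel */
		}

		memset(tbl->it_map, 1, tbl->it_size);	/* hide the table from the DMA API */
		return 0;
	}

	/* Give the table back: clear the map and re-reserve entry 0. */
	static void release_ownership(struct table *tbl)
	{
		memset(tbl->it_map, 0, tbl->it_size);
		if (tbl->it_offset == 0)
			tbl->it_map[0] = 1;
	}

	int main(void)
	{
		unsigned char map[16] = { 1 };		/* entry 0 reserved, rest free */
		struct table t = { .it_offset = 0, .it_size = 16, .it_map = map };

		printf("take: %d\n", take_ownership(&t));
		release_ownership(&t);
		printf("entry 0 reserved again: %d\n", t.it_map[0]);
		return 0;
	}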
1122 struct iommu_table *tbl; in iommu_add_device() local
1139 tbl = get_iommu_table_base(dev); in iommu_add_device()
1140 if (!tbl || !tbl->it_group) { in iommu_add_device()
1148 iommu_group_id(tbl->it_group)); in iommu_add_device()
1150 if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) { in iommu_add_device()
1152 __func__, IOMMU_PAGE_SIZE(tbl), in iommu_add_device()
1157 return iommu_group_add_device(tbl->it_group, dev); in iommu_add_device()