iovad 105 drivers/iommu/amd_iommu.c static void iova_domain_flush_tlb(struct iova_domain *iovad);
iovad 115 drivers/iommu/amd_iommu.c struct iova_domain iovad;
iovad 1771 drivers/iommu/amd_iommu.c pfn = alloc_iova_fast(&dma_dom->iovad, pages,
iovad 1775 drivers/iommu/amd_iommu.c pfn = alloc_iova_fast(&dma_dom->iovad, pages,
iovad 1788 drivers/iommu/amd_iommu.c free_iova_fast(&dma_dom->iovad, address, pages);
iovad 1877 drivers/iommu/amd_iommu.c static void iova_domain_flush_tlb(struct iova_domain *iovad)
iovad 1881 drivers/iommu/amd_iommu.c dom = container_of(iovad, struct dma_ops_domain, iovad);
iovad 1895 drivers/iommu/amd_iommu.c put_iova_domain(&dom->iovad);
iovad 1927 drivers/iommu/amd_iommu.c init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);
iovad 1929 drivers/iommu/amd_iommu.c if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
iovad 1933 drivers/iommu/amd_iommu.c copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad);
iovad 2498 drivers/iommu/amd_iommu.c queue_iova(&dma_dom->iovad, dma_addr >> PAGE_SHIFT, pages, 0);
iovad 2656 drivers/iommu/amd_iommu.c free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
iovad 3247 drivers/iommu/amd_iommu.c WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
iovad 42 drivers/iommu/dma-iommu.c struct iova_domain iovad;
iovad 55 drivers/iommu/dma-iommu.c return cookie->iovad.granule;
iovad 138 drivers/iommu/dma-iommu.c if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
iovad 139 drivers/iommu/dma-iommu.c put_iova_domain(&cookie->iovad);
iovad 172 drivers/iommu/dma-iommu.c struct iova_domain *iovad = &cookie->iovad;
iovad 176 drivers/iommu/dma-iommu.c start -= iova_offset(iovad, start);
iovad 177 drivers/iommu/dma-iommu.c num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
iovad 188 drivers/iommu/dma-iommu.c start += iovad->granule;
iovad 195 drivers/iommu/dma-iommu.c struct iova_domain *iovad)
iovad 206 drivers/iommu/dma-iommu.c lo = iova_pfn(iovad, window->res->start - window->offset);
iovad 207 drivers/iommu/dma-iommu.c hi = iova_pfn(iovad, window->res->end - window->offset);
iovad 208 drivers/iommu/dma-iommu.c reserve_iova(iovad, lo, hi);
iovad 216 drivers/iommu/dma-iommu.c lo = iova_pfn(iovad, start);
iovad 217 drivers/iommu/dma-iommu.c hi = iova_pfn(iovad, end);
iovad 218 drivers/iommu/dma-iommu.c reserve_iova(iovad, lo, hi);
iovad 241 drivers/iommu/dma-iommu.c struct iova_domain *iovad = &cookie->iovad;
iovad 247 drivers/iommu/dma-iommu.c ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
iovad 260 drivers/iommu/dma-iommu.c lo = iova_pfn(iovad, region->start);
iovad 261 drivers/iommu/dma-iommu.c hi = iova_pfn(iovad, region->start + region->length - 1);
iovad 262 drivers/iommu/dma-iommu.c reserve_iova(iovad, lo, hi);
iovad 275 drivers/iommu/dma-iommu.c static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
iovad 280 drivers/iommu/dma-iommu.c cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
iovad 306 drivers/iommu/dma-iommu.c struct iova_domain *iovad;
iovad 312 drivers/iommu/dma-iommu.c iovad = &cookie->iovad;
iovad 331 drivers/iommu/dma-iommu.c if (iovad->start_pfn) {
iovad 332 drivers/iommu/dma-iommu.c if (1UL << order != iovad->granule ||
iovad 333 drivers/iommu/dma-iommu.c base_pfn != iovad->start_pfn) {
iovad 341 drivers/iommu/dma-iommu.c init_iova_domain(iovad, 1UL << order, base_pfn);
iovad 346 drivers/iommu/dma-iommu.c init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
iovad 388 drivers/iommu/dma-iommu.c struct iova_domain *iovad = &cookie->iovad;
iovad 396 drivers/iommu/dma-iommu.c shift = iova_shift(iovad);
iovad 415 drivers/iommu/dma-iommu.c iova = alloc_iova_fast(iovad, iova_len,
iovad 419 drivers/iommu/dma-iommu.c iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
iovad 428 drivers/iommu/dma-iommu.c struct iova_domain *iovad = &cookie->iovad;
iovad 434 drivers/iommu/dma-iommu.c queue_iova(iovad, iova_pfn(iovad, iova),
iovad 435 drivers/iommu/dma-iommu.c size >> iova_shift(iovad), 0);
iovad 437 drivers/iommu/dma-iommu.c free_iova_fast(iovad, iova_pfn(iovad, iova),
iovad 438 drivers/iommu/dma-iommu.c size >> iova_shift(iovad));
iovad 446 drivers/iommu/dma-iommu.c struct iova_domain *iovad = &cookie->iovad;
iovad 447 drivers/iommu/dma-iommu.c size_t iova_off = iova_offset(iovad, dma_addr);
iovad 452 drivers/iommu/dma-iommu.c size = iova_align(iovad, size + iova_off);
iovad 468 drivers/iommu/dma-iommu.c struct iova_domain *iovad = &cookie->iovad;
iovad 469 drivers/iommu/dma-iommu.c size_t iova_off = iova_offset(iovad, phys);
iovad 472 drivers/iommu/dma-iommu.c size = iova_align(iovad, size + iova_off);
iovad 569 drivers/iommu/dma-iommu.c struct iova_domain *iovad = &cookie->iovad;
iovad 597 drivers/iommu/dma-iommu.c size = iova_align(iovad, size);
iovad 815 drivers/iommu/dma-iommu.c struct iova_domain *iovad = &cookie->iovad;
iovad 833 drivers/iommu/dma-iommu.c size_t s_iova_off = iova_offset(iovad, s->offset);
iovad 840 drivers/iommu/dma-iommu.c s_length = iova_align(iovad, s_length + s_iova_off);
iovad 1532 drivers/iommu/intel-iommu.c static void iommu_flush_iova(struct iova_domain *iovad)
iovad 1537 drivers/iommu/intel-iommu.c domain = container_of(iovad, struct dmar_domain, iovad);
iovad 1830 drivers/iommu/intel-iommu.c copy_reserved_iova(&reserved_iova_list, &domain->iovad);
iovad 1854 drivers/iommu/intel-iommu.c init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
iovad 1856 drivers/iommu/intel-iommu.c err = init_iova_flush_queue(&domain->iovad,
iovad 1911 drivers/iommu/intel-iommu.c put_iova_domain(&domain->iovad);
iovad 2675 drivers/iommu/intel-iommu.c if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn),
iovad 3385 drivers/iommu/intel-iommu.c iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
iovad 3390 drivers/iommu/intel-iommu.c iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
iovad 3545 drivers/iommu/intel-iommu.c free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
iovad 3598 drivers/iommu/intel-iommu.c !has_iova_flush_queue(&domain->iovad)) {
iovad 3602 drivers/iommu/intel-iommu.c free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
iovad 3605 drivers/iommu/intel-iommu.c queue_iova(&domain->iovad, iova_pfn, nrpages,
iovad 3758 drivers/iommu/intel-iommu.c free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
iovad 3886 drivers/iommu/intel-iommu.c free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
iovad 4677 drivers/iommu/intel-iommu.c iova = find_iova(&si_domain->iovad, start_vpfn);
iovad 4684 drivers/iommu/intel-iommu.c iova = split_and_remove_iova(&si_domain->iovad, iova,
iovad 4734 drivers/iommu/intel-iommu.c free_cpu_cached_iovas(cpu, &domain->iovad);
iovad 5142 drivers/iommu/intel-iommu.c init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
iovad 5184 drivers/iommu/intel-iommu.c init_iova_flush_queue(&dmar_domain->iovad,
iovad 5793 drivers/iommu/intel-iommu.c WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end));
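The amd_iommu.c, dma-iommu.c and intel-iommu.c call sites above all follow the same fast-path pattern: round the request up to whole granules, take a PFN range with alloc_iova_fast(), and hand it back on unmap with free_iova_fast() or, when a flush queue is in use, queue_iova(). The following is a minimal sketch of that pattern, not any driver's actual code; my_dma_domain, my_map_range() and my_unmap_range() are hypothetical names.

    #include <linux/iova.h>

    /* Hypothetical owner; the real drivers embed iova_domain in
     * dma_ops_domain, iommu_dma_cookie or dmar_domain. */
    struct my_dma_domain {
            struct iova_domain iovad;
    };

    static dma_addr_t my_map_range(struct my_dma_domain *dom, size_t size,
                                   u64 dma_mask)
    {
            unsigned long shift = iova_shift(&dom->iovad);
            unsigned long pages = iova_align(&dom->iovad, size) >> shift;
            unsigned long pfn;

            /* 'true' lets the allocator flush the per-CPU rcaches and
             * retry once if the first attempt fails. */
            pfn = alloc_iova_fast(&dom->iovad, pages, dma_mask >> shift, true);
            if (!pfn)
                    return 0;       /* 0 signals failure in this sketch */

            /* ... iommu_map() the range starting at (pfn << shift) ... */
            return (dma_addr_t)pfn << shift;
    }

    static void my_unmap_range(struct my_dma_domain *dom, dma_addr_t dma_addr,
                               size_t size)
    {
            unsigned long shift = iova_shift(&dom->iovad);
            unsigned long pages = iova_align(&dom->iovad, size) >> shift;

            /* ... iommu_unmap() the range here ... */

            if (has_iova_flush_queue(&dom->iovad))
                    /* defer the free until after the next IOTLB flush */
                    queue_iova(&dom->iovad, dma_addr >> shift, pages, 0);
            else
                    free_iova_fast(&dom->iovad, dma_addr >> shift, pages);
    }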
iovad 18 drivers/iommu/iova.c static bool iova_rcache_insert(struct iova_domain *iovad,
iovad 21 drivers/iommu/iova.c static unsigned long iova_rcache_get(struct iova_domain *iovad,
iovad 24 drivers/iommu/iova.c static void init_iova_rcaches(struct iova_domain *iovad);
iovad 25 drivers/iommu/iova.c static void free_iova_rcaches(struct iova_domain *iovad);
iovad 26 drivers/iommu/iova.c static void fq_destroy_all_entries(struct iova_domain *iovad);
iovad 30 drivers/iommu/iova.c init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad 40 drivers/iommu/iova.c spin_lock_init(&iovad->iova_rbtree_lock);
iovad 41 drivers/iommu/iova.c iovad->rbroot = RB_ROOT;
iovad 42 drivers/iommu/iova.c iovad->cached_node = &iovad->anchor.node;
iovad 43 drivers/iommu/iova.c iovad->cached32_node = &iovad->anchor.node;
iovad 44 drivers/iommu/iova.c iovad->granule = granule;
iovad 45 drivers/iommu/iova.c iovad->start_pfn = start_pfn;
iovad 46 drivers/iommu/iova.c iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
iovad 47 drivers/iommu/iova.c iovad->max32_alloc_size = iovad->dma_32bit_pfn;
iovad 48 drivers/iommu/iova.c iovad->flush_cb = NULL;
iovad 49 drivers/iommu/iova.c iovad->fq = NULL;
iovad 50 drivers/iommu/iova.c iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
iovad 51 drivers/iommu/iova.c rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
iovad 52 drivers/iommu/iova.c rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
iovad 53 drivers/iommu/iova.c init_iova_rcaches(iovad);
iovad 57 drivers/iommu/iova.c bool has_iova_flush_queue(struct iova_domain *iovad)
iovad 59 drivers/iommu/iova.c return !!iovad->fq;
iovad 62 drivers/iommu/iova.c static void free_iova_flush_queue(struct iova_domain *iovad)
iovad 64 drivers/iommu/iova.c if (!has_iova_flush_queue(iovad))
iovad 67 drivers/iommu/iova.c if (timer_pending(&iovad->fq_timer))
iovad 68 drivers/iommu/iova.c del_timer(&iovad->fq_timer);
iovad 70 drivers/iommu/iova.c fq_destroy_all_entries(iovad);
iovad 72 drivers/iommu/iova.c free_percpu(iovad->fq);
iovad 74 drivers/iommu/iova.c iovad->fq = NULL;
iovad 75 drivers/iommu/iova.c iovad->flush_cb = NULL;
iovad 76 drivers/iommu/iova.c iovad->entry_dtor = NULL;
iovad 79 drivers/iommu/iova.c int init_iova_flush_queue(struct iova_domain *iovad,
iovad 85 drivers/iommu/iova.c atomic64_set(&iovad->fq_flush_start_cnt, 0);
iovad 86 drivers/iommu/iova.c atomic64_set(&iovad->fq_flush_finish_cnt, 0);
iovad 92 drivers/iommu/iova.c iovad->flush_cb = flush_cb;
iovad 93 drivers/iommu/iova.c iovad->entry_dtor = entry_dtor;
iovad 107 drivers/iommu/iova.c iovad->fq = queue;
iovad 109 drivers/iommu/iova.c timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
iovad 110 drivers/iommu/iova.c atomic_set(&iovad->fq_timer_on, 0);
iovad 117 drivers/iommu/iova.c __get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
iovad 119 drivers/iommu/iova.c if (limit_pfn <= iovad->dma_32bit_pfn)
iovad 120 drivers/iommu/iova.c return iovad->cached32_node;
iovad 122 drivers/iommu/iova.c return iovad->cached_node;
iovad 126 drivers/iommu/iova.c __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
iovad 128 drivers/iommu/iova.c if (new->pfn_hi < iovad->dma_32bit_pfn)
iovad 129 drivers/iommu/iova.c iovad->cached32_node = &new->node;
iovad 131 drivers/iommu/iova.c iovad->cached_node = &new->node;
iovad 135 drivers/iommu/iova.c __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
iovad 139 drivers/iommu/iova.c cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
iovad 141 drivers/iommu/iova.c (free->pfn_hi < iovad->dma_32bit_pfn &&
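The init_iova_domain() and init_iova_flush_queue() internals above are what the setup sites in amd_iommu.c, dma-iommu.c and intel-iommu.c call in pairs. A minimal setup/teardown sketch follows; my_domain, my_flush_iotlb(), my_domain_init() and my_domain_exit() are hypothetical, and the flush callback only mirrors the shape of iova_domain_flush_tlb() and iommu_flush_iova() above.

    #include <linux/iova.h>

    struct my_domain {
            struct iova_domain iovad;       /* embedded, as in dmar_domain */
    };

    /* Flush callback, invoked from the flush-queue timer: recover the owner
     * with container_of() and flush its IOTLB. */
    static void my_flush_iotlb(struct iova_domain *iovad)
    {
            struct my_domain *dom = container_of(iovad, struct my_domain, iovad);

            /* ... issue the hardware IOTLB invalidation for dom ... */
            (void)dom;
    }

    static int my_domain_init(struct my_domain *dom, unsigned long start_pfn)
    {
            /* granule is the smallest mapping size this domain will use */
            init_iova_domain(&dom->iovad, PAGE_SIZE, start_pfn);

            /* optional deferred-flush queue; NULL = no per-entry destructor */
            return init_iova_flush_queue(&dom->iovad, my_flush_iotlb, NULL);
    }

    static void my_domain_exit(struct my_domain *dom)
    {
            /* tears down the flush queue, the rcaches and the rbtree */
            put_iova_domain(&dom->iovad);
    }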
iovad 143 drivers/iommu/iova.c iovad->cached32_node = rb_next(&free->node);
iovad 144 drivers/iommu/iova.c iovad->max32_alloc_size = iovad->dma_32bit_pfn;
iovad 147 drivers/iommu/iova.c cached_iova = rb_entry(iovad->cached_node, struct iova, node);
iovad 149 drivers/iommu/iova.c iovad->cached_node = rb_next(&free->node);
iovad 180 drivers/iommu/iova.c static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
iovad 194 drivers/iommu/iova.c spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
iovad 195 drivers/iommu/iova.c if (limit_pfn <= iovad->dma_32bit_pfn &&
iovad 196 drivers/iommu/iova.c size >= iovad->max32_alloc_size)
iovad 199 drivers/iommu/iova.c curr = __get_cached_rbnode(iovad, limit_pfn);
iovad 209 drivers/iommu/iova.c if (limit_pfn < size || new_pfn < iovad->start_pfn) {
iovad 210 drivers/iommu/iova.c iovad->max32_alloc_size = size;
iovad 219 drivers/iommu/iova.c iova_insert_rbtree(&iovad->rbroot, new, prev);
iovad 220 drivers/iommu/iova.c __cached_rbnode_insert_update(iovad, new);
iovad 222 drivers/iommu/iova.c spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
iovad 226 drivers/iommu/iova.c spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
iovad 294 drivers/iommu/iova.c alloc_iova(struct iova_domain *iovad, unsigned long size,
iovad 305 drivers/iommu/iova.c ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
iovad 318 drivers/iommu/iova.c private_find_iova(struct iova_domain *iovad, unsigned long pfn)
iovad 320 drivers/iommu/iova.c struct rb_node *node = iovad->rbroot.rb_node;
iovad 322 drivers/iommu/iova.c assert_spin_locked(&iovad->iova_rbtree_lock);
iovad 338 drivers/iommu/iova.c static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
iovad 340 drivers/iommu/iova.c assert_spin_locked(&iovad->iova_rbtree_lock);
iovad 341 drivers/iommu/iova.c __cached_rbnode_delete_update(iovad, iova);
iovad 342 drivers/iommu/iova.c rb_erase(&iova->node, &iovad->rbroot);
iovad 353 drivers/iommu/iova.c struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
iovad 359 drivers/iommu/iova.c spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
iovad 360 drivers/iommu/iova.c iova = private_find_iova(iovad, pfn);
iovad 361 drivers/iommu/iova.c spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
iovad 373 drivers/iommu/iova.c __free_iova(struct iova_domain *iovad, struct iova *iova)
iovad 377 drivers/iommu/iova.c spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
iovad 378 drivers/iommu/iova.c private_free_iova(iovad, iova);
iovad 379 drivers/iommu/iova.c spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
iovad 391 drivers/iommu/iova.c free_iova(struct iova_domain *iovad, unsigned long pfn)
iovad 393 drivers/iommu/iova.c struct iova *iova = find_iova(iovad, pfn);
iovad 396 drivers/iommu/iova.c __free_iova(iovad, iova);
iovad 412 drivers/iommu/iova.c alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
iovad 418 drivers/iommu/iova.c iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
iovad 423 drivers/iommu/iova.c new_iova = alloc_iova(iovad, size, limit_pfn, true);
iovad 433 drivers/iommu/iova.c free_cpu_cached_iovas(cpu, iovad);
iovad 450 drivers/iommu/iova.c free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
iovad 452 drivers/iommu/iova.c if (iova_rcache_insert(iovad, pfn, size))
iovad 455 drivers/iommu/iova.c free_iova(iovad, pfn);
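alloc_iova(), find_iova(), __free_iova() and free_iova() above form the uncached slow path; scif_rma.c further down uses it directly. A short sketch of that usage, with slow_path_demo() as a hypothetical helper:

    #include <linux/errno.h>
    #include <linux/iova.h>

    static int slow_path_demo(struct iova_domain *iovad, unsigned long num_pages,
                              unsigned long limit_pfn)
    {
            struct iova *iova;

            /* last argument requests a size-aligned allocation */
            iova = alloc_iova(iovad, num_pages, limit_pfn, true);
            if (!iova)
                    return -ENOMEM;

            /* ... use the PFN range [iova->pfn_lo, iova->pfn_hi] ... */

            /* free_iova() re-looks the range up by PFN; with the struct iova
             * still in hand, __free_iova(iovad, iova) skips the lookup. */
            free_iova(iovad, iova->pfn_lo);
            return 0;
    }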
iovad 479 drivers/iommu/iova.c static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
iovad 481 drivers/iommu/iova.c u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
iovad 491 drivers/iommu/iova.c if (iovad->entry_dtor)
iovad 492 drivers/iommu/iova.c iovad->entry_dtor(fq->entries[idx].data);
iovad 494 drivers/iommu/iova.c free_iova_fast(iovad,
iovad 502 drivers/iommu/iova.c static void iova_domain_flush(struct iova_domain *iovad)
iovad 504 drivers/iommu/iova.c atomic64_inc(&iovad->fq_flush_start_cnt);
iovad 505 drivers/iommu/iova.c iovad->flush_cb(iovad);
iovad 506 drivers/iommu/iova.c atomic64_inc(&iovad->fq_flush_finish_cnt);
iovad 509 drivers/iommu/iova.c static void fq_destroy_all_entries(struct iova_domain *iovad)
iovad 518 drivers/iommu/iova.c if (!iovad->entry_dtor)
iovad 522 drivers/iommu/iova.c struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
iovad 526 drivers/iommu/iova.c iovad->entry_dtor(fq->entries[idx].data);
iovad 532 drivers/iommu/iova.c struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
iovad 535 drivers/iommu/iova.c atomic_set(&iovad->fq_timer_on, 0);
iovad 536 drivers/iommu/iova.c iova_domain_flush(iovad);
iovad 542 drivers/iommu/iova.c fq = per_cpu_ptr(iovad->fq, cpu);
iovad 544 drivers/iommu/iova.c fq_ring_free(iovad, fq);
iovad 549 drivers/iommu/iova.c void queue_iova(struct iova_domain *iovad,
iovad 553 drivers/iommu/iova.c struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
iovad 564 drivers/iommu/iova.c fq_ring_free(iovad, fq);
iovad 567 drivers/iommu/iova.c iova_domain_flush(iovad);
iovad 568 drivers/iommu/iova.c fq_ring_free(iovad, fq);
iovad 576 drivers/iommu/iova.c fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
iovad 581 drivers/iommu/iova.c if (!atomic_read(&iovad->fq_timer_on) &&
iovad 582 drivers/iommu/iova.c !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
iovad 583 drivers/iommu/iova.c mod_timer(&iovad->fq_timer,
iovad 593 drivers/iommu/iova.c void put_iova_domain(struct iova_domain *iovad)
iovad 597 drivers/iommu/iova.c free_iova_flush_queue(iovad);
iovad 598 drivers/iommu/iova.c free_iova_rcaches(iovad);
iovad 599 drivers/iommu/iova.c rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
iovad 630 drivers/iommu/iova.c __insert_new_range(struct iova_domain *iovad,
iovad 637 drivers/iommu/iova.c iova_insert_rbtree(&iovad->rbroot, iova, NULL);
iovad 661 drivers/iommu/iova.c reserve_iova(struct iova_domain *iovad,
iovad 670 drivers/iommu/iova.c if (WARN_ON((pfn_hi | pfn_lo) > (ULLONG_MAX >> iova_shift(iovad))))
iovad 673 drivers/iommu/iova.c spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
iovad 674 drivers/iommu/iova.c for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
iovad 690 drivers/iommu/iova.c iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
iovad 693 drivers/iommu/iova.c spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
iovad 729 drivers/iommu/iova.c split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
iovad 735 drivers/iommu/iova.c spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
iovad 747 drivers/iommu/iova.c __cached_rbnode_delete_update(iovad, iova);
iovad 748 drivers/iommu/iova.c rb_erase(&iova->node, &iovad->rbroot);
iovad 751 drivers/iommu/iova.c iova_insert_rbtree(&iovad->rbroot, prev, NULL);
iovad 755 drivers/iommu/iova.c iova_insert_rbtree(&iovad->rbroot, next, NULL);
iovad 758 drivers/iommu/iova.c spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
iovad 763 drivers/iommu/iova.c spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
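reserve_iova() above is used to carve fixed ranges out of a domain before normal allocation starts; iova_reserve_pci_windows() in dma-iommu.c above first converts bus addresses to PFNs with iova_pfn(). A sketch of that, with a hypothetical window and helper name:

    #include <linux/errno.h>
    #include <linux/iova.h>

    /* Keep the allocator away from a fixed bus-address window (for example a
     * PCI host-bridge window or an MSI doorbell range). */
    static int my_reserve_window(struct iova_domain *iovad, dma_addr_t start,
                                 dma_addr_t end)
    {
            unsigned long lo = iova_pfn(iovad, start);
            unsigned long hi = iova_pfn(iovad, end);

            /* reserve_iova() returns the reserving struct iova, or NULL */
            return reserve_iova(iovad, lo, hi) ? 0 : -ENOMEM;
    }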
iovad 801 drivers/iommu/iova.c iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
iovad 809 drivers/iommu/iova.c spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
iovad 812 drivers/iommu/iova.c struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
iovad 815 drivers/iommu/iova.c private_free_iova(iovad, iova);
iovad 818 drivers/iommu/iova.c spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
iovad 860 drivers/iommu/iova.c static void init_iova_rcaches(struct iova_domain *iovad)
iovad 868 drivers/iommu/iova.c rcache = &iovad->rcaches[i];
iovad 889 drivers/iommu/iova.c static bool __iova_rcache_insert(struct iova_domain *iovad,
iovad 930 drivers/iommu/iova.c iova_magazine_free_pfns(mag_to_free, iovad);
iovad 937 drivers/iommu/iova.c static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
iovad 945 drivers/iommu/iova.c return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
iovad 992 drivers/iommu/iova.c static unsigned long iova_rcache_get(struct iova_domain *iovad,
iovad 1001 drivers/iommu/iova.c return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
iovad 1007 drivers/iommu/iova.c static void free_iova_rcaches(struct iova_domain *iovad)
iovad 1015 drivers/iommu/iova.c rcache = &iovad->rcaches[i];
iovad 1030 drivers/iommu/iova.c void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
iovad 1038 drivers/iommu/iova.c rcache = &iovad->rcaches[i];
iovad 1041 drivers/iommu/iova.c iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
iovad 1042 drivers/iommu/iova.c iova_magazine_free_pfns(cpu_rcache->prev, iovad);
iovad 98 drivers/misc/mic/scif/scif_epd.c put_iova_domain(&ep->rma_info.iovad);
iovad 33 drivers/misc/mic/scif/scif_rma.c init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN);
iovad 1001 drivers/misc/mic/scif/scif_rma.c iova_ptr = reserve_iova(&ep->rma_info.iovad, page_index,
iovad 1006 drivers/misc/mic/scif/scif_rma.c iova_ptr = alloc_iova(&ep->rma_info.iovad, num_pages,
iovad 1029 drivers/misc/mic/scif/scif_rma.c free_iova(&ep->rma_info.iovad, offset >> PAGE_SHIFT);
iovad 102 drivers/misc/mic/scif/scif_rma.h struct iova_domain iovad;
iovad 494 include/linux/intel-iommu.h struct iova_domain iovad; /* iova's that belong to this domain */
iovad 105 include/linux/iova.h static inline unsigned long iova_shift(struct iova_domain *iovad)
iovad 107 include/linux/iova.h return __ffs(iovad->granule);
iovad 110 include/linux/iova.h static inline unsigned long iova_mask(struct iova_domain *iovad)
iovad 112 include/linux/iova.h return iovad->granule - 1;
iovad 115 include/linux/iova.h static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
iovad 117 include/linux/iova.h return iova & iova_mask(iovad);
iovad 120 include/linux/iova.h static inline size_t iova_align(struct iova_domain *iovad, size_t size)
iovad 122 include/linux/iova.h return ALIGN(size, iovad->granule);
iovad 125 include/linux/iova.h static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
iovad 127 include/linux/iova.h return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
iovad 130 include/linux/iova.h static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
iovad 132 include/linux/iova.h return iova >> iova_shift(iovad);
iovad 141 include/linux/iova.h void free_iova(struct iova_domain *iovad, unsigned long pfn);
iovad 142 include/linux/iova.h void __free_iova(struct iova_domain *iovad, struct iova *iova);
iovad 143 include/linux/iova.h struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
iovad 146 include/linux/iova.h void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
iovad 148 include/linux/iova.h void queue_iova(struct iova_domain *iovad,
iovad 151 include/linux/iova.h unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
iovad 153 include/linux/iova.h struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
iovad 156 include/linux/iova.h void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad 158 include/linux/iova.h bool has_iova_flush_queue(struct iova_domain *iovad);
iovad 159 include/linux/iova.h int init_iova_flush_queue(struct iova_domain *iovad,
iovad 161 include/linux/iova.h struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
iovad 162 include/linux/iova.h void put_iova_domain(struct iova_domain *iovad);
iovad 163 include/linux/iova.h struct iova *split_and_remove_iova(struct iova_domain *iovad,
iovad 165 include/linux/iova.h void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
iovad 185 include/linux/iova.h static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
iovad 189 include/linux/iova.h static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
iovad 193 include/linux/iova.h static inline struct iova *alloc_iova(struct iova_domain *iovad,
iovad 201 include/linux/iova.h static inline void free_iova_fast(struct iova_domain *iovad,
iovad 207 include/linux/iova.h static inline void queue_iova(struct iova_domain *iovad,
iovad 213 include/linux/iova.h static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
iovad 221 include/linux/iova.h static inline struct iova *reserve_iova(struct iova_domain *iovad,
iovad 233 include/linux/iova.h static inline void init_iova_domain(struct iova_domain *iovad,
iovad 239 include/linux/iova.h static inline bool has_iova_flush_queue(struct iova_domain *iovad)
iovad 244 include/linux/iova.h static inline int init_iova_flush_queue(struct iova_domain *iovad,
iovad 251 include/linux/iova.h static inline struct iova *find_iova(struct iova_domain *iovad,
iovad 257 include/linux/iova.h static inline void put_iova_domain(struct iova_domain *iovad)
iovad 261 include/linux/iova.h static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
iovad 270 include/linux/iova.h struct iova_domain *iovad)
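The include/linux/iova.h helpers above all derive from iovad->granule. A short sketch of how they relate; helper_demo() is a hypothetical function, not a kernel API:

    #include <linux/iova.h>

    static dma_addr_t helper_demo(struct iova_domain *iovad, dma_addr_t addr,
                                  size_t len)
    {
            unsigned long shift = iova_shift(iovad);    /* log2(granule) */
            size_t off = iova_offset(iovad, addr);      /* addr & (granule - 1) */
            size_t span = iova_align(iovad, len + off); /* whole granules */
            unsigned long pfn = iova_pfn(iovad, addr);  /* addr >> shift */

            (void)span;
            /* pfn << shift recovers the granule-aligned address; for an
             * allocated struct iova, iova_dma_addr() does the same from
             * iova->pfn_lo. */
            return (dma_addr_t)pfn << shift;
    }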