Searched refs:iovad (Results 1 - 3 of 3) sorted by relevance

/linux-4.1.27/include/linux/iova.h
41 static inline unsigned long iova_shift(struct iova_domain *iovad) iova_shift() argument
43 return __ffs(iovad->granule); iova_shift()
46 static inline unsigned long iova_mask(struct iova_domain *iovad) iova_mask() argument
48 return iovad->granule - 1; iova_mask()
51 static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova) iova_offset() argument
53 return iova & iova_mask(iovad); iova_offset()
56 static inline size_t iova_align(struct iova_domain *iovad, size_t size) iova_align() argument
58 return ALIGN(size, iovad->granule); iova_align()
61 static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova) iova_dma_addr() argument
63 return (dma_addr_t)iova->pfn_lo << iova_shift(iovad); iova_dma_addr()
66 static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) iova_pfn() argument
68 return iova >> iova_shift(iovad); iova_pfn()
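
The inline helpers above all reduce to shift-and-mask arithmetic on the domain's power-of-two granule. A minimal userspace sketch of that arithmetic (the 4096-byte granule, the stand-in helpers, and the sample address are assumptions for illustration, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for iova_shift()/iova_mask()/iova_offset()/iova_pfn(),
 * assuming a 4 KiB granule (what intel-iommu passes as VTD_PAGE_SIZE). */
static const unsigned long granule = 4096;

static unsigned long shift(void)           { return __builtin_ctzl(granule); } /* __ffs() */
static unsigned long mask(void)            { return granule - 1; }
static unsigned long offset(uint64_t iova) { return iova & mask(); }
static unsigned long pfn(uint64_t iova)    { return iova >> shift(); }
static uint64_t align(uint64_t size)       { return (size + mask()) & ~(uint64_t)mask(); } /* ALIGN() */

int main(void)
{
	uint64_t iova = 0x12345678;

	printf("shift=%lu mask=0x%lx\n", shift(), mask());
	printf("offset=0x%lx pfn=0x%lx align(5000)=%llu\n",
	       offset(iova), pfn(iova), (unsigned long long)align(5000));
	return 0;
}
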
76 void free_iova(struct iova_domain *iovad, unsigned long pfn);
77 void __free_iova(struct iova_domain *iovad, struct iova *iova);
78 struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
81 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
84 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
86 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
87 void put_iova_domain(struct iova_domain *iovad);
88 struct iova *split_and_remove_iova(struct iova_domain *iovad,
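
Together, the prototypes above are the allocator's whole public interface: initialize a domain once, allocate pfn ranges from it, convert them to bus addresses with the inline helpers, and free them by pfn. A hedged sketch of that lifecycle (the granule, pfn limits, and naming are illustrative assumptions, not taken from any in-tree caller):

#include <linux/iova.h>
#include <linux/types.h>

static struct iova_domain example_iovad;

/* Illustrative constants: a 4 KiB granule, allocations starting at pfn 1,
 * and the last pfn below 4 GiB as the 32-bit boundary. */
#define EXAMPLE_GRANULE		4096UL
#define EXAMPLE_START_PFN	1UL
#define EXAMPLE_LIMIT_PFN	0xfffffUL

static void example_init(void)
{
	init_iova_domain(&example_iovad, EXAMPLE_GRANULE, EXAMPLE_START_PFN,
			 EXAMPLE_LIMIT_PFN);
}

static dma_addr_t example_map(unsigned long nrpages)
{
	/* Top-down, size-aligned allocation below the 32-bit boundary. */
	struct iova *iova = alloc_iova(&example_iovad, nrpages,
				       EXAMPLE_LIMIT_PFN, true);

	if (!iova)
		return 0;
	return iova_dma_addr(&example_iovad, iova);	/* pfn_lo << iova_shift() */
}

static void example_unmap(dma_addr_t addr)
{
	/* free_iova() is find_iova() + __free_iova() rolled into one. */
	free_iova(&example_iovad, iova_pfn(&example_iovad, addr));
}

static void example_exit(void)
{
	put_iova_domain(&example_iovad);	/* releases every remaining node */
}
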
/linux-4.1.27/drivers/iommu/iova.c
58 init_iova_domain(struct iova_domain *iovad, unsigned long granule, init_iova_domain() argument
68 spin_lock_init(&iovad->iova_rbtree_lock); init_iova_domain()
69 iovad->rbroot = RB_ROOT; init_iova_domain()
70 iovad->cached32_node = NULL; init_iova_domain()
71 iovad->granule = granule; init_iova_domain()
72 iovad->start_pfn = start_pfn; init_iova_domain()
73 iovad->dma_32bit_pfn = pfn_32bit; init_iova_domain()
77 __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) __get_cached_rbnode() argument
79 if ((*limit_pfn != iovad->dma_32bit_pfn) || __get_cached_rbnode()
80 (iovad->cached32_node == NULL)) __get_cached_rbnode()
81 return rb_last(&iovad->rbroot); __get_cached_rbnode()
83 struct rb_node *prev_node = rb_prev(iovad->cached32_node); __get_cached_rbnode()
85 container_of(iovad->cached32_node, struct iova, node); __get_cached_rbnode()
92 __cached_rbnode_insert_update(struct iova_domain *iovad, __cached_rbnode_insert_update() argument
95 if (limit_pfn != iovad->dma_32bit_pfn) __cached_rbnode_insert_update()
97 iovad->cached32_node = &new->node; __cached_rbnode_insert_update()
101 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) __cached_rbnode_delete_update() argument
106 if (!iovad->cached32_node) __cached_rbnode_delete_update()
108 curr = iovad->cached32_node; __cached_rbnode_delete_update()
116 if (node && iova->pfn_lo < iovad->dma_32bit_pfn) __cached_rbnode_delete_update()
117 iovad->cached32_node = node; __cached_rbnode_delete_update()
119 iovad->cached32_node = NULL; __cached_rbnode_delete_update()
138 static int __alloc_and_insert_iova_range(struct iova_domain *iovad, __alloc_and_insert_iova_range() argument
148 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); __alloc_and_insert_iova_range()
150 curr = __get_cached_rbnode(iovad, &limit_pfn); __alloc_and_insert_iova_range()
175 if ((iovad->start_pfn + size + pad_size) > limit_pfn) { __alloc_and_insert_iova_range()
176 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); __alloc_and_insert_iova_range()
195 entry = &iovad->rbroot.rb_node; __alloc_and_insert_iova_range()
213 rb_insert_color(&new->node, &iovad->rbroot); __alloc_and_insert_iova_range()
215 __cached_rbnode_insert_update(iovad, saved_pfn, new); __alloc_and_insert_iova_range()
217 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); __alloc_and_insert_iova_range()
246 * @iovad: - iova domain in question
250 * This function allocates an iova in the range iovad->start_pfn to limit_pfn,
251 * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned
256 alloc_iova(struct iova_domain *iovad, unsigned long size, alloc_iova() argument
273 ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, alloc_iova()
286 * @iovad: - iova domain in question.
291 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) find_iova() argument
297 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); find_iova()
298 node = iovad->rbroot.rb_node; find_iova()
304 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); find_iova()
320 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); find_iova()
326 * @iovad: iova domain in question.
331 __free_iova(struct iova_domain *iovad, struct iova *iova) __free_iova() argument
335 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); __free_iova()
336 __cached_rbnode_delete_update(iovad, iova); __free_iova()
337 rb_erase(&iova->node, &iovad->rbroot); __free_iova()
338 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); __free_iova()
344 * @iovad: - iova domain in question.
350 free_iova(struct iova_domain *iovad, unsigned long pfn) free_iova() argument
352 struct iova *iova = find_iova(iovad, pfn); free_iova()
354 __free_iova(iovad, iova); free_iova()
360 * @iovad: - iova domain in question.
363 void put_iova_domain(struct iova_domain *iovad) put_iova_domain() argument
368 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); put_iova_domain()
369 node = rb_first(&iovad->rbroot); put_iova_domain()
372 rb_erase(node, &iovad->rbroot); put_iova_domain()
374 node = rb_first(&iovad->rbroot); put_iova_domain()
376 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); put_iova_domain()
405 __insert_new_range(struct iova_domain *iovad, __insert_new_range() argument
412 iova_insert_rbtree(&iovad->rbroot, iova); __insert_new_range()
429 * @iovad: - iova domain pointer
436 reserve_iova(struct iova_domain *iovad, reserve_iova() argument
444 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); reserve_iova()
445 for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) { reserve_iova()
461 iova = __insert_new_range(iovad, pfn_lo, pfn_hi); reserve_iova()
464 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); reserve_iova()
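
reserve_iova() is how callers punch permanent holes in the allocatable space; intel-iommu uses it for identity-mapped regions (see line 2331 in intel-iommu.c below). A minimal hedged sketch (the pfn range is an illustrative assumption):

/* Reserve an inclusive pfn range so alloc_iova() can never hand out
 * anything that overlaps it. */
static int example_reserve(struct iova_domain *iovad)
{
	struct iova *iova = reserve_iova(iovad, 0x10000, 0x10fff);

	return iova ? 0 : -ENOMEM;
}
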
494 split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, split_and_remove_iova() argument
500 spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); split_and_remove_iova()
512 __cached_rbnode_delete_update(iovad, iova); split_and_remove_iova()
513 rb_erase(&iova->node, &iovad->rbroot); split_and_remove_iova()
516 iova_insert_rbtree(&iovad->rbroot, prev); split_and_remove_iova()
520 iova_insert_rbtree(&iovad->rbroot, next); split_and_remove_iova()
523 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); split_and_remove_iova()
528 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); split_and_remove_iova()
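
split_and_remove_iova() detaches a sub-range from an existing node, re-inserting whatever remains on either side; intel-iommu's memory hot-remove notifier (lines 4077/4084 below) uses it to carve offlined memory out of the identity-map domain. A hedged sketch of that pattern (names are illustrative; the real notifier also unmaps the carved range and releases the returned node):

static void example_punch_hole(struct iova_domain *iovad,
			       unsigned long pfn_lo, unsigned long pfn_hi)
{
	struct iova *iova, *carved;

	iova = find_iova(iovad, pfn_lo);	/* node covering the range, if any */
	if (!iova)
		return;

	/* Detach [pfn_lo, pfn_hi]: any remainder stays in the rbtree, the
	 * carved piece comes back already removed from the tree. */
	carved = split_and_remove_iova(iovad, iova, pfn_lo, pfn_hi);
	if (!carved)
		return;

	/* ... unmap carved->pfn_lo..carved->pfn_hi and release the node ... */
}
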
/linux-4.1.27/drivers/iommu/intel-iommu.c
323 struct iova_domain iovad; /* iova's that belong to this domain */ member in struct:dmar_domain
1688 copy_reserved_iova(&reserved_iova_list, &domain->iovad); domain_reserve_special_ranges()
1711 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, domain_init()
1775 put_iova_domain(&domain->iovad); domain_exit()
2331 if (!reserve_iova(&domain->iovad, dma_to_mm_pfn(first_vpfn), iommu_domain_identity_map()
2947 iova = alloc_iova(&domain->iovad, nrpages, intel_alloc_iova()
2952 iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1); intel_alloc_iova()
3105 __free_iova(&domain->iovad, iova); __intel_map_single()
3154 __free_iova(&deferred_flush[i].domain[j]->iovad, iova); flush_unmaps()
3216 iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr)); intel_unmap()
3233 __free_iova(&domain->iovad, iova); intel_unmap()
3382 __free_iova(&domain->iovad, iova); intel_map_sg()
4077 iova = find_iova(&si_domain->iovad, start_vpfn); intel_iommu_memory_notifier()
4084 iova = split_and_remove_iova(&si_domain->iovad, iova, intel_iommu_memory_notifier()
4345 init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN, md_domain_init()
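
The intel-iommu matches above trace the full lifecycle of an embedded iova_domain: initialized per dmar_domain with VTD_PAGE_SIZE (lines 1711 and 4345), pre-seeded with reserved ranges (1688, 2331), drawn from on every DMA map (2947, 2952), searched and released on unmap (3216, 3233), and torn down with the domain (1775). The allocation at 2947/2952 tries the 32-bit range first and only then the device's full DMA mask; a hedged paraphrase of that policy (the helper name and the pfn conversion, which assumes a granule equal to PAGE_SIZE, are illustrative, not the driver's exact code):

#include <linux/iova.h>
#include <linux/dma-mapping.h>

static struct iova *example_alloc_iova(struct iova_domain *iovad,
				       unsigned long nrpages, u64 dma_mask)
{
	struct iova *iova = NULL;

	if (dma_mask > DMA_BIT_MASK(32)) {
		/* Prefer an IOVA below 4 GiB, size-aligned. */
		iova = alloc_iova(iovad, nrpages,
				  DMA_BIT_MASK(32) >> PAGE_SHIFT, true);
		if (iova)
			return iova;
	}
	/* Fall back to the full range the device can address. */
	return alloc_iova(iovad, nrpages, dma_mask >> PAGE_SHIFT, true);
}
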

Completed in 111 milliseconds