
Searched refs:iova (Results 1 – 84 of 84) sorted by relevance

/linux-4.4.14/drivers/iommu/
iova.c
52 struct iova *curr_iova = in __get_cached_rbnode()
53 container_of(iovad->cached32_node, struct iova, node); in __get_cached_rbnode()
61 unsigned long limit_pfn, struct iova *new) in __cached_rbnode_insert_update()
69 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) in __cached_rbnode_delete_update()
71 struct iova *cached_iova; in __cached_rbnode_delete_update()
77 cached_iova = container_of(curr, struct iova, node); in __cached_rbnode_delete_update()
81 struct iova *iova = container_of(node, struct iova, node); in __cached_rbnode_delete_update() local
84 if (node && iova->pfn_lo < iovad->dma_32bit_pfn) in __cached_rbnode_delete_update()
103 struct iova *new, bool size_aligned) in __alloc_and_insert_iova_range()
116 struct iova *curr_iova = container_of(curr, struct iova, node); in __alloc_and_insert_iova_range()
[all …]
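
The iova.c hits above are the generic IOVA allocator: allocations live in a red-black tree, with the last node below the 32-bit boundary cached to speed up the common case. Below is a minimal consumer-side sketch, assuming the linux-4.4 signatures declared in iova.h further down; the domain layout (granule, limits) is illustrative rather than taken from any driver.

    #include <linux/dma-mapping.h>
    #include <linux/iova.h>

    static struct iova_domain iovad;

    static void example_init(void)
    {
            /* granule = PAGE_SIZE, first usable pfn = 1, cache the 32-bit limit */
            init_iova_domain(&iovad, PAGE_SIZE, 1, DMA_BIT_MASK(32) >> PAGE_SHIFT);
    }

    static dma_addr_t example_alloc_one_page(void)
    {
            /* size-aligned allocation kept below the 32-bit boundary; release
             * later with __free_iova(&iovad, find_iova(&iovad, pfn)) */
            struct iova *iova = alloc_iova(&iovad, 1,
                                           DMA_BIT_MASK(32) >> PAGE_SHIFT, true);

            return iova ? (dma_addr_t)iova->pfn_lo << PAGE_SHIFT : 0;
    }
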
rockchip-iommu.c
259 static u32 rk_iova_dte_index(dma_addr_t iova) in rk_iova_dte_index() argument
261 return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT; in rk_iova_dte_index()
264 static u32 rk_iova_pte_index(dma_addr_t iova) in rk_iova_pte_index() argument
266 return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT; in rk_iova_pte_index()
269 static u32 rk_iova_page_offset(dma_addr_t iova) in rk_iova_page_offset() argument
271 return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT; in rk_iova_page_offset()
289 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova, in rk_iommu_zap_lines() argument
292 dma_addr_t iova_end = iova + size; in rk_iommu_zap_lines()
297 for (; iova < iova_end; iova += SPAGE_SIZE) in rk_iommu_zap_lines()
298 rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova); in rk_iommu_zap_lines()
[all …]
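
The three rk_iova_*() helpers slice a 32-bit IOVA into a directory index, a table index and a page offset. A worked example follows; the RK_IOVA_* mask and shift values are not visible in these hits, so the usual Rockchip 10/10/12 split used here is an assumption.

    /* Assumed layout: DTE mask 0xffc00000 (>> 22), PTE mask 0x003ff000
     * (>> 12), page mask 0x00000fff -- a 1024-entry directory of
     * 1024-entry page tables over 4 KiB pages. */
    dma_addr_t iova = 0x12345678;

    u32 dte_index = (iova & 0xffc00000) >> 22;  /* 0x048 */
    u32 pte_index = (iova & 0x003ff000) >> 12;  /* 0x345 */
    u32 page_off  =  iova & 0x00000fff;         /* 0x678 */
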
exynos-iommu.c
61 static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size) in sysmmu_page_offset() argument
63 return iova & (size - 1); in sysmmu_page_offset()
67 #define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE) argument
69 #define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE) argument
71 #define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE) argument
76 static u32 lv1ent_offset(sysmmu_iova_t iova) in lv1ent_offset() argument
78 return iova >> SECT_ORDER; in lv1ent_offset()
81 static u32 lv2ent_offset(sysmmu_iova_t iova) in lv2ent_offset() argument
83 return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); in lv2ent_offset()
141 static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova) in section_entry() argument
[all …]
tegra-gart.c
88 #define for_each_gart_pte(gart, iova) \ argument
89 for (iova = gart->iovmm_base; \
90 iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
91 iova += GART_PAGE_SIZE)
116 unsigned long iova; in do_gart_setup() local
118 for_each_gart_pte(gart, iova) in do_gart_setup()
119 gart_set_pte(gart, iova, data ? *(data++) : 0); in do_gart_setup()
128 unsigned long iova; in gart_dump_table() local
132 for_each_gart_pte(gart, iova) { in gart_dump_table()
135 pte = gart_read_pte(gart, iova); in gart_dump_table()
[all …]
io-pgtable-arm.c
264 unsigned long iova, size_t size, int lvl,
268 unsigned long iova, phys_addr_t paddr, in arm_lpae_init_pte() argument
287 tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); in arm_lpae_init_pte()
288 if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz)) in arm_lpae_init_pte()
307 static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, in __arm_lpae_map() argument
316 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); in __arm_lpae_map()
320 return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); in __arm_lpae_map()
343 return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); in __arm_lpae_map()
379 static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, in arm_lpae_map() argument
392 ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); in arm_lpae_map()
[all …]
dma-iommu.c
154 static struct iova *__alloc_iova(struct iova_domain *iovad, size_t size, in __alloc_iova()
173 struct iova *iova = find_iova(iovad, pfn); in __iommu_dma_unmap() local
176 if (WARN_ON(!iova)) in __iommu_dma_unmap()
179 size = iova_size(iova) << shift; in __iommu_dma_unmap()
183 __free_iova(iovad, iova); in __iommu_dma_unmap()
287 struct iova *iova; in iommu_dma_alloc() local
299 iova = __alloc_iova(iovad, size, dev->coherent_dma_mask); in iommu_dma_alloc()
300 if (!iova) in iommu_dma_alloc()
319 dma_addr = iova_dma_addr(iovad, iova); in iommu_dma_alloc()
331 __free_iova(iovad, iova); in iommu_dma_alloc()
[all …]
tegra-smmu.c
141 static unsigned int iova_pd_index(unsigned long iova) in iova_pd_index() argument
143 return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1); in iova_pd_index()
146 static unsigned int iova_pt_index(unsigned long iova) in iova_pt_index() argument
148 return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1); in iova_pt_index()
204 unsigned long iova) in smmu_flush_tlb_section() argument
209 SMMU_TLB_FLUSH_VA_SECTION(iova); in smmu_flush_tlb_section()
215 unsigned long iova) in smmu_flush_tlb_group() argument
220 SMMU_TLB_FLUSH_VA_GROUP(iova); in smmu_flush_tlb_group()
500 static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova, in tegra_smmu_set_pde() argument
503 unsigned int pd_index = iova_pd_index(iova); in tegra_smmu_set_pde()
[all …]
shmobile-iommu.c
220 static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova, in shmobile_iommu_map() argument
228 l1index = iova >> 20; in shmobile_iommu_map()
231 l2index = (iova >> 12) & 0xff; in shmobile_iommu_map()
240 l2index = (iova >> 12) & 0xf0; in shmobile_iommu_map()
265 unsigned long iova, size_t size) in shmobile_iommu_unmap() argument
273 l1index = iova >> 20; in shmobile_iommu_unmap()
274 if (!(iova & 0xfffff) && size >= SZ_1M) { in shmobile_iommu_unmap()
281 l2index = (iova >> 12) & 0xff; in shmobile_iommu_unmap()
306 dma_addr_t iova) in shmobile_iommu_iova_to_phys() argument
312 l1index = iova >> 20; in shmobile_iommu_iova_to_phys()
[all …]
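
shmobile_iommu_map() and friends use the classic ARM short-descriptor split: bits 31:20 pick the level-1 entry (1 MiB sections), bits 19:12 the level-2 entry (4 KiB pages). Worked numbers for the expressions above:

    /* for iova = 0x12345678: */
    l1index = 0x12345678 >> 20;           /* 0x123 -> 1 MiB section entry */
    l2index = (0x12345678 >> 12) & 0xff;  /* 0x45  -> 4 KiB page entry    */
    /* iova & 0xfffff == 0x45678, so the SZ_1M section path is not taken */
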
s390-iommu.c
269 static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova, in s390_iommu_map() argument
281 rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova, in s390_iommu_map()
288 dma_addr_t iova) in s390_iommu_iova_to_phys() argument
295 if (iova < domain->geometry.aperture_start || in s390_iommu_iova_to_phys()
296 iova > domain->geometry.aperture_end) in s390_iommu_iova_to_phys()
299 rtx = calc_rtx(iova); in s390_iommu_iova_to_phys()
300 sx = calc_sx(iova); in s390_iommu_iova_to_phys()
301 px = calc_px(iova); in s390_iommu_iova_to_phys()
319 unsigned long iova, size_t size) in s390_iommu_unmap() argument
326 paddr = s390_iommu_iova_to_phys(domain, iova); in s390_iommu_unmap()
[all …]
io-pgtable.h
29 void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
82 int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
84 int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
87 unsigned long iova);
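
These io_pgtable_ops hooks are the generic page-table abstraction: an IOMMU driver allocates an ops instance for a given format and then maps through the function pointers, exactly as the ipmmu-vmsa hits below do via domain->iop->map(). A sketch against the linux-4.4 interface; the cfg values and my_tlb_ops are illustrative assumptions.

    #include "io-pgtable.h"    /* private header inside drivers/iommu/ in 4.4 */

    static int example_pgtable_map(struct device *dev, unsigned long iova,
                                   phys_addr_t paddr)
    {
            struct io_pgtable_cfg cfg = {
                    .pgsize_bitmap = SZ_4K | SZ_2M,
                    .ias           = 32,            /* input (IOVA) address bits */
                    .oas           = 40,            /* output (phys) address bits */
                    .tlb           = &my_tlb_ops,   /* hypothetical flush hooks */
            };
            struct io_pgtable_ops *ops;

            ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &cfg, dev);
            if (!ops)
                    return -ENOMEM;

            return ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
    }
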
ipmmu-vmsa.c
280 static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf, in ipmmu_tlb_add_flush() argument
394 u32 iova; in ipmmu_domain_irq() local
400 iova = ipmmu_ctx_read(domain, IMEAR); in ipmmu_domain_irq()
413 iova); in ipmmu_domain_irq()
416 iova); in ipmmu_domain_irq()
427 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) in ipmmu_domain_irq()
432 status, iova); in ipmmu_domain_irq()
542 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, in ipmmu_map() argument
550 return domain->iop->map(domain->iop, iova, paddr, size, prot); in ipmmu_map()
553 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, in ipmmu_unmap() argument
[all …]
intel-iommu.c
464 struct iova *iova[HIGH_WATER_MARK]; member
1805 struct iova *iova; in dmar_init_reserved_ranges() local
1815 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), in dmar_init_reserved_ranges()
1817 if (!iova) { in dmar_init_reserved_ranges()
1830 iova = reserve_iova(&reserved_iova_list, in dmar_init_reserved_ranges()
1833 if (!iova) { in dmar_init_reserved_ranges()
3302 static struct iova *intel_alloc_iova(struct device *dev, in intel_alloc_iova()
3306 struct iova *iova = NULL; in intel_alloc_iova() local
3319 iova = alloc_iova(&domain->iovad, nrpages, in intel_alloc_iova()
3321 if (iova) in intel_alloc_iova()
[all …]
iommu.c
1271 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) in iommu_iova_to_phys() argument
1276 return domain->ops->iova_to_phys(domain, iova); in iommu_iova_to_phys()
1312 int iommu_map(struct iommu_domain *domain, unsigned long iova, in iommu_map() argument
1315 unsigned long orig_iova = iova; in iommu_map()
1335 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { in iommu_map()
1337 iova, &paddr, size, min_pagesz); in iommu_map()
1341 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); in iommu_map()
1344 size_t pgsize = iommu_pgsize(domain, iova | paddr, size); in iommu_map()
1347 iova, &paddr, pgsize); in iommu_map()
1349 ret = domain->ops->map(domain, iova, paddr, pgsize, prot); in iommu_map()
[all …]
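
iommu_map() first rejects any iova/paddr/size tuple that is not aligned to the smallest supported page size, then walks the range calling the driver's ->map() one supported page size at a time. A minimal sketch of the public API as of 4.4; the bus type and prot flags are illustrative choices.

    #include <linux/iommu.h>
    #include <linux/platform_device.h>

    static int example_iommu_map(struct device *dev, unsigned long iova,
                                 phys_addr_t paddr)
    {
            struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
            int ret;

            if (!domain)
                    return -ENOMEM;
            ret = iommu_attach_device(domain, dev);
            if (ret)
                    goto out_free;

            /* iova, paddr and size must share min-pagesize alignment, or the
             * IS_ALIGNED() check above fails the call with -EINVAL */
            ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
                            IOMMU_READ | IOMMU_WRITE);
            if (!ret)
                    iommu_unmap(domain, iova, PAGE_SIZE);

            iommu_detach_device(domain, dev);
    out_free:
            iommu_domain_free(domain);
            return ret;
    }
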
arm-smmu.c
584 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, in arm_smmu_tlb_inv_range_nosync() argument
598 iova &= ~12UL; in arm_smmu_tlb_inv_range_nosync()
599 iova |= ARM_SMMU_CB_ASID(cfg); in arm_smmu_tlb_inv_range_nosync()
600 writel_relaxed(iova, reg); in arm_smmu_tlb_inv_range_nosync()
603 iova >>= 12; in arm_smmu_tlb_inv_range_nosync()
604 iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48; in arm_smmu_tlb_inv_range_nosync()
605 writeq_relaxed(iova, reg); in arm_smmu_tlb_inv_range_nosync()
613 writeq_relaxed(iova >> 12, reg); in arm_smmu_tlb_inv_range_nosync()
631 unsigned long iova; in arm_smmu_context_fault() local
653 iova = far; in arm_smmu_context_fault()
[all …]
fsl_pamu_domain.c
67 static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) in get_phys_addr() argument
86 subwin_iova = iova & ~(subwin_size - 1); in get_phys_addr()
92 return win_ptr->paddr + (iova & (win_ptr->size - 1)); in get_phys_addr()
285 static int check_size(u64 size, dma_addr_t iova) in check_size() argument
297 if (iova & (size - 1)) { in check_size()
390 dma_addr_t iova) in fsl_pamu_iova_to_phys() argument
394 if (iova < domain->geometry.aperture_start || in fsl_pamu_iova_to_phys()
395 iova > domain->geometry.aperture_end) in fsl_pamu_iova_to_phys()
398 return get_phys_addr(dma_domain, iova); in fsl_pamu_iova_to_phys()
Makefile
7 obj-$(CONFIG_IOMMU_IOVA) += iova.o
arm-smmu-v3.c
1337 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, in arm_smmu_tlb_inv_range_nosync() argument
1345 .addr = iova, in arm_smmu_tlb_inv_range_nosync()
1686 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, in arm_smmu_map() argument
1698 ret = ops->map(ops, iova, paddr, size, prot); in arm_smmu_map()
1704 arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) in arm_smmu_unmap() argument
1715 ret = ops->unmap(ops, iova, size); in arm_smmu_unmap()
1721 arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) in arm_smmu_iova_to_phys() argument
1732 ret = ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys()
amd_iommu.c
3042 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, in amd_iommu_map() argument
3058 ret = iommu_map_page(domain, iova, paddr, prot, page_size); in amd_iommu_map()
3064 static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, in amd_iommu_unmap() argument
3074 unmap_size = iommu_unmap_page(domain, iova, page_size); in amd_iommu_unmap()
3083 dma_addr_t iova) in amd_iommu_iova_to_phys() argument
3090 return iova; in amd_iommu_iova_to_phys()
3092 pte = fetch_pte(domain, iova, &pte_pgsize); in amd_iommu_iova_to_phys()
3100 return (__pte & ~offset_mask) | (iova & offset_mask); in amd_iommu_iova_to_phys()
Kconfig
288 allocated from the IPMMU (iova) for DMA with this option
/linux-4.4.14/include/linux/
iova.h
20 struct iova { struct
36 static inline unsigned long iova_size(struct iova *iova) in iova_size() argument
38 return iova->pfn_hi - iova->pfn_lo + 1; in iova_size()
51 static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova) in iova_offset() argument
53 return iova & iova_mask(iovad); in iova_offset()
61 static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova) in iova_dma_addr() argument
63 return (dma_addr_t)iova->pfn_lo << iova_shift(iovad); in iova_dma_addr()
66 static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) in iova_pfn() argument
68 return iova >> iova_shift(iovad); in iova_pfn()
74 struct iova *alloc_iova_mem(void);
[all …]
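
The inline helpers above are the whole representation: a struct iova spans pfn_lo..pfn_hi in units of the domain granule, so size, DMA address and offset reduce to shift/mask arithmetic. Worked numbers, assuming iovad is a domain with a 4 KiB granule (iova_shift() == 12, iova_mask() == 0xfff):

    struct iova r = { .pfn_lo = 0x100, .pfn_hi = 0x103 };

    iova_size(&r);                 /* 0x103 - 0x100 + 1 = 4 pages  */
    iova_dma_addr(iovad, &r);      /* 0x100 << 12 = 0x100000       */
    iova_offset(iovad, 0x100a5c);  /* 0x100a5c & 0xfff = 0xa5c     */
    iova_pfn(iovad, 0x100a5c);     /* 0x100a5c >> 12 = 0x100       */
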
iommu.h
162 int (*map)(struct iommu_domain *domain, unsigned long iova,
164 size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
166 size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
168 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
216 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
218 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
220 extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
223 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
297 struct device *dev, unsigned long iova, int flags) in report_iommu_fault() argument
306 ret = domain->handler(domain, dev, iova, flags, in report_iommu_fault()
[all …]
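
report_iommu_fault() hands a faulting IOVA to whatever handler the domain owner registered with iommu_set_fault_handler(); the msm and remoteproc hits elsewhere in these results implement exactly this hook. A sketch with the 4.4 handler signature:

    static int example_fault(struct iommu_domain *domain, struct device *dev,
                             unsigned long iova, int flags, void *token)
    {
            dev_err(dev, "iommu fault at iova %#lx (flags %#x)\n", iova, flags);
            return -ENOSYS; /* non-zero: unhandled, the driver reports it */
    }

    /* registered with: iommu_set_fault_handler(domain, example_fault, NULL); */
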
/linux-4.4.14/include/trace/events/
iommu.h
88 TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
90 TP_ARGS(iova, paddr, size),
93 __field(u64, iova)
99 __entry->iova = iova;
105 __entry->iova, __entry->paddr, __entry->size
111 TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
113 TP_ARGS(iova, size, unmapped_size),
116 __field(u64, iova)
122 __entry->iova = iova;
128 __entry->iova, __entry->size, __entry->unmapped_size
[all …]
/linux-4.4.14/drivers/vfio/
vfio_iommu_type1.c
74 dma_addr_t iova; /* Device address */ member
98 if (start + size <= dma->iova) in vfio_find_dma()
100 else if (start >= dma->iova + dma->size) in vfio_find_dma()
118 if (new->iova + new->size <= dma->iova) in vfio_link_dma()
338 dma_addr_t iova = dma->iova, end = dma->iova + dma->size; in vfio_unmap_unpin() local
355 iommu_unmap(d->domain, dma->iova, dma->size); in vfio_unmap_unpin()
359 while (iova < end) { in vfio_unmap_unpin()
363 phys = iommu_iova_to_phys(domain->domain, iova); in vfio_unmap_unpin()
365 iova += PAGE_SIZE; in vfio_unmap_unpin()
375 !domain->fgsp && iova + len < end; len += PAGE_SIZE) { in vfio_unmap_unpin()
[all …]
vfio_iommu_spapr_tce.c
802 num = tce_iommu_find_table(container, param.iova, &tbl); in tce_iommu_ioctl()
823 ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr); in tce_iommu_ioctl()
829 param.iova >> tbl->it_page_shift, in tce_iommu_ioctl()
835 param.iova >> tbl->it_page_shift, in tce_iommu_ioctl()
865 num = tce_iommu_find_table(container, param.iova, &tbl); in tce_iommu_ioctl()
872 ret = iommu_tce_clear_param_check(tbl, param.iova, 0, in tce_iommu_ioctl()
878 param.iova >> tbl->it_page_shift, in tce_iommu_ioctl()
/linux-4.4.14/drivers/gpu/drm/msm/
msm_gem_submit.c
87 submit->bos[i].iova = submit_bo.presumed; in submit_lookup_objects()
133 submit->bos[i].iova = 0; in submit_unlock_unpin_bo()
148 uint32_t iova; in submit_validate_objects() local
166 submit->gpu->id, &iova); in submit_validate_objects()
179 if (iova == submit->bos[i].iova) { in submit_validate_objects()
182 submit->bos[i].iova = iova; in submit_validate_objects()
215 struct msm_gem_object **obj, uint32_t *iova, bool *valid) in submit_bo() argument
225 if (iova) in submit_bo()
226 *iova = submit->bos[idx].iova; in submit_bo()
261 uint32_t iova, off; in submit_reloc() local
[all …]
msm_iommu.c
28 unsigned long iova, int flags, void *arg) in msm_fault_handler() argument
30 pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags); in msm_fault_handler()
46 static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, in msm_iommu_map() argument
52 unsigned int da = iova; in msm_iommu_map()
63 VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); in msm_iommu_map()
75 da = iova; in msm_iommu_map()
85 static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, in msm_iommu_unmap() argument
91 unsigned int da = iova; in msm_iommu_unmap()
102 VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); in msm_iommu_unmap()
msm_gem.c
284 uint32_t *iova) in msm_gem_get_iova_locked() argument
289 if (!msm_obj->domain[id].iova) { in msm_gem_get_iova_locked()
306 msm_obj->domain[id].iova = offset; in msm_gem_get_iova_locked()
308 msm_obj->domain[id].iova = physaddr(obj); in msm_gem_get_iova_locked()
313 *iova = msm_obj->domain[id].iova; in msm_gem_get_iova_locked()
319 int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova) in msm_gem_get_iova() argument
327 if (msm_obj->domain[id].iova) { in msm_gem_get_iova()
328 *iova = msm_obj->domain[id].iova; in msm_gem_get_iova()
333 ret = msm_gem_get_iova_locked(obj, id, iova); in msm_gem_get_iova()
344 WARN_ON(!msm_obj->domain[id].iova); in msm_gem_iova()
[all …]
msm_gem.h
57 uint32_t iova; member
109 uint32_t iova; member
115 uint32_t iova; member
msm_rd.c
310 uint32_t iova = submit->cmd[i].iova; in msm_rd_dump_submit() local
315 buf += iova - submit->bos[idx].iova; in msm_rd_dump_submit()
318 (uint32_t[2]){ iova, szd * 4 }, 8); in msm_rd_dump_submit()
332 (uint32_t[2]){ iova, szd }, 8); in msm_rd_dump_submit()
msm_mmu.h
26 int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
28 int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
msm_fb.c
99 uint32_t iova; in msm_framebuffer_prepare() local
102 ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova); in msm_framebuffer_prepare()
103 DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret); in msm_framebuffer_prepare()
msm_drv.h
201 uint32_t *iova);
202 int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
msm_gpu.c
531 uint32_t iova; in msm_gpu_submit() local
536 submit->gpu->id, &iova); in msm_gpu_submit()
/linux-4.4.14/arch/arm/mm/
dma-mapping.c
1040 dma_addr_t iova; in __alloc_iova() local
1084 iova = mapping->base + (mapping_size * i); in __alloc_iova()
1085 iova += start << PAGE_SHIFT; in __alloc_iova()
1087 return iova; in __alloc_iova()
1251 dma_addr_t dma_addr, iova; in __iommu_create_mapping() local
1258 iova = dma_addr; in __iommu_create_mapping()
1271 ret = iommu_map(mapping->domain, iova, phys, len, in __iommu_create_mapping()
1275 iova += len; in __iommu_create_mapping()
1280 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); in __iommu_create_mapping()
1285 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) in __iommu_remove_mapping() argument
[all …]
/linux-4.4.14/arch/ia64/hp/common/
sba_iommu.c
223 dma_addr_t iova; member
436 #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase)) argument
669 sba_mark_invalid(ioc, d->iova, d->size); in sba_alloc_range()
670 sba_free_range(ioc, d->iova, d->size); in sba_alloc_range()
729 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) in sba_free_range() argument
731 unsigned long iovp = SBA_IOVP(ioc, iova); in sba_free_range()
755 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size, in sba_free_range()
847 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) in sba_mark_invalid() argument
849 u32 iovp = (u32) SBA_IOVP(ioc,iova); in sba_mark_invalid()
1016 sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) in sba_mark_clean() argument
[all …]
/linux-4.4.14/drivers/staging/rdma/ipath/
ipath_mr.c
148 mr->mr.iova = *iova_start; in ipath_reg_phys_mr()
215 mr->mr.iova = virt_addr; in ipath_reg_user_mr()
317 fmr->mr.iova = 0; in ipath_alloc_fmr()
348 int list_len, u64 iova) in ipath_map_phys_fmr() argument
363 fmr->mr.user_base = iova; in ipath_map_phys_fmr()
364 fmr->mr.iova = iova; in ipath_map_phys_fmr()
401 fmr->mr.iova = 0; in ipath_unmap_fmr()
ipath_keys.c
239 off = vaddr - mr->iova; in ipath_rkey_ok()
240 if (unlikely(vaddr < mr->iova || off + len > mr->length || in ipath_rkey_ok()
ipath_verbs.h
244 u64 iova; /* IB start address of this region */ member
845 int list_len, u64 iova);
/linux-4.4.14/arch/parisc/include/asm/
dma-mapping.h
11 void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
12 void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
13 void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
15 …void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction dir…
18 …void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t …
19 …void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size…
ropes.h
49 dma_addr_t iova; member
/linux-4.4.14/drivers/staging/rdma/hfi1/
mr.c
214 mr->mr.iova = *iova_start; in hfi1_reg_phys_mr()
276 mr->mr.iova = virt_addr; in hfi1_reg_user_mr()
434 int list_len, u64 iova) in hfi1_map_phys_fmr() argument
453 fmr->mr.user_base = iova; in hfi1_map_phys_fmr()
454 fmr->mr.iova = iova; in hfi1_map_phys_fmr()
490 fmr->mr.iova = 0; in hfi1_unmap_fmr()
keys.c
313 off = vaddr - mr->iova; in hfi1_rkey_ok()
314 if (unlikely(vaddr < mr->iova || off + len > mr->length || in hfi1_rkey_ok()
verbs.h
311 u64 iova; /* IB start address of this region */ member
1033 int list_len, u64 iova);
/linux-4.4.14/drivers/parisc/
sba_iommu.c
307 #define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask) argument
311 #define SBA_IOVP(ioc,iova) (iova) argument
489 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) in sba_free_range() argument
491 unsigned long iovp = SBA_IOVP(ioc, iova); in sba_free_range()
502 __func__, (uint) iova, size, in sba_free_range()
610 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) in sba_mark_invalid() argument
612 u32 iovp = (u32) SBA_IOVP(ioc,iova); in sba_mark_invalid()
793 sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, in sba_unmap_single() argument
803 DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size); in sba_unmap_single()
806 offset = iova & ~IOVP_MASK; in sba_unmap_single()
[all …]
ccio-dma.c
284 #define CCIO_IOVP(iova) ((iova) & IOVP_MASK) argument
432 ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped) in ccio_free_range() argument
434 unsigned long iovp = CCIO_IOVP(iova); in ccio_free_range()
664 ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) in ccio_mark_invalid() argument
666 u32 iovp = (u32)CCIO_IOVP(iova); in ccio_mark_invalid()
694 ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt); in ccio_mark_invalid()
799 ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size, in ccio_unmap_single() argument
804 dma_addr_t offset = iova & ~IOVP_MASK; in ccio_unmap_single()
810 __func__, (long)iova, size); in ccio_unmap_single()
812 iova ^= offset; /* clear offset bits */ in ccio_unmap_single()
[all …]
/linux-4.4.14/drivers/infiniband/hw/qib/
qib_mr.c
197 mr->mr.iova = *iova_start; in qib_reg_phys_mr()
259 mr->mr.iova = virt_addr; in qib_reg_user_mr()
444 int list_len, u64 iova) in qib_map_phys_fmr() argument
463 fmr->mr.user_base = iova; in qib_map_phys_fmr()
464 fmr->mr.iova = iova; in qib_map_phys_fmr()
500 fmr->mr.iova = 0; in qib_unmap_fmr()
qib_keys.c
292 off = vaddr - mr->iova; in qib_rkey_ok()
293 if (unlikely(vaddr < mr->iova || off + len > mr->length || in qib_rkey_ok()
371 mrg->user_base = mr->ibmr.iova; in qib_reg_mr()
372 mrg->iova = mr->ibmr.iova; in qib_reg_mr()
qib_verbs.h
300 u64 iova; /* IB start address of this region */ member
1059 int list_len, u64 iova);
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/
mr.c
420 u64 iova, u64 size, u32 access, int npages, in mlx4_mr_alloc_reserved() argument
423 mr->iova = iova; in mlx4_mr_alloc_reserved()
529 int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, in mlx4_mr_alloc() argument
539 err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size, in mlx4_mr_alloc()
592 u64 iova, u64 size, int npages, in mlx4_mr_rereg_mem_write() argument
601 mpt_entry->start = cpu_to_be64(iova); in mlx4_mr_rereg_mem_write()
651 mpt_entry->start = cpu_to_be64(mr->iova); in mlx4_mr_enable()
973 int npages, u64 iova) in mlx4_check_fmr() argument
983 if (iova & page_mask) in mlx4_check_fmr()
1000 int npages, u64 iova, u32 *lkey, u32 *rkey) in mlx4_map_phys_fmr() argument
[all …]
/linux-4.4.14/drivers/infiniband/hw/mthca/
mthca_mr.c
430 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) in mthca_mr_alloc() argument
469 mpt_entry->start = cpu_to_be64(iova); in mthca_mr_alloc()
521 int list_len, u64 iova, u64 total_size, in mthca_mr_alloc_phys() argument
536 err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova, in mthca_mr_alloc_phys()
690 int list_len, u64 iova) in mthca_check_fmr() argument
700 if (iova & page_mask) in mthca_check_fmr()
718 int list_len, u64 iova) in mthca_tavor_map_phys_fmr() argument
726 err = mthca_check_fmr(fmr, page_list, list_len, iova); in mthca_tavor_map_phys_fmr()
746 mpt_entry.start = cpu_to_be64(iova); in mthca_tavor_map_phys_fmr()
759 int list_len, u64 iova) in mthca_arbel_map_phys_fmr() argument
[all …]
mthca_dev.h
472 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr);
477 int list_len, u64 iova, u64 total_size,
484 int list_len, u64 iova);
487 int list_len, u64 iova);
/linux-4.4.14/drivers/gpu/drm/msm/mdp/mdp4/
mdp4_crtc.c
390 uint32_t iova = mdp4_crtc->cursor.next_iova; in update_cursor() local
395 msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova); in update_cursor()
401 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova); in update_cursor()
435 uint32_t iova; in mdp4_crtc_cursor_set() local
452 ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova); in mdp4_crtc_cursor_set()
456 iova = 0; in mdp4_crtc_cursor_set()
462 mdp4_crtc->cursor.next_iova = iova; in mdp4_crtc_cursor_set()
/linux-4.4.14/drivers/gpu/drm/msm/dsi/
dsi.h
92 bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
146 u32 iova, u32 len);
dsi_host.c
839 u32 iova; in dsi_tx_buf_alloc() local
851 ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova); in dsi_tx_buf_alloc()
858 if (iova & 0x07) { in dsi_tx_buf_alloc()
977 u32 iova; in dsi_cmd_dma_tx() local
980 ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova); in dsi_cmd_dma_tx()
991 msm_host->id, iova, len); in dsi_cmd_dma_tx()
1753 void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len) in msm_dsi_host_cmd_xfer_commit() argument
1757 dsi_write(msm_host, REG_DSI_DMA_BASE, iova); in msm_dsi_host_cmd_xfer_commit()
dsi_manager.c
777 bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len) in msm_dsi_manager_cmd_xfer_trigger() argument
787 msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len); in msm_dsi_manager_cmd_xfer_trigger()
789 msm_dsi_host_cmd_xfer_commit(host, iova, len); in msm_dsi_manager_cmd_xfer_trigger()
/linux-4.4.14/include/uapi/linux/
vfio.h
413 __u64 iova; /* IO virtual address */ member
432 __u64 iova; /* IO virtual address */ member
/linux-4.4.14/drivers/infiniband/hw/mlx4/
mr.c
250 mmr->mmr.iova = virt_addr; in mlx4_ib_rereg_user_mr()
481 int npages, u64 iova) in mlx4_ib_map_phys_fmr() argument
486 return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova, in mlx4_ib_map_phys_fmr()
mlx4_ib.h
770 u64 iova);
qp.c
2523 fseg->start_addr = cpu_to_be64(mr->ibmr.iova); in set_reg_seg()
/linux-4.4.14/drivers/infiniband/hw/usnic/
usnic_uiom.c
70 unsigned long iova, int flags, in usnic_uiom_dma_fault() argument
75 domain, iova, flags); in usnic_uiom_dma_fault()
/linux-4.4.14/include/linux/mlx4/
device.h
661 u64 iova; member
1072 int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
1361 int npages, u64 iova, u32 *lkey, u32 *rkey);
1478 u64 iova, u64 size, int npages,
/linux-4.4.14/drivers/staging/rdma/ehca/
ehca_iverbs.h
114 u64 *page_list, int list_len, u64 iova);
ehca_mrmw.c
858 u64 iova) in ehca_map_phys_fmr() argument
877 if (iova % e_fmr->fmr_page_size) { in ehca_map_phys_fmr()
880 iova, e_fmr->fmr_page_size); in ehca_map_phys_fmr()
899 (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size; in ehca_map_phys_fmr()
902 ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova, in ehca_map_phys_fmr()
917 "iova=%llx", ret, fmr, page_list, list_len, iova); in ehca_map_phys_fmr()
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/
mr.c
86 mr->iova = be64_to_cpu(in->seg.start_addr); in mlx5_core_create_mkey()
/linux-4.4.14/include/rdma/
ib_verbs.h
1363 u64 iova; member
1771 u64 iova);
2942 u64 iova) in ib_map_phys_fmr() argument
2944 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); in ib_map_phys_fmr()
3043 mr->iova = 0; in ib_map_mr_sg_zbva()
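
ib_map_phys_fmr() is the thin inline shown above: it forwards to the device's map_phys_fmr hook, and iova becomes the region's device-visible start address (stored into mr.iova by the ipath/qib/hfi1 implementations earlier). A fragment-style sketch; fmr and the page addresses are assumed to already exist and are placeholders.

    /* page_list entries must be aligned to the FMR page size, per the
     * mlx4/mthca check_fmr() hits above */
    u64 page_list[2] = { 0x1000, 0x2000 };  /* placeholder DMA page addrs */
    u64 iova = 0x80000000ULL;               /* device-visible region base */

    int ret = ib_map_phys_fmr(fmr, page_list, ARRAY_SIZE(page_list), iova);
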
/linux-4.4.14/net/sunrpc/xprtrdma/
frwr_ops.c
403 seg1->mr_base = mr->iova; in frwr_op_map()
svc_rdma_recvfrom.c
295 ctxt->sge[0].addr = frmr->mr->iova; in rdma_read_chunk_frmr()
/linux-4.4.14/drivers/gpu/drm/msm/adreno/
adreno_gpu.c
141 OUT_RING(ring, submit->cmd[i].iova); in adreno_submit()
/linux-4.4.14/drivers/remoteproc/
remoteproc_core.c
79 unsigned long iova, int flags, void *token) in rproc_iommu_fault()
83 dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags); in rproc_iommu_fault()
/linux-4.4.14/Documentation/
vfio.txt
205 dma_map.iova = 0; /* 1MB starting at 0x0 from device view */
354 dma_map.iova = 0; /* 1MB starting at 0x0 from device view */
357 /* Check here is .iova/.size are within DMA window from spapr_iommu_info */
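
The vfio.txt fragments above come from the canonical type1 mapping walkthrough; below is a fuller reconstruction using the iova/vaddr/size fields visible in the uapi vfio.h hits. container is assumed to be an already-configured VFIO container fd; includes (sys/mman.h, sys/ioctl.h, linux/vfio.h) omitted.

    struct vfio_iommu_type1_dma_map dma_map = { .argsz = sizeof(dma_map) };

    /* back the mapping with 1 MiB of anonymous memory */
    dma_map.vaddr = (__u64)(uintptr_t)mmap(NULL, 1024 * 1024,
                                           PROT_READ | PROT_WRITE,
                                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    dma_map.size  = 1024 * 1024;
    dma_map.iova  = 0;  /* 1MB starting at 0x0 from device view */
    dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;

    ioctl(container, VFIO_IOMMU_MAP_DMA, &dma_map);
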
/linux-4.4.14/arch/parisc/kernel/
pci-dma.c
582 void *vaddr, dma_addr_t iova) in pa11_dma_free_noncoherent() argument
/linux-4.4.14/drivers/infiniband/hw/mlx5/
odp.c
233 start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT; in pagefault_single_data_segment()
mr.c
833 mr->mmr.iova = virt_addr; in reg_umr()
qp.c
2002 seg->start_addr = cpu_to_be64(mr->ibmr.iova); in set_reg_mkey_seg()
/linux-4.4.14/drivers/infiniband/ulp/iser/
iser_memory.c
517 reg->sge.addr = mr->iova; in iser_fast_reg_mr()
/linux-4.4.14/drivers/infiniband/hw/cxgb3/
iwch_qp.c
161 wqe->fastreg.va_base_hi = cpu_to_be32(mhp->ibmr.iova >> 32); in build_memreg()
163 cpu_to_be32(mhp->ibmr.iova & 0xffffffff); in build_memreg()
/linux-4.4.14/include/linux/mlx5/
driver.h
337 u64 iova; member
/linux-4.4.14/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.c
2172 fbo = mr->ibmr.iova - mr->pages[0]; in ocrdma_build_reg()
2174 fast_reg->va_hi = upper_32_bits(mr->ibmr.iova); in ocrdma_build_reg()
2175 fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff); in ocrdma_build_reg()
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
qp.c
632 wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32); in build_memreg()
633 wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & in build_memreg()
/linux-4.4.14/drivers/infiniband/core/
verbs.c
1538 mr->iova = sg_dma_address(&sgl[0]); in ib_sg_to_pages()
/linux-4.4.14/drivers/misc/mic/scif/
scif_rma.c
1012 struct iova *iova_ptr; in scif_get_window_offset()
/linux-4.4.14/drivers/infiniband/hw/nes/
nes_verbs.c
3417 mr->ibmr.iova); in nes_post_send()
3464 (unsigned long long) mr->ibmr.iova, in nes_post_send()
/linux-4.4.14/drivers/infiniband/ulp/isert/
ib_isert.c
2582 sge->addr = mr->iova; in isert_fast_reg_mr()
/linux-4.4.14/drivers/infiniband/ulp/srp/
ib_srp.c
1366 srp_map_desc(state, desc->mr->iova, in srp_map_finish_fr()