
Searched refs:iova (Results 1 – 69 of 69), sorted by relevance

/linux-4.1.27/drivers/iommu/
iova.c
30 sizeof(struct iova), in iommu_iova_cache_init()
47 struct iova *alloc_iova_mem(void) in alloc_iova_mem()
52 void free_iova_mem(struct iova *iova) in free_iova_mem() argument
54 kmem_cache_free(iommu_iova_cache, iova); in free_iova_mem()
84 struct iova *curr_iova = in __get_cached_rbnode()
85 container_of(iovad->cached32_node, struct iova, node); in __get_cached_rbnode()
93 unsigned long limit_pfn, struct iova *new) in __cached_rbnode_insert_update()
101 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) in __cached_rbnode_delete_update()
103 struct iova *cached_iova; in __cached_rbnode_delete_update()
109 cached_iova = container_of(curr, struct iova, node); in __cached_rbnode_delete_update()
[all …]
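
The snippets above are the generic IOVA range allocator: a kmem_cache for struct iova plus a red-black tree with a cached 32-bit insertion point. A minimal usage sketch, assuming only the alloc_iova()/__free_iova() entry points this file exports (the caller context is hypothetical):

#include <linux/iova.h>
#include <linux/dma-mapping.h>

/* Carve a size-aligned window below the 32-bit boundary out of an
 * iova_domain, then hand it back to the rbtree. */
static void iova_window_demo(struct iova_domain *iovad, size_t size)
{
        unsigned long npages = size >> iova_shift(iovad);
        struct iova *win;

        win = alloc_iova(iovad, npages,
                         DMA_BIT_MASK(32) >> iova_shift(iovad), true);
        if (!win)
                return;         /* address space exhausted */

        /* ... program hardware with iova_dma_addr(iovad, win) ... */

        __free_iova(iovad, win);
}
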
rockchip-iommu.c
259 static u32 rk_iova_dte_index(dma_addr_t iova) in rk_iova_dte_index() argument
261 return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT; in rk_iova_dte_index()
264 static u32 rk_iova_pte_index(dma_addr_t iova) in rk_iova_pte_index() argument
266 return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT; in rk_iova_pte_index()
269 static u32 rk_iova_page_offset(dma_addr_t iova) in rk_iova_page_offset() argument
271 return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT; in rk_iova_page_offset()
289 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova, in rk_iommu_zap_lines() argument
292 dma_addr_t iova_end = iova + size; in rk_iommu_zap_lines()
297 for (; iova < iova_end; iova += SPAGE_SIZE) in rk_iommu_zap_lines()
298 rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova); in rk_iommu_zap_lines()
[all …]
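
The three rk_iova_*() helpers slice a 32-bit IOVA into a directory index, a page-table index and a page offset. A standalone restatement of that arithmetic; the 10/10/12-bit split (1024 DTEs x 1024 PTEs x 4 KiB pages) is inferred from the shifts, not copied from the driver's #defines:

#include <linux/kernel.h>
#include <linux/types.h>

static void rk_split_demo(dma_addr_t iova)
{
        u32 dte_index = (u32)iova >> 22;           /* bits 31:22 */
        u32 pte_index = ((u32)iova >> 12) & 0x3ff; /* bits 21:12 */
        u32 page_off  = (u32)iova & 0xfff;         /* bits 11:0  */

        pr_info("dte=%u pte=%u off=%#x\n", dte_index, pte_index, page_off);
}
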
exynos-iommu.c
60 static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size) in sysmmu_page_offset() argument
62 return iova & (size - 1); in sysmmu_page_offset()
66 #define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE) argument
68 #define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE) argument
70 #define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE) argument
75 static u32 lv1ent_offset(sysmmu_iova_t iova) in lv1ent_offset() argument
77 return iova >> SECT_ORDER; in lv1ent_offset()
80 static u32 lv2ent_offset(sysmmu_iova_t iova) in lv2ent_offset() argument
82 return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); in lv2ent_offset()
140 static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova) in section_entry() argument
[all …]
tegra-gart.c
88 #define for_each_gart_pte(gart, iova) \ argument
89 for (iova = gart->iovmm_base; \
90 iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
91 iova += GART_PAGE_SIZE)
116 unsigned long iova; in do_gart_setup() local
118 for_each_gart_pte(gart, iova) in do_gart_setup()
119 gart_set_pte(gart, iova, data ? *(data++) : 0); in do_gart_setup()
128 unsigned long iova; in gart_dump_table() local
132 for_each_gart_pte(gart, iova) { in gart_dump_table()
135 pte = gart_read_pte(gart, iova); in gart_dump_table()
[all …]
io-pgtable-arm.c
204 unsigned long iova, size_t size, int lvl,
208 unsigned long iova, phys_addr_t paddr, in arm_lpae_init_pte() argument
226 tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); in arm_lpae_init_pte()
227 if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz)) in arm_lpae_init_pte()
247 static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, in __arm_lpae_map() argument
256 ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); in __arm_lpae_map()
260 return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); in __arm_lpae_map()
286 return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); in __arm_lpae_map()
322 static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, in arm_lpae_map() argument
335 return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); in arm_lpae_map()
[all …]
shmobile-iommu.c
220 static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova, in shmobile_iommu_map() argument
228 l1index = iova >> 20; in shmobile_iommu_map()
231 l2index = (iova >> 12) & 0xff; in shmobile_iommu_map()
240 l2index = (iova >> 12) & 0xf0; in shmobile_iommu_map()
265 unsigned long iova, size_t size) in shmobile_iommu_unmap() argument
273 l1index = iova >> 20; in shmobile_iommu_unmap()
274 if (!(iova & 0xfffff) && size >= SZ_1M) { in shmobile_iommu_unmap()
281 l2index = (iova >> 12) & 0xff; in shmobile_iommu_unmap()
306 dma_addr_t iova) in shmobile_iommu_iova_to_phys() argument
312 l1index = iova >> 20; in shmobile_iommu_iova_to_phys()
[all …]
tegra-smmu.c
179 unsigned long iova) in smmu_flush_tlb_section() argument
184 SMMU_TLB_FLUSH_VA_SECTION(iova); in smmu_flush_tlb_section()
190 unsigned long iova) in smmu_flush_tlb_group() argument
195 SMMU_TLB_FLUSH_VA_GROUP(iova); in smmu_flush_tlb_group()
467 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, in as_get_pte() argument
471 u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff; in as_get_pte()
472 u32 pte = (iova >> SMMU_PTE_SHIFT) & 0x3ff; in as_get_pte()
494 smmu_flush_tlb_section(smmu, as->id, iova); in as_get_pte()
511 static void as_put_pte(struct tegra_smmu_as *as, dma_addr_t iova) in as_put_pte() argument
513 u32 pde = (iova >> SMMU_PDE_SHIFT) & 0x3ff; in as_put_pte()
[all …]
io-pgtable.h
28 void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
79 int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
81 int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
84 unsigned long iova);
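
These callbacks are the whole contract between an IOMMU driver and the io-pgtable formatters; ipmmu-vmsa.c below forwards straight into them. A hedged sketch of that pattern (demo_domain is a hypothetical wrapper):

#include <linux/errno.h>
#include "io-pgtable.h"         /* private header in drivers/iommu/ */

struct demo_domain {
        struct io_pgtable_ops *iop;
};

/* Forward an iommu_ops map request to the page-table code, as
 * ipmmu_map() does further down this page. */
static int demo_map(struct demo_domain *dom, unsigned long iova,
                    phys_addr_t paddr, size_t size, int prot)
{
        if (!dom->iop)
                return -ENODEV;

        return dom->iop->map(dom->iop, iova, paddr, size, prot);
}
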
iommu.c
987 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) in iommu_iova_to_phys() argument
992 return domain->ops->iova_to_phys(domain, iova); in iommu_iova_to_phys()
1028 int iommu_map(struct iommu_domain *domain, unsigned long iova, in iommu_map() argument
1031 unsigned long orig_iova = iova; in iommu_map()
1051 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) { in iommu_map()
1053 iova, &paddr, size, min_pagesz); in iommu_map()
1057 pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size); in iommu_map()
1060 size_t pgsize = iommu_pgsize(domain, iova | paddr, size); in iommu_map()
1063 iova, &paddr, pgsize); in iommu_map()
1065 ret = domain->ops->map(domain, iova, paddr, pgsize, prot); in iommu_map()
[all …]
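
iommu_map() checks that iova, paddr and size are aligned to the smallest page size the domain supports, then splits the request into hardware-sized chunks via iommu_pgsize(). A minimal consumer sketch using only the exported API (the fixed iova value is an arbitrary example):

#include <linux/device.h>
#include <linux/iommu.h>

static int map_demo(struct device *dev, phys_addr_t paddr)
{
        struct iommu_domain *domain;
        unsigned long iova = 0x10000000;
        int ret;

        domain = iommu_domain_alloc(dev->bus);
        if (!domain)
                return -ENOMEM;

        ret = iommu_attach_device(domain, dev);
        if (ret)
                goto out_free;

        ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
                        IOMMU_READ | IOMMU_WRITE);
        if (!ret) {
                WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);
                iommu_unmap(domain, iova, PAGE_SIZE);
        }

        iommu_detach_device(domain, dev);
out_free:
        iommu_domain_free(domain);
        return ret;
}
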
intel-iommu.c
387 struct iova *iova[HIGH_WATER_MARK]; member
1649 struct iova *iova; in dmar_init_reserved_ranges() local
1659 iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), in dmar_init_reserved_ranges()
1661 if (!iova) { in dmar_init_reserved_ranges()
1674 iova = reserve_iova(&reserved_iova_list, in dmar_init_reserved_ranges()
1677 if (!iova) { in dmar_init_reserved_ranges()
2932 static struct iova *intel_alloc_iova(struct device *dev, in intel_alloc_iova()
2936 struct iova *iova = NULL; in intel_alloc_iova() local
2947 iova = alloc_iova(&domain->iovad, nrpages, in intel_alloc_iova()
2949 if (iova) in intel_alloc_iova()
[all …]
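
dmar_init_reserved_ranges() pins down the IOAPIC window and PCI MMIO ranges so the allocator can never hand them out. The same idea in isolation, assuming only reserve_iova() from the generic allocator:

#include <linux/errno.h>
#include <linux/iova.h>

/* Keep the allocator away from a hardware hole. */
static int reserve_hole(struct iova_domain *iovad,
                        unsigned long pfn_lo, unsigned long pfn_hi)
{
        struct iova *iova = reserve_iova(iovad, pfn_lo, pfn_hi);

        return iova ? 0 : -ENODEV;
}
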
ipmmu-vmsa.c
280 static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf, in ipmmu_tlb_add_flush() argument
403 u32 iova; in ipmmu_domain_irq() local
409 iova = ipmmu_ctx_read(domain, IMEAR); in ipmmu_domain_irq()
422 iova); in ipmmu_domain_irq()
425 iova); in ipmmu_domain_irq()
436 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) in ipmmu_domain_irq()
441 status, iova); in ipmmu_domain_irq()
551 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, in ipmmu_map() argument
559 return domain->iop->map(domain->iop, iova, paddr, size, prot); in ipmmu_map()
562 static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, in ipmmu_unmap() argument
[all …]
arm-smmu.c
574 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, in arm_smmu_tlb_inv_range_nosync() argument
588 iova &= ~12UL; in arm_smmu_tlb_inv_range_nosync()
589 iova |= ARM_SMMU_CB_ASID(cfg); in arm_smmu_tlb_inv_range_nosync()
590 writel_relaxed(iova, reg); in arm_smmu_tlb_inv_range_nosync()
593 iova >>= 12; in arm_smmu_tlb_inv_range_nosync()
594 iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48; in arm_smmu_tlb_inv_range_nosync()
595 writeq_relaxed(iova, reg); in arm_smmu_tlb_inv_range_nosync()
603 writeq_relaxed(iova >> 12, reg); in arm_smmu_tlb_inv_range_nosync()
645 unsigned long iova; in arm_smmu_context_fault() local
667 iova = far; in arm_smmu_context_fault()
[all …]
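
Lines 593-595 build the 64-bit TLBIVA payload: the IOVA bits above the 4 KiB page in the low field, the ASID in bits 63:48. A compact restatement of that encoding (helper name is illustrative):

#include <linux/types.h>

static u64 tlbiva_encode(u64 iova, u16 asid)
{
        return (iova >> 12) | ((u64)asid << 48);
}
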
fsl_pamu_domain.c
67 static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova) in get_phys_addr() argument
86 subwin_iova = iova & ~(subwin_size - 1); in get_phys_addr()
92 return win_ptr->paddr + (iova & (win_ptr->size - 1)); in get_phys_addr()
285 static int check_size(u64 size, dma_addr_t iova) in check_size() argument
297 if (iova & (size - 1)) { in check_size()
390 dma_addr_t iova) in fsl_pamu_iova_to_phys() argument
394 if (iova < domain->geometry.aperture_start || in fsl_pamu_iova_to_phys()
395 iova > domain->geometry.aperture_end) in fsl_pamu_iova_to_phys()
398 return get_phys_addr(dma_domain, iova); in fsl_pamu_iova_to_phys()
Makefile
6 obj-$(CONFIG_IOMMU_IOVA) += iova.o
amd_iommu.c
3347 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova, in amd_iommu_map() argument
3363 ret = iommu_map_page(domain, iova, paddr, prot, page_size); in amd_iommu_map()
3369 static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, in amd_iommu_unmap() argument
3379 unmap_size = iommu_unmap_page(domain, iova, page_size); in amd_iommu_unmap()
3388 dma_addr_t iova) in amd_iommu_iova_to_phys() argument
3395 return iova; in amd_iommu_iova_to_phys()
3397 pte = fetch_pte(domain, iova, &pte_pgsize); in amd_iommu_iova_to_phys()
3405 return (__pte & ~offset_mask) | (iova & offset_mask); in amd_iommu_iova_to_phys()
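
amd_iommu_iova_to_phys() finishes the walk by merging the physical base from the PTE with the low IOVA bits that fall inside the page. The same step in isolation; the 2 MiB size is an assumed example:

#include <linux/types.h>

static phys_addr_t pte_to_phys(u64 pte, unsigned long iova, u64 pgsize)
{
        u64 offset_mask = pgsize - 1;   /* e.g. 0x1fffff for 2 MiB */

        return (pte & ~offset_mask) | (iova & offset_mask);
}
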
Kconfig
268 allocated from the IPMMU (iova) for DMA with this option
/linux-4.1.27/include/linux/
iova.h
20 struct iova { struct
36 static inline unsigned long iova_size(struct iova *iova) in iova_size() argument
38 return iova->pfn_hi - iova->pfn_lo + 1; in iova_size()
51 static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova) in iova_offset() argument
53 return iova & iova_mask(iovad); in iova_offset()
61 static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova) in iova_dma_addr() argument
63 return (dma_addr_t)iova->pfn_lo << iova_shift(iovad); in iova_dma_addr()
66 static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) in iova_pfn() argument
68 return iova >> iova_shift(iovad); in iova_pfn()
74 struct iova *alloc_iova_mem(void);
[all …]
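
Every helper above derives from the granule stored in the iova_domain, so they compose into clean round-trips. A sketch, assuming a 4 KiB granule (so iova_shift() == 12):

#include <linux/iova.h>
#include <linux/kernel.h>

static void helper_demo(struct iova_domain *iovad, struct iova *iova)
{
        dma_addr_t addr = iova_dma_addr(iovad, iova);   /* pfn_lo << 12 */
        size_t bytes = iova_size(iova) << iova_shift(iovad);

        WARN_ON(iova_pfn(iovad, addr) != iova->pfn_lo);
        WARN_ON(iova_offset(iovad, addr) != 0);         /* granule-aligned */
        (void)bytes;
}
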
iommu.h
147 int (*map)(struct iommu_domain *domain, unsigned long iova,
149 size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
151 size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
153 phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
196 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
198 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
200 extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
203 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
272 struct device *dev, unsigned long iova, int flags) in report_iommu_fault() argument
281 ret = domain->handler(domain, dev, iova, flags, in report_iommu_fault()
[all …]
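
report_iommu_fault() hands the faulting IOVA to whatever handler the domain owner registered with iommu_set_fault_handler(); msm_iommu.c below is a real example. A minimal registration sketch (handler body is illustrative; returning -ENOSYS tells the core the fault was not handled):

#include <linux/device.h>
#include <linux/iommu.h>

static int demo_fault_handler(struct iommu_domain *domain,
                              struct device *dev, unsigned long iova,
                              int flags, void *token)
{
        dev_warn_ratelimited(dev, "iommu fault at iova %#lx (flags %#x)\n",
                             iova, flags);
        return -ENOSYS;
}

/* after allocating the domain:
 *      iommu_set_fault_handler(domain, demo_fault_handler, NULL);
 */
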
/linux-4.1.27/include/trace/events/
iommu.h
88 TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
90 TP_ARGS(iova, paddr, size),
93 __field(u64, iova)
99 __entry->iova = iova;
105 __entry->iova, __entry->paddr, __entry->size
111 TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
113 TP_ARGS(iova, size, unmapped_size),
116 __field(u64, iova)
122 __entry->iova = iova;
128 __entry->iova, __entry->size, __entry->unmapped_size
[all …]
/linux-4.1.27/drivers/vfio/
vfio_iommu_type1.c
74 dma_addr_t iova; /* Device address */ member
98 if (start + size <= dma->iova) in vfio_find_dma()
100 else if (start >= dma->iova + dma->size) in vfio_find_dma()
118 if (new->iova + new->size <= dma->iova) in vfio_link_dma()
338 dma_addr_t iova = dma->iova, end = dma->iova + dma->size; in vfio_unmap_unpin() local
355 iommu_unmap(d->domain, dma->iova, dma->size); in vfio_unmap_unpin()
359 while (iova < end) { in vfio_unmap_unpin()
363 phys = iommu_iova_to_phys(domain->domain, iova); in vfio_unmap_unpin()
365 iova += PAGE_SIZE; in vfio_unmap_unpin()
375 !domain->fgsp && iova + len < end; len += PAGE_SIZE) { in vfio_unmap_unpin()
[all …]
vfio_iommu_spapr_tce.c
228 ret = iommu_tce_put_param_check(tbl, param.iova, tce); in tce_iommu_ioctl()
234 (param.iova >> IOMMU_PAGE_SHIFT_4K) + i, in tce_iommu_ioctl()
242 param.iova >> IOMMU_PAGE_SHIFT_4K, i); in tce_iommu_ioctl()
271 ret = iommu_tce_clear_param_check(tbl, param.iova, 0, in tce_iommu_ioctl()
277 param.iova >> IOMMU_PAGE_SHIFT_4K, in tce_iommu_ioctl()
/linux-4.1.27/drivers/gpu/drm/msm/
msm_gem_submit.c
87 submit->bos[i].iova = submit_bo.presumed; in submit_lookup_objects()
133 submit->bos[i].iova = 0; in submit_unlock_unpin_bo()
148 uint32_t iova; in submit_validate_objects() local
166 submit->gpu->id, &iova); in submit_validate_objects()
179 if (iova == submit->bos[i].iova) { in submit_validate_objects()
182 submit->bos[i].iova = iova; in submit_validate_objects()
215 struct msm_gem_object **obj, uint32_t *iova, bool *valid) in submit_bo() argument
225 if (iova) in submit_bo()
226 *iova = submit->bos[idx].iova; in submit_bo()
261 uint32_t iova, off; in submit_reloc() local
[all …]
msm_iommu.c
28 unsigned long iova, int flags, void *arg) in msm_fault_handler() argument
30 pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags); in msm_fault_handler()
46 static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, in msm_iommu_map() argument
52 unsigned int da = iova; in msm_iommu_map()
63 VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); in msm_iommu_map()
75 da = iova; in msm_iommu_map()
85 static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova, in msm_iommu_unmap() argument
91 unsigned int da = iova; in msm_iommu_unmap()
102 VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); in msm_iommu_unmap()
msm_gem.c
284 uint32_t *iova) in msm_gem_get_iova_locked() argument
289 if (!msm_obj->domain[id].iova) { in msm_gem_get_iova_locked()
306 msm_obj->domain[id].iova = offset; in msm_gem_get_iova_locked()
308 msm_obj->domain[id].iova = physaddr(obj); in msm_gem_get_iova_locked()
313 *iova = msm_obj->domain[id].iova; in msm_gem_get_iova_locked()
319 int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova) in msm_gem_get_iova() argument
327 if (msm_obj->domain[id].iova) { in msm_gem_get_iova()
328 *iova = msm_obj->domain[id].iova; in msm_gem_get_iova()
333 ret = msm_gem_get_iova_locked(obj, id, iova); in msm_gem_get_iova()
344 WARN_ON(!msm_obj->domain[id].iova); in msm_gem_iova()
[all …]
msm_gem.h
57 uint32_t iova; member
108 uint32_t iova; member
114 uint32_t iova; member
msm_rd.c
310 uint32_t iova = submit->cmd[i].iova; in msm_rd_dump_submit() local
315 buf += iova - submit->bos[idx].iova; in msm_rd_dump_submit()
318 (uint32_t[2]){ iova, szd * 4 }, 8); in msm_rd_dump_submit()
332 (uint32_t[2]){ iova, szd }, 8); in msm_rd_dump_submit()
msm_mmu.h
26 int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
28 int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
msm_fb.c
99 uint32_t iova; in msm_framebuffer_prepare() local
102 ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova); in msm_framebuffer_prepare()
103 DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret); in msm_framebuffer_prepare()
msm_drv.h
182 uint32_t *iova);
183 int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
msm_gpu.c
494 uint32_t iova; in msm_gpu_submit() local
499 submit->gpu->id, &iova); in msm_gpu_submit()
/linux-4.1.27/arch/arm/mm/
dma-mapping.c
1030 dma_addr_t iova; in __alloc_iova() local
1074 iova = mapping->base + (mapping_size * i); in __alloc_iova()
1075 iova += start << PAGE_SHIFT; in __alloc_iova()
1077 return iova; in __alloc_iova()
1241 dma_addr_t dma_addr, iova; in __iommu_create_mapping() local
1248 iova = dma_addr; in __iommu_create_mapping()
1259 ret = iommu_map(mapping->domain, iova, phys, len, in __iommu_create_mapping()
1263 iova += len; in __iommu_create_mapping()
1268 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); in __iommu_create_mapping()
1273 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) in __iommu_remove_mapping() argument
[all …]
/linux-4.1.27/arch/ia64/hp/common/
sba_iommu.c
223 dma_addr_t iova; member
436 #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase)) argument
669 sba_mark_invalid(ioc, d->iova, d->size); in sba_alloc_range()
670 sba_free_range(ioc, d->iova, d->size); in sba_alloc_range()
729 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) in sba_free_range() argument
731 unsigned long iovp = SBA_IOVP(ioc, iova); in sba_free_range()
755 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size, in sba_free_range()
847 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) in sba_mark_invalid() argument
849 u32 iovp = (u32) SBA_IOVP(ioc,iova); in sba_mark_invalid()
1016 sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) in sba_mark_clean() argument
[all …]
/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_mr.c
148 mr->mr.iova = *iova_start; in ipath_reg_phys_mr()
215 mr->mr.iova = virt_addr; in ipath_reg_user_mr()
317 fmr->mr.iova = 0; in ipath_alloc_fmr()
348 int list_len, u64 iova) in ipath_map_phys_fmr() argument
363 fmr->mr.user_base = iova; in ipath_map_phys_fmr()
364 fmr->mr.iova = iova; in ipath_map_phys_fmr()
401 fmr->mr.iova = 0; in ipath_unmap_fmr()
ipath_keys.c
239 off = vaddr - mr->iova; in ipath_rkey_ok()
240 if (unlikely(vaddr < mr->iova || off + len > mr->length || in ipath_rkey_ok()
ipath_verbs.h
244 u64 iova; /* IB start address of this region */ member
836 int list_len, u64 iova);
/linux-4.1.27/arch/parisc/include/asm/
dma-mapping.h
11 void *(*alloc_consistent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
12 void *(*alloc_noncoherent)(struct device *dev, size_t size, dma_addr_t *iova, gfp_t flag);
13 void (*free_consistent)(struct device *dev, size_t size, void *vaddr, dma_addr_t iova);
15 …void (*unmap_single)(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction dir…
18 …void (*dma_sync_single_for_cpu)(struct device *dev, dma_addr_t iova, unsigned long offset, size_t …
19 …void (*dma_sync_single_for_device)(struct device *dev, dma_addr_t iova, unsigned long offset, size…
ropes.h
49 dma_addr_t iova; member
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_mr.c
197 mr->mr.iova = *iova_start; in qib_reg_phys_mr()
259 mr->mr.iova = virt_addr; in qib_reg_user_mr()
437 int list_len, u64 iova) in qib_map_phys_fmr() argument
456 fmr->mr.user_base = iova; in qib_map_phys_fmr()
457 fmr->mr.iova = iova; in qib_map_phys_fmr()
493 fmr->mr.iova = 0; in qib_unmap_fmr()
qib_keys.c
292 off = vaddr - mr->iova; in qib_rkey_ok()
293 if (unlikely(vaddr < mr->iova || off + len > mr->length || in qib_rkey_ok()
371 mr->iova = wr->wr.fast_reg.iova_start; in qib_fast_reg_mr()
qib_verbs.h
300 u64 iova; /* IB start address of this region */ member
1047 int list_len, u64 iova);
/linux-4.1.27/drivers/parisc/
sba_iommu.c
307 #define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask) argument
311 #define SBA_IOVP(ioc,iova) (iova) argument
489 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) in sba_free_range() argument
491 unsigned long iovp = SBA_IOVP(ioc, iova); in sba_free_range()
502 __func__, (uint) iova, size, in sba_free_range()
610 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) in sba_mark_invalid() argument
612 u32 iovp = (u32) SBA_IOVP(ioc,iova); in sba_mark_invalid()
793 sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, in sba_unmap_single() argument
803 DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size); in sba_unmap_single()
806 offset = iova & ~IOVP_MASK; in sba_unmap_single()
[all …]
ccio-dma.c
284 #define CCIO_IOVP(iova) ((iova) & IOVP_MASK) argument
432 ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped) in ccio_free_range() argument
434 unsigned long iovp = CCIO_IOVP(iova); in ccio_free_range()
664 ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) in ccio_mark_invalid() argument
666 u32 iovp = (u32)CCIO_IOVP(iova); in ccio_mark_invalid()
694 ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt); in ccio_mark_invalid()
801 ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size, in ccio_unmap_single() argument
806 dma_addr_t offset = iova & ~IOVP_MASK; in ccio_unmap_single()
812 __func__, (long)iova, size); in ccio_unmap_single()
814 iova ^= offset; /* clear offset bits */ in ccio_unmap_single()
[all …]
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx4/
mr.c
420 u64 iova, u64 size, u32 access, int npages, in mlx4_mr_alloc_reserved() argument
423 mr->iova = iova; in mlx4_mr_alloc_reserved()
529 int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access, in mlx4_mr_alloc() argument
539 err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size, in mlx4_mr_alloc()
592 u64 iova, u64 size, int npages, in mlx4_mr_rereg_mem_write() argument
601 mpt_entry->start = cpu_to_be64(iova); in mlx4_mr_rereg_mem_write()
651 mpt_entry->start = cpu_to_be64(mr->iova); in mlx4_mr_enable()
973 int npages, u64 iova) in mlx4_check_fmr() argument
983 if (iova & page_mask) in mlx4_check_fmr()
1000 int npages, u64 iova, u32 *lkey, u32 *rkey) in mlx4_map_phys_fmr() argument
[all …]
/linux-4.1.27/drivers/infiniband/hw/mthca/
mthca_mr.c
430 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr) in mthca_mr_alloc() argument
469 mpt_entry->start = cpu_to_be64(iova); in mthca_mr_alloc()
521 int list_len, u64 iova, u64 total_size, in mthca_mr_alloc_phys() argument
536 err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova, in mthca_mr_alloc_phys()
690 int list_len, u64 iova) in mthca_check_fmr() argument
700 if (iova & page_mask) in mthca_check_fmr()
718 int list_len, u64 iova) in mthca_tavor_map_phys_fmr() argument
726 err = mthca_check_fmr(fmr, page_list, list_len, iova); in mthca_tavor_map_phys_fmr()
746 mpt_entry.start = cpu_to_be64(iova); in mthca_tavor_map_phys_fmr()
759 int list_len, u64 iova) in mthca_arbel_map_phys_fmr() argument
[all …]
mthca_dev.h
472 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr);
477 int list_len, u64 iova, u64 total_size,
484 int list_len, u64 iova);
487 int list_len, u64 iova);
/linux-4.1.27/drivers/gpu/drm/msm/mdp/mdp4/
mdp4_crtc.c
381 uint32_t iova = mdp4_crtc->cursor.next_iova; in update_cursor() local
386 msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova); in update_cursor()
392 mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova); in update_cursor()
426 uint32_t iova; in mdp4_crtc_cursor_set() local
443 ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova); in mdp4_crtc_cursor_set()
447 iova = 0; in mdp4_crtc_cursor_set()
453 mdp4_crtc->cursor.next_iova = iova; in mdp4_crtc_cursor_set()
/linux-4.1.27/drivers/gpu/drm/msm/dsi/
dsi.h
69 bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
86 u32 iova, u32 len);
dsi_host.c
924 u32 iova; in dsi_tx_buf_alloc() local
936 ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova); in dsi_tx_buf_alloc()
943 if (iova & 0x07) { in dsi_tx_buf_alloc()
1062 u32 iova; in dsi_cmd_dma_tx() local
1065 ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova); in dsi_cmd_dma_tx()
1076 msm_host->id, iova, len); in dsi_cmd_dma_tx()
1815 void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len) in msm_dsi_host_cmd_xfer_commit() argument
1819 dsi_write(msm_host, REG_DSI_DMA_BASE, iova); in msm_dsi_host_cmd_xfer_commit()
dsi_manager.c
634 bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len) in msm_dsi_manager_cmd_xfer_trigger() argument
644 msm_dsi_host_cmd_xfer_commit(msm_dsi0->host, iova, len); in msm_dsi_manager_cmd_xfer_trigger()
646 msm_dsi_host_cmd_xfer_commit(host, iova, len); in msm_dsi_manager_cmd_xfer_trigger()
/linux-4.1.27/include/uapi/linux/
vfio.h
411 __u64 iova; /* IO virtual address */ member
430 __u64 iova; /* IO virtual address */ member
/linux-4.1.27/drivers/infiniband/hw/mlx4/
mr.c
250 mmr->mmr.iova = virt_addr; in mlx4_ib_rereg_user_mr()
469 int npages, u64 iova) in mlx4_ib_map_phys_fmr() argument
474 return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova, in mlx4_ib_map_phys_fmr()
mlx4_ib.h
720 u64 iova);
/linux-4.1.27/drivers/infiniband/hw/ehca/
ehca_iverbs.h
110 u64 *page_list, int list_len, u64 iova);
ehca_mrmw.c
858 u64 iova) in ehca_map_phys_fmr() argument
877 if (iova % e_fmr->fmr_page_size) { in ehca_map_phys_fmr()
880 iova, e_fmr->fmr_page_size); in ehca_map_phys_fmr()
899 (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size; in ehca_map_phys_fmr()
902 ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova, in ehca_map_phys_fmr()
917 "iova=%llx", ret, fmr, page_list, list_len, iova); in ehca_map_phys_fmr()
/linux-4.1.27/drivers/infiniband/hw/usnic/
usnic_uiom.c
70 unsigned long iova, int flags, in usnic_uiom_dma_fault() argument
75 domain, iova, flags); in usnic_uiom_dma_fault()
/linux-4.1.27/include/linux/mlx4/
device.h
644 u64 iova; member
1039 int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
1326 int npages, u64 iova, u32 *lkey, u32 *rkey);
1439 u64 iova, u64 size, int npages,
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/
mr.c
85 mr->iova = be64_to_cpu(in->seg.start_addr); in mlx5_core_create_mkey()
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/
o2iblnd.c
1802 kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr) in kiblnd_pmr_pool_map() argument
1830 iova); in kiblnd_pmr_pool_map()
1832 pmr->pmr_iova = *iova; in kiblnd_pmr_pool_map()
2374 __u64 iova; in kiblnd_hdev_setup_mrs() local
2378 iova = ipb.addr; in kiblnd_hdev_setup_mrs()
2380 mr = ib_reg_phys_mr(hdev->ibh_pd, &ipb, 1, acflags, &iova); in kiblnd_hdev_setup_mrs()
2388 LASSERT(iova == ipb.addr); in kiblnd_hdev_setup_mrs()
o2iblnd_cb.c
600 __u64 iova; in kiblnd_pmr_map_tx() local
609 iova = rd->rd_frags[0].rf_addr & ~hdev->ibh_page_mask; in kiblnd_pmr_map_tx()
614 rc = kiblnd_pmr_pool_map(pps, hdev, rd, &iova, &tx->tx_u.pmr); in kiblnd_pmr_map_tx()
625 rd->rd_frags[0].rf_addr = iova; in kiblnd_pmr_map_tx()
o2iblnd.h
965 kib_rdma_desc_t *rd, __u64 *iova, kib_phys_mr_t **pp_pmr);
/linux-4.1.27/drivers/remoteproc/
remoteproc_core.c
76 unsigned long iova, int flags, void *token) in rproc_iommu_fault() argument
80 dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags); in rproc_iommu_fault()
/linux-4.1.27/drivers/gpu/drm/msm/adreno/
adreno_gpu.c
141 OUT_RING(ring, submit->cmd[i].iova); in adreno_submit()
/linux-4.1.27/include/rdma/
ib_verbs.h
1628 u64 iova);
2588 u64 iova) in ib_map_phys_fmr() argument
2590 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); in ib_map_phys_fmr()
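
ib_map_phys_fmr() is the fast remap path that mthca, mlx4, qib, ipath and ehca implement above. A hedged consumer sketch; the attribute values are assumptions, and page_list must hold npages addresses aligned to the FMR page size:

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static struct ib_fmr *fmr_demo(struct ib_pd *pd, u64 *page_list,
                               int npages, u64 iova)
{
        struct ib_fmr_attr attr = {
                .max_pages  = 64,
                .max_maps   = 32,
                .page_shift = 12,       /* 4 KiB FMR pages */
        };
        struct ib_fmr *fmr;

        fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
        if (IS_ERR(fmr))
                return fmr;

        /* iova must be page-aligned; see the checks in mthca/mlx4 above */
        if (ib_map_phys_fmr(fmr, page_list, npages, iova))
                pr_warn("fmr map failed\n");

        return fmr;
}
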
/linux-4.1.27/Documentation/
vfio.txt
205 dma_map.iova = 0; /* 1MB starting at 0x0 from device view */
352 dma_map.iova = 0; /* 1MB starting at 0x0 from device view */
355 /* Check here is .iova/.size are within DMA window from spapr_iommu_info */
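
The .iova field is the address the device will use; .vaddr is the process mapping that backs it. A userspace sketch expanding the documentation snippet above (container fd setup as in vfio.txt; error handling elided):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/vfio.h>

/* Map 1 MiB of anonymous memory at device address 0. */
static int map_1m(int container)
{
        struct vfio_iommu_type1_dma_map dma_map = {
                .argsz = sizeof(dma_map),
                .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
                .iova  = 0,                     /* device view */
                .size  = 1024 * 1024,
        };
        void *buf = mmap(NULL, dma_map.size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
                return -1;
        dma_map.vaddr = (uintptr_t)buf;

        return ioctl(container, VFIO_IOMMU_MAP_DMA, &dma_map);
}
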
/linux-4.1.27/arch/parisc/kernel/
pci-dma.c
577 void *vaddr, dma_addr_t iova) in pa11_dma_free_noncoherent() argument
/linux-4.1.27/drivers/infiniband/hw/mlx5/
mlx5_ib.h
585 int npages, u64 iova);
odp.c
240 start_idx = (io_virt - (mr->mmr.iova & PAGE_MASK)) >> PAGE_SHIFT; in pagefault_single_data_segment()
mr.c
824 mr->mmr.iova = virt_addr; in reg_umr()
/linux-4.1.27/include/linux/mlx5/
driver.h
382 u64 iova; member