iova             1148 arch/arm/mm/dma-mapping.c 	dma_addr_t iova;
iova             1192 arch/arm/mm/dma-mapping.c 	iova = mapping->base + (mapping_size * i);
iova             1193 arch/arm/mm/dma-mapping.c 	iova += start << PAGE_SHIFT;
iova             1195 arch/arm/mm/dma-mapping.c 	return iova;
iova             1354 arch/arm/mm/dma-mapping.c 	dma_addr_t dma_addr, iova;
iova             1361 arch/arm/mm/dma-mapping.c 	iova = dma_addr;
iova             1374 arch/arm/mm/dma-mapping.c 		ret = iommu_map(mapping->domain, iova, phys, len,
iova             1378 arch/arm/mm/dma-mapping.c 		iova += len;
iova             1383 arch/arm/mm/dma-mapping.c 	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
iova             1388 arch/arm/mm/dma-mapping.c static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
iova             1396 arch/arm/mm/dma-mapping.c 	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
iova             1397 arch/arm/mm/dma-mapping.c 	iova &= PAGE_MASK;
iova             1399 arch/arm/mm/dma-mapping.c 	iommu_unmap(mapping->domain, iova, size);
iova             1400 arch/arm/mm/dma-mapping.c 	__free_iova(mapping, iova, size);
iova             1621 arch/arm/mm/dma-mapping.c 	dma_addr_t iova, iova_base;
iova             1630 arch/arm/mm/dma-mapping.c 	iova_base = iova = __alloc_iova(mapping, size);
iova             1631 arch/arm/mm/dma-mapping.c 	if (iova == DMA_MAPPING_ERROR)
iova             1643 arch/arm/mm/dma-mapping.c 		ret = iommu_map(mapping->domain, iova, phys, len, prot);
iova             1647 arch/arm/mm/dma-mapping.c 		iova += len;
iova             1894 arch/arm/mm/dma-mapping.c 	dma_addr_t iova = handle & PAGE_MASK;
iova             1898 arch/arm/mm/dma-mapping.c 	if (!iova)
iova             1901 arch/arm/mm/dma-mapping.c 	iommu_unmap(mapping->domain, iova, len);
iova             1902 arch/arm/mm/dma-mapping.c 	__free_iova(mapping, iova, len);
iova             1918 arch/arm/mm/dma-mapping.c 	dma_addr_t iova = handle & PAGE_MASK;
iova             1919 arch/arm/mm/dma-mapping.c 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
iova             1923 arch/arm/mm/dma-mapping.c 	if (!iova)
iova             1929 arch/arm/mm/dma-mapping.c 	iommu_unmap(mapping->domain, iova, len);
iova             1930 arch/arm/mm/dma-mapping.c 	__free_iova(mapping, iova, len);
iova             1979 arch/arm/mm/dma-mapping.c 	dma_addr_t iova = dma_handle & PAGE_MASK;
iova             1983 arch/arm/mm/dma-mapping.c 	if (!iova)
iova             1986 arch/arm/mm/dma-mapping.c 	iommu_unmap(mapping->domain, iova, len);
iova             1987 arch/arm/mm/dma-mapping.c 	__free_iova(mapping, iova, len);
iova             1994 arch/arm/mm/dma-mapping.c 	dma_addr_t iova = handle & PAGE_MASK;
iova             1995 arch/arm/mm/dma-mapping.c 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
iova             1998 arch/arm/mm/dma-mapping.c 	if (!iova)
iova             2008 arch/arm/mm/dma-mapping.c 	dma_addr_t iova = handle & PAGE_MASK;
iova             2009 arch/arm/mm/dma-mapping.c 	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
iova             2012 arch/arm/mm/dma-mapping.c 	if (!iova)
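
These arch/arm/mm/dma-mapping.c hits share one convention: a DMA handle is
turned back into a page-aligned IOVA with "handle & PAGE_MASK" before the
range is handed to iommu_unmap() and __free_iova(). A minimal, runnable
sketch of the alignment arithmetic in __iommu_remove_mapping() (values are
hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

    int main(void)
    {
            /* hypothetical handle: 0x100 bytes starting 0x30 into a page */
            uint64_t iova = 0x10030;
            uint64_t size = 0x100;

            /* widen to whole pages, as __iommu_remove_mapping() does */
            size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
            iova &= PAGE_MASK;

            /* prints: unmap iova=0x10000 size=0x1000 */
            printf("unmap iova=%#llx size=%#llx\n",
                   (unsigned long long)iova, (unsigned long long)size);
            return 0;
    }
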
iova              219 arch/ia64/hp/common/sba_iommu.c 		dma_addr_t	iova;
iova              428 arch/ia64/hp/common/sba_iommu.c #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
iova              661 arch/ia64/hp/common/sba_iommu.c 					sba_mark_invalid(ioc, d->iova, d->size);
iova              662 arch/ia64/hp/common/sba_iommu.c 					sba_free_range(ioc, d->iova, d->size);
iova              721 arch/ia64/hp/common/sba_iommu.c sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
iova              723 arch/ia64/hp/common/sba_iommu.c 	unsigned long iovp = SBA_IOVP(ioc, iova);
iova              747 arch/ia64/hp/common/sba_iommu.c 			DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
iova              839 arch/ia64/hp/common/sba_iommu.c sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
iova              841 arch/ia64/hp/common/sba_iommu.c 	u32 iovp = (u32) SBA_IOVP(ioc,iova);
iova             1001 arch/ia64/hp/common/sba_iommu.c sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
iova             1003 arch/ia64/hp/common/sba_iommu.c 	u32	iovp = (u32) SBA_IOVP(ioc,iova);
iova             1033 arch/ia64/hp/common/sba_iommu.c static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
iova             1047 arch/ia64/hp/common/sba_iommu.c 	if (likely((iova & ioc->imask) != ioc->ibase)) {
iova             1052 arch/ia64/hp/common/sba_iommu.c 			   iova);
iova             1056 arch/ia64/hp/common/sba_iommu.c 			mark_clean(phys_to_virt(iova), size);
iova             1062 arch/ia64/hp/common/sba_iommu.c 	offset = iova & ~iovp_mask;
iova             1064 arch/ia64/hp/common/sba_iommu.c 	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
iova             1066 arch/ia64/hp/common/sba_iommu.c 	iova ^= offset;        /* clear offset bits */
iova             1072 arch/ia64/hp/common/sba_iommu.c 		sba_mark_clean(ioc, iova, size);
iova             1078 arch/ia64/hp/common/sba_iommu.c 	d->iova = iova;
iova             1084 arch/ia64/hp/common/sba_iommu.c 			sba_mark_invalid(ioc, d->iova, d->size);
iova             1085 arch/ia64/hp/common/sba_iommu.c 			sba_free_range(ioc, d->iova, d->size);
iova             1095 arch/ia64/hp/common/sba_iommu.c 	sba_mark_invalid(ioc, iova, size);
iova             1096 arch/ia64/hp/common/sba_iommu.c 	sba_free_range(ioc, iova, size);
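
sba_iommu.c converts a device-visible iova into a bitmap offset by masking
off the I/O space base with the SBA_IOVP() macro quoted above. A hedged
sketch of that offset math with a made-up ibase:

    #include <stdint.h>
    #include <stdio.h>

    #define SBA_IOVP(ibase, iova) ((iova) & ~(ibase))

    int main(void)
    {
            uint64_t ibase = 0xc0000000;   /* hypothetical ioc->ibase */
            uint64_t iova  = 0xc0042000;   /* device-visible address */

            /* prints: iovp = 0x42000 */
            printf("iovp = %#llx\n",
                   (unsigned long long)SBA_IOVP(ibase, iova));
            return 0;
    }
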
iova               50 arch/parisc/include/asm/ropes.h 			dma_addr_t	iova;
iova              288 arch/s390/pci/pci.c 	void __iomem *iova;
iova              290 arch/s390/pci/pci.c 	iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
iova              291 arch/s390/pci/pci.c 	return iova ? iova + offset : iova;
iova              318 arch/s390/pci/pci.c 	void __iomem *iova;
iova              320 arch/s390/pci/pci.c 	iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
iova              321 arch/s390/pci/pci.c 	return iova ? iova + offset : iova;
iova              134 drivers/fpga/dfl-afu-dma-region.c 				  u64 iova, u64 size)
iova              136 drivers/fpga/dfl-afu-dma-region.c 	if (!size && region->iova != iova)
iova              139 drivers/fpga/dfl-afu-dma-region.c 	return (region->iova <= iova) &&
iova              140 drivers/fpga/dfl-afu-dma-region.c 		(region->length + region->iova >= iova + size);
iova              159 drivers/fpga/dfl-afu-dma-region.c 		(unsigned long long)region->iova);
iova              170 drivers/fpga/dfl-afu-dma-region.c 		if (dma_region_check_iova(this, region->iova, region->length))
iova              173 drivers/fpga/dfl-afu-dma-region.c 		if (region->iova < this->iova)
iova              175 drivers/fpga/dfl-afu-dma-region.c 		else if (region->iova > this->iova)
iova              200 drivers/fpga/dfl-afu-dma-region.c 		(unsigned long long)region->iova);
iova              222 drivers/fpga/dfl-afu-dma-region.c 			(unsigned long long)region->iova);
iova              226 drivers/fpga/dfl-afu-dma-region.c 		if (region->iova)
iova              228 drivers/fpga/dfl-afu-dma-region.c 				       region->iova, region->length,
iova              254 drivers/fpga/dfl-afu-dma-region.c afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size)
iova              265 drivers/fpga/dfl-afu-dma-region.c 		if (dma_region_check_iova(region, iova, size)) {
iova              267 drivers/fpga/dfl-afu-dma-region.c 				(unsigned long long)region->iova);
iova              271 drivers/fpga/dfl-afu-dma-region.c 		if (iova < region->iova)
iova              273 drivers/fpga/dfl-afu-dma-region.c 		else if (iova > region->iova)
iova              281 drivers/fpga/dfl-afu-dma-region.c 		(unsigned long long)iova, (unsigned long long)size);
iova              294 drivers/fpga/dfl-afu-dma-region.c afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova)
iova              296 drivers/fpga/dfl-afu-dma-region.c 	return afu_dma_region_find(pdata, iova, 0);
iova              311 drivers/fpga/dfl-afu-dma-region.c 		       u64 user_addr, u64 length, u64 *iova)
iova              353 drivers/fpga/dfl-afu-dma-region.c 	region->iova = dma_map_page(dfl_fpga_pdata_to_parent(pdata),
iova              357 drivers/fpga/dfl-afu-dma-region.c 	if (dma_mapping_error(dfl_fpga_pdata_to_parent(pdata), region->iova)) {
iova              363 drivers/fpga/dfl-afu-dma-region.c 	*iova = region->iova;
iova              377 drivers/fpga/dfl-afu-dma-region.c 		       region->iova, region->length, DMA_BIDIRECTIONAL);
iova              393 drivers/fpga/dfl-afu-dma-region.c int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova)
iova              398 drivers/fpga/dfl-afu-dma-region.c 	region = afu_dma_region_find_iova(pdata, iova);
iova              413 drivers/fpga/dfl-afu-dma-region.c 		       region->iova, region->length, DMA_BIDIRECTIONAL);
iova              665 drivers/fpga/dfl-afu-main.c 	minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);
iova              673 drivers/fpga/dfl-afu-main.c 	ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
iova              678 drivers/fpga/dfl-afu-main.c 		afu_dma_unmap_region(pdata, map.iova);
iova              685 drivers/fpga/dfl-afu-main.c 		(unsigned long long)map.iova);
iova              696 drivers/fpga/dfl-afu-main.c 	minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);
iova              704 drivers/fpga/dfl-afu-main.c 	return afu_dma_unmap_region(pdata, unmap.iova);
iova               56 drivers/fpga/dfl-afu.h 	u64 iova;
iova               99 drivers/fpga/dfl-afu.h 		       u64 user_addr, u64 length, u64 *iova);
iova              100 drivers/fpga/dfl-afu.h int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
iova              103 drivers/fpga/dfl-afu.h 		    u64 iova, u64 size);
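
In dfl-afu-dma-region.c, dma_region_check_iova() treats size == 0 as an
exact-start lookup and otherwise requires full containment; the rbtree
walks above descend by comparing iova fields. A self-contained sketch of
that containment test (the struct name here is hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    struct region { uint64_t iova, length; };

    /* size == 0: only an exact start match counts;
     * otherwise [iova, iova + size) must lie inside the region */
    static bool region_contains(const struct region *r,
                                uint64_t iova, uint64_t size)
    {
            if (!size && r->iova != iova)
                    return false;
            return r->iova <= iova && r->iova + r->length >= iova + size;
    }
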
iova              136 drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.c 	return mapping->iova + buf->suballoc_offset;
iova              104 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	void *ptr, size_t size, u64 iova)
iova              108 drivers/gpu/drm/etnaviv/etnaviv_dump.c 	iter->hdr->iova = cpu_to_le64(iova);
iova              216 drivers/gpu/drm/etnaviv/etnaviv_dump.c 		iter.hdr->iova = cpu_to_le64(vram->iova);
iova               27 drivers/gpu/drm/etnaviv/etnaviv_dump.h 	__le64 iova;
iova               31 drivers/gpu/drm/etnaviv/etnaviv_gem.h 	u32 iova;
iova              243 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		     submit->bos[i].va != mapping->iova) {
iova              318 drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c 		ptr[off] = bo->mapping->iova + r->reloc_offset;
iova               48 drivers/gpu/drm/etnaviv/etnaviv_iommu.c 			       unsigned long iova, phys_addr_t paddr,
iova               52 drivers/gpu/drm/etnaviv/etnaviv_iommu.c 	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
iova               63 drivers/gpu/drm/etnaviv/etnaviv_iommu.c 	unsigned long iova, size_t size)
iova               66 drivers/gpu/drm/etnaviv/etnaviv_iommu.c 	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
iova               93 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c 			       unsigned long iova, phys_addr_t paddr,
iova              109 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c 	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
iova              110 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c 	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
iova              122 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c 				    unsigned long iova, size_t size)
iova              130 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c 	mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
iova              131 drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c 	stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
iova               17 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 				 unsigned long iova, size_t size)
iova               22 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 	if (!IS_ALIGNED(iova | size, pgsize)) {
iova               24 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		       iova, size, pgsize);
iova               29 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		unmapped_page = context->global->ops->unmap(context, iova,
iova               34 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		iova += unmapped_page;
iova               40 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 			      unsigned long iova, phys_addr_t paddr,
iova               43 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 	unsigned long orig_iova = iova;
iova               48 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
iova               50 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		       iova, &paddr, size, pgsize);
iova               55 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		ret = context->global->ops->map(context, iova, paddr, pgsize,
iova               60 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		iova += pgsize;
iova               72 drivers/gpu/drm/etnaviv/etnaviv_mmu.c static int etnaviv_iommu_map(struct etnaviv_iommu_context *context, u32 iova,
iova               75 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 	unsigned int da = iova;
iova               86 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
iova               98 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 	da = iova;
iova              109 drivers/gpu/drm/etnaviv/etnaviv_mmu.c static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
iova              113 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 	unsigned int da = iova;
iova              121 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
iova              245 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		u32 iova;
iova              247 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		iova = sg_dma_address(sgt->sgl) - memory_base;
iova              248 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
iova              249 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 			mapping->iova = iova;
iova              267 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 	mapping->iova = node->start;
iova              335 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 	    ctx->cmdbuf_mapping.iova > 0x80000000) {
iova              376 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		mapping->iova = paddr - memory_base;
iova              387 drivers/gpu/drm/etnaviv/etnaviv_mmu.c 		mapping->iova = node->start;
iova               25 drivers/gpu/drm/etnaviv/etnaviv_mmu.h 	int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
iova               27 drivers/gpu/drm/etnaviv/etnaviv_mmu.h 	size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova,
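
etnaviv_mmu.c first checks that iova, paddr and size are aligned to the
page-table granule, then maps or unmaps the range one pgsize chunk at a
time through the version-specific ops. A reduced sketch of that walk
(names and error handling are illustrative, not the driver's exact code):

    #include <errno.h>
    #include <stddef.h>

    static int map_range(unsigned long iova, unsigned long paddr,
                         size_t size, size_t pgsize,
                         int (*map_one)(unsigned long iova,
                                        unsigned long paddr))
    {
            if ((iova | paddr | size) & (pgsize - 1))
                    return -EINVAL;   /* all three must be pgsize-aligned */

            while (size) {
                    int ret = map_one(iova, paddr);
                    if (ret)
                            return ret;   /* caller rolls back partial maps */
                    iova += pgsize;
                    paddr += pgsize;
                    size -= pgsize;
            }
            return 0;
    }
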
iova              708 drivers/gpu/drm/i915/gvt/kvmgt.c 		iov_pfn = unmap->iova >> PAGE_SHIFT;
iova              156 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
iova              157 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
iova              837 drivers/gpu/drm/msm/adreno/a5xx_gpu.c static int a5xx_fault_handler(void *arg, unsigned long iova, int flags)
iova              841 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 			iova, flags,
iova             1138 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	u64 iova;
iova             1151 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		&dumper->bo, &dumper->iova);
iova             1168 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 		REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
iova             1218 drivers/gpu/drm/msm/adreno/a5xx_gpu.c 	offset = dumper.iova + (256 * SZ_1K);
iova              212 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 		a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
iova              230 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	u64 iova = 0;
iova              234 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 		MSM_BO_UNCACHED, gpu->aspace, &bo, &iova);
iova              242 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	a5xx_gpu->preempt_iova[ring->id] = iova;
iova              252 drivers/gpu/drm/msm/adreno/a5xx_preempt.c 	ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
iova              604 drivers/gpu/drm/msm/adreno/a6xx_gmu.c 	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
iova              881 drivers/gpu/drm/msm/adreno/a6xx_gmu.c 	u64 iova;
iova              887 drivers/gpu/drm/msm/adreno/a6xx_gmu.c 	iova = bo->iova;
iova              889 drivers/gpu/drm/msm/adreno/a6xx_gmu.c 	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
iova              890 drivers/gpu/drm/msm/adreno/a6xx_gmu.c 		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
iova              924 drivers/gpu/drm/msm/adreno/a6xx_gmu.c 	bo->iova = gmu->uncached_iova_base;
iova              928 drivers/gpu/drm/msm/adreno/a6xx_gmu.c 			bo->iova + (PAGE_SIZE * i),
iova              937 drivers/gpu/drm/msm/adreno/a6xx_gmu.c 					bo->iova + (PAGE_SIZE * i),
iova               15 drivers/gpu/drm/msm/adreno/a6xx_gmu.h 	u64 iova;
iova               74 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 		u64 iova)
iova               78 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, lower_32_bits(iova));
iova               79 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 	OUT_RING(ring, upper_32_bits(iova));
iova              121 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
iova              122 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
iova              605 drivers/gpu/drm/msm/adreno/a6xx_gpu.c static int a6xx_fault_handler(void *arg, unsigned long iova, int flags)
iova              610 drivers/gpu/drm/msm/adreno/a6xx_gpu.c 			iova, flags,
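
The Adreno ring writers above always emit a 64-bit iova as two 32-bit
dwords, low half first. A trivial sketch of that split (the flat ring
layout is assumed for illustration):

    #include <stdint.h>

    #define lower_32_bits(v) ((uint32_t)(v))
    #define upper_32_bits(v) ((uint32_t)((v) >> 32))

    /* mirrors the OUT_RING(lower)/OUT_RING(upper) pairs quoted above */
    static void emit_iova(uint32_t *ring, uint64_t iova)
    {
            ring[0] = lower_32_bits(iova);
            ring[1] = upper_32_bits(iova);
    }
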
iova               75 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	u64 iova;
iova              117 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		&dumper->bo, &dumper->iova);
iova              143 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 		REG_A6XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
iova              434 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
iova              502 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
iova              582 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 			block->size, dumper->iova + A6XX_CD_DATA_OFFSET);
iova              623 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
iova              661 drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 	u64 out = dumper->iova + A6XX_CD_DATA_OFFSET;
iova              178 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	msg.dbg_buffer_addr = (u32) gmu->debug->iova;
iova              328 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 		struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
iova              337 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 	header->iova = iova;
iova              378 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 		hfi->iova + offset, 0);
iova              383 drivers/gpu/drm/msm/adreno/a6xx_hfi.c 		hfi->iova + offset, 4);
iova               18 drivers/gpu/drm/msm/adreno/a6xx_hfi.h 	u32 iova;
iova              314 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		const struct firmware *fw, u64 *iova)
iova              320 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
iova              369 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
iova              435 drivers/gpu/drm/msm/adreno/adreno_gpu.c 			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
iova              536 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		state->ring[i].iova = gpu->rb[i]->iova;
iova              715 drivers/gpu/drm/msm/adreno/adreno_gpu.c 		drm_printf(p, "    iova: 0x%016llx\n", state->ring[i].iova);
iova              731 drivers/gpu/drm/msm/adreno/adreno_gpu.c 				state->bos[i].iova);
iova              223 drivers/gpu/drm/msm/adreno/adreno_gpu.h 		const struct firmware *fw, u64 *iova);
iova              366 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c 		uint64_t iova = mdp4_crtc->cursor.next_iova;
iova              371 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c 			msm_gem_get_and_pin_iova(next_bo, kms->aspace, &iova);
iova              377 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c 			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
iova              412 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c 	uint64_t iova;
iova              429 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c 		ret = msm_gem_get_and_pin_iova(cursor_bo, kms->aspace, &iova);
iova              433 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c 		iova = 0;
iova              439 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c 	mdp4_crtc->cursor.next_iova = iova;
iova               58 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c 		uint64_t iova;
iova              477 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c 		if (mdp5_crtc->cursor.iova) {
iova              848 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c 			mdp5_crtc->cursor.iova);
iova              895 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c 		mdp5_crtc->cursor.iova = 0;
iova              905 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c 			&mdp5_crtc->cursor.iova);
iova              189 drivers/gpu/drm/msm/dsi/dsi.h int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
iova              190 drivers/gpu/drm/msm/dsi/dsi.h int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
iova               43 drivers/gpu/drm/msm/dsi/dsi_cfg.h 	int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
iova             1068 drivers/gpu/drm/msm/dsi/dsi_host.c 	uint64_t iova;
iova             1073 drivers/gpu/drm/msm/dsi/dsi_host.c 					&msm_host->tx_gem_obj, &iova);
iova              778 drivers/gpu/drm/msm/msm_drv.c 		struct drm_gem_object *obj, uint64_t *iova)
iova              789 drivers/gpu/drm/msm/msm_drv.c 	return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
iova              275 drivers/gpu/drm/msm/msm_drv.h 		struct msm_gem_address_space *aspace, uint64_t *iova);
iova              277 drivers/gpu/drm/msm/msm_drv.h 		struct msm_gem_address_space *aspace, uint64_t *iova);
iova              316 drivers/gpu/drm/msm/msm_drv.h 		struct drm_gem_object **bo, uint64_t *iova);
iova              319 drivers/gpu/drm/msm/msm_drv.h 		struct drm_gem_object **bo, uint64_t *iova);
iova               59 drivers/gpu/drm/msm/msm_fb.c 	uint64_t iova;
iova               62 drivers/gpu/drm/msm/msm_fb.c 		ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &iova);
iova               63 drivers/gpu/drm/msm/msm_fb.c 		DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
iova              392 drivers/gpu/drm/msm/msm_gem.c 		struct msm_gem_address_space *aspace, uint64_t *iova)
iova              414 drivers/gpu/drm/msm/msm_gem.c 	*iova = vma->iova;
iova              448 drivers/gpu/drm/msm/msm_gem.c 		struct msm_gem_address_space *aspace, uint64_t *iova)
iova              462 drivers/gpu/drm/msm/msm_gem.c 		*iova = local;
iova              473 drivers/gpu/drm/msm/msm_gem.c 		struct msm_gem_address_space *aspace, uint64_t *iova)
iova              479 drivers/gpu/drm/msm/msm_gem.c 	ret = msm_gem_get_iova_locked(obj, aspace, iova);
iova              499 drivers/gpu/drm/msm/msm_gem.c 	return vma ? vma->iova : 0;
iova              838 drivers/gpu/drm/msm/msm_gem.c 				vma->iova, vma->mapped ? "mapped" : "unmapped",
iova             1075 drivers/gpu/drm/msm/msm_gem.c 		vma->iova = physaddr(obj);
iova             1158 drivers/gpu/drm/msm/msm_gem.c 		struct drm_gem_object **bo, uint64_t *iova, bool locked)
iova             1167 drivers/gpu/drm/msm/msm_gem.c 	if (iova) {
iova             1168 drivers/gpu/drm/msm/msm_gem.c 		ret = msm_gem_get_and_pin_iova(obj, aspace, iova);
iova             1196 drivers/gpu/drm/msm/msm_gem.c 		struct drm_gem_object **bo, uint64_t *iova)
iova             1198 drivers/gpu/drm/msm/msm_gem.c 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, false);
iova             1203 drivers/gpu/drm/msm/msm_gem.c 		struct drm_gem_object **bo, uint64_t *iova)
iova             1205 drivers/gpu/drm/msm/msm_gem.c 	return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova, true);
iova               30 drivers/gpu/drm/msm/msm_gem.h 	uint64_t iova;
iova              150 drivers/gpu/drm/msm/msm_gem.h 		uint64_t iova;
iova              159 drivers/gpu/drm/msm/msm_gem.h 		uint64_t iova;
iova              108 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->bos[i].iova  = submit_bo.presumed;
iova              164 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->bos[i].iova = 0;
iova              258 drivers/gpu/drm/msm/msm_gem_submit.c 		uint64_t iova;
iova              262 drivers/gpu/drm/msm/msm_gem_submit.c 				submit->aspace, &iova);
iova              269 drivers/gpu/drm/msm/msm_gem_submit.c 		if (iova == submit->bos[i].iova) {
iova              272 drivers/gpu/drm/msm/msm_gem_submit.c 			submit->bos[i].iova = iova;
iova              283 drivers/gpu/drm/msm/msm_gem_submit.c 		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
iova              293 drivers/gpu/drm/msm/msm_gem_submit.c 	if (iova)
iova              294 drivers/gpu/drm/msm/msm_gem_submit.c 		*iova = submit->bos[idx].iova;
iova              333 drivers/gpu/drm/msm/msm_gem_submit.c 		uint64_t iova;
iova              358 drivers/gpu/drm/msm/msm_gem_submit.c 		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
iova              365 drivers/gpu/drm/msm/msm_gem_submit.c 		iova += submit_reloc.reloc_offset;
iova              368 drivers/gpu/drm/msm/msm_gem_submit.c 			iova >>= -submit_reloc.shift;
iova              370 drivers/gpu/drm/msm/msm_gem_submit.c 			iova <<= submit_reloc.shift;
iova              372 drivers/gpu/drm/msm/msm_gem_submit.c 		ptr[off] = iova | submit_reloc.or;
iova              509 drivers/gpu/drm/msm/msm_gem_submit.c 		uint64_t iova;
iova              530 drivers/gpu/drm/msm/msm_gem_submit.c 				&msm_obj, &iova, NULL);
iova              551 drivers/gpu/drm/msm/msm_gem_submit.c 		submit->cmd[i].iova = iova + submit_cmd.submit_offset;
iova               45 drivers/gpu/drm/msm/msm_gem_vma.c 		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, size);
iova               54 drivers/gpu/drm/msm/msm_gem_vma.c 	if (!WARN_ON(!vma->iova))
iova               66 drivers/gpu/drm/msm/msm_gem_vma.c 	if (WARN_ON(!vma->iova))
iova               78 drivers/gpu/drm/msm/msm_gem_vma.c 		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
iova               95 drivers/gpu/drm/msm/msm_gem_vma.c 	if (vma->iova)
iova               99 drivers/gpu/drm/msm/msm_gem_vma.c 	vma->iova = 0;
iova              110 drivers/gpu/drm/msm/msm_gem_vma.c 	if (WARN_ON(vma->iova))
iova              120 drivers/gpu/drm/msm/msm_gem_vma.c 	vma->iova = vma->node.start << PAGE_SHIFT;
iova              306 drivers/gpu/drm/msm/msm_gpu.c 		struct msm_gem_object *obj, u64 iova, u32 flags)
iova              312 drivers/gpu/drm/msm/msm_gpu.c 	state_bo->iova = iova;
iova              367 drivers/gpu/drm/msm/msm_gpu.c 				submit->bos[idx].iova, submit->bos[idx].flags);
iova              753 drivers/gpu/drm/msm/msm_gpu.c 		uint64_t iova;
iova              762 drivers/gpu/drm/msm/msm_gpu.c 		msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
iova              181 drivers/gpu/drm/msm/msm_gpu.h 	u64 iova;
iova              192 drivers/gpu/drm/msm/msm_gpu.h 		u64 iova;
iova               35 drivers/gpu/drm/msm/msm_gpummu.c static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
iova               39 drivers/gpu/drm/msm/msm_gpummu.c 	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
iova               64 drivers/gpu/drm/msm/msm_gpummu.c static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
iova               67 drivers/gpu/drm/msm/msm_gpummu.c 	unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
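
msm_gpummu.c turns an iova into a flat page-table index by subtracting the
aperture base and dividing by the GPUMMU page size. A sketch of that index
arithmetic (the constants here are illustrative, not the driver's):

    #include <stdint.h>

    #define GPUMMU_VA_START  0x00100000UL
    #define GPUMMU_PAGE_SIZE 0x1000UL

    /* same arithmetic as msm_gpummu_map()/msm_gpummu_unmap() above */
    static unsigned int table_index(uint64_t iova)
    {
            return (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
    }
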
iova               17 drivers/gpu/drm/msm/msm_iommu.c 		unsigned long iova, int flags, void *arg)
iova               21 drivers/gpu/drm/msm/msm_iommu.c 		return iommu->base.handler(iommu->base.arg, iova, flags);
iova               22 drivers/gpu/drm/msm/msm_iommu.c 	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);
iova               42 drivers/gpu/drm/msm/msm_iommu.c static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
iova               48 drivers/gpu/drm/msm/msm_iommu.c 	ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
iova               54 drivers/gpu/drm/msm/msm_iommu.c static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
iova               58 drivers/gpu/drm/msm/msm_iommu.c 	iommu_unmap(iommu->domain, iova, len);
iova               15 drivers/gpu/drm/msm/msm_mmu.h 	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
iova               17 drivers/gpu/drm/msm/msm_mmu.h 	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
iova               24 drivers/gpu/drm/msm/msm_mmu.h 	int (*handler)(void *arg, unsigned long iova, int flags);
iova               39 drivers/gpu/drm/msm/msm_mmu.h 		int (*handler)(void *arg, unsigned long iova, int flags))
iova              301 drivers/gpu/drm/msm/msm_rd.c 		uint64_t iova, uint32_t size)
iova              307 drivers/gpu/drm/msm/msm_rd.c 	if (iova) {
iova              308 drivers/gpu/drm/msm/msm_rd.c 		offset = iova - submit->bos[idx].iova;
iova              310 drivers/gpu/drm/msm/msm_rd.c 		iova = submit->bos[idx].iova;
iova              319 drivers/gpu/drm/msm/msm_rd.c 			(uint32_t[3]){ iova, size, iova >> 32 }, 12);
iova              388 drivers/gpu/drm/msm/msm_rd.c 		uint64_t iova = submit->cmd[i].iova;
iova              394 drivers/gpu/drm/msm/msm_rd.c 					submit->cmd[i].iova, szd * 4);
iova              407 drivers/gpu/drm/msm/msm_rd.c 				(uint32_t[3]){ iova, szd, iova >> 32 }, 12);
iova               30 drivers/gpu/drm/msm/msm_ringbuffer.c 		MSM_BO_WC, gpu->aspace, &ring->bo, &ring->iova);
iova               42 drivers/gpu/drm/msm/msm_ringbuffer.h 	uint64_t iova;
iova               55 drivers/gpu/drm/panfrost/panfrost_mmu.c 			u64 iova, size_t size)
iova               58 drivers/gpu/drm/panfrost/panfrost_mmu.c 	u64 region = iova & PAGE_MASK;
iova               84 drivers/gpu/drm/panfrost/panfrost_mmu.c 				      u64 iova, size_t size, u32 op)
iova               90 drivers/gpu/drm/panfrost/panfrost_mmu.c 		lock_region(pfdev, as_nr, iova, size);
iova              101 drivers/gpu/drm/panfrost/panfrost_mmu.c 			       u64 iova, size_t size, u32 op)
iova              106 drivers/gpu/drm/panfrost/panfrost_mmu.c 	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
iova              234 drivers/gpu/drm/panfrost/panfrost_mmu.c 				     u64 iova, size_t size)
iova              243 drivers/gpu/drm/panfrost/panfrost_mmu.c 		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
iova              249 drivers/gpu/drm/panfrost/panfrost_mmu.c 		      u64 iova, int prot, struct sg_table *sgt)
iova              254 drivers/gpu/drm/panfrost/panfrost_mmu.c 	u64 start_iova = iova;
iova              260 drivers/gpu/drm/panfrost/panfrost_mmu.c 		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
iova              263 drivers/gpu/drm/panfrost/panfrost_mmu.c 			size_t pgsize = get_pgsize(iova | paddr, len);
iova              265 drivers/gpu/drm/panfrost/panfrost_mmu.c 			ops->map(ops, iova, paddr, pgsize, prot);
iova              266 drivers/gpu/drm/panfrost/panfrost_mmu.c 			iova += pgsize;
iova              272 drivers/gpu/drm/panfrost/panfrost_mmu.c 	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
iova              308 drivers/gpu/drm/panfrost/panfrost_mmu.c 	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
iova              316 drivers/gpu/drm/panfrost/panfrost_mmu.c 		mapping->mmu->as, iova, len);
iova              320 drivers/gpu/drm/panfrost/panfrost_mmu.c 		size_t pgsize = get_pgsize(iova, len - unmapped_len);
iova              322 drivers/gpu/drm/panfrost/panfrost_mmu.c 		if (ops->iova_to_phys(ops, iova)) {
iova              323 drivers/gpu/drm/panfrost/panfrost_mmu.c 			unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
iova              326 drivers/gpu/drm/panfrost/panfrost_mmu.c 		iova += pgsize;
iova              344 drivers/gpu/drm/panfrost/panfrost_mmu.c static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
iova              350 drivers/gpu/drm/panfrost/panfrost_mmu.c static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
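
panfrost_mmu.c chooses between 4 KiB and 2 MiB mappings per chunk via
get_pgsize(), keyed on the alignment of iova | paddr and the remaining
length. A hedged reconstruction of that helper under those assumptions:

    #include <stddef.h>
    #include <stdint.h>

    #define SZ_4K 0x1000UL
    #define SZ_2M 0x200000UL

    /* largest page size usable at this address for this length */
    static size_t get_pgsize(uint64_t addr, size_t size)
    {
            if ((addr & (SZ_2M - 1)) || size < SZ_2M)
                    return SZ_4K;
            return SZ_2M;
    }
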
iova             1127 drivers/gpu/drm/tegra/drm.c 	struct iova *alloc;
iova              111 drivers/gpu/drm/tegra/drm.h void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *iova);
iova              113 drivers/gpu/drm/tegra/drm.h 		    dma_addr_t iova);
iova              161 drivers/gpu/drm/tegra/vic.c 			      dma_addr_t *iova)
iova              165 drivers/gpu/drm/tegra/vic.c 	return tegra_drm_alloc(tegra, size, iova);
iova              169 drivers/gpu/drm/tegra/vic.c 			    dma_addr_t iova, void *va)
iova              173 drivers/gpu/drm/tegra/vic.c 	return tegra_drm_free(tegra, size, va, iova);
iova               58 drivers/gpu/host1x/cdma.c 		free_iova(&host1x->iova, iova_pfn(&host1x->iova, pb->dma));
iova               74 drivers/gpu/host1x/cdma.c 	struct iova *alloc;
iova               91 drivers/gpu/host1x/cdma.c 		size = iova_align(&host1x->iova, size);
iova               98 drivers/gpu/host1x/cdma.c 		shift = iova_shift(&host1x->iova);
iova               99 drivers/gpu/host1x/cdma.c 		alloc = alloc_iova(&host1x->iova, size >> shift,
iova              106 drivers/gpu/host1x/cdma.c 		pb->dma = iova_dma_addr(&host1x->iova, alloc);
iova              127 drivers/gpu/host1x/cdma.c 	__free_iova(&host1x->iova, alloc);
iova              311 drivers/gpu/host1x/dev.c 		init_iova_domain(&host->iova, 1UL << order, start >> order);
iova              370 drivers/gpu/host1x/dev.c 		put_iova_domain(&host->iova);
iova              396 drivers/gpu/host1x/dev.c 		put_iova_domain(&host->iova);
iova              118 drivers/gpu/host1x/dev.h 	struct iova_domain iova;
iova              133 drivers/gpu/host1x/job.c 		struct iova *alloc;
iova              147 drivers/gpu/host1x/job.c 			gather_size = iova_align(&host->iova, gather_size);
iova              149 drivers/gpu/host1x/job.c 			shift = iova_shift(&host->iova);
iova              150 drivers/gpu/host1x/job.c 			alloc = alloc_iova(&host->iova, gather_size >> shift,
iova              158 drivers/gpu/host1x/job.c 					iova_dma_addr(&host->iova, alloc),
iova              161 drivers/gpu/host1x/job.c 				__free_iova(&host->iova, alloc);
iova              167 drivers/gpu/host1x/job.c 				iova_dma_addr(&host->iova, alloc);
iova              566 drivers/gpu/host1x/job.c 			free_iova(&host->iova,
iova              567 drivers/gpu/host1x/job.c 				iova_pfn(&host->iova, job->addr_phys[i]));
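
host1x carves its push-buffer and gather space out of a struct iova_domain:
iova_align() the size, alloc_iova() in domain granules, iova_dma_addr() to
get the bus address, and __free_iova()/free_iova() to release. A
kernel-context sketch of that round trip (not compilable stand-alone;
limit_pfn is left to the caller, as host1x does with its aperture end):

    #include <linux/iova.h>

    static dma_addr_t iova_alloc_sketch(struct iova_domain *iovad,
                                        size_t size, unsigned long limit_pfn)
    {
            unsigned long shift = iova_shift(iovad);
            struct iova *alloc;

            size = iova_align(iovad, size);
            alloc = alloc_iova(iovad, size >> shift, limit_pfn, true);
            if (!alloc)
                    return 0;
            return iova_dma_addr(iovad, alloc);
    }
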
iova              113 drivers/infiniband/core/rw.c 	reg->sge.addr = reg->mr->iova;
iova              439 drivers/infiniband/core/rw.c 	ctx->reg->sge.addr = ctx->reg->mr->iova;
iova             2559 drivers/infiniband/core/verbs.c 	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
iova             2209 drivers/infiniband/hw/bnxt_re/ib_verbs.c 	wqe->frmr.va = wr->mr->iova;
iova              161 drivers/infiniband/hw/cxgb3/iwch_qp.c 	wqe->fastreg.va_base_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
iova              163 drivers/infiniband/hw/cxgb3/iwch_qp.c 				cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
iova              402 drivers/infiniband/hw/cxgb4/mem.c 	mhp->ibmr.iova = mhp->attr.va_fbo;
iova              810 drivers/infiniband/hw/cxgb4/qp.c 	fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
iova              811 drivers/infiniband/hw/cxgb4/qp.c 	fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);
iova              839 drivers/infiniband/hw/cxgb4/qp.c 	wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
iova              840 drivers/infiniband/hw/cxgb4/qp.c 	wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
iova              381 drivers/infiniband/hw/efa/efa_admin_cmds_defs.h 	u64 iova;
iova              232 drivers/infiniband/hw/efa/efa_com_cmd.c 	mr_cmd.iova = params->iova;
iova              158 drivers/infiniband/hw/efa/efa_com_cmd.h 	u64 iova;
iova             1435 drivers/infiniband/hw/efa/efa_verbs.c 	params.iova = virt_addr;
iova              387 drivers/infiniband/hw/hns/hns_roce_device.h 	u64			iova; /* MR's virtual original addr */
iova              949 drivers/infiniband/hw/hns/hns_roce_device.h 				int mr_access_flags, u64 iova, u64 size,
iova             1870 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
iova             1871 drivers/infiniband/hw/hns/hns_roce_hw_v1.c 	mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
iova               89 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	rc_sq_wqe->va = cpu_to_le64(wr->mr->iova);
iova             2311 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	mpt_entry->va_l = cpu_to_le32(lower_32_bits(mr->iova));
iova             2312 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 	mpt_entry->va_h = cpu_to_le32(upper_32_bits(mr->iova));
iova             2324 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 					u32 pdn, int mr_access_flags, u64 iova,
iova             2355 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		mpt_entry->va_l = cpu_to_le32(lower_32_bits(iova));
iova             2356 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		mpt_entry->va_h = cpu_to_le32(upper_32_bits(iova));
iova             2360 drivers/infiniband/hw/hns/hns_roce_hw_v2.c 		mr->iova = iova;
iova              571 drivers/infiniband/hw/hns/hns_roce_mr.c static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
iova              584 drivers/infiniband/hw/hns/hns_roce_mr.c 	mr->iova = iova;			/* MR va starting addr */
iova             2249 drivers/infiniband/hw/i40iw/i40iw_verbs.c 			info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
iova              800 drivers/infiniband/hw/mlx4/mlx4_ib.h 			 u64 iova);
iova              442 drivers/infiniband/hw/mlx4/mr.c 	mr->ibmr.iova = virt_addr;
iova              525 drivers/infiniband/hw/mlx4/mr.c 		mmr->mmr.iova       = virt_addr;
iova              736 drivers/infiniband/hw/mlx4/mr.c 		      int npages, u64 iova)
iova              741 drivers/infiniband/hw/mlx4/mr.c 	return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
iova             3326 drivers/infiniband/hw/mlx4/qp.c 	fseg->start_addr	= cpu_to_be64(mr->ibmr.iova);
iova             1263 drivers/infiniband/hw/mlx5/devx.c 	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
iova              878 drivers/infiniband/hw/mlx5/mr.c 	mr->mmkey.iova = virt_addr;
iova              998 drivers/infiniband/hw/mlx5/mr.c 	wr.virt_addr = mr->mmkey.iova;
iova             1472 drivers/infiniband/hw/mlx5/mr.c 		mr->mmkey.iova = addr;
iova             2076 drivers/infiniband/hw/mlx5/mr.c 	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
iova             2174 drivers/infiniband/hw/mlx5/mr.c 	pi_mr->data_iova = pi_mr->ibmr.iova;
iova             2181 drivers/infiniband/hw/mlx5/mr.c 		u64 iova = pi_mr->data_iova;
iova             2193 drivers/infiniband/hw/mlx5/mr.c 		pi_mr->pi_iova = (iova & page_mask) +
iova             2195 drivers/infiniband/hw/mlx5/mr.c 				 (pi_mr->ibmr.iova & ~page_mask);
iova             2203 drivers/infiniband/hw/mlx5/mr.c 		pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
iova             2204 drivers/infiniband/hw/mlx5/mr.c 		pi_mr->ibmr.iova = iova;
iova             2242 drivers/infiniband/hw/mlx5/mr.c 	pi_mr->ibmr.iova = 0;
iova             2300 drivers/infiniband/hw/mlx5/mr.c 	ibmr->iova = 0;
iova              437 drivers/infiniband/hw/mlx5/odp.c 	mr->mmkey.iova = 0;
iova              512 drivers/infiniband/hw/mlx5/odp.c 		mtt->mmkey.iova = addr;
iova              639 drivers/infiniband/hw/mlx5/odp.c 	start_idx = (io_virt - (mr->mmkey.iova & page_mask)) >> page_shift;
iova             4358 drivers/infiniband/hw/mlx5/qp.c 	seg->start_addr = cpu_to_be64(mr->ibmr.iova);
iova              472 drivers/infiniband/hw/mthca/mthca_dev.h 		   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr);
iova              477 drivers/infiniband/hw/mthca/mthca_dev.h 			int list_len, u64 iova, u64 total_size,
iova              484 drivers/infiniband/hw/mthca/mthca_dev.h 			     int list_len, u64 iova);
iova              487 drivers/infiniband/hw/mthca/mthca_dev.h 			     int list_len, u64 iova);
iova              430 drivers/infiniband/hw/mthca/mthca_mr.c 		   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
iova              469 drivers/infiniband/hw/mthca/mthca_mr.c 	mpt_entry->start     = cpu_to_be64(iova);
iova              521 drivers/infiniband/hw/mthca/mthca_mr.c 			int list_len, u64 iova, u64 total_size,
iova              536 drivers/infiniband/hw/mthca/mthca_mr.c 	err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
iova              690 drivers/infiniband/hw/mthca/mthca_mr.c 				  int list_len, u64 iova)
iova              700 drivers/infiniband/hw/mthca/mthca_mr.c 	if (iova & page_mask)
iova              718 drivers/infiniband/hw/mthca/mthca_mr.c 			     int list_len, u64 iova)
iova              726 drivers/infiniband/hw/mthca/mthca_mr.c 	err = mthca_check_fmr(fmr, page_list, list_len, iova);
iova              746 drivers/infiniband/hw/mthca/mthca_mr.c 	mpt_entry.start  = cpu_to_be64(iova);
iova              759 drivers/infiniband/hw/mthca/mthca_mr.c 			     int list_len, u64 iova)
iova              766 drivers/infiniband/hw/mthca/mthca_mr.c 	err = mthca_check_fmr(fmr, page_list, list_len, iova);
iova              796 drivers/infiniband/hw/mthca/mthca_mr.c 	fmr->mem.arbel.mpt->start  = cpu_to_be64(iova);
iova             2064 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	fbo = mr->ibmr.iova - mr->pages[0];
iova             2066 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
iova             2067 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c 	fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
iova             3083 drivers/infiniband/hw/qedr/verbs.c 	fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
iova             3084 drivers/infiniband/hw/qedr/verbs.c 	fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
iova               57 drivers/infiniband/hw/usnic/usnic_uiom.c 				unsigned long iova, int flags,
iova               62 drivers/infiniband/hw/usnic/usnic_uiom.c 		domain, iova, flags);
iova              136 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h 	u64 iova;
iova              150 drivers/infiniband/hw/vmw_pvrdma/pvrdma_mr.c 	mr->mmr.iova = virt_addr;
iova              598 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c 	wqe_hdr->wr.fast_reg.iova_start = mr->ibmr.iova;
iova              406 drivers/infiniband/sw/rdmavt/mr.c 	mr->mr.iova = virt_addr;
iova              641 drivers/infiniband/sw/rdmavt/mr.c 	mr->mr.user_base = ibmr->iova;
iova              642 drivers/infiniband/sw/rdmavt/mr.c 	mr->mr.iova = ibmr->iova;
iova              643 drivers/infiniband/sw/rdmavt/mr.c 	mr->mr.offset = ibmr->iova - (u64)mr->mr.map[0]->segs[0].vaddr;
iova              677 drivers/infiniband/sw/rdmavt/mr.c 	mr->mr.iova = ibmr->iova;
iova              785 drivers/infiniband/sw/rdmavt/mr.c 		     int list_len, u64 iova)
iova              804 drivers/infiniband/sw/rdmavt/mr.c 	fmr->mr.user_base = iova;
iova              805 drivers/infiniband/sw/rdmavt/mr.c 	fmr->mr.iova = iova;
iova              841 drivers/infiniband/sw/rdmavt/mr.c 		fmr->mr.iova = 0;
iova             1070 drivers/infiniband/sw/rdmavt/mr.c 	off = vaddr - mr->iova;
iova             1071 drivers/infiniband/sw/rdmavt/mr.c 	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
iova               89 drivers/infiniband/sw/rdmavt/mr.h 		     int list_len, u64 iova);
iova               69 drivers/infiniband/sw/rdmavt/trace_mr.h 		__field(u64, iova)
iova               82 drivers/infiniband/sw/rdmavt/trace_mr.h 		__entry->iova = mr->iova;
iova               95 drivers/infiniband/sw/rdmavt/trace_mr.h 		__entry->iova,
iova              190 drivers/infiniband/sw/rdmavt/trace_mr.h 		__field(u64, iova)
iova              199 drivers/infiniband/sw/rdmavt/trace_mr.h 		__entry->ibmr_iova = ibmr->iova;
iova              200 drivers/infiniband/sw/rdmavt/trace_mr.h 		__entry->iova = to_imr(ibmr)->mr.iova;
iova              210 drivers/infiniband/sw/rdmavt/trace_mr.h 		__entry->iova,
iova              110 drivers/infiniband/sw/rxe/rxe_loc.h 		      u64 length, u64 iova, int access, struct ib_udata *udata,
iova              116 drivers/infiniband/sw/rxe/rxe_loc.h int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr,
iova              123 drivers/infiniband/sw/rxe/rxe_loc.h void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length);
iova              133 drivers/infiniband/sw/rxe/rxe_loc.h int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length);
iova              136 drivers/infiniband/sw/rxe/rxe_loc.h 		      u64 *page, int num_pages, u64 iova);
iova               54 drivers/infiniband/sw/rxe/rxe_mr.c int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)
iova               62 drivers/infiniband/sw/rxe/rxe_mr.c 		if (iova < mem->iova ||
iova               64 drivers/infiniband/sw/rxe/rxe_mr.c 		    iova > mem->iova + mem->length - length)
iova              161 drivers/infiniband/sw/rxe/rxe_mr.c 		      u64 length, u64 iova, int access, struct ib_udata *udata,
iova              226 drivers/infiniband/sw/rxe/rxe_mr.c 	mem->iova		= iova;
iova              265 drivers/infiniband/sw/rxe/rxe_mr.c 	u64			iova,
iova              270 drivers/infiniband/sw/rxe/rxe_mr.c 	size_t			offset = iova - mem->iova + mem->offset;
iova              303 drivers/infiniband/sw/rxe/rxe_mr.c void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)
iova              316 drivers/infiniband/sw/rxe/rxe_mr.c 		addr = (void *)(uintptr_t)iova;
iova              320 drivers/infiniband/sw/rxe/rxe_mr.c 	if (mem_check_range(mem, iova, length)) {
iova              326 drivers/infiniband/sw/rxe/rxe_mr.c 	lookup_iova(mem, iova, &m, &n, &offset);
iova              344 drivers/infiniband/sw/rxe/rxe_mr.c int rxe_mem_copy(struct rxe_mem *mem, u64 iova, void *addr, int length,
iova              364 drivers/infiniband/sw/rxe/rxe_mr.c 			addr : ((void *)(uintptr_t)iova);
iova              367 drivers/infiniband/sw/rxe/rxe_mr.c 			((void *)(uintptr_t)iova) : addr;
iova              380 drivers/infiniband/sw/rxe/rxe_mr.c 	err = mem_check_range(mem, iova, length);
iova              386 drivers/infiniband/sw/rxe/rxe_mr.c 	lookup_iova(mem, iova, &m, &i, &offset);
iova              449 drivers/infiniband/sw/rxe/rxe_mr.c 	u64			iova;
iova              501 drivers/infiniband/sw/rxe/rxe_mr.c 			iova = sge->addr + offset;
iova              503 drivers/infiniband/sw/rxe/rxe_mr.c 			err = rxe_mem_copy(mem, iova, addr, bytes, dir, crcp);
iova              592 drivers/infiniband/sw/rxe/rxe_mr.c 		      u64 *page, int num_pages, u64 iova)
iova              624 drivers/infiniband/sw/rxe/rxe_mr.c 	mem->iova	= iova;
iova              625 drivers/infiniband/sw/rxe/rxe_mr.c 	mem->va		= iova;
iova               64 drivers/infiniband/sw/rxe/rxe_req.c 			wqe->iova += qp->mtu;
iova               92 drivers/infiniband/sw/rxe/rxe_req.c 		wqe->iova = (mask & WR_ATOMIC_MASK) ?
iova              116 drivers/infiniband/sw/rxe/rxe_req.c 				wqe->iova += npsn * qp->mtu;
iova              440 drivers/infiniband/sw/rxe/rxe_req.c 		reth_set_va(pkt, wqe->iova);
iova              451 drivers/infiniband/sw/rxe/rxe_req.c 		atmeth_set_va(pkt, wqe->iova);
iova              652 drivers/infiniband/sw/rxe/rxe_req.c 			rmr->iova = wqe->wr.wr.reg.mr->iova;
iova              548 drivers/infiniband/sw/rxe/rxe_resp.c 	u64 iova = atmeth_va(pkt);
iova              558 drivers/infiniband/sw/rxe/rxe_resp.c 	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));
iova             1103 drivers/infiniband/sw/rxe/rxe_resp.c 			u64 iova = reth_va(pkt);
iova             1106 drivers/infiniband/sw/rxe/rxe_resp.c 			if (iova < res->read.va_org ||
iova             1108 drivers/infiniband/sw/rxe/rxe_resp.c 			    (iova + resid) > (res->read.va_org +
iova             1126 drivers/infiniband/sw/rxe/rxe_resp.c 			res->read.va_org = iova;
iova             1127 drivers/infiniband/sw/rxe/rxe_resp.c 			res->read.va = iova;
iova              620 drivers/infiniband/sw/rxe/rxe_verbs.c 	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
iova              928 drivers/infiniband/sw/rxe/rxe_verbs.c 				     u64 iova,
iova              946 drivers/infiniband/sw/rxe/rxe_verbs.c 	err = rxe_mem_init_user(pd, start, length, iova,
iova             1036 drivers/infiniband/sw/rxe/rxe_verbs.c 	mr->va = ibmr->iova;
iova             1037 drivers/infiniband/sw/rxe/rxe_verbs.c 	mr->iova = ibmr->iova;
iova             1041 drivers/infiniband/sw/rxe/rxe_verbs.c 	mr->offset = mr->iova & mr->page_mask;
iova              334 drivers/infiniband/sw/rxe/rxe_verbs.h 	u64			iova;
iova              959 drivers/infiniband/sw/siw/siw_qp_tx.c 	mem->va = base_mr->iova;
iova             1510 drivers/infiniband/sw/siw/siw_verbs.c 		mem->va = base_mr->iova;
iova              260 drivers/infiniband/ulp/iser/iser_memory.c 	reg->sge.addr = page_vec->fake_mr.iova;
iova              428 drivers/infiniband/ulp/iser/iser_memory.c 	sig_reg->sge.addr = mr->iova;
iova              476 drivers/infiniband/ulp/iser/iser_memory.c 	reg->sge.addr = mr->iova;
iova             1587 drivers/infiniband/ulp/srp/ib_srp.c 	srp_map_desc(state, desc->mr->iova,
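
Across the RDMA drivers above, ibmr->iova is written once when the SG list
is mapped (sg_dma_address(&sgl[0]) + sg_offset in core/verbs.c) and then
copied into each vendor's fast-register WQE, usually split into high/low
halves. A hedged kernel-context sketch of that consumer pattern (the WQE
layout and names are hypothetical; real layouts vary per HCA):

    #include <linux/types.h>

    struct fake_frmr_wqe {          /* hypothetical hardware layout */
            __be32 va_hi;
            __be32 va_lo;
    };

    static void frmr_set_va(struct fake_frmr_wqe *wqe, u64 iova)
    {
            wqe->va_hi = cpu_to_be32(iova >> 32);
            wqe->va_lo = cpu_to_be32(iova & 0xffffffff);
    }
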
iova             1333 drivers/iommu/amd_iommu.c 		dma_addr_t iova, size_t size)
iova             1339 drivers/iommu/amd_iommu.c 		domain_flush_pages(domain, iova, size);
iova             2795 drivers/iommu/amd_iommu.c 	struct iova *val;
iova             3098 drivers/iommu/amd_iommu.c static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
iova             3114 drivers/iommu/amd_iommu.c 	ret = iommu_map_page(domain, iova, paddr, page_size, prot, GFP_KERNEL);
iova             3117 drivers/iommu/amd_iommu.c 	domain_flush_np_cache(domain, iova, page_size);
iova             3122 drivers/iommu/amd_iommu.c static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
iova             3133 drivers/iommu/amd_iommu.c 	unmap_size = iommu_unmap_page(domain, iova, page_size);
iova             3140 drivers/iommu/amd_iommu.c 					  dma_addr_t iova)
iova             3147 drivers/iommu/amd_iommu.c 		return iova;
iova             3149 drivers/iommu/amd_iommu.c 	pte = fetch_pte(domain, iova, &pte_pgsize);
iova             3157 drivers/iommu/amd_iommu.c 	return (__pte & ~offset_mask) | (iova & offset_mask);
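
amd_iommu_iova_to_phys() fetches the PTE covering the iova and splices the
page frame with the intra-page offset; offset_mask depends on the page size
that PTE maps. A runnable sketch of the final composition with a
hypothetical 2 MiB PTE:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t pte_pgsize  = 1ULL << 21;        /* 2 MiB mapping */
            uint64_t offset_mask = pte_pgsize - 1;
            uint64_t pte_phys    = 0x80000000ULL;     /* frame from PTE */
            uint64_t iova        = 0x40012345ULL;

            /* prints: phys = 0x80012345 */
            uint64_t phys = (pte_phys & ~offset_mask) | (iova & offset_mask);
            printf("phys = %#llx\n", (unsigned long long)phys);
            return 0;
    }
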
iova             1853 drivers/iommu/arm-smmu-v3.c arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
iova             1873 drivers/iommu/arm-smmu-v3.c 	page_start	= iova >> inval_grain_shift;
iova             1874 drivers/iommu/arm-smmu-v3.c 	page_end	= (iova + size - 1) >> inval_grain_shift;
iova             1922 drivers/iommu/arm-smmu-v3.c 				   int ssid, unsigned long iova, size_t size)
iova             1949 drivers/iommu/arm-smmu-v3.c 	arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
iova             1987 drivers/iommu/arm-smmu-v3.c static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
iova             1993 drivers/iommu/arm-smmu-v3.c 	unsigned long start = iova, end = iova + size;
iova             2012 drivers/iommu/arm-smmu-v3.c 	while (iova < end) {
iova             2018 drivers/iommu/arm-smmu-v3.c 		cmd.tlbi.addr = iova;
iova             2020 drivers/iommu/arm-smmu-v3.c 		iova += granule;
iova             2034 drivers/iommu/arm-smmu-v3.c 					 unsigned long iova, size_t granule,
iova             2040 drivers/iommu/arm-smmu-v3.c 	iommu_iotlb_gather_add_page(domain, gather, iova, granule);
iova             2043 drivers/iommu/arm-smmu-v3.c static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
iova             2046 drivers/iommu/arm-smmu-v3.c 	arm_smmu_tlb_inv_range(iova, size, granule, false, cookie);
iova             2049 drivers/iommu/arm-smmu-v3.c static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
iova             2052 drivers/iommu/arm-smmu-v3.c 	arm_smmu_tlb_inv_range(iova, size, granule, true, cookie);
iova             2452 drivers/iommu/arm-smmu-v3.c static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
iova             2460 drivers/iommu/arm-smmu-v3.c 	return ops->map(ops, iova, paddr, size, prot);
iova             2463 drivers/iommu/arm-smmu-v3.c static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
iova             2472 drivers/iommu/arm-smmu-v3.c 	return ops->unmap(ops, iova, size, gather);
iova             2493 drivers/iommu/arm-smmu-v3.c arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
iova             2498 drivers/iommu/arm-smmu-v3.c 		return iova;
iova             2503 drivers/iommu/arm-smmu-v3.c 	return ops->iova_to_phys(ops, iova);
iova              314 drivers/iommu/arm-smmu.c static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
iova              328 drivers/iommu/arm-smmu.c 		iova = (iova >> 12) << 12;
iova              329 drivers/iommu/arm-smmu.c 		iova |= cfg->asid;
iova              331 drivers/iommu/arm-smmu.c 			arm_smmu_cb_write(smmu, idx, reg, iova);
iova              332 drivers/iommu/arm-smmu.c 			iova += granule;
iova              335 drivers/iommu/arm-smmu.c 		iova >>= 12;
iova              336 drivers/iommu/arm-smmu.c 		iova |= (u64)cfg->asid << 48;
iova              338 drivers/iommu/arm-smmu.c 			arm_smmu_cb_writeq(smmu, idx, reg, iova);
iova              339 drivers/iommu/arm-smmu.c 			iova += granule >> 12;
iova              344 drivers/iommu/arm-smmu.c static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
iova              355 drivers/iommu/arm-smmu.c 	iova >>= 12;
iova              358 drivers/iommu/arm-smmu.c 			arm_smmu_cb_writeq(smmu, idx, reg, iova);
iova              360 drivers/iommu/arm-smmu.c 			arm_smmu_cb_write(smmu, idx, reg, iova);
iova              361 drivers/iommu/arm-smmu.c 		iova += granule >> 12;
iova              371 drivers/iommu/arm-smmu.c static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
iova              383 drivers/iommu/arm-smmu.c static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
iova              389 drivers/iommu/arm-smmu.c 	ops->tlb_inv_range(iova, size, granule, false, cookie);
iova              393 drivers/iommu/arm-smmu.c static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
iova              399 drivers/iommu/arm-smmu.c 	ops->tlb_inv_range(iova, size, granule, true, cookie);
iova              404 drivers/iommu/arm-smmu.c 				  unsigned long iova, size_t granule,
iova              410 drivers/iommu/arm-smmu.c 	ops->tlb_inv_range(iova, granule, granule, true, cookie);
iova              449 drivers/iommu/arm-smmu.c 	unsigned long iova;
iova              460 drivers/iommu/arm-smmu.c 	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
iova              465 drivers/iommu/arm-smmu.c 			    fsr, iova, fsynr, cbfrsynra, idx);
iova             1162 drivers/iommu/arm-smmu.c static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
iova             1173 drivers/iommu/arm-smmu.c 	ret = ops->map(ops, iova, paddr, size, prot);
iova             1179 drivers/iommu/arm-smmu.c static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
iova             1190 drivers/iommu/arm-smmu.c 	ret = ops->unmap(ops, iova, size, gather);
iova             1222 drivers/iommu/arm-smmu.c 					      dma_addr_t iova)
iova             1240 drivers/iommu/arm-smmu.c 	va = iova & ~0xfffUL;
iova             1251 drivers/iommu/arm-smmu.c 			&iova);
iova             1252 drivers/iommu/arm-smmu.c 		return ops->iova_to_phys(ops, iova);
iova             1265 drivers/iommu/arm-smmu.c 	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
iova             1269 drivers/iommu/arm-smmu.c 					dma_addr_t iova)
iova             1275 drivers/iommu/arm-smmu.c 		return iova;
iova             1282 drivers/iommu/arm-smmu.c 		return arm_smmu_iova_to_phys_hard(domain, iova);
iova             1284 drivers/iommu/arm-smmu.c 	return ops->iova_to_phys(ops, iova);
iova              309 drivers/iommu/arm-smmu.h 	void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
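
The stage-1 invalidation above packs address and ASID into a single
register value: AArch32 replaces the low 12 bits of the iova with the
ASID, while AArch64 shifts the iova down by 12 and puts the ASID in bits
63:48. A sketch of the 64-bit encoding:

    #include <stdint.h>

    /* AArch64 TLBI VA encoding as built in arm_smmu_tlb_inv_range_s1() */
    static uint64_t tlbi_va64(uint64_t iova, uint16_t asid)
    {
            return (iova >> 12) | ((uint64_t)asid << 48);
    }
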
iova               29 drivers/iommu/dma-iommu.c 	dma_addr_t		iova;
iova              185 drivers/iommu/dma-iommu.c 		msi_page->iova = start;
iova              389 drivers/iommu/dma-iommu.c 	unsigned long shift, iova_len, iova = 0;
iova              415 drivers/iommu/dma-iommu.c 		iova = alloc_iova_fast(iovad, iova_len,
iova              418 drivers/iommu/dma-iommu.c 	if (!iova)
iova              419 drivers/iommu/dma-iommu.c 		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
iova              422 drivers/iommu/dma-iommu.c 	return (dma_addr_t)iova << shift;
iova              426 drivers/iommu/dma-iommu.c 		dma_addr_t iova, size_t size)
iova              434 drivers/iommu/dma-iommu.c 		queue_iova(iovad, iova_pfn(iovad, iova),
iova              437 drivers/iommu/dma-iommu.c 		free_iova_fast(iovad, iova_pfn(iovad, iova),
iova              470 drivers/iommu/dma-iommu.c 	dma_addr_t iova;
iova              474 drivers/iommu/dma-iommu.c 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
iova              475 drivers/iommu/dma-iommu.c 	if (!iova)
iova              478 drivers/iommu/dma-iommu.c 	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
iova              479 drivers/iommu/dma-iommu.c 		iommu_dma_free_iova(cookie, iova, size);
iova              482 drivers/iommu/dma-iommu.c 	return iova + iova_off;
iova              576 drivers/iommu/dma-iommu.c 	dma_addr_t iova;
iova              598 drivers/iommu/dma-iommu.c 	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
iova              599 drivers/iommu/dma-iommu.c 	if (!iova)
iova              613 drivers/iommu/dma-iommu.c 	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
iova              622 drivers/iommu/dma-iommu.c 	*dma_handle = iova;
iova              627 drivers/iommu/dma-iommu.c 	__iommu_dma_unmap(dev, iova, size);
iova              631 drivers/iommu/dma-iommu.c 	iommu_dma_free_iova(cookie, iova, size);
iova              818 drivers/iommu/dma-iommu.c 	dma_addr_t iova;
iova              865 drivers/iommu/dma-iommu.c 	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
iova              866 drivers/iommu/dma-iommu.c 	if (!iova)
iova              873 drivers/iommu/dma-iommu.c 	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
iova              876 drivers/iommu/dma-iommu.c 	return __finalise_sg(dev, sg, nents, iova);
iova              879 drivers/iommu/dma-iommu.c 	iommu_dma_free_iova(cookie, iova, iova_len);
iova             1143 drivers/iommu/dma-iommu.c 	dma_addr_t iova;
iova             1156 drivers/iommu/dma-iommu.c 	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
iova             1157 drivers/iommu/dma-iommu.c 	if (!iova)
iova             1160 drivers/iommu/dma-iommu.c 	if (iommu_map(domain, iova, msi_addr, size, prot))
iova             1165 drivers/iommu/dma-iommu.c 	msi_page->iova = iova;
iova             1170 drivers/iommu/dma-iommu.c 	iommu_dma_free_iova(cookie, iova, size);
iova             1216 drivers/iommu/dma-iommu.c 	msg->address_hi = upper_32_bits(msi_page->iova);
iova             1218 drivers/iommu/dma-iommu.c 	msg->address_lo += lower_32_bits(msi_page->iova);
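
The dma-iommu lines show the allocate-map-offset pattern: the allocator hands out a page-granular IOVA, the mapping starts at phys minus its sub-page offset, and the DMA handle returned to the caller is iova plus that offset. A sketch of the offset arithmetic alone, assuming a 4 KiB granule and made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    #define GRANULE 4096ull

    int main(void)
    {
            uint64_t phys     = 0x80001234;             /* example physical address */
            uint64_t iova_off = phys & (GRANULE - 1);   /* sub-page head offset     */
            uint64_t len      = 0x2000 + iova_off;      /* padded mapping length    */
            uint64_t iova     = 0xff000000;             /* pretend allocation       */

            /* map (iova -> phys - iova_off), hand iova + iova_off to the caller */
            printf("map %#llx -> %#llx len %#llx, handle %#llx\n",
                   (unsigned long long)iova,
                   (unsigned long long)(phys - iova_off),
                   (unsigned long long)((len + GRANULE - 1) & ~(GRANULE - 1)),
                   (unsigned long long)(iova + iova_off));
            return 0;
    }
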
iova               99 drivers/iommu/exynos-iommu.c #define section_offs(iova) (iova & (SECT_SIZE - 1))
iova              101 drivers/iommu/exynos-iommu.c #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
iova              103 drivers/iommu/exynos-iommu.c #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))
iova              108 drivers/iommu/exynos-iommu.c static u32 lv1ent_offset(sysmmu_iova_t iova)
iova              110 drivers/iommu/exynos-iommu.c 	return iova >> SECT_ORDER;
iova              113 drivers/iommu/exynos-iommu.c static u32 lv2ent_offset(sysmmu_iova_t iova)
iova              115 drivers/iommu/exynos-iommu.c 	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
iova              183 drivers/iommu/exynos-iommu.c static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
iova              185 drivers/iommu/exynos-iommu.c 	return pgtable + lv1ent_offset(iova);
iova              188 drivers/iommu/exynos-iommu.c static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
iova              191 drivers/iommu/exynos-iommu.c 				lv2table_base(sent)) + lv2ent_offset(iova);
iova              316 drivers/iommu/exynos-iommu.c 				sysmmu_iova_t iova, unsigned int num_inv)
iova              322 drivers/iommu/exynos-iommu.c 			writel((iova & SPAGE_MASK) | 1,
iova              324 drivers/iommu/exynos-iommu.c 			iova += SPAGE_SIZE;
iova              328 drivers/iommu/exynos-iommu.c 			writel((iova & SPAGE_MASK) | 1,
iova              331 drivers/iommu/exynos-iommu.c 			writel((iova & SPAGE_MASK),
iova              333 drivers/iommu/exynos-iommu.c 			writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
iova              515 drivers/iommu/exynos-iommu.c 					    sysmmu_iova_t iova)
iova              526 drivers/iommu/exynos-iommu.c 				__sysmmu_tlb_invalidate_entry(data, iova, 1);
iova              535 drivers/iommu/exynos-iommu.c 					sysmmu_iova_t iova, size_t size)
iova              559 drivers/iommu/exynos-iommu.c 			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
iova              919 drivers/iommu/exynos-iommu.c 		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
iova              922 drivers/iommu/exynos-iommu.c 		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
iova              968 drivers/iommu/exynos-iommu.c 				sysmmu_tlb_invalidate_flpdcache(data, iova);
iova              973 drivers/iommu/exynos-iommu.c 	return page_entry(sent, iova);
iova              977 drivers/iommu/exynos-iommu.c 			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
iova              982 drivers/iommu/exynos-iommu.c 			iova);
iova              989 drivers/iommu/exynos-iommu.c 				iova);
iova             1007 drivers/iommu/exynos-iommu.c 			sysmmu_tlb_invalidate_flpdcache(data, iova);
iova             1080 drivers/iommu/exynos-iommu.c 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
iova             1089 drivers/iommu/exynos-iommu.c 	entry = section_entry(domain->pgtable, iova);
iova             1092 drivers/iommu/exynos-iommu.c 		ret = lv1set_section(domain, entry, iova, paddr, prot,
iova             1093 drivers/iommu/exynos-iommu.c 				     &domain->lv2entcnt[lv1ent_offset(iova)]);
iova             1097 drivers/iommu/exynos-iommu.c 		pent = alloc_lv2entry(domain, entry, iova,
iova             1098 drivers/iommu/exynos-iommu.c 				      &domain->lv2entcnt[lv1ent_offset(iova)]);
iova             1104 drivers/iommu/exynos-iommu.c 				       &domain->lv2entcnt[lv1ent_offset(iova)]);
iova             1109 drivers/iommu/exynos-iommu.c 			__func__, ret, size, iova);
iova             1117 drivers/iommu/exynos-iommu.c 					      sysmmu_iova_t iova, size_t size)
iova             1125 drivers/iommu/exynos-iommu.c 		sysmmu_tlb_invalidate_entry(data, iova, size);
iova             1135 drivers/iommu/exynos-iommu.c 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
iova             1144 drivers/iommu/exynos-iommu.c 	ent = section_entry(domain->pgtable, iova);
iova             1166 drivers/iommu/exynos-iommu.c 	ent = page_entry(ent, iova);
iova             1176 drivers/iommu/exynos-iommu.c 		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
iova             1194 drivers/iommu/exynos-iommu.c 	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
iova             1198 drivers/iommu/exynos-iommu.c 	exynos_iommu_tlb_invalidate_entry(domain, iova, size);
iova             1205 drivers/iommu/exynos-iommu.c 		__func__, size, iova, err_pgsize);
iova             1211 drivers/iommu/exynos-iommu.c 					  dma_addr_t iova)
iova             1220 drivers/iommu/exynos-iommu.c 	entry = section_entry(domain->pgtable, iova);
iova             1223 drivers/iommu/exynos-iommu.c 		phys = section_phys(entry) + section_offs(iova);
iova             1225 drivers/iommu/exynos-iommu.c 		entry = page_entry(entry, iova);
iova             1228 drivers/iommu/exynos-iommu.c 			phys = lpage_phys(entry) + lpage_offs(iova);
iova             1230 drivers/iommu/exynos-iommu.c 			phys = spage_phys(entry) + spage_offs(iova);
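
The exynos-iommu lines implement a classic two-level walk: the lv1 index is the IOVA shifted by the section order, the lv2 index is the page number masked to the table size, and the remainder is the in-page offset. A sketch of the index math, assuming the usual geometry of 1 MiB sections, 4 KiB pages and 256 lv2 entries:

    #include <stdint.h>
    #include <stdio.h>

    #define SECT_ORDER      20      /* 1 MiB sections (assumed) */
    #define SPAGE_ORDER     12      /* 4 KiB pages (assumed)    */
    #define NUM_LV2ENTRIES  256

    static uint32_t lv1ent_offset(uint32_t iova) { return iova >> SECT_ORDER; }
    static uint32_t lv2ent_offset(uint32_t iova)
    {
            return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
    }

    int main(void)
    {
            uint32_t iova = 0x12345678;

            printf("lv1 %u, lv2 %u, page offset %#x\n",
                   lv1ent_offset(iova), lv2ent_offset(iova),
                   iova & ((1u << SPAGE_ORDER) - 1));
            return 0;
    }
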
iova               57 drivers/iommu/fsl_pamu_domain.c static phys_addr_t get_phys_addr(struct fsl_dma_domain *dma_domain, dma_addr_t iova)
iova               76 drivers/iommu/fsl_pamu_domain.c 		subwin_iova = iova & ~(subwin_size - 1);
iova               82 drivers/iommu/fsl_pamu_domain.c 		return win_ptr->paddr + (iova & (win_ptr->size - 1));
iova              275 drivers/iommu/fsl_pamu_domain.c static int check_size(u64 size, dma_addr_t iova)
iova              287 drivers/iommu/fsl_pamu_domain.c 	if (iova & (size - 1)) {
iova              380 drivers/iommu/fsl_pamu_domain.c 					 dma_addr_t iova)
iova              384 drivers/iommu/fsl_pamu_domain.c 	if (iova < domain->geometry.aperture_start ||
iova              385 drivers/iommu/fsl_pamu_domain.c 	    iova > domain->geometry.aperture_end)
iova              388 drivers/iommu/fsl_pamu_domain.c 	return get_phys_addr(dma_domain, iova);
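
get_phys_addr() above resolves an IOVA through a DMA window whose size is a power of two, so the window-relative offset is a single mask: paddr + (iova & (size - 1)). A minimal sketch with an invented window:

    #include <stdint.h>
    #include <stdio.h>

    struct dma_window { uint64_t paddr, size; };   /* size must be a power of two */

    int main(void)
    {
            struct dma_window win = { 0x40000000, 1ull << 28 };  /* 256 MiB window */
            uint64_t iova = 0x00345678;

            printf("phys %#llx\n",
                   (unsigned long long)(win.paddr + (iova & (win.size - 1))));
            return 0;
    }
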
iova              352 drivers/iommu/intel-iommu.c 					    dma_addr_t iova);
iova             1792 drivers/iommu/intel-iommu.c 	struct iova *iova;
iova             1801 drivers/iommu/intel-iommu.c 	iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START),
iova             1803 drivers/iommu/intel-iommu.c 	if (!iova) {
iova             1816 drivers/iommu/intel-iommu.c 			iova = reserve_iova(&reserved_iova_list,
iova             1819 drivers/iommu/intel-iommu.c 			if (!iova) {
iova             4672 drivers/iommu/intel-iommu.c 			struct iova *iova;
iova             4677 drivers/iommu/intel-iommu.c 			iova = find_iova(&si_domain->iovad, start_vpfn);
iova             4678 drivers/iommu/intel-iommu.c 			if (iova == NULL) {
iova             4684 drivers/iommu/intel-iommu.c 			iova = split_and_remove_iova(&si_domain->iovad, iova,
iova             4686 drivers/iommu/intel-iommu.c 			if (iova == NULL) {
iova             4692 drivers/iommu/intel-iommu.c 			freelist = domain_unmap(si_domain, iova->pfn_lo,
iova             4693 drivers/iommu/intel-iommu.c 					       iova->pfn_hi);
iova             4698 drivers/iommu/intel-iommu.c 					iova->pfn_lo, iova_size(iova),
iova             4703 drivers/iommu/intel-iommu.c 			start_vpfn = iova->pfn_hi + 1;
iova             4704 drivers/iommu/intel-iommu.c 			free_iova_mem(iova);
iova             5440 drivers/iommu/intel-iommu.c 			   unsigned long iova, phys_addr_t hpa,
iova             5455 drivers/iommu/intel-iommu.c 	max_addr = iova + size;
iova             5472 drivers/iommu/intel-iommu.c 	ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT,
iova             5478 drivers/iommu/intel-iommu.c 				unsigned long iova, size_t size,
iova             5489 drivers/iommu/intel-iommu.c 	BUG_ON(!pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level));
iova             5494 drivers/iommu/intel-iommu.c 	start_pfn = iova >> VTD_PAGE_SHIFT;
iova             5495 drivers/iommu/intel-iommu.c 	last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
iova             5507 drivers/iommu/intel-iommu.c 	if (dmar_domain->max_addr == iova + size)
iova             5508 drivers/iommu/intel-iommu.c 		dmar_domain->max_addr = iova;
iova             5514 drivers/iommu/intel-iommu.c 					    dma_addr_t iova)
iova             5521 drivers/iommu/intel-iommu.c 	pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
iova             5524 drivers/iommu/intel-iommu.c 			(iova & (BIT_MASK(level_to_offset_bits(level) +
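
The intel-iommu lines convert byte ranges into inclusive PFN ranges before walking or unmapping: start_pfn = iova >> VTD_PAGE_SHIFT, last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT. A sketch of that conversion with example values:

    #include <stdint.h>
    #include <stdio.h>

    #define VTD_PAGE_SHIFT 12      /* 4 KiB VT-d page */

    int main(void)
    {
            uint64_t iova = 0xfeed1000, size = 0x3400;
            uint64_t start_pfn = iova >> VTD_PAGE_SHIFT;
            uint64_t last_pfn  = (iova + size - 1) >> VTD_PAGE_SHIFT;

            /* domain_unmap() walks [start_pfn, last_pfn] inclusive */
            printf("pfns %#llx..%#llx (%llu pages)\n",
                   (unsigned long long)start_pfn, (unsigned long long)last_pfn,
                   (unsigned long long)(last_pfn - start_pfn + 1));
            return 0;
    }
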
iova              413 drivers/iommu/io-pgtable-arm-v7s.c 			    unsigned long iova, phys_addr_t paddr, int prot,
iova              429 drivers/iommu/io-pgtable-arm-v7s.c 			tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);
iova              430 drivers/iommu/io-pgtable-arm-v7s.c 			if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
iova              473 drivers/iommu/io-pgtable-arm-v7s.c static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
iova              482 drivers/iommu/io-pgtable-arm-v7s.c 	ptep += ARM_V7S_LVL_IDX(iova, lvl);
iova              486 drivers/iommu/io-pgtable-arm-v7s.c 		return arm_v7s_init_pte(data, iova, paddr, prot,
iova              517 drivers/iommu/io-pgtable-arm-v7s.c 	return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
iova              520 drivers/iommu/io-pgtable-arm-v7s.c static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
iova              531 drivers/iommu/io-pgtable-arm-v7s.c 	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
iova              535 drivers/iommu/io-pgtable-arm-v7s.c 	ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
iova              541 drivers/iommu/io-pgtable-arm-v7s.c 		io_pgtable_tlb_flush_walk(iop, iova, size,
iova              568 drivers/iommu/io-pgtable-arm-v7s.c 					unsigned long iova, int idx, int lvl,
iova              589 drivers/iommu/io-pgtable-arm-v7s.c 	io_pgtable_tlb_flush_leaf(iop, iova, size, size);
iova              595 drivers/iommu/io-pgtable-arm-v7s.c 				      unsigned long iova, size_t size,
iova              609 drivers/iommu/io-pgtable-arm-v7s.c 	unmap_idx = ARM_V7S_LVL_IDX(iova, 2);
iova              631 drivers/iommu/io-pgtable-arm-v7s.c 		return __arm_v7s_unmap(data, gather, iova, size, 2, tablep);
iova              634 drivers/iommu/io-pgtable-arm-v7s.c 	io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
iova              640 drivers/iommu/io-pgtable-arm-v7s.c 			      unsigned long iova, size_t size, int lvl,
iova              651 drivers/iommu/io-pgtable-arm-v7s.c 	idx = ARM_V7S_LVL_IDX(iova, lvl);
iova              673 drivers/iommu/io-pgtable-arm-v7s.c 		pte[0] = arm_v7s_split_cont(data, iova, idx, lvl, ptep);
iova              686 drivers/iommu/io-pgtable-arm-v7s.c 				io_pgtable_tlb_flush_walk(iop, iova, blk_size,
iova              698 drivers/iommu/io-pgtable-arm-v7s.c 				io_pgtable_tlb_add_page(iop, gather, iova, blk_size);
iova              700 drivers/iommu/io-pgtable-arm-v7s.c 			iova += blk_size;
iova              708 drivers/iommu/io-pgtable-arm-v7s.c 		return arm_v7s_split_blk_unmap(data, gather, iova, size, pte[0],
iova              714 drivers/iommu/io-pgtable-arm-v7s.c 	return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep);
iova              717 drivers/iommu/io-pgtable-arm-v7s.c static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
iova              722 drivers/iommu/io-pgtable-arm-v7s.c 	if (WARN_ON(upper_32_bits(iova)))
iova              725 drivers/iommu/io-pgtable-arm-v7s.c 	return __arm_v7s_unmap(data, gather, iova, size, 1, data->pgd);
iova              729 drivers/iommu/io-pgtable-arm-v7s.c 					unsigned long iova)
iova              737 drivers/iommu/io-pgtable-arm-v7s.c 		ptep += ARM_V7S_LVL_IDX(iova, ++lvl);
iova              748 drivers/iommu/io-pgtable-arm-v7s.c 	return iopte_to_paddr(pte, lvl, &data->iop.cfg) | (iova & ~mask);
iova              856 drivers/iommu/io-pgtable-arm-v7s.c static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
iova              864 drivers/iommu/io-pgtable-arm-v7s.c 			       unsigned long iova, size_t granule, void *cookie)
iova              866 drivers/iommu/io-pgtable-arm-v7s.c 	dummy_tlb_flush(iova, granule, granule, cookie);
iova              893 drivers/iommu/io-pgtable-arm-v7s.c 	unsigned int iova, size, iova_start;
iova              922 drivers/iommu/io-pgtable-arm-v7s.c 	iova = 0;
iova              925 drivers/iommu/io-pgtable-arm-v7s.c 		if (ops->map(ops, iova, iova, size, IOMMU_READ |
iova              932 drivers/iommu/io-pgtable-arm-v7s.c 		if (!ops->map(ops, iova, iova + size, size,
iova              936 drivers/iommu/io-pgtable-arm-v7s.c 		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
iova              939 drivers/iommu/io-pgtable-arm-v7s.c 		iova += SZ_16M;
iova              962 drivers/iommu/io-pgtable-arm-v7s.c 	iova = 0;
iova              966 drivers/iommu/io-pgtable-arm-v7s.c 		if (ops->unmap(ops, iova, size, NULL) != size)
iova              969 drivers/iommu/io-pgtable-arm-v7s.c 		if (ops->iova_to_phys(ops, iova + 42))
iova              973 drivers/iommu/io-pgtable-arm-v7s.c 		if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
iova              976 drivers/iommu/io-pgtable-arm-v7s.c 		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
iova              979 drivers/iommu/io-pgtable-arm-v7s.c 		iova += SZ_16M;
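
The v7s selftest lines probe correctness by identity-mapping a region and asking for the physical address 42 bytes past each mapped IOVA. The same probe against a mock flat table (an illustration, not the real v7s descriptor format):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGES 16
    static uint64_t table[PAGES];           /* slot per 4 KiB page, 0 = unmapped */

    static int map(uint64_t iova, uint64_t paddr)
    {
            if (table[iova >> 12])
                    return -1;              /* refuse to overwrite, like ops->map */
            table[iova >> 12] = paddr | 1;  /* low bit = valid                    */
            return 0;
    }

    static uint64_t iova_to_phys(uint64_t iova)
    {
            uint64_t pte = table[iova >> 12];

            return pte ? (pte & ~1ull) + (iova & 0xfff) : 0;
    }

    int main(void)
    {
            map(0x1000, 0x1000);            /* identity mapping */
            printf("probe at +42: %s\n",
                   iova_to_phys(0x1000 + 42) == 0x1000 + 42 ? "ok" : "failed");
            return 0;
    }
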
iova              296 drivers/iommu/io-pgtable-arm.c 			       unsigned long iova, size_t size, int lvl,
iova              322 drivers/iommu/io-pgtable-arm.c 			     unsigned long iova, phys_addr_t paddr,
iova              340 drivers/iommu/io-pgtable-arm.c 		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
iova              341 drivers/iommu/io-pgtable-arm.c 		if (__arm_lpae_unmap(data, NULL, iova, sz, lvl, tblp) != sz) {
iova              382 drivers/iommu/io-pgtable-arm.c static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
iova              392 drivers/iommu/io-pgtable-arm.c 	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
iova              396 drivers/iommu/io-pgtable-arm.c 		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
iova              425 drivers/iommu/io-pgtable-arm.c 	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
iova              478 drivers/iommu/io-pgtable-arm.c static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
iova              490 drivers/iommu/io-pgtable-arm.c 	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
iova              495 drivers/iommu/io-pgtable-arm.c 	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
iova              546 drivers/iommu/io-pgtable-arm.c 				       unsigned long iova, size_t size,
iova              565 drivers/iommu/io-pgtable-arm.c 		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);
iova              591 drivers/iommu/io-pgtable-arm.c 		io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
iova              595 drivers/iommu/io-pgtable-arm.c 	return __arm_lpae_unmap(data, gather, iova, size, lvl, tablep);
iova              600 drivers/iommu/io-pgtable-arm.c 			       unsigned long iova, size_t size, int lvl,
iova              610 drivers/iommu/io-pgtable-arm.c 	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
iova              621 drivers/iommu/io-pgtable-arm.c 			io_pgtable_tlb_flush_walk(iop, iova, size,
iova              633 drivers/iommu/io-pgtable-arm.c 			io_pgtable_tlb_add_page(iop, gather, iova, size);
iova              642 drivers/iommu/io-pgtable-arm.c 		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
iova              648 drivers/iommu/io-pgtable-arm.c 	return __arm_lpae_unmap(data, gather, iova, size, lvl + 1, ptep);
iova              651 drivers/iommu/io-pgtable-arm.c static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
iova              658 drivers/iommu/io-pgtable-arm.c 	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
iova              661 drivers/iommu/io-pgtable-arm.c 	return __arm_lpae_unmap(data, gather, iova, size, lvl, ptep);
iova              665 drivers/iommu/io-pgtable-arm.c 					 unsigned long iova)
iova              677 drivers/iommu/io-pgtable-arm.c 		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
iova              696 drivers/iommu/io-pgtable-arm.c 	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
iova              697 drivers/iommu/io-pgtable-arm.c 	return iopte_to_paddr(pte, data) | iova;
iova             1107 drivers/iommu/io-pgtable-arm.c static void dummy_tlb_flush(unsigned long iova, size_t size, size_t granule,
iova             1115 drivers/iommu/io-pgtable-arm.c 			       unsigned long iova, size_t granule, void *cookie)
iova             1117 drivers/iommu/io-pgtable-arm.c 	dummy_tlb_flush(iova, granule, granule, cookie);
iova             1154 drivers/iommu/io-pgtable-arm.c 	unsigned long iova;
iova             1184 drivers/iommu/io-pgtable-arm.c 		iova = 0;
iova             1188 drivers/iommu/io-pgtable-arm.c 			if (ops->map(ops, iova, iova, size, IOMMU_READ |
iova             1195 drivers/iommu/io-pgtable-arm.c 			if (!ops->map(ops, iova, iova + size, size,
iova             1199 drivers/iommu/io-pgtable-arm.c 			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
iova             1202 drivers/iommu/io-pgtable-arm.c 			iova += SZ_1G;
iova             1218 drivers/iommu/io-pgtable-arm.c 		iova = 0;
iova             1222 drivers/iommu/io-pgtable-arm.c 			if (ops->unmap(ops, iova, size, NULL) != size)
iova             1225 drivers/iommu/io-pgtable-arm.c 			if (ops->iova_to_phys(ops, iova + 42))
iova             1229 drivers/iommu/io-pgtable-arm.c 			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
iova             1232 drivers/iommu/io-pgtable-arm.c 			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
iova             1235 drivers/iommu/io-pgtable-arm.c 			iova += SZ_1G;
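
The last two io-pgtable-arm lines show how a physical address is composed once the walk stops on a leaf: keep the IOVA bits below the block size of that level and OR them onto the PTE's output address. A one-shot sketch assuming a 2 MiB block:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t block_size = 2ull << 20;       /* level-2 block (assumed) */
            uint64_t pte_paddr  = 0x80000000;       /* from the leaf PTE       */
            uint64_t iova       = 0x12345678;

            printf("phys %#llx\n",
                   (unsigned long long)(pte_paddr | (iova & (block_size - 1))));
            return 0;
    }
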
iova             1817 drivers/iommu/iommu.c phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
iova             1822 drivers/iommu/iommu.c 	return domain->ops->iova_to_phys(domain, iova);
iova             1858 drivers/iommu/iommu.c int iommu_map(struct iommu_domain *domain, unsigned long iova,
iova             1862 drivers/iommu/iommu.c 	unsigned long orig_iova = iova;
iova             1883 drivers/iommu/iommu.c 	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
iova             1885 drivers/iommu/iommu.c 		       iova, &paddr, size, min_pagesz);
iova             1889 drivers/iommu/iommu.c 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
iova             1892 drivers/iommu/iommu.c 		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
iova             1895 drivers/iommu/iommu.c 			 iova, &paddr, pgsize);
iova             1897 drivers/iommu/iommu.c 		ret = ops->map(domain, iova, paddr, pgsize, prot);
iova             1901 drivers/iommu/iommu.c 		iova += pgsize;
iova             1920 drivers/iommu/iommu.c 			    unsigned long iova, size_t size,
iova             1925 drivers/iommu/iommu.c 	unsigned long orig_iova = iova;
iova             1943 drivers/iommu/iommu.c 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
iova             1945 drivers/iommu/iommu.c 		       iova, size, min_pagesz);
iova             1949 drivers/iommu/iommu.c 	pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
iova             1956 drivers/iommu/iommu.c 		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
iova             1958 drivers/iommu/iommu.c 		unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
iova             1963 drivers/iommu/iommu.c 			 iova, unmapped_page);
iova             1965 drivers/iommu/iommu.c 		iova += unmapped_page;
iova             1974 drivers/iommu/iommu.c 		   unsigned long iova, size_t size)
iova             1980 drivers/iommu/iommu.c 	ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
iova             1988 drivers/iommu/iommu.c 			unsigned long iova, size_t size,
iova             1991 drivers/iommu/iommu.c 	return __iommu_unmap(domain, iova, size, iotlb_gather);
iova             1995 drivers/iommu/iommu.c size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
iova             2007 drivers/iommu/iommu.c 			ret = iommu_map(domain, iova + mapped, start, len, prot);
iova             2030 drivers/iommu/iommu.c 	iommu_unmap(domain, iova, mapped);
iova             2082 drivers/iommu/iommu.c 		       unsigned long iova, int flags)
iova             2091 drivers/iommu/iommu.c 		ret = domain->handler(domain, dev, iova, flags,
iova             2094 drivers/iommu/iommu.c 	trace_io_page_fault(dev, iova, flags);
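
The core iommu.c paths reject mis-aligned requests by OR-ing iova, paddr and size together against the minimum page size, then map or unmap in per-page-size chunks. A sketch of that loop with page-size selection collapsed to one fixed size:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN_PAGESZ 4096u

    static int map_chunked(uint64_t iova, uint64_t paddr, uint64_t size)
    {
            if ((iova | paddr | size) & (MIN_PAGESZ - 1))
                    return -1;              /* unaligned: the -EINVAL case above */

            while (size) {
                    printf("map %#llx -> %#llx len %#x\n",
                           (unsigned long long)iova,
                           (unsigned long long)paddr, MIN_PAGESZ);
                    iova += MIN_PAGESZ;
                    paddr += MIN_PAGESZ;
                    size -= MIN_PAGESZ;
            }
            return 0;
    }

    int main(void) { return map_chunked(0x10000, 0x40000, 0x3000); }
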
iova              126 drivers/iommu/iova.c __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
iova              135 drivers/iommu/iova.c __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
iova              137 drivers/iommu/iova.c 	struct iova *cached_iova;
iova              139 drivers/iommu/iova.c 	cached_iova = rb_entry(iovad->cached32_node, struct iova, node);
iova              147 drivers/iommu/iova.c 	cached_iova = rb_entry(iovad->cached_node, struct iova, node);
iova              154 drivers/iommu/iova.c iova_insert_rbtree(struct rb_root *root, struct iova *iova,
iova              162 drivers/iommu/iova.c 		struct iova *this = rb_entry(*new, struct iova, node);
iova              166 drivers/iommu/iova.c 		if (iova->pfn_lo < this->pfn_lo)
iova              168 drivers/iommu/iova.c 		else if (iova->pfn_lo > this->pfn_lo)
iova              176 drivers/iommu/iova.c 	rb_link_node(&iova->node, parent, new);
iova              177 drivers/iommu/iova.c 	rb_insert_color(&iova->node, root);
iova              182 drivers/iommu/iova.c 			struct iova *new, bool size_aligned)
iova              185 drivers/iommu/iova.c 	struct iova *curr_iova;
iova              200 drivers/iommu/iova.c 	curr_iova = rb_entry(curr, struct iova, node);
iova              206 drivers/iommu/iova.c 		curr_iova = rb_entry(curr, struct iova, node);
iova              234 drivers/iommu/iova.c struct iova *alloc_iova_mem(void)
iova              240 drivers/iommu/iova.c void free_iova_mem(struct iova *iova)
iova              242 drivers/iommu/iova.c 	if (iova->pfn_lo != IOVA_ANCHOR)
iova              243 drivers/iommu/iova.c 		kmem_cache_free(iova_cache, iova);
iova              252 drivers/iommu/iova.c 			"iommu_iova", sizeof(struct iova), 0,
iova              293 drivers/iommu/iova.c struct iova *
iova              298 drivers/iommu/iova.c 	struct iova *new_iova;
iova              317 drivers/iommu/iova.c static struct iova *
iova              325 drivers/iommu/iova.c 		struct iova *iova = rb_entry(node, struct iova, node);
iova              327 drivers/iommu/iova.c 		if (pfn < iova->pfn_lo)
iova              329 drivers/iommu/iova.c 		else if (pfn > iova->pfn_hi)
iova              332 drivers/iommu/iova.c 			return iova;	/* pfn falls within iova's range */
iova              338 drivers/iommu/iova.c static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
iova              341 drivers/iommu/iova.c 	__cached_rbnode_delete_update(iovad, iova);
iova              342 drivers/iommu/iova.c 	rb_erase(&iova->node, &iovad->rbroot);
iova              343 drivers/iommu/iova.c 	free_iova_mem(iova);
iova              353 drivers/iommu/iova.c struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
iova              356 drivers/iommu/iova.c 	struct iova *iova;
iova              360 drivers/iommu/iova.c 	iova = private_find_iova(iovad, pfn);
iova              362 drivers/iommu/iova.c 	return iova;
iova              373 drivers/iommu/iova.c __free_iova(struct iova_domain *iovad, struct iova *iova)
iova              378 drivers/iommu/iova.c 	private_free_iova(iovad, iova);
iova              393 drivers/iommu/iova.c 	struct iova *iova = find_iova(iovad, pfn);
iova              395 drivers/iommu/iova.c 	if (iova)
iova              396 drivers/iommu/iova.c 		__free_iova(iovad, iova);
iova              416 drivers/iommu/iova.c 	struct iova *new_iova;
iova              595 drivers/iommu/iova.c 	struct iova *iova, *tmp;
iova              599 drivers/iommu/iova.c 	rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
iova              600 drivers/iommu/iova.c 		free_iova_mem(iova);
iova              608 drivers/iommu/iova.c 	struct iova *iova = rb_entry(node, struct iova, node);
iova              610 drivers/iommu/iova.c 	if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
iova              615 drivers/iommu/iova.c static inline struct iova *
iova              618 drivers/iommu/iova.c 	struct iova *iova;
iova              620 drivers/iommu/iova.c 	iova = alloc_iova_mem();
iova              621 drivers/iommu/iova.c 	if (iova) {
iova              622 drivers/iommu/iova.c 		iova->pfn_lo = pfn_lo;
iova              623 drivers/iommu/iova.c 		iova->pfn_hi = pfn_hi;
iova              626 drivers/iommu/iova.c 	return iova;
iova              629 drivers/iommu/iova.c static struct iova *
iova              633 drivers/iommu/iova.c 	struct iova *iova;
iova              635 drivers/iommu/iova.c 	iova = alloc_and_init_iova(pfn_lo, pfn_hi);
iova              636 drivers/iommu/iova.c 	if (iova)
iova              637 drivers/iommu/iova.c 		iova_insert_rbtree(&iovad->rbroot, iova, NULL);
iova              639 drivers/iommu/iova.c 	return iova;
iova              643 drivers/iommu/iova.c __adjust_overlap_range(struct iova *iova,
iova              646 drivers/iommu/iova.c 	if (*pfn_lo < iova->pfn_lo)
iova              647 drivers/iommu/iova.c 		iova->pfn_lo = *pfn_lo;
iova              648 drivers/iommu/iova.c 	if (*pfn_hi > iova->pfn_hi)
iova              649 drivers/iommu/iova.c 		*pfn_lo = iova->pfn_hi + 1;
iova              660 drivers/iommu/iova.c struct iova *
iova              666 drivers/iommu/iova.c 	struct iova *iova;
iova              676 drivers/iommu/iova.c 			iova = rb_entry(node, struct iova, node);
iova              677 drivers/iommu/iova.c 			__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
iova              678 drivers/iommu/iova.c 			if ((pfn_lo >= iova->pfn_lo) &&
iova              679 drivers/iommu/iova.c 				(pfn_hi <= iova->pfn_hi))
iova              690 drivers/iommu/iova.c 	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
iova              694 drivers/iommu/iova.c 	return iova;
iova              713 drivers/iommu/iova.c 		struct iova *iova = rb_entry(node, struct iova, node);
iova              714 drivers/iommu/iova.c 		struct iova *new_iova;
iova              716 drivers/iommu/iova.c 		if (iova->pfn_lo == IOVA_ANCHOR)
iova              719 drivers/iommu/iova.c 		new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
iova              722 drivers/iommu/iova.c 				iova->pfn_lo, iova->pfn_lo);
iova              728 drivers/iommu/iova.c struct iova *
iova              729 drivers/iommu/iova.c split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
iova              733 drivers/iommu/iova.c 	struct iova *prev = NULL, *next = NULL;
iova              736 drivers/iommu/iova.c 	if (iova->pfn_lo < pfn_lo) {
iova              737 drivers/iommu/iova.c 		prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1);
iova              741 drivers/iommu/iova.c 	if (iova->pfn_hi > pfn_hi) {
iova              742 drivers/iommu/iova.c 		next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi);
iova              747 drivers/iommu/iova.c 	__cached_rbnode_delete_update(iovad, iova);
iova              748 drivers/iommu/iova.c 	rb_erase(&iova->node, &iovad->rbroot);
iova              752 drivers/iommu/iova.c 		iova->pfn_lo = pfn_lo;
iova              756 drivers/iommu/iova.c 		iova->pfn_hi = pfn_hi;
iova              760 drivers/iommu/iova.c 	return iova;
iova              812 drivers/iommu/iova.c 		struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
iova              814 drivers/iommu/iova.c 		BUG_ON(!iova);
iova              815 drivers/iommu/iova.c 		private_free_iova(iovad, iova);
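
private_find_iova() above is a three-way comparison against [pfn_lo, pfn_hi] ranges in an rbtree; the same search over a sorted array shows the comparison logic without the tree plumbing:

    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t pfn_lo, pfn_hi; };

    static const struct range *find_range(const struct range *r, int n,
                                          uint64_t pfn)
    {
            int lo = 0, hi = n - 1;

            while (lo <= hi) {
                    int mid = lo + (hi - lo) / 2;

                    if (pfn < r[mid].pfn_lo)
                            hi = mid - 1;
                    else if (pfn > r[mid].pfn_hi)
                            lo = mid + 1;
                    else
                            return &r[mid];  /* pfn falls within this range */
            }
            return 0;
    }

    int main(void)
    {
            static const struct range rs[] = { { 0x10, 0x1f }, { 0x40, 0x7f } };
            const struct range *r = find_range(rs, 2, 0x44);

            if (r)
                    printf("pfn in [%#llx,%#llx]\n",
                           (unsigned long long)r->pfn_lo,
                           (unsigned long long)r->pfn_hi);
            return 0;
    }
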
iova              364 drivers/iommu/ipmmu-vmsa.c static void ipmmu_tlb_flush(unsigned long iova, size_t size,
iova              540 drivers/iommu/ipmmu-vmsa.c 	unsigned long iova;
iova              547 drivers/iommu/ipmmu-vmsa.c 	iova = ipmmu_ctx_read_root(domain, IMELAR);
iova              549 drivers/iommu/ipmmu-vmsa.c 		iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
iova              562 drivers/iommu/ipmmu-vmsa.c 				    iova);
iova              565 drivers/iommu/ipmmu-vmsa.c 				    iova);
iova              576 drivers/iommu/ipmmu-vmsa.c 	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
iova              581 drivers/iommu/ipmmu-vmsa.c 			    status, iova);
iova              726 drivers/iommu/ipmmu-vmsa.c static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
iova              734 drivers/iommu/ipmmu-vmsa.c 	return domain->iop->map(domain->iop, iova, paddr, size, prot);
iova              737 drivers/iommu/ipmmu-vmsa.c static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
iova              742 drivers/iommu/ipmmu-vmsa.c 	return domain->iop->unmap(domain->iop, iova, size, gather);
iova              760 drivers/iommu/ipmmu-vmsa.c 				      dma_addr_t iova)
iova              766 drivers/iommu/ipmmu-vmsa.c 	return domain->iop->iova_to_phys(domain->iop, iova);
iova              139 drivers/iommu/msm_iommu.c static void __flush_iotlb_range(unsigned long iova, size_t size,
iova              156 drivers/iommu/msm_iommu.c 				iova &= TLBIVA_VA;
iova              157 drivers/iommu/msm_iommu.c 				iova |= GET_CONTEXTIDR_ASID(iommu->base,
iova              159 drivers/iommu/msm_iommu.c 				SET_TLBIVA(iommu->base, master->num, iova);
iova              160 drivers/iommu/msm_iommu.c 				iova += granule;
iova              171 drivers/iommu/msm_iommu.c static void __flush_iotlb_walk(unsigned long iova, size_t size,
iova              174 drivers/iommu/msm_iommu.c 	__flush_iotlb_range(iova, size, granule, false, cookie);
iova              177 drivers/iommu/msm_iommu.c static void __flush_iotlb_leaf(unsigned long iova, size_t size,
iova              180 drivers/iommu/msm_iommu.c 	__flush_iotlb_range(iova, size, granule, true, cookie);
iova              184 drivers/iommu/msm_iommu.c 			       unsigned long iova, size_t granule, void *cookie)
iova              186 drivers/iommu/msm_iommu.c 	__flush_iotlb_range(iova, granule, granule, true, cookie);
iova              506 drivers/iommu/msm_iommu.c static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
iova              514 drivers/iommu/msm_iommu.c 	ret = priv->iop->map(priv->iop, iova, pa, len, prot);
iova              520 drivers/iommu/msm_iommu.c static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
iova              527 drivers/iommu/msm_iommu.c 	len = priv->iop->unmap(priv->iop, iova, len, gather);
iova              176 drivers/iommu/mtk_iommu.c static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
iova              186 drivers/iommu/mtk_iommu.c 		writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
iova              187 drivers/iommu/mtk_iommu.c 		writel_relaxed(iova + size - 1,
iova              219 drivers/iommu/mtk_iommu.c static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size,
iova              226 drivers/iommu/mtk_iommu.c 	mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie);
iova              231 drivers/iommu/mtk_iommu.c static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
iova              238 drivers/iommu/mtk_iommu.c 	mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie);
iova              244 drivers/iommu/mtk_iommu.c 					    unsigned long iova, size_t granule,
iova              251 drivers/iommu/mtk_iommu.c 	mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
iova              429 drivers/iommu/mtk_iommu.c static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
iova              442 drivers/iommu/mtk_iommu.c 	ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
iova              449 drivers/iommu/mtk_iommu.c 			      unsigned long iova, size_t size,
iova              457 drivers/iommu/mtk_iommu.c 	unmapsz = dom->iop->unmap(dom->iop, iova, size, gather);
iova              480 drivers/iommu/mtk_iommu.c 					  dma_addr_t iova)
iova              488 drivers/iommu/mtk_iommu.c 	pa = dom->iop->iova_to_phys(dom->iop, iova);
iova              136 drivers/iommu/mtk_iommu_v1.c 				unsigned long iova, size_t size)
iova              143 drivers/iommu/mtk_iommu_v1.c 	writel_relaxed(iova & F_MMU_FAULT_VA_MSK,
iova              145 drivers/iommu/mtk_iommu_v1.c 	writel_relaxed((iova + size - 1) & F_MMU_FAULT_VA_MSK,
iova              297 drivers/iommu/mtk_iommu_v1.c static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
iova              304 drivers/iommu/mtk_iommu_v1.c 	u32 *pgt_base_iova = dom->pgt_va + (iova  >> MT2701_IOMMU_PAGE_SHIFT);
iova              321 drivers/iommu/mtk_iommu_v1.c 	mtk_iommu_tlb_flush_range(dom->data, iova, size);
iova              327 drivers/iommu/mtk_iommu_v1.c 			      unsigned long iova, size_t size,
iova              332 drivers/iommu/mtk_iommu_v1.c 	u32 *pgt_base_iova = dom->pgt_va + (iova  >> MT2701_IOMMU_PAGE_SHIFT);
iova              339 drivers/iommu/mtk_iommu_v1.c 	mtk_iommu_tlb_flush_range(dom->data, iova, size);
iova              345 drivers/iommu/mtk_iommu_v1.c 					  dma_addr_t iova)
iova              352 drivers/iommu/mtk_iommu_v1.c 	pa = *(dom->pgt_va + (iova >> MT2701_IOMMU_PAGE_SHIFT));
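
mtk_iommu_v1 keeps a single flat table, one 32-bit entry per page, indexed directly by iova >> MT2701_IOMMU_PAGE_SHIFT. A sketch of that flat lookup, assuming a 4 KiB page shift:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12           /* assumed for MT2701_IOMMU_PAGE_SHIFT */
    #define ENTRIES    32

    static uint32_t pgt[ENTRIES];   /* stand-in for dom->pgt_va */

    int main(void)
    {
            uint64_t iova = 3ull << PAGE_SHIFT;

            pgt[iova >> PAGE_SHIFT] = 0xdead0000;       /* "map" one page */
            printf("pa %#x, offset %#llx\n", pgt[iova >> PAGE_SHIFT],
                   (unsigned long long)(iova & ((1u << PAGE_SHIFT) - 1)));
            return 0;
    }
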
iova              147 drivers/iommu/qcom_iommu.c static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
iova              159 drivers/iommu/qcom_iommu.c 		iova = (iova >> 12) << 12;
iova              160 drivers/iommu/qcom_iommu.c 		iova |= ctx->asid;
iova              162 drivers/iommu/qcom_iommu.c 			iommu_writel(ctx, reg, iova);
iova              163 drivers/iommu/qcom_iommu.c 			iova += granule;
iova              168 drivers/iommu/qcom_iommu.c static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size,
iova              171 drivers/iommu/qcom_iommu.c 	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, false, cookie);
iova              175 drivers/iommu/qcom_iommu.c static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
iova              178 drivers/iommu/qcom_iommu.c 	qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie);
iova              183 drivers/iommu/qcom_iommu.c 				    unsigned long iova, size_t granule,
iova              186 drivers/iommu/qcom_iommu.c 	qcom_iommu_tlb_inv_range_nosync(iova, granule, granule, true, cookie);
iova              200 drivers/iommu/qcom_iommu.c 	u64 iova;
iova              208 drivers/iommu/qcom_iommu.c 	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);
iova              210 drivers/iommu/qcom_iommu.c 	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
iova              214 drivers/iommu/qcom_iommu.c 				    fsr, iova, fsynr, ctx->asid);
iova              421 drivers/iommu/qcom_iommu.c static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
iova              433 drivers/iommu/qcom_iommu.c 	ret = ops->map(ops, iova, paddr, size, prot);
iova              438 drivers/iommu/qcom_iommu.c static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
iova              456 drivers/iommu/qcom_iommu.c 	ret = ops->unmap(ops, iova, size, gather);
iova              483 drivers/iommu/qcom_iommu.c 					   dma_addr_t iova)
iova              494 drivers/iommu/qcom_iommu.c 	ret = ops->iova_to_phys(ops, iova);
iova              260 drivers/iommu/rockchip-iommu.c static u32 rk_iova_dte_index(dma_addr_t iova)
iova              262 drivers/iommu/rockchip-iommu.c 	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
iova              265 drivers/iommu/rockchip-iommu.c static u32 rk_iova_pte_index(dma_addr_t iova)
iova              267 drivers/iommu/rockchip-iommu.c 	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
iova              270 drivers/iommu/rockchip-iommu.c static u32 rk_iova_page_offset(dma_addr_t iova)
iova              272 drivers/iommu/rockchip-iommu.c 	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
iova              307 drivers/iommu/rockchip-iommu.c 		dma_addr_t iova;
iova              309 drivers/iommu/rockchip-iommu.c 		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
iova              310 drivers/iommu/rockchip-iommu.c 			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
iova              473 drivers/iommu/rockchip-iommu.c static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
iova              487 drivers/iommu/rockchip-iommu.c 	dte_index = rk_iova_dte_index(iova);
iova              488 drivers/iommu/rockchip-iommu.c 	pte_index = rk_iova_pte_index(iova);
iova              489 drivers/iommu/rockchip-iommu.c 	page_offset = rk_iova_page_offset(iova);
iova              513 drivers/iommu/rockchip-iommu.c 		&iova, dte_index, pte_index, page_offset);
iova              525 drivers/iommu/rockchip-iommu.c 	dma_addr_t iova;
iova              542 drivers/iommu/rockchip-iommu.c 		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
iova              552 drivers/iommu/rockchip-iommu.c 				&iova,
iova              555 drivers/iommu/rockchip-iommu.c 			log_iova(iommu, i, iova);
iova              563 drivers/iommu/rockchip-iommu.c 				report_iommu_fault(iommu->domain, iommu->dev, iova,
iova              573 drivers/iommu/rockchip-iommu.c 			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
iova              590 drivers/iommu/rockchip-iommu.c 					 dma_addr_t iova)
iova              600 drivers/iommu/rockchip-iommu.c 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
iova              606 drivers/iommu/rockchip-iommu.c 	pte = page_table[rk_iova_pte_index(iova)];
iova              610 drivers/iommu/rockchip-iommu.c 	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
iova              618 drivers/iommu/rockchip-iommu.c 			      dma_addr_t iova, size_t size)
iova              638 drivers/iommu/rockchip-iommu.c 			rk_iommu_zap_lines(iommu, iova, size);
iova              647 drivers/iommu/rockchip-iommu.c 					 dma_addr_t iova, size_t size)
iova              649 drivers/iommu/rockchip-iommu.c 	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
iova              651 drivers/iommu/rockchip-iommu.c 		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
iova              656 drivers/iommu/rockchip-iommu.c 				  dma_addr_t iova)
iova              665 drivers/iommu/rockchip-iommu.c 	dte_index = rk_iova_dte_index(iova);
iova              716 drivers/iommu/rockchip-iommu.c 			     dma_addr_t pte_dma, dma_addr_t iova,
iova              744 drivers/iommu/rockchip-iommu.c 	rk_iommu_zap_iova_first_last(rk_domain, iova, size);
iova              752 drivers/iommu/rockchip-iommu.c 	iova += pte_count * SPAGE_SIZE;
iova              755 drivers/iommu/rockchip-iommu.c 	       &iova, &page_phys, &paddr, prot);
iova              765 drivers/iommu/rockchip-iommu.c 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
iova              779 drivers/iommu/rockchip-iommu.c 	page_table = rk_dte_get_page_table(rk_domain, iova);
iova              785 drivers/iommu/rockchip-iommu.c 	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
iova              786 drivers/iommu/rockchip-iommu.c 	pte_index = rk_iova_pte_index(iova);
iova              789 drivers/iommu/rockchip-iommu.c 	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
iova              802 drivers/iommu/rockchip-iommu.c 	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
iova              817 drivers/iommu/rockchip-iommu.c 	dte = rk_domain->dt[rk_iova_dte_index(iova)];
iova              825 drivers/iommu/rockchip-iommu.c 	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
iova              826 drivers/iommu/rockchip-iommu.c 	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
iova              832 drivers/iommu/rockchip-iommu.c 	rk_iommu_zap_iova(rk_domain, iova, unmap_size);
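
The rockchip driver slices a 32-bit IOVA into a directory index, a page-table index and a page offset. The exact masks live in the driver header; a 10/10/12 split is assumed in this sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* assumed split: DTE = bits 31:22, PTE = bits 21:12, offset = bits 11:0 */
    static uint32_t dte_index(uint32_t iova)   { return iova >> 22; }
    static uint32_t pte_index(uint32_t iova)   { return (iova >> 12) & 0x3ff; }
    static uint32_t page_offset(uint32_t iova) { return iova & 0xfff; }

    int main(void)
    {
            uint32_t iova = 0x12345678;

            printf("dte %u, pte %u, offset %#x\n",
                   dte_index(iova), pte_index(iova), page_offset(iova));
            return 0;
    }
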
iova              267 drivers/iommu/s390-iommu.c static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,
iova              279 drivers/iommu/s390-iommu.c 	rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
iova              286 drivers/iommu/s390-iommu.c 					   dma_addr_t iova)
iova              293 drivers/iommu/s390-iommu.c 	if (iova < domain->geometry.aperture_start ||
iova              294 drivers/iommu/s390-iommu.c 	    iova > domain->geometry.aperture_end)
iova              297 drivers/iommu/s390-iommu.c 	rtx = calc_rtx(iova);
iova              298 drivers/iommu/s390-iommu.c 	sx = calc_sx(iova);
iova              299 drivers/iommu/s390-iommu.c 	px = calc_px(iova);
iova              317 drivers/iommu/s390-iommu.c 			       unsigned long iova, size_t size,
iova              325 drivers/iommu/s390-iommu.c 	paddr = s390_iommu_iova_to_phys(domain, iova);
iova              329 drivers/iommu/s390-iommu.c 	rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,
iova               60 drivers/iommu/tegra-gart.c #define for_each_gart_pte(gart, iova)					\
iova               61 drivers/iommu/tegra-gart.c 	for (iova = gart->iovmm_base;					\
iova               62 drivers/iommu/tegra-gart.c 	     iova < gart->iovmm_end;					\
iova               63 drivers/iommu/tegra-gart.c 	     iova += GART_PAGE_SIZE)
iova               66 drivers/iommu/tegra-gart.c 				unsigned long iova, unsigned long pte)
iova               68 drivers/iommu/tegra-gart.c 	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
iova               73 drivers/iommu/tegra-gart.c 					  unsigned long iova)
iova               77 drivers/iommu/tegra-gart.c 	writel_relaxed(iova, gart->regs + GART_ENTRY_ADDR);
iova               85 drivers/iommu/tegra-gart.c 	unsigned long iova;
iova               87 drivers/iommu/tegra-gart.c 	for_each_gart_pte(gart, iova)
iova               88 drivers/iommu/tegra-gart.c 		gart_set_pte(gart, iova, data ? *(data++) : 0);
iova               95 drivers/iommu/tegra-gart.c 					   unsigned long iova, size_t bytes)
iova               97 drivers/iommu/tegra-gart.c 	return unlikely(iova < gart->iovmm_base || bytes != GART_PAGE_SIZE ||
iova               98 drivers/iommu/tegra-gart.c 			iova + bytes > gart->iovmm_end);
iova              101 drivers/iommu/tegra-gart.c static inline bool gart_pte_valid(struct gart_device *gart, unsigned long iova)
iova              103 drivers/iommu/tegra-gart.c 	return !!(gart_read_pte(gart, iova) & GART_ENTRY_PHYS_ADDR_VALID);
iova              167 drivers/iommu/tegra-gart.c static inline int __gart_iommu_map(struct gart_device *gart, unsigned long iova,
iova              170 drivers/iommu/tegra-gart.c 	if (unlikely(gart_debug && gart_pte_valid(gart, iova))) {
iova              175 drivers/iommu/tegra-gart.c 	gart_set_pte(gart, iova, GART_ENTRY_PHYS_ADDR_VALID | pa);
iova              180 drivers/iommu/tegra-gart.c static int gart_iommu_map(struct iommu_domain *domain, unsigned long iova,
iova              186 drivers/iommu/tegra-gart.c 	if (gart_iova_range_invalid(gart, iova, bytes))
iova              190 drivers/iommu/tegra-gart.c 	ret = __gart_iommu_map(gart, iova, (unsigned long)pa);
iova              197 drivers/iommu/tegra-gart.c 				     unsigned long iova)
iova              199 drivers/iommu/tegra-gart.c 	if (unlikely(gart_debug && !gart_pte_valid(gart, iova))) {
iova              204 drivers/iommu/tegra-gart.c 	gart_set_pte(gart, iova, 0);
iova              209 drivers/iommu/tegra-gart.c static size_t gart_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
iova              215 drivers/iommu/tegra-gart.c 	if (gart_iova_range_invalid(gart, iova, bytes))
iova              219 drivers/iommu/tegra-gart.c 	err = __gart_iommu_unmap(gart, iova);
iova              226 drivers/iommu/tegra-gart.c 					   dma_addr_t iova)
iova              231 drivers/iommu/tegra-gart.c 	if (gart_iova_range_invalid(gart, iova, GART_PAGE_SIZE))
iova              235 drivers/iommu/tegra-gart.c 	pte = gart_read_pte(gart, iova);
iova              308 drivers/iommu/tegra-gart.c 	unsigned long iova;
iova              318 drivers/iommu/tegra-gart.c 	for_each_gart_pte(gart, iova)
iova              319 drivers/iommu/tegra-gart.c 		*(data++) = gart_read_pte(gart, iova);
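
for_each_gart_pte() above is a flat walk over the whole GART aperture, one page per step; the save/restore and wipe paths both reuse it. The shape of the loop, with invented aperture bounds:

    #include <stdint.h>
    #include <stdio.h>

    #define GART_PAGE_SIZE 4096u

    int main(void)
    {
            uint64_t base = 0x58000000;                     /* example aperture */
            uint64_t end  = base + 4 * GART_PAGE_SIZE;
            uint64_t iova;

            for (iova = base; iova < end; iova += GART_PAGE_SIZE)
                    printf("pte slot for iova %#llx\n", (unsigned long long)iova);
            return 0;
    }
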
iova              146 drivers/iommu/tegra-smmu.c static unsigned int iova_pd_index(unsigned long iova)
iova              148 drivers/iommu/tegra-smmu.c 	return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);
iova              151 drivers/iommu/tegra-smmu.c static unsigned int iova_pt_index(unsigned long iova)
iova              153 drivers/iommu/tegra-smmu.c 	return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);
iova              213 drivers/iommu/tegra-smmu.c 					  unsigned long iova)
iova              222 drivers/iommu/tegra-smmu.c 	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_SECTION(iova);
iova              228 drivers/iommu/tegra-smmu.c 					unsigned long iova)
iova              237 drivers/iommu/tegra-smmu.c 	value |= SMMU_TLB_FLUSH_ASID_MATCH | SMMU_TLB_FLUSH_VA_GROUP(iova);
iova              520 drivers/iommu/tegra-smmu.c static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
iova              523 drivers/iommu/tegra-smmu.c 	unsigned int pd_index = iova_pd_index(iova);
iova              537 drivers/iommu/tegra-smmu.c 	smmu_flush_tlb_section(smmu, as->id, iova);
iova              541 drivers/iommu/tegra-smmu.c static u32 *tegra_smmu_pte_offset(struct page *pt_page, unsigned long iova)
iova              545 drivers/iommu/tegra-smmu.c 	return pt + iova_pt_index(iova);
iova              548 drivers/iommu/tegra-smmu.c static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova,
iova              551 drivers/iommu/tegra-smmu.c 	unsigned int pd_index = iova_pd_index(iova);
iova              563 drivers/iommu/tegra-smmu.c 	return tegra_smmu_pte_offset(pt_page, iova);
iova              566 drivers/iommu/tegra-smmu.c static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova,
iova              569 drivers/iommu/tegra-smmu.c 	unsigned int pde = iova_pd_index(iova);
iova              596 drivers/iommu/tegra-smmu.c 		tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR |
iova              606 drivers/iommu/tegra-smmu.c 	return tegra_smmu_pte_offset(as->pts[pde], iova);
iova              609 drivers/iommu/tegra-smmu.c static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova)
iova              611 drivers/iommu/tegra-smmu.c 	unsigned int pd_index = iova_pd_index(iova);
iova              616 drivers/iommu/tegra-smmu.c static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova)
iova              618 drivers/iommu/tegra-smmu.c 	unsigned int pde = iova_pd_index(iova);
iova              630 drivers/iommu/tegra-smmu.c 		tegra_smmu_set_pde(as, iova, 0);
iova              638 drivers/iommu/tegra-smmu.c static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova,
iova              649 drivers/iommu/tegra-smmu.c 	smmu_flush_tlb_group(smmu, as->id, iova);
iova              653 drivers/iommu/tegra-smmu.c static int tegra_smmu_map(struct iommu_domain *domain, unsigned long iova,
iova              661 drivers/iommu/tegra-smmu.c 	pte = as_get_pte(as, iova, &pte_dma);
iova              667 drivers/iommu/tegra-smmu.c 		tegra_smmu_pte_get_use(as, iova);
iova              677 drivers/iommu/tegra-smmu.c 	tegra_smmu_set_pte(as, iova, pte, pte_dma,
iova              683 drivers/iommu/tegra-smmu.c static size_t tegra_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
iova              690 drivers/iommu/tegra-smmu.c 	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
iova              694 drivers/iommu/tegra-smmu.c 	tegra_smmu_set_pte(as, iova, pte, pte_dma, 0);
iova              695 drivers/iommu/tegra-smmu.c 	tegra_smmu_pte_put_use(as, iova);
iova              701 drivers/iommu/tegra-smmu.c 					   dma_addr_t iova)
iova              708 drivers/iommu/tegra-smmu.c 	pte = tegra_smmu_pte_lookup(as, iova, &pte_dma);
iova               59 drivers/iommu/virtio-iommu.c 	struct interval_tree_node	iova;
iova              314 drivers/iommu/virtio-iommu.c static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
iova              325 drivers/iommu/virtio-iommu.c 	mapping->iova.start	= iova;
iova              326 drivers/iommu/virtio-iommu.c 	mapping->iova.last	= iova + size - 1;
iova              330 drivers/iommu/virtio-iommu.c 	interval_tree_insert(&mapping->iova, &vdomain->mappings);
iova              347 drivers/iommu/virtio-iommu.c 				  unsigned long iova, size_t size)
iova              351 drivers/iommu/virtio-iommu.c 	unsigned long last = iova + size - 1;
iova              356 drivers/iommu/virtio-iommu.c 	next = interval_tree_iter_first(&vdomain->mappings, iova, last);
iova              359 drivers/iommu/virtio-iommu.c 		mapping = container_of(node, struct viommu_mapping, iova);
iova              360 drivers/iommu/virtio-iommu.c 		next = interval_tree_iter_next(node, iova, last);
iova              363 drivers/iommu/virtio-iommu.c 		if (mapping->iova.start < iova)
iova              370 drivers/iommu/virtio-iommu.c 		unmapped += mapping->iova.last - mapping->iova.start + 1;
iova              398 drivers/iommu/virtio-iommu.c 		mapping = container_of(node, struct viommu_mapping, iova);
iova              402 drivers/iommu/virtio-iommu.c 			.virt_start	= cpu_to_le64(mapping->iova.start),
iova              403 drivers/iommu/virtio-iommu.c 			.virt_end	= cpu_to_le64(mapping->iova.last),
iova              717 drivers/iommu/virtio-iommu.c static int viommu_map(struct iommu_domain *domain, unsigned long iova,
iova              732 drivers/iommu/virtio-iommu.c 	ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
iova              739 drivers/iommu/virtio-iommu.c 		.virt_start	= cpu_to_le64(iova),
iova              741 drivers/iommu/virtio-iommu.c 		.virt_end	= cpu_to_le64(iova + size - 1),
iova              750 drivers/iommu/virtio-iommu.c 		viommu_del_mappings(vdomain, iova, size);
iova              755 drivers/iommu/virtio-iommu.c static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
iova              763 drivers/iommu/virtio-iommu.c 	unmapped = viommu_del_mappings(vdomain, iova, size);
iova              774 drivers/iommu/virtio-iommu.c 		.virt_start	= cpu_to_le64(iova),
iova              775 drivers/iommu/virtio-iommu.c 		.virt_end	= cpu_to_le64(iova + unmapped - 1),
iova              783 drivers/iommu/virtio-iommu.c 				       dma_addr_t iova)
iova              792 drivers/iommu/virtio-iommu.c 	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
iova              794 drivers/iommu/virtio-iommu.c 		mapping = container_of(node, struct viommu_mapping, iova);
iova              795 drivers/iommu/virtio-iommu.c 		paddr = mapping->paddr + (iova - mapping->iova.start);
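
viommu_iova_to_phys() resolves addresses through an interval tree of [start, last] mappings and returns paddr + (iova - start). A linear scan gives the same arithmetic without the interval-tree machinery:

    #include <stdint.h>
    #include <stdio.h>

    struct mapping { uint64_t start, last, paddr; };

    static uint64_t iova_to_phys(const struct mapping *m, int n, uint64_t iova)
    {
            for (int i = 0; i < n; i++)
                    if (iova >= m[i].start && iova <= m[i].last)
                            return m[i].paddr + (iova - m[i].start);
            return 0;
    }

    int main(void)
    {
            static const struct mapping maps[] = {
                    { 0x1000, 0x3fff, 0x80000000 },         /* example mapping */
            };

            printf("%#llx\n", (unsigned long long)iova_to_phys(maps, 1, 0x2234));
            return 0;
    }
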
iova               97 drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c 	u32 iova;
iova              286 drivers/media/platform/mtk-vcodec/venc/venc_h264_if.c 		wb[i].iova = inst->work_bufs[i].dma_addr;
iova               87 drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c 	u32 iova;
iova              199 drivers/media/platform/mtk-vcodec/venc/venc_vp8_if.c 		wb[i].iova = inst->work_bufs[i].dma_addr;
iova              996 drivers/misc/mic/scif/scif_rma.c 	struct iova *iova_ptr;
iova               44 drivers/net/ethernet/marvell/octeontx2/af/common.h 	dma_addr_t	iova;
iova               68 drivers/net/ethernet/marvell/octeontx2/af/common.h 					 &qmem->iova, GFP_KERNEL);
iova               74 drivers/net/ethernet/marvell/octeontx2/af/common.h 	aligned_addr = ALIGN((u64)qmem->iova, OTX2_ALIGN);
iova               75 drivers/net/ethernet/marvell/octeontx2/af/common.h 	qmem->align = (aligned_addr - qmem->iova);
iova               77 drivers/net/ethernet/marvell/octeontx2/af/common.h 	qmem->iova += qmem->align;
iova               89 drivers/net/ethernet/marvell/octeontx2/af/common.h 				  qmem->iova - qmem->align);
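
The octeontx2 qmem helper over-allocates, rounds the DMA address up to the next OTX2_ALIGN boundary, and remembers the slack so the original address can be reconstructed at free time. A sketch of the fixup, with the alignment value assumed:

    #include <stdint.h>
    #include <stdio.h>

    #define OTX2_ALIGN 128ull               /* assumed alignment value */
    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            uint64_t iova  = 0x10000008;            /* unaligned DMA address */
            uint64_t fixed = ALIGN_UP(iova, OTX2_ALIGN);
            uint64_t slack = fixed - iova;          /* qmem->align           */

            printf("use %#llx (slack %llu), free at %#llx\n",
                   (unsigned long long)fixed, (unsigned long long)slack,
                   (unsigned long long)(fixed - slack));
            return 0;
    }
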
iova              500 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	dma_addr_t iova;
iova              586 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	iova = dma_map_resource(rvu->dev, phy_addr,
iova              590 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	if (dma_mapping_error(rvu->dev, iova))
iova              593 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_MSIXTR_BASE, (u64)iova);
iova              594 drivers/net/ethernet/marvell/octeontx2/af/rvu.c 	rvu->msix_base_iova = iova;
iova              400 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    (u64)pfvf->rss_ctx->iova);
iova              543 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 	inst.res_addr = (u64)aq->res->iova;
iova              810 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    (u64)pfvf->rq_ctx->iova);
iova              827 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    (u64)pfvf->sq_ctx->iova);
iova              842 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    (u64)pfvf->cq_ctx->iova);
iova              862 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    (u64)pfvf->cq_ints_ctx->iova);
iova              874 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    (u64)pfvf->nix_qints_ctx->iova);
iova             1821 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    (u64)mcast->mce_ctx->iova);
iova             1835 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    (u64)mcast->mcast_buf->iova);
iova             2691 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 		    NIX_AF_AQ_BASE, (u64)block->aq->inst->iova);
iova               95 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 	inst.res_addr = (u64)aq->res->iova;
iova              124 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 			req->aura.pool_addr = pfvf->pool_ctx->iova +
iova              353 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 		    (u64)pfvf->aura_ctx->iova);
iova              358 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 		    (u64)pfvf->npa_qints_ctx->iova);
iova              439 drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c 		    NPA_AF_AQ_BASE, (u64)block->aq->inst->iova);
iova              418 drivers/net/ethernet/mellanox/mlx4/mr.c 			   u64 iova, u64 size, u32 access, int npages,
iova              421 drivers/net/ethernet/mellanox/mlx4/mr.c 	mr->iova       = iova;
iova              527 drivers/net/ethernet/mellanox/mlx4/mr.c int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
iova              537 drivers/net/ethernet/mellanox/mlx4/mr.c 	err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
iova              590 drivers/net/ethernet/mellanox/mlx4/mr.c 			    u64 iova, u64 size, int npages,
iova              599 drivers/net/ethernet/mellanox/mlx4/mr.c 	mpt_entry->start       = cpu_to_be64(iova);
iova              649 drivers/net/ethernet/mellanox/mlx4/mr.c 	mpt_entry->start       = cpu_to_be64(mr->iova);
iova              970 drivers/net/ethernet/mellanox/mlx4/mr.c 				  int npages, u64 iova)
iova              980 drivers/net/ethernet/mellanox/mlx4/mr.c 	if (iova & page_mask)
iova              997 drivers/net/ethernet/mellanox/mlx4/mr.c 		      int npages, u64 iova, u32 *lkey, u32 *rkey)
iova             1002 drivers/net/ethernet/mellanox/mlx4/mr.c 	err = mlx4_check_fmr(fmr, page_list, npages, iova);
iova             1029 drivers/net/ethernet/mellanox/mlx4/mr.c 	fmr->mpt->start  = cpu_to_be64(iova);
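
Before mlx4 remaps an FMR it validates that the caller's iova is aligned to the region's page size; only then is it written into the MPT start field. The check is a simple mask test, shown here as a runnable sketch with an assumed 4 KiB FMR page:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int page_shift = 12;                 /* assumed page size */
        uint64_t page_mask = (1ULL << page_shift) - 1;
        uint64_t iova = 0x7f0000001080ULL;

        if (iova & page_mask)
            printf("iova 0x%llx rejected: not page aligned\n",
                   (unsigned long long)iova);
        else
            printf("iova 0x%llx accepted\n", (unsigned long long)iova);
        return 0;
    }
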
iova               80 drivers/net/ethernet/mellanox/mlx5/core/mr.c 	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
iova             1255 drivers/nvme/host/rdma.c 	sg->addr = cpu_to_le64(req->mr->iova);
iova              283 drivers/parisc/ccio-dma.c #define CCIO_IOVP(iova) ((iova) & IOVP_MASK)
iova              431 drivers/parisc/ccio-dma.c ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
iova              433 drivers/parisc/ccio-dma.c 	unsigned long iovp = CCIO_IOVP(iova);
iova              660 drivers/parisc/ccio-dma.c ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
iova              662 drivers/parisc/ccio-dma.c 	u32 iovp = (u32)CCIO_IOVP(iova);
iova              687 drivers/parisc/ccio-dma.c 	ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
iova              803 drivers/parisc/ccio-dma.c ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
iova              808 drivers/parisc/ccio-dma.c 	dma_addr_t offset = iova & ~IOVP_MASK;
iova              818 drivers/parisc/ccio-dma.c 		__func__, (long)iova, size);
iova              820 drivers/parisc/ccio-dma.c 	iova ^= offset;        /* clear offset bits */
iova              831 drivers/parisc/ccio-dma.c 	ccio_mark_invalid(ioc, iova, size);
iova              832 drivers/parisc/ccio-dma.c 	ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
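
ccio_unmap_page splits the DMA handle into an in-page offset and a page-aligned iova (XORing the offset out is equivalent to masking, since those bits came from the iova itself), then invalidates and frees whole IOVP pages. A runnable sketch of the arithmetic, assuming a 4 KiB IOVP page and that the driver rounds the length up so whole pages are freed:

    #include <stdint.h>
    #include <stdio.h>

    #define IOVP_SHIFT 12
    #define IOVP_SIZE  (1ULL << IOVP_SHIFT)
    #define IOVP_MASK  (~(IOVP_SIZE - 1))

    int main(void)
    {
        uint64_t iova = 0xdead3abcULL;       /* handle given to unmap */
        uint64_t size = 0x1800;              /* bytes to unmap */

        uint64_t offset = iova & ~IOVP_MASK; /* offset into first page */
        iova ^= offset;                      /* clear offset bits */
        size = (size + offset + IOVP_SIZE - 1) & IOVP_MASK; /* whole pages */

        printf("invalidate from 0x%llx, free %llu pages\n",
               (unsigned long long)iova,
               (unsigned long long)(size >> IOVP_SHIFT));
        return 0;
    }
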
iova              306 drivers/parisc/sba_iommu.c #define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
iova              310 drivers/parisc/sba_iommu.c #define SBA_IOVP(ioc,iova) (iova)
iova              488 drivers/parisc/sba_iommu.c sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
iova              490 drivers/parisc/sba_iommu.c 	unsigned long iovp = SBA_IOVP(ioc, iova);
iova              501 drivers/parisc/sba_iommu.c 		__func__, (uint) iova, size,
iova              607 drivers/parisc/sba_iommu.c sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
iova              609 drivers/parisc/sba_iommu.c 	u32 iovp = (u32) SBA_IOVP(ioc,iova);
iova              794 drivers/parisc/sba_iommu.c sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
iova              804 drivers/parisc/sba_iommu.c 	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
iova              811 drivers/parisc/sba_iommu.c 	offset = iova & ~IOVP_MASK;
iova              812 drivers/parisc/sba_iommu.c 	iova ^= offset;        /* clear offset bits */
iova              823 drivers/parisc/sba_iommu.c 	sba_mark_invalid(ioc, iova, size);
iova              830 drivers/parisc/sba_iommu.c 	d->iova = iova;
iova              835 drivers/parisc/sba_iommu.c 			sba_free_range(ioc, d->iova, d->size);
iova              843 drivers/parisc/sba_iommu.c 	sba_free_range(ioc, iova, size);
iova               83 drivers/remoteproc/remoteproc_core.c 			     unsigned long iova, int flags, void *token)
iova               87 drivers/remoteproc/remoteproc_core.c 	dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
iova               58 drivers/s390/cio/vfio_ccw_cp.c static int pfn_array_alloc(struct pfn_array *pa, u64 iova, unsigned int len)
iova               65 drivers/s390/cio/vfio_ccw_cp.c 	pa->pa_iova = iova;
iova               67 drivers/s390/cio/vfio_ccw_cp.c 	pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
iova              133 drivers/s390/cio/vfio_ccw_cp.c static bool pfn_array_iova_pinned(struct pfn_array *pa, unsigned long iova)
iova              135 drivers/s390/cio/vfio_ccw_cp.c 	unsigned long iova_pfn = iova >> PAGE_SHIFT;
iova              193 drivers/s390/cio/vfio_ccw_cp.c 			   void *to, u64 iova,
iova              201 drivers/s390/cio/vfio_ccw_cp.c 	ret = pfn_array_alloc(&pa, iova, n);
iova              216 drivers/s390/cio/vfio_ccw_cp.c 			from += iova & (PAGE_SIZE - 1);
iova              217 drivers/s390/cio/vfio_ccw_cp.c 			m -= iova & (PAGE_SIZE - 1);
iova              370 drivers/s390/cio/vfio_ccw_cp.c static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
iova              394 drivers/s390/cio/vfio_ccw_cp.c 		if (!ccw_is_chain(ccw) && !is_tic_within_range(ccw, iova, cnt))
iova              512 drivers/s390/cio/vfio_ccw_cp.c 	u64 iova;
iova              528 drivers/s390/cio/vfio_ccw_cp.c 		ret = copy_from_iova(cp->mdev, &iova, ccw->cda, sizeof(iova));
iova              532 drivers/s390/cio/vfio_ccw_cp.c 		iova = ccw->cda;
iova              534 drivers/s390/cio/vfio_ccw_cp.c 	idaw_nr = idal_nr_words((void *)iova, bytes);
iova              551 drivers/s390/cio/vfio_ccw_cp.c 	ret = pfn_array_alloc(pa, iova, bytes);
iova              851 drivers/s390/cio/vfio_ccw_cp.c bool cp_iova_pinned(struct channel_program *cp, u64 iova)
iova              861 drivers/s390/cio/vfio_ccw_cp.c 			if (pfn_array_iova_pinned(chain->ch_pa + i, iova))
iova               50 drivers/s390/cio/vfio_ccw_cp.h extern bool cp_iova_pinned(struct channel_program *cp, u64 iova);
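
pfn_array_alloc sizes its pin list by counting every page the guest range touches: the starting iova's in-page offset is added to the length before rounding up. The page-count formula shown above, as a runnable check:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    static unsigned long nr_pages(uint64_t iova, unsigned int len)
    {
        /* offset into the first page + length, rounded up to pages */
        return (unsigned long)(((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1))
                               >> PAGE_SHIFT);
    }

    int main(void)
    {
        printf("%lu\n", nr_pages(0x1fff, 2));    /* crosses a boundary: 2 */
        printf("%lu\n", nr_pages(0x2000, 4096)); /* exactly one page: 1 */
        return 0;
    }
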
iova               61 drivers/s390/cio/vfio_ccw_ops.c 		if (!cp_iova_pinned(&private->cp, unmap->iova))
iova             1077 drivers/s390/crypto/vfio_ap_ops.c 		unsigned long g_pfn = unmap->iova >> PAGE_SHIFT;
iova              628 drivers/soc/qcom/qcom-geni-se.c 			dma_addr_t *iova)
iova              636 drivers/soc/qcom/qcom-geni-se.c 	*iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE);
iova              637 drivers/soc/qcom/qcom-geni-se.c 	if (dma_mapping_error(wrapper->dev, *iova))
iova              644 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_TX_PTR_L);
iova              645 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_TX_PTR_H);
iova              664 drivers/soc/qcom/qcom-geni-se.c 			dma_addr_t *iova)
iova              672 drivers/soc/qcom/qcom-geni-se.c 	*iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE);
iova              673 drivers/soc/qcom/qcom-geni-se.c 	if (dma_mapping_error(wrapper->dev, *iova))
iova              680 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_RX_PTR_L);
iova              681 drivers/soc/qcom/qcom-geni-se.c 	writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_RX_PTR_H);
iova              697 drivers/soc/qcom/qcom-geni-se.c void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
iova              701 drivers/soc/qcom/qcom-geni-se.c 	if (iova && !dma_mapping_error(wrapper->dev, iova))
iova              702 drivers/soc/qcom/qcom-geni-se.c 		dma_unmap_single(wrapper->dev, iova, len, DMA_TO_DEVICE);
iova              714 drivers/soc/qcom/qcom-geni-se.c void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
iova              718 drivers/soc/qcom/qcom-geni-se.c 	if (iova && !dma_mapping_error(wrapper->dev, iova))
iova              719 drivers/soc/qcom/qcom-geni-se.c 		dma_unmap_single(wrapper->dev, iova, len, DMA_FROM_DEVICE);
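
The geni-se prep/unprep helpers are the standard streaming-DMA pattern: map with dma_map_single(), fail fast on dma_mapping_error(), hand the 64-bit iova to the hardware as a lo/hi register pair, and unmap only if the handle is valid. A condensed kernel-style sketch; the register offsets are placeholders, not the real SE_DMA_TX_PTR_L/H values:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/kernel.h>

    static int example_tx_dma_prep(struct device *dev, void __iomem *base,
                                   void *buf, size_t len, dma_addr_t *iova)
    {
        *iova = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *iova))
            return -EIO;

        writel_relaxed(lower_32_bits(*iova), base + 0x30); /* placeholder */
        writel_relaxed(upper_32_bits(*iova), base + 0x34); /* placeholder */
        return 0;
    }

    static void example_tx_dma_unprep(struct device *dev, dma_addr_t iova,
                                      size_t len)
    {
        /* Guard against unprep being called with an unmapped handle. */
        if (iova && !dma_mapping_error(dev, iova))
            dma_unmap_single(dev, iova, len, DMA_TO_DEVICE);
    }
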
iova              101 drivers/staging/media/ipu3/ipu3-dmamap.c 	struct iova *iova;
iova              106 drivers/staging/media/ipu3/ipu3-dmamap.c 	iova = alloc_iova(&imgu->iova_domain, size >> shift,
iova              108 drivers/staging/media/ipu3/ipu3-dmamap.c 	if (!iova)
iova              116 drivers/staging/media/ipu3/ipu3-dmamap.c 	iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
iova              137 drivers/staging/media/ipu3/ipu3-dmamap.c 	map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
iova              150 drivers/staging/media/ipu3/ipu3-dmamap.c 	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
iova              155 drivers/staging/media/ipu3/ipu3-dmamap.c 	__free_iova(&imgu->iova_domain, iova);
iova              162 drivers/staging/media/ipu3/ipu3-dmamap.c 	struct iova *iova;
iova              164 drivers/staging/media/ipu3/ipu3-dmamap.c 	iova = find_iova(&imgu->iova_domain,
iova              166 drivers/staging/media/ipu3/ipu3-dmamap.c 	if (WARN_ON(!iova))
iova              169 drivers/staging/media/ipu3/ipu3-dmamap.c 	imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
iova              170 drivers/staging/media/ipu3/ipu3-dmamap.c 		       iova_size(iova) << iova_shift(&imgu->iova_domain));
iova              172 drivers/staging/media/ipu3/ipu3-dmamap.c 	__free_iova(&imgu->iova_domain, iova);
iova              203 drivers/staging/media/ipu3/ipu3-dmamap.c 	struct iova *iova;
iova              221 drivers/staging/media/ipu3/ipu3-dmamap.c 	iova = alloc_iova(&imgu->iova_domain, size >> shift,
iova              223 drivers/staging/media/ipu3/ipu3-dmamap.c 	if (!iova)
iova              227 drivers/staging/media/ipu3/ipu3-dmamap.c 		iova->pfn_lo, iova->pfn_hi);
iova              229 drivers/staging/media/ipu3/ipu3-dmamap.c 	if (imgu_mmu_map_sg(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
iova              234 drivers/staging/media/ipu3/ipu3-dmamap.c 	map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
iova              240 drivers/staging/media/ipu3/ipu3-dmamap.c 	__free_iova(&imgu->iova_domain, iova);
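
ipu3-dmamap exercises the whole <linux/iova.h> lifecycle: convert bytes to page frames with iova_shift(), carve a range out with alloc_iova(), turn it into a bus address with iova_dma_addr(), and return it with __free_iova() on every error path. A minimal kernel-style sketch with the MMU mapping step elided:

    #include <linux/iova.h>

    static dma_addr_t example_alloc_range(struct iova_domain *iovad,
                                          size_t size, unsigned long limit_pfn)
    {
        unsigned long shift = iova_shift(iovad);
        struct iova *iova;

        iova = alloc_iova(iovad, size >> shift, limit_pfn, true);
        if (!iova)
            return 0;

        /*
         * The driver would map pages at this address next; if that fails
         * it must hand the range back with __free_iova(iovad, iova).
         */
        return iova_dma_addr(iovad, iova);
    }
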
iova              154 drivers/staging/media/ipu3/ipu3-mmu.c static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx,
iova              157 drivers/staging/media/ipu3/ipu3-mmu.c 	iova >>= IPU3_PAGE_SHIFT;
iova              160 drivers/staging/media/ipu3/ipu3-mmu.c 		*l2pt_idx = iova & IPU3_L2PT_MASK;
iova              162 drivers/staging/media/ipu3/ipu3-mmu.c 	iova >>= IPU3_L2PT_SHIFT;
iova              165 drivers/staging/media/ipu3/ipu3-mmu.c 		*l1pt_idx = iova & IPU3_L1PT_MASK;
iova              208 drivers/staging/media/ipu3/ipu3-mmu.c static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
iova              218 drivers/staging/media/ipu3/ipu3-mmu.c 	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);
iova              249 drivers/staging/media/ipu3/ipu3-mmu.c int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
iova              260 drivers/staging/media/ipu3/ipu3-mmu.c 	if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
iova              262 drivers/staging/media/ipu3/ipu3-mmu.c 			iova, &paddr, size);
iova              267 drivers/staging/media/ipu3/ipu3-mmu.c 		iova, &paddr, size);
iova              270 drivers/staging/media/ipu3/ipu3-mmu.c 		dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);
iova              272 drivers/staging/media/ipu3/ipu3-mmu.c 		ret = __imgu_mmu_map(mmu, iova, paddr);
iova              276 drivers/staging/media/ipu3/ipu3-mmu.c 		iova += IPU3_PAGE_SIZE;
iova              297 drivers/staging/media/ipu3/ipu3-mmu.c size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
iova              318 drivers/staging/media/ipu3/ipu3-mmu.c 		ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
iova              331 drivers/staging/media/ipu3/ipu3-mmu.c 	imgu_mmu_unmap(info, iova, mapped);
iova              337 drivers/staging/media/ipu3/ipu3-mmu.c 			       unsigned long iova, size_t size)
iova              347 drivers/staging/media/ipu3/ipu3-mmu.c 	address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);
iova              377 drivers/staging/media/ipu3/ipu3-mmu.c size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
iova              388 drivers/staging/media/ipu3/ipu3-mmu.c 	if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
iova              390 drivers/staging/media/ipu3/ipu3-mmu.c 			iova, size);
iova              394 drivers/staging/media/ipu3/ipu3-mmu.c 	dev_dbg(mmu->dev, "unmap this: iova 0x%lx size 0x%zx\n", iova, size);
iova              401 drivers/staging/media/ipu3/ipu3-mmu.c 		unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
iova              406 drivers/staging/media/ipu3/ipu3-mmu.c 			iova, unmapped_page);
iova              408 drivers/staging/media/ipu3/ipu3-mmu.c 		iova += unmapped_page;
iova               30 drivers/staging/media/ipu3/ipu3-mmu.h int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
iova               32 drivers/staging/media/ipu3/ipu3-mmu.h size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
iova               34 drivers/staging/media/ipu3/ipu3-mmu.h size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
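
imgu_mmu_map walks a range one hardware page at a time, and imgu_mmu_map_sg unwinds with imgu_mmu_unmap() if any segment fails, so a half-mapped scatterlist never leaks. A kernel-style sketch of the map-with-rollback loop; the single-page primitives are hypothetical stand-ins for __imgu_mmu_map()/__imgu_mmu_unmap():

    #include <linux/types.h>

    #define EX_PAGE_SIZE 4096UL    /* assumed hardware page size */

    /* Hypothetical single-page primitives. */
    int ex_map_page(unsigned long iova, phys_addr_t paddr);
    void ex_unmap_page(unsigned long iova);

    static int ex_map_range(unsigned long iova, phys_addr_t paddr, size_t size)
    {
        size_t done;
        int ret;

        for (done = 0; done < size; done += EX_PAGE_SIZE) {
            ret = ex_map_page(iova + done, paddr + done);
            if (ret)
                goto undo;
        }
        return 0;

    undo:
        /* Roll back everything mapped before the failure. */
        while (done) {
            done -= EX_PAGE_SIZE;
            ex_unmap_page(iova + done);
        }
        return ret;
    }
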
iova               25 drivers/staging/media/tegra-vde/dmabuf-cache.c 	struct iova *iova;
iova               36 drivers/staging/media/tegra-vde/dmabuf-cache.c 		tegra_vde_iommu_unmap(entry->vde, entry->iova);
iova               70 drivers/staging/media/tegra-vde/dmabuf-cache.c 	struct iova *iova;
iova               88 drivers/staging/media/tegra-vde/dmabuf-cache.c 			*addrp = iova_dma_addr(&vde->iova, entry->iova);
iova              122 drivers/staging/media/tegra-vde/dmabuf-cache.c 		err = tegra_vde_iommu_map(vde, sgt, &iova, dmabuf->size);
iova              126 drivers/staging/media/tegra-vde/dmabuf-cache.c 		*addrp = iova_dma_addr(&vde->iova, iova);
iova              129 drivers/staging/media/tegra-vde/dmabuf-cache.c 		iova = NULL;
iova              136 drivers/staging/media/tegra-vde/dmabuf-cache.c 	entry->iova = iova;
iova               21 drivers/staging/media/tegra-vde/iommu.c 			struct iova **iovap,
iova               24 drivers/staging/media/tegra-vde/iommu.c 	struct iova *iova;
iova               30 drivers/staging/media/tegra-vde/iommu.c 	size = iova_align(&vde->iova, size);
iova               31 drivers/staging/media/tegra-vde/iommu.c 	shift = iova_shift(&vde->iova);
iova               33 drivers/staging/media/tegra-vde/iommu.c 	iova = alloc_iova(&vde->iova, size >> shift, end >> shift, true);
iova               34 drivers/staging/media/tegra-vde/iommu.c 	if (!iova)
iova               37 drivers/staging/media/tegra-vde/iommu.c 	addr = iova_dma_addr(&vde->iova, iova);
iova               42 drivers/staging/media/tegra-vde/iommu.c 		__free_iova(&vde->iova, iova);
iova               46 drivers/staging/media/tegra-vde/iommu.c 	*iovap = iova;
iova               51 drivers/staging/media/tegra-vde/iommu.c void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova)
iova               53 drivers/staging/media/tegra-vde/iommu.c 	unsigned long shift = iova_shift(&vde->iova);
iova               54 drivers/staging/media/tegra-vde/iommu.c 	unsigned long size = iova_size(iova) << shift;
iova               55 drivers/staging/media/tegra-vde/iommu.c 	dma_addr_t addr = iova_dma_addr(&vde->iova, iova);
iova               58 drivers/staging/media/tegra-vde/iommu.c 	__free_iova(&vde->iova, iova);
iova               64 drivers/staging/media/tegra-vde/iommu.c 	struct iova *iova;
iova               92 drivers/staging/media/tegra-vde/iommu.c 	init_iova_domain(&vde->iova, 1UL << order, 0);
iova              102 drivers/staging/media/tegra-vde/iommu.c 	shift = iova_shift(&vde->iova);
iova              103 drivers/staging/media/tegra-vde/iommu.c 	iova = reserve_iova(&vde->iova, 0x60000000 >> shift,
iova              105 drivers/staging/media/tegra-vde/iommu.c 	if (!iova) {
iova              110 drivers/staging/media/tegra-vde/iommu.c 	vde->iova_resv_static_addresses = iova;
iova              118 drivers/staging/media/tegra-vde/iommu.c 	iova = reserve_iova(&vde->iova, 0xffffffff >> shift,
iova              120 drivers/staging/media/tegra-vde/iommu.c 	if (!iova) {
iova              125 drivers/staging/media/tegra-vde/iommu.c 	vde->iova_resv_last_page = iova;
iova              130 drivers/staging/media/tegra-vde/iommu.c 	__free_iova(&vde->iova, vde->iova_resv_static_addresses);
iova              134 drivers/staging/media/tegra-vde/iommu.c 	put_iova_domain(&vde->iova);
iova              147 drivers/staging/media/tegra-vde/iommu.c 		__free_iova(&vde->iova, vde->iova_resv_last_page);
iova              148 drivers/staging/media/tegra-vde/iommu.c 		__free_iova(&vde->iova, vde->iova_resv_static_addresses);
iova              150 drivers/staging/media/tegra-vde/iommu.c 		put_iova_domain(&vde->iova);
iova               48 drivers/staging/media/tegra-vde/vde.h 	struct iova_domain iova;
iova               49 drivers/staging/media/tegra-vde/vde.h 	struct iova *iova_resv_static_addresses;
iova               50 drivers/staging/media/tegra-vde/vde.h 	struct iova *iova_resv_last_page;
iova               59 drivers/staging/media/tegra-vde/vde.h 			struct iova **iovap,
iova               61 drivers/staging/media/tegra-vde/vde.h void tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova);
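
tegra-vde owns a private iova_domain and immediately reserves ranges the allocator must never hand out (a window of static addresses and the last page) with reserve_iova(). A minimal kernel-style sketch; the reserved window below is illustrative rather than the driver's actual layout:

    #include <linux/errno.h>
    #include <linux/iova.h>

    static int example_init_domain(struct iova_domain *iovad,
                                   unsigned long granule)
    {
        unsigned long shift;
        struct iova *resv;

        init_iova_domain(iovad, granule, 0);
        shift = iova_shift(iovad);

        /* Keep an assumed 0x60000000..0x6fffffff window out of use. */
        resv = reserve_iova(iovad, 0x60000000UL >> shift,
                            0x6fffffffUL >> shift);
        if (!resv) {
            put_iova_domain(iovad);
            return -ENOMEM;
        }
        return 0;
    }
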
iova              880 drivers/vfio/vfio_iommu_spapr_tce.c 		num = tce_iommu_find_table(container, param.iova, &tbl);
iova              901 drivers/vfio/vfio_iommu_spapr_tce.c 		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
iova              907 drivers/vfio/vfio_iommu_spapr_tce.c 					param.iova >> tbl->it_page_shift,
iova              913 drivers/vfio/vfio_iommu_spapr_tce.c 					param.iova >> tbl->it_page_shift,
iova              947 drivers/vfio/vfio_iommu_spapr_tce.c 		num = tce_iommu_find_table(container, param.iova, &tbl);
iova              954 drivers/vfio/vfio_iommu_spapr_tce.c 		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
iova              960 drivers/vfio/vfio_iommu_spapr_tce.c 				param.iova >> tbl->it_page_shift,
iova               85 drivers/vfio/vfio_iommu_type1.c 	dma_addr_t		iova;		/* Device address */
iova              112 drivers/vfio/vfio_iommu_type1.c 	dma_addr_t		iova;		/* Device address */
iova              119 drivers/vfio/vfio_iommu_type1.c 	dma_addr_t iova;
iova              142 drivers/vfio/vfio_iommu_type1.c 		if (start + size <= dma->iova)
iova              144 drivers/vfio/vfio_iommu_type1.c 		else if (start >= dma->iova + dma->size)
iova              162 drivers/vfio/vfio_iommu_type1.c 		if (new->iova + new->size <= dma->iova)
iova              180 drivers/vfio/vfio_iommu_type1.c static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
iova              188 drivers/vfio/vfio_iommu_type1.c 		if (iova < vpfn->iova)
iova              190 drivers/vfio/vfio_iommu_type1.c 		else if (iova > vpfn->iova)
iova              209 drivers/vfio/vfio_iommu_type1.c 		if (new->iova < vpfn->iova)
iova              224 drivers/vfio/vfio_iommu_type1.c static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
iova              233 drivers/vfio/vfio_iommu_type1.c 	vpfn->iova = iova;
iova              248 drivers/vfio/vfio_iommu_type1.c 					       unsigned long iova)
iova              250 drivers/vfio/vfio_iommu_type1.c 	struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
iova              404 drivers/vfio/vfio_iommu_type1.c 	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
iova              421 drivers/vfio/vfio_iommu_type1.c 	if (!rsvd && !vfio_find_vpfn(dma, iova)) {
iova              435 drivers/vfio/vfio_iommu_type1.c 	for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
iova              436 drivers/vfio/vfio_iommu_type1.c 	     pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
iova              447 drivers/vfio/vfio_iommu_type1.c 		if (!rsvd && !vfio_find_vpfn(dma, iova)) {
iova              476 drivers/vfio/vfio_iommu_type1.c static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
iova              483 drivers/vfio/vfio_iommu_type1.c 	for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
iova              486 drivers/vfio/vfio_iommu_type1.c 			if (vfio_find_vpfn(dma, iova))
iova              524 drivers/vfio/vfio_iommu_type1.c static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
iova              528 drivers/vfio/vfio_iommu_type1.c 	struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
iova              575 drivers/vfio/vfio_iommu_type1.c 		dma_addr_t iova;
iova              578 drivers/vfio/vfio_iommu_type1.c 		iova = user_pfn[i] << PAGE_SHIFT;
iova              579 drivers/vfio/vfio_iommu_type1.c 		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
iova              590 drivers/vfio/vfio_iommu_type1.c 		vpfn = vfio_iova_get_vfio_pfn(dma, iova);
iova              596 drivers/vfio/vfio_iommu_type1.c 		remote_vaddr = dma->vaddr + (iova - dma->iova);
iova              602 drivers/vfio/vfio_iommu_type1.c 		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
iova              604 drivers/vfio/vfio_iommu_type1.c 			vfio_unpin_page_external(dma, iova, do_accounting);
iova              615 drivers/vfio/vfio_iommu_type1.c 		dma_addr_t iova;
iova              617 drivers/vfio/vfio_iommu_type1.c 		iova = user_pfn[j] << PAGE_SHIFT;
iova              618 drivers/vfio/vfio_iommu_type1.c 		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
iova              619 drivers/vfio/vfio_iommu_type1.c 		vfio_unpin_page_external(dma, iova, do_accounting);
iova              647 drivers/vfio/vfio_iommu_type1.c 		dma_addr_t iova;
iova              649 drivers/vfio/vfio_iommu_type1.c 		iova = user_pfn[i] << PAGE_SHIFT;
iova              650 drivers/vfio/vfio_iommu_type1.c 		dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
iova              653 drivers/vfio/vfio_iommu_type1.c 		vfio_unpin_page_external(dma, iova, do_accounting);
iova              672 drivers/vfio/vfio_iommu_type1.c 						    entry->iova,
iova              695 drivers/vfio/vfio_iommu_type1.c 			       struct vfio_dma *dma, dma_addr_t *iova,
iova              705 drivers/vfio/vfio_iommu_type1.c 		unmapped = iommu_unmap_fast(domain->domain, *iova, len,
iova              711 drivers/vfio/vfio_iommu_type1.c 			entry->iova = *iova;
iova              716 drivers/vfio/vfio_iommu_type1.c 			*iova += unmapped;
iova              735 drivers/vfio/vfio_iommu_type1.c 			       struct vfio_dma *dma, dma_addr_t *iova,
iova              739 drivers/vfio/vfio_iommu_type1.c 	size_t unmapped = iommu_unmap(domain->domain, *iova, len);
iova              742 drivers/vfio/vfio_iommu_type1.c 		*unlocked += vfio_unpin_pages_remote(dma, *iova,
iova              746 drivers/vfio/vfio_iommu_type1.c 		*iova += unmapped;
iova              755 drivers/vfio/vfio_iommu_type1.c 	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
iova              779 drivers/vfio/vfio_iommu_type1.c 		iommu_unmap(d->domain, dma->iova, dma->size);
iova              784 drivers/vfio/vfio_iommu_type1.c 	while (iova < end) {
iova              788 drivers/vfio/vfio_iommu_type1.c 		phys = iommu_iova_to_phys(domain->domain, iova);
iova              790 drivers/vfio/vfio_iommu_type1.c 			iova += PAGE_SIZE;
iova              800 drivers/vfio/vfio_iommu_type1.c 		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
iova              801 drivers/vfio/vfio_iommu_type1.c 			next = iommu_iova_to_phys(domain->domain, iova + len);
iova              810 drivers/vfio/vfio_iommu_type1.c 		unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
iova              815 drivers/vfio/vfio_iommu_type1.c 			unmapped = unmap_unpin_slow(domain, dma, &iova, len,
iova              881 drivers/vfio/vfio_iommu_type1.c 	if (unmap->iova & mask)
iova              885 drivers/vfio/vfio_iommu_type1.c 	if (unmap->iova + unmap->size - 1 < unmap->iova ||
iova              925 drivers/vfio/vfio_iommu_type1.c 		dma = vfio_find_dma(iommu, unmap->iova, 1);
iova              926 drivers/vfio/vfio_iommu_type1.c 		if (dma && dma->iova != unmap->iova) {
iova              930 drivers/vfio/vfio_iommu_type1.c 		dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
iova              931 drivers/vfio/vfio_iommu_type1.c 		if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
iova              937 drivers/vfio/vfio_iommu_type1.c 	while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
iova              938 drivers/vfio/vfio_iommu_type1.c 		if (!iommu->v2 && unmap->iova > dma->iova)
iova              957 drivers/vfio/vfio_iommu_type1.c 			nb_unmap.iova = dma->iova;
iova              985 drivers/vfio/vfio_iommu_type1.c static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
iova              992 drivers/vfio/vfio_iommu_type1.c 		ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
iova             1004 drivers/vfio/vfio_iommu_type1.c 		iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
iova             1012 drivers/vfio/vfio_iommu_type1.c 	dma_addr_t iova = dma->iova;
iova             1030 drivers/vfio/vfio_iommu_type1.c 		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
iova             1033 drivers/vfio/vfio_iommu_type1.c 			vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
iova             1056 drivers/vfio/vfio_iommu_type1.c 	struct list_head *iova = &iommu->iova_list;
iova             1059 drivers/vfio/vfio_iommu_type1.c 	list_for_each_entry(node, iova, list) {
iova             1068 drivers/vfio/vfio_iommu_type1.c 	return list_empty(iova);
iova             1074 drivers/vfio/vfio_iommu_type1.c 	dma_addr_t iova = map->iova;
iova             1082 drivers/vfio/vfio_iommu_type1.c 	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
iova             1095 drivers/vfio/vfio_iommu_type1.c 	if (!prot || !size || (size | iova | vaddr) & mask)
iova             1099 drivers/vfio/vfio_iommu_type1.c 	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
iova             1104 drivers/vfio/vfio_iommu_type1.c 	if (vfio_find_dma(iommu, iova, size)) {
iova             1114 drivers/vfio/vfio_iommu_type1.c 	if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) {
iova             1126 drivers/vfio/vfio_iommu_type1.c 	dma->iova = iova;
iova             1201 drivers/vfio/vfio_iommu_type1.c 		dma_addr_t iova;
iova             1204 drivers/vfio/vfio_iommu_type1.c 		iova = dma->iova;
iova             1206 drivers/vfio/vfio_iommu_type1.c 		while (iova < dma->iova + dma->size) {
iova             1214 drivers/vfio/vfio_iommu_type1.c 				phys = iommu_iova_to_phys(d->domain, iova);
iova             1217 drivers/vfio/vfio_iommu_type1.c 					iova += PAGE_SIZE;
iova             1223 drivers/vfio/vfio_iommu_type1.c 				i = iova + size;
iova             1224 drivers/vfio/vfio_iommu_type1.c 				while (i < dma->iova + dma->size &&
iova             1233 drivers/vfio/vfio_iommu_type1.c 						     (iova - dma->iova);
iova             1234 drivers/vfio/vfio_iommu_type1.c 				size_t n = dma->iova + dma->size - iova;
iova             1250 drivers/vfio/vfio_iommu_type1.c 			ret = iommu_map(domain->domain, iova, phys,
iova             1255 drivers/vfio/vfio_iommu_type1.c 			iova += size;
iova             1464 drivers/vfio/vfio_iommu_type1.c 	struct list_head *iova = &iommu->iova_list;
iova             1466 drivers/vfio/vfio_iommu_type1.c 	if (list_empty(iova))
iova             1470 drivers/vfio/vfio_iommu_type1.c 	first = list_first_entry(iova, struct vfio_iova, list);
iova             1471 drivers/vfio/vfio_iommu_type1.c 	last = list_last_entry(iova, struct vfio_iova, list);
iova             1494 drivers/vfio/vfio_iommu_type1.c static int vfio_iommu_aper_resize(struct list_head *iova,
iova             1499 drivers/vfio/vfio_iommu_type1.c 	if (list_empty(iova))
iova             1500 drivers/vfio/vfio_iommu_type1.c 		return vfio_iommu_iova_insert(iova, start, end);
iova             1503 drivers/vfio/vfio_iommu_type1.c 	list_for_each_entry_safe(node, next, iova, list) {
iova             1516 drivers/vfio/vfio_iommu_type1.c 	list_for_each_entry_safe(node, next, iova, list) {
iova             1555 drivers/vfio/vfio_iommu_type1.c static int vfio_iommu_resv_exclude(struct list_head *iova,
iova             1570 drivers/vfio/vfio_iommu_type1.c 		list_for_each_entry_safe(n, next, iova, list) {
iova             1597 drivers/vfio/vfio_iommu_type1.c 	if (list_empty(iova))
iova             1613 drivers/vfio/vfio_iommu_type1.c static void vfio_iommu_iova_free(struct list_head *iova)
iova             1617 drivers/vfio/vfio_iommu_type1.c 	list_for_each_entry_safe(n, next, iova, list) {
iova             1626 drivers/vfio/vfio_iommu_type1.c 	struct list_head *iova = &iommu->iova_list;
iova             1630 drivers/vfio/vfio_iommu_type1.c 	list_for_each_entry(n, iova, list) {
iova             1646 drivers/vfio/vfio_iommu_type1.c 	struct list_head *iova = &iommu->iova_list;
iova             1648 drivers/vfio/vfio_iommu_type1.c 	vfio_iommu_iova_free(iova);
iova             1650 drivers/vfio/vfio_iommu_type1.c 	list_splice_tail(iova_copy, iova);
iova             2172 drivers/vfio/vfio_iommu_type1.c 	struct vfio_iova *iova;
iova             2178 drivers/vfio/vfio_iommu_type1.c 	list_for_each_entry(iova, &iommu->iova_list, list)
iova             2200 drivers/vfio/vfio_iommu_type1.c 	list_for_each_entry(iova, &iommu->iova_list, list) {
iova             2201 drivers/vfio/vfio_iommu_type1.c 		cap_iovas->iova_ranges[i].start = iova->start;
iova             2202 drivers/vfio/vfio_iommu_type1.c 		cap_iovas->iova_ranges[i].end = iova->end;
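
vfio_find_dma is an interval search over an rb-tree keyed by [iova, iova + size): go left when the query ends at or below the node's range, right when it starts at or above it, otherwise the ranges overlap. A kernel-style sketch of the lookup with a reduced vfio_dma:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    /* Reduced stand-in for struct vfio_dma. */
    struct ex_dma {
        struct rb_node node;
        dma_addr_t iova;
        size_t size;
    };

    static struct ex_dma *ex_find_dma(struct rb_root *root,
                                      dma_addr_t start, size_t size)
    {
        struct rb_node *node = root->rb_node;

        while (node) {
            struct ex_dma *dma = rb_entry(node, struct ex_dma, node);

            if (start + size <= dma->iova)
                node = node->rb_left;    /* query entirely below */
            else if (start >= dma->iova + dma->size)
                node = node->rb_right;   /* query entirely above */
            else
                return dma;              /* overlap */
        }
        return NULL;
    }
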
iova             1067 drivers/vhost/vhost.c 		if (msg->iova <= vq_msg->iova &&
iova             1068 drivers/vhost/vhost.c 		    msg->iova + msg->size - 1 >= vq_msg->iova &&
iova             1114 drivers/vhost/vhost.c 		if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
iova             1115 drivers/vhost/vhost.c 					 msg->iova + msg->size - 1,
iova             1128 drivers/vhost/vhost.c 		vhost_del_umem_range(dev->iotlb, msg->iova,
iova             1129 drivers/vhost/vhost.c 				     msg->iova + msg->size - 1);
iova             1269 drivers/vhost/vhost.c static int vhost_iotlb_miss(struct vhost_virtqueue *vq, u64 iova, int access)
iova             1288 drivers/vhost/vhost.c 	msg->iova = iova;
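
The vhost IOTLB code compares ranges with the inclusive-end convention iova + size - 1, which stays correct even for a range ending at the top of the 64-bit address space, where an exclusive end would wrap to zero. A runnable sketch of the containment test:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Does [iova, iova + size - 1] contain addr? */
    static bool range_contains(uint64_t iova, uint64_t size, uint64_t addr)
    {
        return iova <= addr && iova + size - 1 >= addr;
    }

    int main(void)
    {
        /* A range ending at 2^64 - 1; an exclusive end would overflow. */
        printf("%d\n", range_contains(0xffffffffffff0000ULL, 0x10000ULL,
                                      0xffffffffffffffffULL));   /* 1 */
        printf("%d\n", range_contains(0x1000, 0x1000, 0x2000));  /* 0 */
        return 0;
    }
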
iova             3591 fs/cifs/smb2pdu.c 		v1->offset = cpu_to_le64(rdata->mr->mr->iova);
iova             4010 fs/cifs/smb2pdu.c 		v1->offset = cpu_to_le64(wdata->mr->mr->iova);
iova               41 include/linux/io-pgtable.h 	void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
iova               43 include/linux/io-pgtable.h 	void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule,
iova               46 include/linux/io-pgtable.h 			     unsigned long iova, size_t granule, void *cookie);
iova              138 include/linux/io-pgtable.h 	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
iova              140 include/linux/io-pgtable.h 	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
iova              143 include/linux/io-pgtable.h 				    unsigned long iova);
iova              198 include/linux/io-pgtable.h io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova,
iova              201 include/linux/io-pgtable.h 	iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie);
iova              205 include/linux/io-pgtable.h io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova,
iova              208 include/linux/io-pgtable.h 	iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie);
iova              213 include/linux/io-pgtable.h 			struct iommu_iotlb_gather * gather, unsigned long iova,
iova              217 include/linux/io-pgtable.h 		iop->cfg.tlb->tlb_add_page(gather, iova, granule, iop->cookie);
iova              258 include/linux/iommu.h 	int (*map)(struct iommu_domain *domain, unsigned long iova,
iova              260 include/linux/iommu.h 	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
iova              266 include/linux/iommu.h 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
iova              422 include/linux/iommu.h extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
iova              424 include/linux/iommu.h extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
iova              427 include/linux/iommu.h 			       unsigned long iova, size_t size,
iova              429 include/linux/iommu.h extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
iova              431 include/linux/iommu.h extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
iova              497 include/linux/iommu.h 			      unsigned long iova, int flags);
iova              516 include/linux/iommu.h 					       unsigned long iova, size_t size)
iova              518 include/linux/iommu.h 	unsigned long start = iova, end = start + size;
iova              659 include/linux/iommu.h static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
iova              666 include/linux/iommu.h 				 unsigned long iova, size_t size)
iova              672 include/linux/iommu.h 				      unsigned long iova, int gfp_order,
iova              679 include/linux/iommu.h 				  unsigned long iova, struct scatterlist *sg,
iova              706 include/linux/iommu.h static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
iova              894 include/linux/iommu.h 					       unsigned long iova, size_t size)
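
The core iommu.h API pairs iommu_map()/iommu_unmap() on a domain with iommu_iova_to_phys() for the reverse lookup; when CONFIG_IOMMU_API is off, the inline stubs above take over. A minimal kernel-style usage sketch (the prot flags and single-page size are arbitrary choices):

    #include <linux/iommu.h>

    static int example_map_one_page(struct iommu_domain *domain,
                                    unsigned long iova, phys_addr_t paddr)
    {
        int ret;

        ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
                        IOMMU_READ | IOMMU_WRITE);
        if (ret)
            return ret;

        /* The reverse lookup should return the address we just mapped. */
        WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

        iommu_unmap(domain, iova, PAGE_SIZE);
        return 0;
    }
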
iova               85 include/linux/iova.h 	struct iova	anchor;		/* rbtree lookup anchor */
iova              100 include/linux/iova.h static inline unsigned long iova_size(struct iova *iova)
iova              102 include/linux/iova.h 	return iova->pfn_hi - iova->pfn_lo + 1;
iova              115 include/linux/iova.h static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)
iova              117 include/linux/iova.h 	return iova & iova_mask(iovad);
iova              125 include/linux/iova.h static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)
iova              127 include/linux/iova.h 	return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);
iova              130 include/linux/iova.h static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
iova              132 include/linux/iova.h 	return iova >> iova_shift(iovad);
iova              139 include/linux/iova.h struct iova *alloc_iova_mem(void);
iova              140 include/linux/iova.h void free_iova_mem(struct iova *iova);
iova              142 include/linux/iova.h void __free_iova(struct iova_domain *iovad, struct iova *iova);
iova              143 include/linux/iova.h struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
iova              153 include/linux/iova.h struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
iova              161 include/linux/iova.h struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
iova              163 include/linux/iova.h struct iova *split_and_remove_iova(struct iova_domain *iovad,
iova              164 include/linux/iova.h 	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
iova              176 include/linux/iova.h static inline struct iova *alloc_iova_mem(void)
iova              181 include/linux/iova.h static inline void free_iova_mem(struct iova *iova)
iova              189 include/linux/iova.h static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
iova              193 include/linux/iova.h static inline struct iova *alloc_iova(struct iova_domain *iovad,
iova              221 include/linux/iova.h static inline struct iova *reserve_iova(struct iova_domain *iovad,
iova              251 include/linux/iova.h static inline struct iova *find_iova(struct iova_domain *iovad,
iova              261 include/linux/iova.h static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
iova              262 include/linux/iova.h 						 struct iova *iova,
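
The iova.h helpers are all shift arithmetic over page frame numbers: iova_shift() derives from the domain granule, iova_dma_addr() converts pfn_lo back to a bus address, iova_size() counts frames inclusively, and iova_offset()/iova_pfn() split a raw address. A runnable recreation of that arithmetic for a 4 KiB granule:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long shift = 12;            /* granule = 4096 bytes */
        unsigned long mask = (1UL << shift) - 1;

        /* An allocated range [pfn_lo, pfn_hi], as struct iova stores it. */
        unsigned long pfn_lo = 0x100, pfn_hi = 0x103;
        uint64_t dma_addr = (uint64_t)pfn_lo << shift;   /* iova_dma_addr() */
        unsigned long pages = pfn_hi - pfn_lo + 1;       /* iova_size() */

        uint64_t iova = 0x100abcULL;
        printf("range starts 0x%llx, %lu pages\n",
               (unsigned long long)dma_addr, pages);
        printf("offset 0x%lx, pfn 0x%llx\n",
               (unsigned long)(iova & mask),             /* iova_offset() */
               (unsigned long long)(iova >> shift));     /* iova_pfn() */
        return 0;
    }
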
iova              690 include/linux/mlx4/device.h 	u64			iova;
iova             1117 include/linux/mlx4/device.h int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
iova             1416 include/linux/mlx4/device.h 		      int npages, u64 iova, u32 *lkey, u32 *rkey);
iova             1538 include/linux/mlx4/device.h 			    u64 iova, u64 size, int npages,
iova              373 include/linux/mlx5/driver.h 	u64			iova;
iova              411 include/linux/qcom-geni-se.h 			dma_addr_t *iova);
iova              414 include/linux/qcom-geni-se.h 			dma_addr_t *iova);
iova              416 include/linux/qcom-geni-se.h void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
iova              418 include/linux/qcom-geni-se.h void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
iova             1765 include/rdma/ib_verbs.h 	u64		   iova;
iova             2421 include/rdma/ib_verbs.h 			    u64 iova);
iova             4193 include/rdma/ib_verbs.h 				  u64 iova)
iova             4195 include/rdma/ib_verbs.h 	return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
iova             4347 include/rdma/ib_verbs.h 	mr->iova = 0;
iova               76 include/rdma/rdmavt_mr.h 	u64 iova;               /* IB start address of this region */
iova               88 include/trace/events/iommu.h 	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
iova               90 include/trace/events/iommu.h 	TP_ARGS(iova, paddr, size),
iova               93 include/trace/events/iommu.h 		__field(u64, iova)
iova               99 include/trace/events/iommu.h 		__entry->iova = iova;
iova              105 include/trace/events/iommu.h 			__entry->iova, __entry->paddr, __entry->size
iova              111 include/trace/events/iommu.h 	TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
iova              113 include/trace/events/iommu.h 	TP_ARGS(iova, size, unmapped_size),
iova              116 include/trace/events/iommu.h 		__field(u64, iova)
iova              122 include/trace/events/iommu.h 		__entry->iova = iova;
iova              128 include/trace/events/iommu.h 			__entry->iova, __entry->size, __entry->unmapped_size
iova              134 include/trace/events/iommu.h 	TP_PROTO(struct device *dev, unsigned long iova, int flags),
iova              136 include/trace/events/iommu.h 	TP_ARGS(dev, iova, flags),
iova              141 include/trace/events/iommu.h 		__field(u64, iova)
iova              148 include/trace/events/iommu.h 		__entry->iova = iova;
iova              154 include/trace/events/iommu.h 			__entry->iova, __entry->flags
iova              160 include/trace/events/iommu.h 	TP_PROTO(struct device *dev, unsigned long iova, int flags),
iova              162 include/trace/events/iommu.h 	TP_ARGS(dev, iova, flags)
iova              133 include/uapi/linux/fpga-dfl.h 	__u64 iova;             /* IO virtual address */
iova              149 include/uapi/linux/fpga-dfl.h 	__u64 iova;		/* IO virtual address */
iova              765 include/uapi/linux/vfio.h 	__u64	iova;				/* IO virtual address */
iova              784 include/uapi/linux/vfio.h 	__u64	iova;				/* IO virtual address */
iova               52 include/uapi/linux/vhost_types.h 	__u64 iova;
iova              138 include/uapi/rdma/rdma_user_rxe.h 	__aligned_u64		iova;
iova              366 net/sunrpc/xprtrdma/frwr_ops.c 	ibmr->iova &= 0x00000000ffffffff;
iova              367 net/sunrpc/xprtrdma/frwr_ops.c 	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
iova              380 net/sunrpc/xprtrdma/frwr_ops.c 	mr->mr_offset = ibmr->iova;
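
frwr_ops repurposes the upper 32 bits of the MR's iova to carry the RPC XID, so a completion can be tied back to its request: the lines above first mask the iova down to its low 32 bits, then or the XID into the high half, and a later line records ibmr->iova as the MR offset. A runnable sketch of the pack step and how each half reads back (the sample values are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t iova = 0x12345000ULL;   /* registered address, low 32 bits */
        uint32_t xid = 0xabcd1234;       /* XID, already byte-swapped */

        /* Pack: keep the low half, stash the XID in the high half. */
        iova &= 0x00000000ffffffffULL;
        iova |= (uint64_t)xid << 32;

        printf("xid 0x%x, offset bits 0x%llx\n",
               (unsigned)(iova >> 32),
               (unsigned long long)(iova & 0xffffffffULL));
        return 0;
    }
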