Lines Matching refs:zdev

21 static int zpci_refresh_global(struct zpci_dev *zdev)  in zpci_refresh_global()  argument
23 return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma, in zpci_refresh_global()
24 zdev->iommu_pages * PAGE_SIZE); in zpci_refresh_global()
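
The matches above form a complete helper (the function names and line numbers are consistent with arch/s390/pci/pci_dma.c from a pre-5.x kernel). zpci_refresh_global() flushes the device's entire IOTLB with a single RPCIT (refresh PCI translations) call spanning the whole DMA aperture; the function handle fh is carried in the upper 32 bits of the first operand. Reassembled from lines 21-24:

    static int zpci_refresh_global(struct zpci_dev *zdev)
    {
            /* Refresh every translation of this function: the range covers
             * the full aperture, [start_dma, start_dma + iommu_pages * PAGE_SIZE).
             */
            return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
                                      zdev->iommu_pages * PAGE_SIZE);
    }
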
132 static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, in dma_update_trans() argument
145 spin_lock_irqsave(&zdev->dma_table_lock, irq_flags); in dma_update_trans()
146 if (!zdev->dma_table) { in dma_update_trans()
152 entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); in dma_update_trans()
169 if (!zdev->tlb_refresh && in dma_update_trans()
174 rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, in dma_update_trans()
182 entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr); in dma_update_trans()
190 spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); in dma_update_trans()
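
dma_update_trans() (in)validates a range of translation-table entries under dma_table_lock and then decides whether an IOTLB refresh is needed: with zdev->tlb_refresh clear the hardware does not cache invalid entries, so validating fresh entries needs no RPCIT, and in lazy (non-strict) mode invalidations skip it too, deferring the flush to the next bitmap wrap-around. The second dma_walk_cpu_trans() match (line 182) is the rollback path that re-walks and undoes already-updated entries after a failure. A condensed sketch around the matched lines; the exact skip condition is reconstructed from this file's history and may differ in detail between kernel versions:

    /* Sketch: update one CPU translation entry per page, then refresh the
     * IOTLB only when the hardware actually requires it.
     */
    spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
    if (!zdev->dma_table) {
            rc = -EINVAL;
            goto out_unlock;
    }
    for (i = 0; i < nr_pages; i++) {
            entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
            if (!entry) {
                    rc = -ENOMEM;
                    goto undo_cpu_trans;    /* roll back entries updated so far */
            }
            dma_update_cpu_trans(entry, page_addr, flags);
            page_addr += PAGE_SIZE;
            dma_addr += PAGE_SIZE;
    }
    /* No RPCIT when validating entries, or when invalidating lazily. */
    if (!zdev->tlb_refresh &&
        (!s390_iommu_strict ||
         ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
            goto out_unlock;
    rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
                            nr_pages * PAGE_SIZE);
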
220 static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, in __dma_alloc_iommu() argument
225 boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1, in __dma_alloc_iommu()
227 return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages, in __dma_alloc_iommu()
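
__dma_alloc_iommu() is a thin wrapper over the generic bitmap allocator. The one subtlety on the matched lines is boundary_size: the device's segment-boundary byte mask (e.g. 0xffffffff) is converted to a page count so iommu_area_alloc() never returns a range that crosses such a boundary. Sketch; the iommu_area_alloc() arguments past line 227 are not among the matches and are reconstructed, so they may vary by kernel version:

    static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
                                           unsigned long start, int size)
    {
            unsigned long boundary_size;

            /* Segment-boundary byte mask -> page-granular boundary. */
            boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
                                  PAGE_SIZE) >> PAGE_SHIFT;
            return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
                                    start, size, 0, boundary_size, 0);
    }
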
231 static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size) in dma_alloc_iommu() argument
236 spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags); in dma_alloc_iommu()
237 offset = __dma_alloc_iommu(zdev, zdev->next_bit, size); in dma_alloc_iommu()
240 offset = __dma_alloc_iommu(zdev, 0, size); in dma_alloc_iommu()
245 zdev->next_bit = offset + size; in dma_alloc_iommu()
246 if (!zdev->tlb_refresh && !s390_iommu_strict && wrap) in dma_alloc_iommu()
248 zpci_refresh_global(zdev); in dma_alloc_iommu()
250 spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); in dma_alloc_iommu()
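
dma_alloc_iommu() implements a next-fit strategy over the aperture bitmap: try from the cursor next_bit, retry once from bit 0 on failure, and record whether a wrap-around happened. The wrap is exactly the point where lazily unmapped (possibly still IOTLB-cached) addresses come back into circulation, so lines 246-248 flush the whole function via zpci_refresh_global() first. Reassembled sketch:

    static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
    {
            unsigned long offset, flags;
            int wrap = 0;

            spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
            offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
            if (offset == -1) {
                    /* Wrap around: retry from the start of the aperture. */
                    offset = __dma_alloc_iommu(zdev, 0, size);
                    wrap = 1;
            }

            if (offset != -1) {
                    zdev->next_bit = offset + size;
                    /* Lazy unmap leaves stale IOTLB entries; flush them all
                     * before any wrapped-around address is reused.
                     */
                    if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
                            zpci_refresh_global(zdev);
            }
            spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
            return offset;
    }
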
254 static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size) in dma_free_iommu() argument
258 spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags); in dma_free_iommu()
259 if (!zdev->iommu_bitmap) in dma_free_iommu()
261 bitmap_clear(zdev->iommu_bitmap, offset, size); in dma_free_iommu()
266 if (!s390_iommu_strict && offset >= zdev->next_bit) in dma_free_iommu()
267 zdev->next_bit = offset + size; in dma_free_iommu()
269 spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags); in dma_free_iommu()
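
dma_free_iommu() clears the bits and, in lazy mode, pushes next_bit past the freed range when it lies at or beyond the cursor, so just-freed (and possibly still IOTLB-cached) addresses are only handed out again after the next wrap-around and its global refresh. Reassembled sketch:

    static void dma_free_iommu(struct zpci_dev *zdev,
                               unsigned long offset, int size)
    {
            unsigned long flags;

            spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
            if (!zdev->iommu_bitmap)
                    goto out;
            bitmap_clear(zdev->iommu_bitmap, offset, size);
            /* Lazy unmap: keep the cursor ahead of the freed range so it is
             * not reused before the wrap-around flush.
             */
            if (!s390_iommu_strict && offset >= zdev->next_bit)
                    zdev->next_bit = offset + size;
    out:
            spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
    }
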
287 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); in s390_dma_map_pages() local
296 iommu_page_index = dma_alloc_iommu(zdev, nr_pages); in s390_dma_map_pages()
305 dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE; in s390_dma_map_pages()
306 if (dma_addr + size > zdev->end_dma) { in s390_dma_map_pages()
314 ret = dma_update_trans(zdev, pa, dma_addr, size, flags); in s390_dma_map_pages()
318 atomic64_add(nr_pages, &zdev->mapped_pages); in s390_dma_map_pages()
322 dma_free_iommu(zdev, iommu_page_index, nr_pages); in s390_dma_map_pages()
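
s390_dma_map_pages() ties the pieces together: reserve nr_pages bits in the bitmap, turn the bitmap index into a bus address relative to start_dma, bounds-check against end_dma, program the translation entries, and on any failure return the bits. A condensed sketch; the error return value and the protection flag for read-only directions are reconstructed from kernels of that era:

    /* Condensed map path; assumes pa, size, nr_pages and flags
     * (ZPCI_PTE_VALID, plus ZPCI_TABLE_PROTECTED for DMA_TO_DEVICE)
     * were set up in the elided prologue.
     */
    iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
    if (iommu_page_index == -1)
            goto out_err;

    /* Bitmap index -> bus address inside [start_dma, end_dma]. */
    dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
    if (dma_addr + size > zdev->end_dma)
            goto out_free;

    ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
    if (ret)
            goto out_free;

    atomic64_add(nr_pages, &zdev->mapped_pages);
    return dma_addr + (pa & ~PAGE_MASK);    /* keep the sub-page offset */

    out_free:
            dma_free_iommu(zdev, iommu_page_index, nr_pages);
    out_err:
            return DMA_ERROR_CODE;    /* pre-4.19 error convention on s390 */
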
333 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); in s390_dma_unmap_pages() local
339 ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE, in s390_dma_unmap_pages()
347 atomic64_add(npages, &zdev->unmapped_pages); in s390_dma_unmap_pages()
348 iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT; in s390_dma_unmap_pages()
349 dma_free_iommu(zdev, iommu_page_index, npages); in s390_dma_unmap_pages()
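
s390_dma_unmap_pages() is the mirror image: invalidate the entries (a pa of 0 with ZPCI_PTE_INVALID), then invert the map-side arithmetic to recover the bitmap index and release the bits. Condensed sketch of the matched path:

    /* Condensed unmap path. */
    npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
    dma_addr &= PAGE_MASK;
    ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
                           ZPCI_PTE_INVALID);
    if (ret)
            return;    /* table gone or inconsistent; nothing to free */

    atomic64_add(npages, &zdev->unmapped_pages);
    /* Bus address -> bitmap index: inverse of the map-side computation. */
    iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
    dma_free_iommu(zdev, iommu_page_index, npages);
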
356 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); in s390_dma_alloc() local
376 atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages); in s390_dma_alloc()
386 struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); in s390_dma_free() local
389 atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages); in s390_dma_free()
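
s390_dma_alloc() and s390_dma_free() layer coherent buffers over the map/unmap path; the matched lines are their statistics updates. allocated_pages, like mapped_pages and unmapped_pages above, is a per-device atomic64 counter, and since both callers PAGE_ALIGN() the size first, size / PAGE_SIZE is exact. A condensed alloc-side sketch, reconstructed from kernels of that era:

    static void *s390_dma_alloc(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag,
                                unsigned long attrs)
    {
            struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
            struct page *page;
            unsigned long pa;
            dma_addr_t map;

            size = PAGE_ALIGN(size);
            page = alloc_pages(flag, get_order(size));
            if (!page)
                    return NULL;

            pa = page_to_phys(page);
            map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
            if (dma_mapping_error(dev, map)) {
                    free_pages(pa, get_order(size));
                    return NULL;
            }

            atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);  /* line 376 */
            if (dma_handle)
                    *dma_handle = map;
            return (void *) pa;
    }
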
441 int zpci_dma_init_device(struct zpci_dev *zdev) in zpci_dma_init_device() argument
450 WARN_ON(zdev->s390_domain); in zpci_dma_init_device()
452 spin_lock_init(&zdev->iommu_bitmap_lock); in zpci_dma_init_device()
453 spin_lock_init(&zdev->dma_table_lock); in zpci_dma_init_device()
455 zdev->dma_table = dma_alloc_cpu_table(); in zpci_dma_init_device()
456 if (!zdev->dma_table) { in zpci_dma_init_device()
461 zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET; in zpci_dma_init_device()
462 zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT; in zpci_dma_init_device()
463 zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8); in zpci_dma_init_device()
464 if (!zdev->iommu_bitmap) { in zpci_dma_init_device()
469 rc = zpci_register_ioat(zdev, in zpci_dma_init_device()
471 zdev->start_dma + PAGE_OFFSET, in zpci_dma_init_device()
472 zdev->start_dma + zdev->iommu_size - 1, in zpci_dma_init_device()
473 (u64) zdev->dma_table); in zpci_dma_init_device()
479 dma_free_cpu_table(zdev->dma_table); in zpci_dma_init_device()
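
zpci_dma_init_device() sizes the aperture after the amount of addressable memory (high_memory - PAGE_OFFSET), allocates the CPU translation table and one bitmap bit per aperture page (hence the vzalloc of iommu_pages / 8 bytes), and attaches the table to the function via zpci_register_ioat(). The WARN_ON(zdev->s390_domain) guards against initializing DMA API state while the device is attached to an IOMMU-API domain, which owns the tables instead. Condensed sketch; the 0 passed to zpci_register_ioat() (the DMA address-space number, not visible among the matches) and the error codes are reconstructed:

    /* Condensed init path. */
    spin_lock_init(&zdev->iommu_bitmap_lock);
    spin_lock_init(&zdev->dma_table_lock);

    zdev->dma_table = dma_alloc_cpu_table();
    if (!zdev->dma_table)
            return -ENOMEM;

    /* One translation entry per page of usable memory, one bitmap bit each.
     * PAGE_OFFSET is 0 on s390, so the registered range effectively starts
     * at start_dma.
     */
    zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
    zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
    zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
    if (!zdev->iommu_bitmap) {
            rc = -ENOMEM;
            goto free_dma_table;
    }

    rc = zpci_register_ioat(zdev, 0,
                            zdev->start_dma + PAGE_OFFSET,
                            zdev->start_dma + zdev->iommu_size - 1,
                            (u64) zdev->dma_table);
    if (rc)
            goto free_bitmap;
    return 0;

    free_bitmap:
            vfree(zdev->iommu_bitmap);
            zdev->iommu_bitmap = NULL;
    free_dma_table:
            dma_free_cpu_table(zdev->dma_table);    /* line 479 */
            zdev->dma_table = NULL;
            return rc;
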
484 void zpci_dma_exit_device(struct zpci_dev *zdev) in zpci_dma_exit_device() argument
491 WARN_ON(zdev->s390_domain); in zpci_dma_exit_device()
493 zpci_unregister_ioat(zdev, 0); in zpci_dma_exit_device()
494 dma_cleanup_tables(zdev->dma_table); in zpci_dma_exit_device()
495 zdev->dma_table = NULL; in zpci_dma_exit_device()
496 vfree(zdev->iommu_bitmap); in zpci_dma_exit_device()
497 zdev->iommu_bitmap = NULL; in zpci_dma_exit_device()
498 zdev->next_bit = 0; in zpci_dma_exit_device()
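
zpci_dma_exit_device() tears the same state down in reverse: detach the table from the hardware, free the CPU translation tables and the allocation bitmap, and reset the next-fit cursor so a subsequent re-init starts from a clean aperture. Reassembled from the matches (the WARN_ON mirrors the one in init):

    void zpci_dma_exit_device(struct zpci_dev *zdev)
    {
            /* Must not be attached to an IOMMU-API domain at this point. */
            WARN_ON(zdev->s390_domain);

            zpci_unregister_ioat(zdev, 0);          /* detach table from hardware */
            dma_cleanup_tables(zdev->dma_table);    /* free CPU translation tables */
            zdev->dma_table = NULL;
            vfree(zdev->iommu_bitmap);
            zdev->iommu_bitmap = NULL;
            zdev->next_bit = 0;                     /* reset next-fit cursor */
    }
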