Lines Matching refs:size
52 static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags) in __alloc_from_pool() argument
62 val = gen_pool_alloc(atomic_pool, size); in __alloc_from_pool()
68 memset(ptr, 0, size); in __alloc_from_pool()
74 static bool __in_atomic_pool(void *start, size_t size) in __in_atomic_pool() argument
76 return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); in __in_atomic_pool()
79 static int __free_from_pool(void *start, size_t size) in __free_from_pool() argument
81 if (!__in_atomic_pool(start, size)) in __free_from_pool()
84 gen_pool_free(atomic_pool, (unsigned long)start, size); in __free_from_pool()
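The three helpers above wrap a single gen_pool, atomic_pool, that backs allocations which may not sleep. A minimal sketch of the same pattern, assuming atomic_pool has already been populated at boot; phys_to_page() is the arch-provided conversion the real code also relies on:

#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/string.h>

static struct gen_pool *atomic_pool;	/* filled in at boot in the real code */

static void *pool_alloc_zeroed(size_t size, struct page **ret_page)
{
	unsigned long val = gen_pool_alloc(atomic_pool, size);

	if (!val)
		return NULL;

	/* report the backing page so the caller can build a dma_addr_t */
	*ret_page = phys_to_page(gen_pool_virt_to_phys(atomic_pool, val));
	memset((void *)val, 0, size);
	return (void *)val;
}

static int pool_free(void *start, size_t size)
{
	/* only release buffers that actually came from the pool */
	if (!addr_in_gen_pool(atomic_pool, (unsigned long)start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);
	return 1;
}
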
89 static void *__dma_alloc_coherent(struct device *dev, size_t size, in __dma_alloc_coherent() argument
105 page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT, in __dma_alloc_coherent()
106 get_order(size)); in __dma_alloc_coherent()
112 memset(addr, 0, size); in __dma_alloc_coherent()
115 return swiotlb_alloc_coherent(dev, size, dma_handle, flags); in __dma_alloc_coherent()
119 static void __dma_free_coherent(struct device *dev, size_t size, in __dma_free_coherent() argument
133 size >> PAGE_SHIFT); in __dma_free_coherent()
135 swiotlb_free_coherent(dev, size, vaddr, dma_handle); in __dma_free_coherent()
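__dma_alloc_coherent() picks CMA when the caller may block and falls back to swiotlb_alloc_coherent() otherwise; __dma_free_coherent() mirrors the choice. A rough sketch of the allocation side, assuming gfpflags_allow_blocking() as the blocking test (older kernels checked __GFP_WAIT) and the three-argument dma_alloc_from_contiguous() of this kernel generation:

#include <linux/dma-contiguous.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

static void *coherent_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flags)
{
	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	}

	/* atomic context (or no CMA): let swiotlb hand out a coherent buffer */
	return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}
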
138 static void *__dma_alloc(struct device *dev, size_t size, in __dma_alloc() argument
147 size = PAGE_ALIGN(size); in __dma_alloc()
151 void *addr = __alloc_from_pool(size, &page, flags); in __dma_alloc()
159 ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs); in __dma_alloc()
168 __dma_flush_range(ptr, ptr + size); in __dma_alloc()
172 coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP, in __dma_alloc()
180 __dma_free_coherent(dev, size, ptr, *dma_handle, attrs); in __dma_alloc()
186 static void __dma_free(struct device *dev, size_t size, in __dma_free() argument
192 size = PAGE_ALIGN(size); in __dma_free()
195 if (__free_from_pool(vaddr, size)) in __dma_free()
199 __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs); in __dma_free()
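For a non-coherent device, __dma_alloc() adds a non-cacheable CPU alias on top of the coherent buffer: atomic requests are served from the pool, blocking ones go through __dma_alloc_coherent(), get flushed with __dma_flush_range(), and are then remapped. A condensed sketch reusing the helpers named above; the pgprot choice is simplified to pgprot_writecombine(), where the real code derives it from the dma_attrs:

static void *noncoherent_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flags,
			       struct dma_attrs *attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;

	size = PAGE_ALIGN(size);

	if (!gfpflags_allow_blocking(flags)) {
		/* atomic path: carve the buffer out of the atomic pool */
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		return NULL;

	/* push dirty cache lines out before the device ever sees the buffer */
	__dma_flush_range(ptr, ptr + size);

	/* create the non-cacheable alias the CPU will use from now on */
	coherent_ptr = dma_common_contiguous_remap(virt_to_page(ptr), size,
						   VM_USERMAP,
						   pgprot_writecombine(PAGE_KERNEL),
						   __builtin_return_address(0));
	if (!coherent_ptr)
		__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);

	return coherent_ptr;
}
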
203 unsigned long offset, size_t size, in __swiotlb_map_page() argument
209 dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs); in __swiotlb_map_page()
211 __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); in __swiotlb_map_page()
218 size_t size, enum dma_data_direction dir, in __swiotlb_unmap_page() argument
222 __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); in __swiotlb_unmap_page()
223 swiotlb_unmap_page(dev, dev_addr, size, dir, attrs); in __swiotlb_unmap_page()
258 dma_addr_t dev_addr, size_t size, in __swiotlb_sync_single_for_cpu() argument
262 __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); in __swiotlb_sync_single_for_cpu()
263 swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir); in __swiotlb_sync_single_for_cpu()
267 dma_addr_t dev_addr, size_t size, in __swiotlb_sync_single_for_device() argument
270 swiotlb_sync_single_for_device(dev, dev_addr, size, dir); in __swiotlb_sync_single_for_device()
272 __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir); in __swiotlb_sync_single_for_device()
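The map/unmap/sync wrappers all follow the same ordering rule: __dma_map_area() writes caches back before the device reads, __dma_unmap_area() invalidates before the CPU reads, and both are skipped for coherent devices. A sketch of the sync pair, assuming is_device_dma_coherent() as the per-device coherence test used elsewhere in this file:

static void sync_for_cpu(struct device *dev, dma_addr_t dev_addr,
			 size_t size, enum dma_data_direction dir)
{
	/* invalidate stale cache lines before the CPU reads DMA'd data */
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)),
				 size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void sync_for_device(struct device *dev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	/* write caches back before the device reads the buffer */
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)),
			       size, dir);
}
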
305 void *cpu_addr, dma_addr_t dma_addr, size_t size, in __swiotlb_mmap() argument
311 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in __swiotlb_mmap()
318 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) in __swiotlb_mmap()
332 void *cpu_addr, dma_addr_t handle, size_t size, in __swiotlb_get_sgtable() argument
339 PAGE_ALIGN(size), 0); in __swiotlb_get_sgtable()
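Because the buffer behind these swiotlb ops is physically contiguous, __swiotlb_get_sgtable() can describe it with a single scatterlist entry; the fragment above is the sg_set_page() call doing exactly that. A self-contained sketch of the idea:

#include <linux/scatterlist.h>

static int contiguous_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t handle,
				  size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	/* one entry covers the whole physically contiguous buffer */
	if (!ret)
		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
			    PAGE_ALIGN(size), 0);
	return ret;
}
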
426 static void *__dummy_alloc(struct device *dev, size_t size, in __dummy_alloc() argument
433 static void __dummy_free(struct device *dev, size_t size, in __dummy_free() argument
441 void *cpu_addr, dma_addr_t dma_addr, size_t size, in __dummy_mmap() argument
448 unsigned long offset, size_t size, in __dummy_map_page() argument
456 size_t size, enum dma_data_direction dir, in __dummy_unmap_page() argument
476 dma_addr_t dev_addr, size_t size, in __dummy_sync_single() argument
541 static void *__iommu_alloc_attrs(struct device *dev, size_t size, in __iommu_alloc_attrs() argument
547 size_t iosize = size; in __iommu_alloc_attrs()
553 size = PAGE_ALIGN(size); in __iommu_alloc_attrs()
570 addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot, in __iommu_alloc_attrs()
582 page = alloc_pages(gfp, get_order(size)); in __iommu_alloc_attrs()
585 addr = __alloc_from_pool(size, &page, gfp); in __iommu_alloc_attrs()
593 __free_pages(page, get_order(size)); in __iommu_alloc_attrs()
595 __free_from_pool(addr, size); in __iommu_alloc_attrs()
602 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, in __iommu_free_attrs() argument
605 size_t iosize = size; in __iommu_free_attrs()
607 size = PAGE_ALIGN(size); in __iommu_free_attrs()
618 if (__in_atomic_pool(cpu_addr, size)) { in __iommu_free_attrs()
620 __free_from_pool(cpu_addr, size); in __iommu_free_attrs()
627 dma_common_free_remap(cpu_addr, size, VM_USERMAP); in __iommu_free_attrs()
630 __free_pages(virt_to_page(cpu_addr), get_order(size)); in __iommu_free_attrs()
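The IOMMU free path has to reconstruct which strategy allocated the buffer: atomic-pool memory is recognised with __in_atomic_pool(), remapped page arrays by their VM_USERMAP vmalloc area, and plain alloc_pages() buffers by falling through to virt_to_page(). A condensed sketch of that dispatch, assuming the iommu_dma_free()/iommu_dma_unmap_page() helpers from the dma-iommu layer and find_vm_area() to recover the remapped area:

#include <linux/vmalloc.h>

static void iommu_free_dispatch(struct device *dev, void *cpu_addr,
				size_t size, dma_addr_t handle)
{
	size_t iosize = size;		/* size as mapped through the IOMMU */

	size = PAGE_ALIGN(size);

	if (__in_atomic_pool(cpu_addr, size)) {
		/* non-blocking allocation: single IOVA mapping + pool memory */
		iommu_dma_unmap_page(dev, handle, iosize, DMA_BIDIRECTIONAL, NULL);
		__free_from_pool(cpu_addr, size);
	} else if (is_vmalloc_addr(cpu_addr)) {
		/* blocking allocation: page array remapped with VM_USERMAP */
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		/* coherent, non-blocking: plain alloc_pages() buffer */
		iommu_dma_unmap_page(dev, handle, iosize, DMA_BIDIRECTIONAL, NULL);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}
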
635 void *cpu_addr, dma_addr_t dma_addr, size_t size, in __iommu_mmap_attrs() argument
644 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) in __iommu_mmap_attrs()
651 return iommu_dma_mmap(area->pages, size, vma); in __iommu_mmap_attrs()
656 size_t size, struct dma_attrs *attrs) in __iommu_get_sgtable() argument
658 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in __iommu_get_sgtable()
664 return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size, in __iommu_get_sgtable()
669 dma_addr_t dev_addr, size_t size, in __iommu_sync_single_for_cpu() argument
678 __dma_unmap_area(phys_to_virt(phys), size, dir); in __iommu_sync_single_for_cpu()
682 dma_addr_t dev_addr, size_t size, in __iommu_sync_single_for_device() argument
691 __dma_map_area(phys_to_virt(phys), size, dir); in __iommu_sync_single_for_device()
695 unsigned long offset, size_t size, in __iommu_map_page() argument
701 dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot); in __iommu_map_page()
705 __iommu_sync_single_for_device(dev, dev_addr, size, dir); in __iommu_map_page()
711 size_t size, enum dma_data_direction dir, in __iommu_unmap_page() argument
715 __iommu_sync_single_for_cpu(dev, dev_addr, size, dir); in __iommu_unmap_page()
717 iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs); in __iommu_unmap_page()
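__iommu_map_page() and __iommu_unmap_page() pair the IOVA mapping with the same cache maintenance as the swiotlb path: write-back before the device reads, invalidate before the CPU reads, skipped entirely for coherent devices. A sketch, assuming dma_direction_to_prot() and iommu_dma_mapping_error() from the dma-iommu helpers:

static dma_addr_t iommu_map_with_sync(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_direction_to_prot(dir, coherent);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	/* the device is about to access the buffer: write caches back first */
	if (!iommu_dma_mapping_error(dev, dev_addr) && !coherent)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void iommu_unmap_with_sync(struct device *dev, dma_addr_t dev_addr,
				  size_t size, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	/* the CPU will read the buffer next: invalidate stale cache lines */
	if (!is_device_dma_coherent(dev))
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}
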
802 u64 size; member
814 u64 dma_base, u64 size) in do_iommu_attach() argument
843 if (iommu_dma_init_domain(domain, dma_base, size)) in do_iommu_attach()
861 u64 dma_base, u64 size) in queue_iommu_attach() argument
872 iommudata->size = size; in queue_iommu_attach()
890 master->dma_base, master->size)) { in __iommu_attach_notifier()
944 static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, in __iommu_setup_dma_ops() argument
959 do_iommu_attach(dev, ops, dma_base, size); in __iommu_setup_dma_ops()
962 queue_iommu_attach(dev, ops, dma_base, size); in __iommu_setup_dma_ops()
981 static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, in __iommu_setup_dma_ops() argument
987 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, in arch_setup_dma_ops() argument
994 __iommu_setup_dma_ops(dev, dma_base, size, iommu); in arch_setup_dma_ops()
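The notifier plumbing near the end exists because a master can be added before its IOMMU driver has probed: arch_setup_dma_ops() passes dma_base/size down to __iommu_setup_dma_ops(), which either attaches the device to a DMA domain right away or parks the parameters (the size member at line 802) for the bus notifier to replay later. A sketch of that dispatch; device_ready_for_attach() is a hypothetical stand-in for the real readiness check:

static void setup_dma_ops_or_defer(struct device *dev,
				   const struct iommu_ops *ops,
				   u64 dma_base, u64 size)
{
	if (!ops)
		return;		/* no IOMMU translation: keep the swiotlb ops */

	if (device_ready_for_attach(dev))	/* hypothetical readiness test */
		do_iommu_attach(dev, ops, dma_base, size);
	else
		queue_iommu_attach(dev, ops, dma_base, size);
}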