Lines Matching refs:size
77 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_dma_map_page() argument
81 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_dma_map_page()
86 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_coherent_dma_map_page() argument
107 size_t size, enum dma_data_direction dir, in arm_dma_unmap_page() argument
112 handle & ~PAGE_MASK, size, dir); in arm_dma_unmap_page()
116 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_dma_sync_single_for_cpu() argument
120 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_dma_sync_single_for_cpu()
124 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_dma_sync_single_for_device() argument
128 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_dma_sync_single_for_device()
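The entries at lines 77-128 above are the streaming-DMA callbacks behind dma_map_page(), dma_unmap_page() and dma_sync_single_for_{cpu,device}() on ARM: the map and sync-for-device paths do cache maintenance through __dma_page_cpu_to_dev(), the unmap and sync-for-cpu paths through __dma_page_dev_to_cpu(). A minimal, hedged sketch of how a driver reaches them through the generic API on a non-coherent ARM system ('dev', 'page' and 'len' are illustrative names, not from this file):

#include <linux/dma-mapping.h>

/*
 * Sketch: let a device DMA into one page, look at the data, then let it
 * DMA again.  'dev', 'page' and 'len' are assumed to come from the caller.
 */
static int example_rx_page(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t handle;

	/* Dispatches to arm_dma_map_page() through the dma_map_ops table. */
	handle = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... device writes into the buffer ... */

	/* Hand the buffer back to the CPU: arm_dma_sync_single_for_cpu(). */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... CPU reads the received data ... */

	/* Give it back to the device: arm_dma_sync_single_for_device(). */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
	/* ... device writes again ... */

	dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}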
148 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
150 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
153 void *cpu_addr, dma_addr_t dma_addr, size_t size,
228 static void __dma_clear_buffer(struct page *page, size_t size) in __dma_clear_buffer() argument
236 phys_addr_t end = base + size; in __dma_clear_buffer()
237 while (size > 0) { in __dma_clear_buffer()
243 size -= PAGE_SIZE; in __dma_clear_buffer()
248 memset(ptr, 0, size); in __dma_clear_buffer()
249 dmac_flush_range(ptr, ptr + size); in __dma_clear_buffer()
250 outer_flush_range(__pa(ptr), __pa(ptr) + size); in __dma_clear_buffer()
258 static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) in __dma_alloc_buffer() argument
260 unsigned long order = get_order(size); in __dma_alloc_buffer()
271 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) in __dma_alloc_buffer()
274 __dma_clear_buffer(page, size); in __dma_alloc_buffer()
282 static void __dma_free_buffer(struct page *page, size_t size) in __dma_free_buffer() argument
284 struct page *e = page + (size >> PAGE_SHIFT); in __dma_free_buffer()
294 static void *__alloc_from_contiguous(struct device *dev, size_t size,
298 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
303 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, in __dma_alloc_remap() argument
310 return dma_common_contiguous_remap(page, size, in __dma_alloc_remap()
315 static void __dma_free_remap(void *cpu_addr, size_t size) in __dma_free_remap() argument
317 dma_common_free_remap(cpu_addr, size, in __dma_free_remap()
333 void __init init_dma_coherent_pool_size(unsigned long size) in init_dma_coherent_pool_size() argument
345 atomic_pool_size = size; in init_dma_coherent_pool_size()
400 unsigned long size; member
407 void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) in dma_contiguous_early_fixup() argument
410 dma_mmu_remap[dma_mmu_remap_num].size = size; in dma_contiguous_early_fixup()
419 phys_addr_t end = start + dma_mmu_remap[i].size; in dma_contiguous_remap()
463 static void __dma_remap(struct page *page, size_t size, pgprot_t prot) in __dma_remap() argument
466 unsigned end = start + size; in __dma_remap()
468 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); in __dma_remap()
472 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, in __alloc_remap_buffer() argument
478 page = __dma_alloc_buffer(dev, size, gfp); in __alloc_remap_buffer()
484 ptr = __dma_alloc_remap(page, size, gfp, prot, caller); in __alloc_remap_buffer()
486 __dma_free_buffer(page, size); in __alloc_remap_buffer()
495 static void *__alloc_from_pool(size_t size, struct page **ret_page) in __alloc_from_pool() argument
505 val = gen_pool_alloc(atomic_pool, size); in __alloc_from_pool()
516 static bool __in_atomic_pool(void *start, size_t size) in __in_atomic_pool() argument
518 return addr_in_gen_pool(atomic_pool, (unsigned long)start, size); in __in_atomic_pool()
521 static int __free_from_pool(void *start, size_t size) in __free_from_pool() argument
523 if (!__in_atomic_pool(start, size)) in __free_from_pool()
526 gen_pool_free(atomic_pool, (unsigned long)start, size); in __free_from_pool()
531 static void *__alloc_from_contiguous(struct device *dev, size_t size, in __alloc_from_contiguous() argument
535 unsigned long order = get_order(size); in __alloc_from_contiguous()
536 size_t count = size >> PAGE_SHIFT; in __alloc_from_contiguous()
544 __dma_clear_buffer(page, size); in __alloc_from_contiguous()
550 ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller); in __alloc_from_contiguous()
556 __dma_remap(page, size, prot); in __alloc_from_contiguous()
566 void *cpu_addr, size_t size, bool want_vaddr) in __free_from_contiguous() argument
570 __dma_free_remap(cpu_addr, size); in __free_from_contiguous()
572 __dma_remap(page, size, PAGE_KERNEL); in __free_from_contiguous()
574 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); in __free_from_contiguous()
592 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv) NULL argument
593 #define __alloc_from_pool(size, ret_page) NULL argument
594 #define __alloc_from_contiguous(dev, size, prot, ret, c, wv) NULL argument
595 #define __free_from_pool(cpu_addr, size) 0 argument
596 #define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0) argument
597 #define __dma_free_remap(cpu_addr, size) do { } while (0) argument
601 static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, in __alloc_simple_buffer() argument
605 page = __dma_alloc_buffer(dev, size, gfp); in __alloc_simple_buffer()
615 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, in __dma_alloc() argument
626 if (limit && size >= limit) { in __dma_alloc()
628 size, mask); in __dma_alloc()
649 size = PAGE_ALIGN(size); in __dma_alloc()
653 addr = __alloc_simple_buffer(dev, size, gfp, &page); in __dma_alloc()
655 addr = __alloc_from_contiguous(dev, size, prot, &page, in __dma_alloc()
658 addr = __alloc_simple_buffer(dev, size, gfp, &page); in __dma_alloc()
660 addr = __alloc_from_pool(size, &page); in __dma_alloc()
662 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, in __dma_alloc()
675 void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, in arm_dma_alloc() argument
680 return __dma_alloc(dev, size, handle, gfp, prot, false, in arm_dma_alloc()
684 static void *arm_coherent_dma_alloc(struct device *dev, size_t size, in arm_coherent_dma_alloc() argument
687 return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, in arm_coherent_dma_alloc()
692 void *cpu_addr, dma_addr_t dma_addr, size_t size, in __arm_dma_mmap() argument
698 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in __arm_dma_mmap()
702 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) in __arm_dma_mmap()
720 void *cpu_addr, dma_addr_t dma_addr, size_t size, in arm_coherent_dma_mmap() argument
723 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in arm_coherent_dma_mmap()
727 void *cpu_addr, dma_addr_t dma_addr, size_t size, in arm_dma_mmap() argument
733 return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); in arm_dma_mmap()
739 static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, in __arm_dma_free() argument
746 size = PAGE_ALIGN(size); in __arm_dma_free()
749 __dma_free_buffer(page, size); in __arm_dma_free()
750 } else if (!is_coherent && __free_from_pool(cpu_addr, size)) { in __arm_dma_free()
754 __dma_free_remap(cpu_addr, size); in __arm_dma_free()
755 __dma_free_buffer(page, size); in __arm_dma_free()
761 __free_from_contiguous(dev, page, cpu_addr, size, want_vaddr); in __arm_dma_free()
765 void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, in arm_dma_free() argument
768 __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); in arm_dma_free()
771 static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, in arm_coherent_dma_free() argument
774 __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); in arm_coherent_dma_free()
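arm_dma_alloc()/arm_dma_free() and their coherent variants above sit behind dma_alloc_coherent() and dma_free_coherent(); the branches of __dma_alloc() at lines 653-663 pick between a plain page allocation, the atomic pool for non-blocking callers, CMA, or a remapped buffer. A hedged consumer-side sketch ('dev' and RING_BYTES are illustrative):

#include <linux/dma-mapping.h>

/* Sketch: a small coherent descriptor ring. */
#define RING_BYTES	4096

static void *example_alloc_ring(struct device *dev, dma_addr_t *dma)
{
	/*
	 * Reaches arm_dma_alloc(); GFP_KERNEL permits the CMA path, while
	 * GFP_ATOMIC would be served from the atomic pool instead.
	 */
	return dma_alloc_coherent(dev, RING_BYTES, dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, void *ring, dma_addr_t dma)
{
	/* Reaches arm_dma_free() and from there __arm_dma_free(). */
	dma_free_coherent(dev, RING_BYTES, ring, dma);
}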
778 void *cpu_addr, dma_addr_t handle, size_t size, in arm_dma_get_sgtable() argument
788 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in arm_dma_get_sgtable()
793 size_t size, enum dma_data_direction dir, in dma_cache_maint_page() argument
797 size_t left = size; in dma_cache_maint_page()
846 size_t size, enum dma_data_direction dir) in __dma_page_cpu_to_dev() argument
850 dma_cache_maint_page(page, off, size, dir, dmac_map_area); in __dma_page_cpu_to_dev()
854 outer_inv_range(paddr, paddr + size); in __dma_page_cpu_to_dev()
856 outer_clean_range(paddr, paddr + size); in __dma_page_cpu_to_dev()
862 size_t size, enum dma_data_direction dir) in __dma_page_dev_to_cpu() argument
869 outer_inv_range(paddr, paddr + size); in __dma_page_dev_to_cpu()
871 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); in __dma_page_dev_to_cpu()
877 if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) { in __dma_page_dev_to_cpu()
879 size_t left = size; in __dma_page_dev_to_cpu()
1033 size_t size) in __alloc_iova() argument
1035 unsigned int order = get_order(size); in __alloc_iova()
1046 count = PAGE_ALIGN(size) >> PAGE_SHIFT; in __alloc_iova()
1091 dma_addr_t addr, size_t size) in __free_iova() argument
1099 if (!size) in __free_iova()
1109 if (addr + size > bitmap_base + mapping_size) { in __free_iova()
1118 count = size >> PAGE_SHIFT; in __free_iova()
1125 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, in __iommu_alloc_buffer() argument
1129 int count = size >> PAGE_SHIFT; in __iommu_alloc_buffer()
1142 unsigned long order = get_order(size); in __iommu_alloc_buffer()
1149 __dma_clear_buffer(page, size); in __iommu_alloc_buffer()
1211 size_t size, struct dma_attrs *attrs) in __iommu_free_buffer() argument
1213 int count = size >> PAGE_SHIFT; in __iommu_free_buffer()
1236 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, in __iommu_alloc_remap() argument
1239 return dma_common_pages_remap(pages, size, in __iommu_alloc_remap()
1247 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) in __iommu_create_mapping() argument
1250 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in __iommu_create_mapping()
1254 dma_addr = __alloc_iova(mapping, size); in __iommu_create_mapping()
1281 __free_iova(mapping, dma_addr, size); in __iommu_create_mapping()
1285 static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) in __iommu_remove_mapping() argument
1293 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); in __iommu_remove_mapping()
1296 iommu_unmap(mapping->domain, iova, size); in __iommu_remove_mapping()
1297 __free_iova(mapping, iova, size); in __iommu_remove_mapping()
1328 static void *__iommu_alloc_atomic(struct device *dev, size_t size, in __iommu_alloc_atomic() argument
1334 addr = __alloc_from_pool(size, &page); in __iommu_alloc_atomic()
1338 *handle = __iommu_create_mapping(dev, &page, size); in __iommu_alloc_atomic()
1345 __free_from_pool(addr, size); in __iommu_alloc_atomic()
1350 dma_addr_t handle, size_t size) in __iommu_free_atomic() argument
1352 __iommu_remove_mapping(dev, handle, size); in __iommu_free_atomic()
1353 __free_from_pool(cpu_addr, size); in __iommu_free_atomic()
1356 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, in arm_iommu_alloc_attrs() argument
1364 size = PAGE_ALIGN(size); in arm_iommu_alloc_attrs()
1367 return __iommu_alloc_atomic(dev, size, handle); in arm_iommu_alloc_attrs()
1378 pages = __iommu_alloc_buffer(dev, size, gfp, attrs); in arm_iommu_alloc_attrs()
1382 *handle = __iommu_create_mapping(dev, pages, size); in arm_iommu_alloc_attrs()
1389 addr = __iommu_alloc_remap(pages, size, gfp, prot, in arm_iommu_alloc_attrs()
1397 __iommu_remove_mapping(dev, *handle, size); in arm_iommu_alloc_attrs()
1399 __iommu_free_buffer(dev, pages, size, attrs); in arm_iommu_alloc_attrs()
1404 void *cpu_addr, dma_addr_t dma_addr, size_t size, in arm_iommu_mmap_attrs() argument
1410 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; in arm_iommu_mmap_attrs()
1440 void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, in arm_iommu_free_attrs() argument
1444 size = PAGE_ALIGN(size); in arm_iommu_free_attrs()
1446 if (__in_atomic_pool(cpu_addr, size)) { in arm_iommu_free_attrs()
1447 __iommu_free_atomic(dev, cpu_addr, handle, size); in arm_iommu_free_attrs()
1458 dma_common_free_remap(cpu_addr, size, in arm_iommu_free_attrs()
1462 __iommu_remove_mapping(dev, handle, size); in arm_iommu_free_attrs()
1463 __iommu_free_buffer(dev, pages, size, attrs); in arm_iommu_free_attrs()
1468 size_t size, struct dma_attrs *attrs) in arm_iommu_get_sgtable() argument
1470 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; in arm_iommu_get_sgtable()
1476 return sg_alloc_table_from_pages(sgt, pages, count, 0, size, in arm_iommu_get_sgtable()
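The struct dma_attrs arguments and the want_vaddr/'wv' plumbing in the allocators above correspond to DMA_ATTR_NO_KERNEL_MAPPING: a caller that never touches the buffer from the CPU lets the allocator skip the kernel remap and hands back an opaque cookie instead of a virtual address. A sketch against the old struct dma_attrs interface this file uses (later kernels replaced struct dma_attrs with a plain unsigned long; 'dev' and 'size' here are illustrative):

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

/*
 * Sketch: a large buffer the CPU never touches, so no kernel mapping is
 * wanted.  'dev' and 'size' are assumed to come from the caller.
 */
static void *example_alloc_nomap(struct device *dev, size_t size,
				 dma_addr_t *dma)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
	/*
	 * want_vaddr ends up false, so the allocator skips the remap step
	 * and the return value is a cookie usable only with
	 * dma_mmap_attrs()/dma_free_attrs(), not a kernel virtual address.
	 */
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL, &attrs);
}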
1505 size_t size, dma_addr_t *handle, in __map_sg_chunk() argument
1516 size = PAGE_ALIGN(size); in __map_sg_chunk()
1519 iova_base = iova = __alloc_iova(mapping, size); in __map_sg_chunk()
1523 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { in __map_sg_chunk()
1544 __free_iova(mapping, iova_base, size); in __map_sg_chunk()
1555 unsigned int size = s->offset + s->length; in __iommu_map_sg() local
1564 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { in __iommu_map_sg()
1565 if (__map_sg_chunk(dev, start, size, &dma->dma_address, in __iommu_map_sg()
1570 dma->dma_length = size - offset; in __iommu_map_sg()
1572 size = offset = s->offset; in __iommu_map_sg()
1577 size += s->length; in __iommu_map_sg()
1579 if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, in __iommu_map_sg()
1584 dma->dma_length = size - offset; in __iommu_map_sg()
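__map_sg_chunk() and __iommu_map_sg() above coalesce physically discontiguous scatterlist segments into contiguous IOVA chunks. Drivers only see this through dma_map_sg(); a minimal sketch, assuming the scatterlist has already been built (names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/printk.h>

/* Sketch: map an already-built scatterlist for device writes. */
static int example_map_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i, count;

	/* On an IOMMU-backed ARM device this ends up in __iommu_map_sg(). */
	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (!count)
		return -ENOMEM;

	/* dma_map_sg() may merge entries; only 'count' segments come back. */
	for_each_sg(sgl, sg, count, i)
		pr_debug("seg %d: %pad + %u\n", i,
			 &sg_dma_address(sg), sg_dma_len(sg));

	/* ... run the transfer ... */

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	return 0;
}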
1727 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_coherent_iommu_map_page() argument
1732 int ret, prot, len = PAGE_ALIGN(size + offset); in arm_coherent_iommu_map_page()
1761 unsigned long offset, size_t size, enum dma_data_direction dir, in arm_iommu_map_page() argument
1765 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_map_page()
1767 return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); in arm_iommu_map_page()
1780 size_t size, enum dma_data_direction dir, in arm_coherent_iommu_unmap_page() argument
1786 int len = PAGE_ALIGN(size + offset); in arm_coherent_iommu_unmap_page()
1805 size_t size, enum dma_data_direction dir, in arm_iommu_unmap_page() argument
1812 int len = PAGE_ALIGN(size + offset); in arm_iommu_unmap_page()
1818 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_unmap_page()
1825 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_iommu_sync_single_for_cpu() argument
1835 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_sync_single_for_cpu()
1839 dma_addr_t handle, size_t size, enum dma_data_direction dir) in arm_iommu_sync_single_for_device() argument
1849 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_sync_single_for_device()
1900 arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size) in arm_iommu_create_mapping() argument
1902 unsigned int bits = size >> PAGE_SHIFT; in arm_iommu_create_mapping()
1909 if (size > DMA_BIT_MASK(32) + 1) in arm_iommu_create_mapping()
2075 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, in arm_setup_iommu_dma_ops() argument
2083 mapping = arm_iommu_create_mapping(dev->bus, dma_base, size); in arm_setup_iommu_dma_ops()
2086 size, dev_name(dev)); in arm_setup_iommu_dma_ops()
2113 static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, in arm_setup_iommu_dma_ops() argument
2130 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, in arch_setup_dma_ops() argument
2136 if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu)) in arch_setup_dma_ops()
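arm_iommu_create_mapping(), arm_setup_iommu_dma_ops() and arch_setup_dma_ops() above give a device its own IOVA space and switch it to the IOMMU-aware dma_map_ops. Firmware-described IOMMUs get this wired up automatically through arch_setup_dma_ops(); a driver managing its own IOMMU can do roughly the following by hand (the bus, base address and window size are illustrative):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

/* Sketch: give 'dev' a private 256 MiB IOVA window starting at 0x10000000. */
static int example_attach_iommu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x10000000, SZ_256M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}

	/* From here, dma_alloc_coherent() etc. go through the IOMMU ops. */
	return 0;
}

On teardown, arm_iommu_detach_device() and arm_iommu_release_mapping() undo the two steps.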