Lines matching references to "page" in the ARM DMA mapping code (arch/arm/mm/dma-mapping.c)

57 static void __dma_page_cpu_to_dev(struct page *, unsigned long,
59 static void __dma_page_dev_to_cpu(struct page *, unsigned long,
76 static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, in arm_dma_map_page() argument
81 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_dma_map_page()
82 return pfn_to_dma(dev, page_to_pfn(page)) + offset; in arm_dma_map_page()
85 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, in arm_coherent_dma_map_page() argument
89 return pfn_to_dma(dev, page_to_pfn(page)) + offset; in arm_coherent_dma_map_page()
119 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); in arm_dma_sync_single_for_cpu() local
120 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_dma_sync_single_for_cpu()
127 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); in arm_dma_sync_single_for_device() local
128 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_dma_sync_single_for_device()
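
The group above is the streaming half of the ARM DMA ops: arm_dma_map_page() performs cache maintenance via __dma_page_cpu_to_dev() before translating the pfn to a bus address, the coherent variant skips the maintenance, and the sync_single hooks recompute the page from the handle to move ownership back and forth. A minimal driver-side sketch of the generic API these callbacks implement; "my_dev" and "my_page" are hypothetical placeholders:

#include <linux/dma-mapping.h>

/* Hedged sketch: stream one page to a device and reclaim it. */
static int stream_one_page(struct device *my_dev, struct page *my_page)
{
	dma_addr_t handle;

	/* lands in arm_dma_map_page(): cache clean + pfn_to_dma() */
	handle = dma_map_page(my_dev, my_page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(my_dev, handle))
		return -ENOMEM;

	/* ... start the transfer using "handle" ... */

	/* give the buffer back to the CPU: arm_dma_sync_single_for_cpu() */
	dma_sync_single_for_cpu(my_dev, handle, PAGE_SIZE, DMA_TO_DEVICE);

	dma_unmap_page(my_dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}
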
228 static void __dma_clear_buffer(struct page *page, size_t size) in __dma_clear_buffer() argument
234 if (PageHighMem(page)) { in __dma_clear_buffer()
235 phys_addr_t base = __pfn_to_phys(page_to_pfn(page)); in __dma_clear_buffer()
238 void *ptr = kmap_atomic(page); in __dma_clear_buffer()
242 page++; in __dma_clear_buffer()
247 void *ptr = page_address(page); in __dma_clear_buffer()
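
__dma_clear_buffer() zeroes a fresh allocation and flushes it to RAM; because the pages may sit in highmem with no permanent kernel mapping, it maps and clears one page at a time with kmap_atomic() and only uses page_address() on lowmem. A simplified sketch of that pattern (the real function also flushes the cache per chunk); assumes a page-aligned size:

#include <linux/highmem.h>

static void clear_pages_one_by_one(struct page *page, size_t size)
{
	while (size > 0) {
		void *ptr = kmap_atomic(page);	/* transient per-page mapping */

		memset(ptr, 0, PAGE_SIZE);
		kunmap_atomic(ptr);
		page++;
		size -= PAGE_SIZE;
	}
}
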
258 static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) in __dma_alloc_buffer()
261 struct page *page, *p, *e; in __dma_alloc_buffer() local
263 page = alloc_pages(gfp, order); in __dma_alloc_buffer()
264 if (!page) in __dma_alloc_buffer()
270 split_page(page, order); in __dma_alloc_buffer()
271 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) in __dma_alloc_buffer()
274 __dma_clear_buffer(page, size); in __dma_alloc_buffer()
276 return page; in __dma_alloc_buffer()
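
__dma_alloc_buffer() works around the buddy allocator only handing out power-of-two blocks: split_page() turns the high-order block into individually refcounted pages, so everything past the requested size goes straight back to the allocator. Reconstructed from lines 258-276 above; assumes a page-aligned size:

static struct page *alloc_trimmed_block(gfp_t gfp, size_t size)
{
	unsigned int order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	split_page(page, order);	/* make each page freeable on its own */
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);		/* release the unused tail */

	return page;
}
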
282 static void __dma_free_buffer(struct page *page, size_t size) in __dma_free_buffer() argument
284 struct page *e = page + (size >> PAGE_SHIFT); in __dma_free_buffer()
286 while (page < e) { in __dma_free_buffer()
287 __free_page(page); in __dma_free_buffer()
288 page++; in __dma_free_buffer()
295 pgprot_t prot, struct page **ret_page,
299 pgprot_t prot, struct page **ret_page,
303 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, in __dma_alloc_remap() argument
310 return dma_common_contiguous_remap(page, size, in __dma_alloc_remap()
355 struct page *page; in atomic_pool_init() local
364 &page, atomic_pool_init, true); in atomic_pool_init()
367 &page, atomic_pool_init, true); in atomic_pool_init()
372 page_to_phys(page), in atomic_pool_init()
456 struct page *page = virt_to_page(addr); in __dma_update_pte() local
459 set_pte_ext(pte, mk_pte(page, prot), 0); in __dma_update_pte()
463 static void __dma_remap(struct page *page, size_t size, pgprot_t prot) in __dma_remap() argument
465 unsigned long start = (unsigned long) page_address(page); in __dma_remap()
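
__dma_remap() changes the attributes of pages that already live in the kernel's linear mapping (e.g. to make them uncached or writecombined): apply_to_page_range() walks the PTEs, __dma_update_pte() rewrites each one with mk_pte(page, prot), and a TLB flush makes sure no stale cacheable translation survives. Reconstructed sketch of lines 456-465:

static void remap_linear_mapping(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long)page_address(page);

	/* rewrite every PTE in [start, start + size) with the new prot */
	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, start + size);
}
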
473 pgprot_t prot, struct page **ret_page, in __alloc_remap_buffer()
476 struct page *page; in __alloc_remap_buffer() local
478 page = __dma_alloc_buffer(dev, size, gfp); in __alloc_remap_buffer()
479 if (!page) in __alloc_remap_buffer()
484 ptr = __dma_alloc_remap(page, size, gfp, prot, caller); in __alloc_remap_buffer()
486 __dma_free_buffer(page, size); in __alloc_remap_buffer()
491 *ret_page = page; in __alloc_remap_buffer()
495 static void *__alloc_from_pool(size_t size, struct page **ret_page) in __alloc_from_pool()
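
__alloc_from_pool() serves atomic callers, which may not sleep and therefore cannot build new page tables; instead the buffer is carved out of the pool that atomic_pool_init() allocated and remapped at boot (lines 355-372). A sketch assuming the pool is a genalloc pool, which is how this kernel generation implemented it:

#include <linux/genalloc.h>

static void *alloc_from_boot_pool(struct gen_pool *atomic_pool, size_t size,
				  struct page **ret_page)
{
	unsigned long vaddr = gen_pool_alloc(atomic_pool, size);

	if (!vaddr)
		return NULL;

	*ret_page = phys_to_page(gen_pool_virt_to_phys(atomic_pool, vaddr));
	return (void *)vaddr;	/* already mapped, usable in atomic context */
}
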
532 pgprot_t prot, struct page **ret_page, in __alloc_from_contiguous()
537 struct page *page; in __alloc_from_contiguous() local
540 page = dma_alloc_from_contiguous(dev, count, order); in __alloc_from_contiguous()
541 if (!page) in __alloc_from_contiguous()
544 __dma_clear_buffer(page, size); in __alloc_from_contiguous()
549 if (PageHighMem(page)) { in __alloc_from_contiguous()
550 ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller); in __alloc_from_contiguous()
552 dma_release_from_contiguous(dev, page, count); in __alloc_from_contiguous()
556 __dma_remap(page, size, prot); in __alloc_from_contiguous()
557 ptr = page_address(page); in __alloc_from_contiguous()
561 *ret_page = page; in __alloc_from_contiguous()
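
__alloc_from_contiguous() takes its pages from the CMA region and then picks a mapping strategy: highmem pages have no linear mapping and get a vmalloc-style remap, while lowmem pages keep page_address() but have their linear-map attributes rewritten in place. Condensed from lines 532-561 (the real code passes its caller for vmalloc bookkeeping where this sketch passes NULL):

static void *cma_alloc_mapped(struct device *dev, size_t size, pgprot_t prot)
{
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr;

	page = dma_alloc_from_contiguous(dev, count, get_order(size));
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, NULL);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);	/* retag the linear mapping */
		ptr = page_address(page);
	}
	return ptr;
}
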
565 static void __free_from_contiguous(struct device *dev, struct page *page, in __free_from_contiguous() argument
569 if (PageHighMem(page)) in __free_from_contiguous()
572 __dma_remap(page, size, PAGE_KERNEL); in __free_from_contiguous()
574 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); in __free_from_contiguous()
596 #define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0) argument
602 struct page **ret_page) in __alloc_simple_buffer()
604 struct page *page; in __alloc_simple_buffer() local
605 page = __dma_alloc_buffer(dev, size, gfp); in __alloc_simple_buffer()
606 if (!page) in __alloc_simple_buffer()
609 *ret_page = page; in __alloc_simple_buffer()
610 return page_address(page); in __alloc_simple_buffer()
620 struct page *page = NULL; in __dma_alloc() local
653 addr = __alloc_simple_buffer(dev, size, gfp, &page); in __dma_alloc()
655 addr = __alloc_from_contiguous(dev, size, prot, &page, in __dma_alloc()
658 addr = __alloc_simple_buffer(dev, size, gfp, &page); in __dma_alloc()
660 addr = __alloc_from_pool(size, &page); in __dma_alloc()
662 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, in __dma_alloc()
665 if (page) in __dma_alloc()
666 *handle = pfn_to_dma(dev, page_to_pfn(page)); in __dma_alloc()
668 return want_vaddr ? addr : page; in __dma_alloc()
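
__dma_alloc() is the dispatcher behind dma_alloc_coherent() on ARM: a plain buffer for coherent devices or !MMU builds, CMA when configured, the atomic pool when the caller cannot sleep, and remapped buddy pages otherwise. None of this is visible to a driver; a minimal usage sketch, with "my_dev" hypothetical:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int use_coherent_buffer(struct device *my_dev)
{
	dma_addr_t handle;
	void *cpu_addr;

	/* ends up in __dma_alloc(), which picks one of the paths above */
	cpu_addr = dma_alloc_coherent(my_dev, SZ_4K, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program "handle" into the device, touch cpu_addr from the CPU ... */

	dma_free_coherent(my_dev, SZ_4K, cpu_addr, handle);
	return 0;
}
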
743 struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); in __arm_dma_free() local
749 __dma_free_buffer(page, size); in __arm_dma_free()
755 __dma_free_buffer(page, size); in __arm_dma_free()
761 __free_from_contiguous(dev, page, cpu_addr, size, want_vaddr); in __arm_dma_free()
781 struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); in arm_dma_get_sgtable() local
788 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in arm_dma_get_sgtable()
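
arm_dma_get_sgtable() exposes a coherent buffer as a single-entry scatterlist so subsystems such as dma-buf can pass the allocation on to another device. Sketch of the table it builds:

#include <linux/scatterlist.h>

static int build_one_entry_sgtable(struct sg_table *sgt, struct page *page,
				   size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);	/* one segment */

	if (ret)
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
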
792 static void dma_cache_maint_page(struct page *page, unsigned long offset, in dma_cache_maint_page() argument
799 pfn = page_to_pfn(page) + offset / PAGE_SIZE; in dma_cache_maint_page()
812 page = pfn_to_page(pfn); in dma_cache_maint_page()
814 if (PageHighMem(page)) { in dma_cache_maint_page()
819 vaddr = kmap_atomic(page); in dma_cache_maint_page()
823 vaddr = kmap_high_get(page); in dma_cache_maint_page()
826 kunmap_high(page); in dma_cache_maint_page()
830 vaddr = page_address(page) + offset; in dma_cache_maint_page()
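
dma_cache_maint_page() is one walker for both transfer directions: the low-level cache op arrives as a function pointer (dmac_map_area or dmac_unmap_area), and each page is mapped transiently when it lives in highmem. Simplified from lines 792-830; the real code walks by pfn and also handles VIPT-aliasing caches via kmap_high_get():

static void maint_walk(struct page *page, unsigned long offset, size_t size,
		       int dir, void (*op)(const void *, size_t, int))
{
	size_t left = size;

	do {
		size_t len = min_t(size_t, left, PAGE_SIZE - offset);

		if (PageHighMem(page)) {
			void *vaddr = kmap_atomic(page);

			op(vaddr + offset, len, dir);
			kunmap_atomic(vaddr);
		} else {
			op(page_address(page) + offset, len, dir);
		}
		page++;
		offset = 0;
		left -= len;
	} while (left);
}
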
845 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, in __dma_page_cpu_to_dev() argument
850 dma_cache_maint_page(page, off, size, dir, dmac_map_area); in __dma_page_cpu_to_dev()
852 paddr = page_to_phys(page) + off; in __dma_page_cpu_to_dev()
861 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, in __dma_page_dev_to_cpu() argument
864 phys_addr_t paddr = page_to_phys(page) + off; in __dma_page_dev_to_cpu()
871 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); in __dma_page_dev_to_cpu()
881 pfn = page_to_pfn(page) + off / PAGE_SIZE; in __dma_page_dev_to_cpu()
888 page = pfn_to_page(pfn++); in __dma_page_dev_to_cpu()
889 set_bit(PG_dcache_clean, &page->flags); in __dma_page_dev_to_cpu()
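
The two helpers order inner (L1) and outer (L2) cache maintenance deliberately: toward the device the inner clean runs first so dirty lines propagate outward before the outer clean; from the device the outer invalidate runs first so the inner invalidate cannot refetch stale data from L2. Lines 881-889 then mark fully-covered pages PG_dcache_clean, letting a later flush_dcache_page() short-circuit. Condensed sketch of the ordering:

static void sketch_cpu_to_dev(struct page *page, unsigned long off, size_t size)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	dma_cache_maint_page(page, off, size, DMA_TO_DEVICE, dmac_map_area);
	outer_clean_range(paddr, paddr + size);		/* inner, then outer */
}

static void sketch_dev_to_cpu(struct page *page, unsigned long off, size_t size)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	outer_inv_range(paddr, paddr + size);		/* outer, then inner */
	dma_cache_maint_page(page, off, size, DMA_FROM_DEVICE, dmac_unmap_area);
}
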
1125 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, in __iommu_alloc_buffer()
1128 struct page **pages; in __iommu_alloc_buffer()
1130 int array_size = count * sizeof(struct page *); in __iommu_alloc_buffer()
1143 struct page *page; in __iommu_alloc_buffer() local
1145 page = dma_alloc_from_contiguous(dev, count, order); in __iommu_alloc_buffer()
1146 if (!page) in __iommu_alloc_buffer()
1149 __dma_clear_buffer(page, size); in __iommu_alloc_buffer()
1152 pages[i] = page + i; in __iommu_alloc_buffer()
1210 static int __iommu_free_buffer(struct device *dev, struct page **pages, in __iommu_free_buffer()
1214 int array_size = count * sizeof(struct page *); in __iommu_free_buffer()
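
With an IOMMU the buffer no longer has to be physically contiguous, so the allocation is tracked as an array of page pointers rather than a single block. For large buffers that array itself can exceed a page, hence a small-vs-large allocation split; a sketch of the common kernel pattern this code appears to use around line 1130:

static struct page **alloc_page_array(size_t count)
{
	size_t array_size = count * sizeof(struct page *);

	if (array_size <= PAGE_SIZE)
		return kzalloc(array_size, GFP_KERNEL);	/* small: slab */
	return vzalloc(array_size);			/* large: vmalloc */
}
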
1236 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, in __iommu_alloc_remap()
1247 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) in __iommu_create_mapping()
1301 static struct page **__atomic_get_pages(void *addr) in __atomic_get_pages()
1303 struct page *page; in __atomic_get_pages() local
1307 page = phys_to_page(phys); in __atomic_get_pages()
1309 return (struct page **)page; in __atomic_get_pages()
1312 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) in __iommu_get_pages()
1331 struct page *page; in __iommu_alloc_atomic() local
1334 addr = __alloc_from_pool(size, &page); in __iommu_alloc_atomic()
1338 *handle = __iommu_create_mapping(dev, &page, size); in __iommu_alloc_atomic()
1360 struct page **pages; in arm_iommu_alloc_attrs()
1409 struct page **pages = __iommu_get_pages(cpu_addr, attrs); in arm_iommu_mmap_attrs()
1443 struct page **pages; in arm_iommu_free_attrs()
1471 struct page **pages = __iommu_get_pages(cpu_addr, attrs); in arm_iommu_get_sgtable()
1726 static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, in arm_coherent_iommu_map_page() argument
1740 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); in arm_coherent_iommu_map_page()
1760 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, in arm_iommu_map_page() argument
1765 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_map_page()
1767 return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); in arm_iommu_map_page()
1810 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_unmap_page() local
1818 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_unmap_page()
1829 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_cpu() local
1835 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_sync_single_for_cpu()
1843 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_device() local
1849 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_sync_single_for_device()
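
The IOMMU variants layer cleanly on the coherent ones: arm_iommu_map_page() is cache maintenance stacked on arm_coherent_iommu_map_page(), which does the actual iommu_map() of an allocated IOVA range. Because there is no fixed dma-to-pfn relation under an IOMMU, the unmap and sync hooks recover the page with iommu_iova_to_phys() plus phys_to_page() (lines 1810-1843). Condensed from lines 1760-1767, with attribute handling dropped:

static dma_addr_t sketch_iommu_map(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir)
{
	/* make CPU-side writes visible before the device can see the IOVA */
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, NULL);
}
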