Lines matching refs:page in arch/arm/mm/dma-mapping.c (each entry: source line, matched text, enclosing function, and whether `page` is an argument or a local)
56 static void __dma_page_cpu_to_dev(struct page *, unsigned long,
58 static void __dma_page_dev_to_cpu(struct page *, unsigned long,
75 static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, in arm_dma_map_page() argument
80 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_dma_map_page()
81 return pfn_to_dma(dev, page_to_pfn(page)) + offset; in arm_dma_map_page()
84 static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, in arm_coherent_dma_map_page() argument
88 return pfn_to_dma(dev, page_to_pfn(page)) + offset; in arm_coherent_dma_map_page()
118 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); in arm_dma_sync_single_for_cpu() local
119 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_dma_sync_single_for_cpu()
126 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); in arm_dma_sync_single_for_device() local
127 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_dma_sync_single_for_device()
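
The matches above pair up: the map side pushes CPU cache state out to the device before handing back a bus address, and the sync-for-cpu side reverses it. A sketch of the two streaming entry points; the DMA_ATTR_SKIP_CPU_SYNC guard is not visible in the matches and is filled in from mainline of the struct dma_attrs era (~v4.1-v4.7):

static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	/* make the buffer visible to the device unless the caller opted out */
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	/* bus address = device's view of the pfn, plus the intra-page offset */
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	/* recover the struct page from the page-aligned bus address */
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));

	__dma_page_dev_to_cpu(page, offset, size, dir);
}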
224 static void __dma_clear_buffer(struct page *page, size_t size) in __dma_clear_buffer() argument
230 if (PageHighMem(page)) { in __dma_clear_buffer()
231 phys_addr_t base = __pfn_to_phys(page_to_pfn(page)); in __dma_clear_buffer()
234 void *ptr = kmap_atomic(page); in __dma_clear_buffer()
238 page++; in __dma_clear_buffer()
243 void *ptr = page_address(page); in __dma_clear_buffer()
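
A reconstruction of __dma_clear_buffer from these matches, with the elided memset/flush lines filled in from same-era mainline: highmem buffers have no linear mapping and must be zeroed a page at a time through kmap_atomic(), while lowmem buffers can be handled in one go; both paths flush so no dirty cache lines alias the uncached mapping created later:

static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}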
254 static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) in __dma_alloc_buffer()
257 struct page *page, *p, *e; in __dma_alloc_buffer() local
259 page = alloc_pages(gfp, order); in __dma_alloc_buffer()
260 if (!page) in __dma_alloc_buffer()
266 split_page(page, order); in __dma_alloc_buffer()
267 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) in __dma_alloc_buffer()
270 __dma_clear_buffer(page, size); in __dma_alloc_buffer()
272 return page; in __dma_alloc_buffer()
278 static void __dma_free_buffer(struct page *page, size_t size) in __dma_free_buffer() argument
280 struct page *e = page + (size >> PAGE_SHIFT); in __dma_free_buffer()
282 while (page < e) { in __dma_free_buffer()
283 __free_page(page); in __dma_free_buffer()
284 page++; in __dma_free_buffer()
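
These two groups are a pair. __dma_alloc_buffer over-allocates to the next power-of-two order, then split_page() turns the compound allocation into independent order-0 pages so the unused tail can be returned immediately; __dma_free_buffer can then free page by page. Reconstructed from the matches plus same-era mainline:

static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}

static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	/* pages were split to order 0 at allocation, so free one by one */
	while (page < e) {
		__free_page(page);
		page++;
	}
}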
291 pgprot_t prot, struct page **ret_page,
295 pgprot_t prot, struct page **ret_page,
299 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, in __dma_alloc_remap() argument
306 return dma_common_contiguous_remap(page, size, in __dma_alloc_remap()
351 struct page *page; in atomic_pool_init() local
360 &page, atomic_pool_init, true); in atomic_pool_init()
363 &page, atomic_pool_init, true); in atomic_pool_init()
368 page_to_phys(page), in atomic_pool_init()
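
The atomic pool is carved out at boot so GFP_ATOMIC coherent allocations never have to sleep. A hedged excerpt of the step these three matches come from (surrounding setup and error handling elided; reconstructed from same-era mainline): note that atomic_pool_init passes its own address as the `caller` tag used for vmalloc accounting.

	/* inside atomic_pool_init(): prefer CMA if a global area exists,
	 * otherwise remap ordinary pages */
	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
					      &page, atomic_pool_init, true);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		/* hand the virt<->phys region over to the genpool allocator */
		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		/* ... flag setup and error unwinding elided ... */
	}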
452 struct page *page = virt_to_page(addr); in __dma_update_pte() local
455 set_pte_ext(pte, mk_pte(page, prot), 0); in __dma_update_pte()
459 static void __dma_remap(struct page *page, size_t size, pgprot_t prot) in __dma_remap() argument
461 unsigned long start = (unsigned long) page_address(page); in __dma_remap()
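
__dma_remap changes the memory type of an already-mapped lowmem range in place: apply_to_page_range() walks the kernel page tables calling __dma_update_pte on every PTE, and the TLB flush makes the new attributes visible. Reconstructed from the matches; the pgprot plumbing through `data` is filled in from same-era mainline:

static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}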
469 pgprot_t prot, struct page **ret_page, in __alloc_remap_buffer()
472 struct page *page; in __alloc_remap_buffer() local
474 page = __dma_alloc_buffer(dev, size, gfp); in __alloc_remap_buffer()
475 if (!page) in __alloc_remap_buffer()
480 ptr = __dma_alloc_remap(page, size, gfp, prot, caller); in __alloc_remap_buffer()
482 __dma_free_buffer(page, size); in __alloc_remap_buffer()
487 *ret_page = page; in __alloc_remap_buffer()
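
A sketch of __alloc_remap_buffer assembled from these matches (the want_vaddr short-circuit filled in from same-era mainline): when DMA_ATTR_NO_KERNEL_MAPPING was requested the caller never touches the buffer from the CPU, so the vmalloc-space remap is skipped entirely.

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	/* give the buffer an uncached kernel mapping */
	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}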
491 static void *__alloc_from_pool(size_t size, struct page **ret_page) in __alloc_from_pool()
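
The single match at 491 is the pool-backed atomic path. A sketch from same-era mainline: gen_pool_alloc() returns a kernel virtual address inside the pre-remapped pool, and the struct page is recovered through the genpool's virt-to-phys helper:

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}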
528 pgprot_t prot, struct page **ret_page, in __alloc_from_contiguous()
533 struct page *page; in __alloc_from_contiguous() local
536 page = dma_alloc_from_contiguous(dev, count, order); in __alloc_from_contiguous()
537 if (!page) in __alloc_from_contiguous()
540 __dma_clear_buffer(page, size); in __alloc_from_contiguous()
545 if (PageHighMem(page)) { in __alloc_from_contiguous()
546 ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller); in __alloc_from_contiguous()
548 dma_release_from_contiguous(dev, page, count); in __alloc_from_contiguous()
552 __dma_remap(page, size, prot); in __alloc_from_contiguous()
553 ptr = page_address(page); in __alloc_from_contiguous()
557 *ret_page = page; in __alloc_from_contiguous()
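
CMA buffers come back cacheable, so a CPU-visible mapping has to be fixed up: highmem pages get a fresh uncached vmalloc-space mapping, lowmem pages are re-protected in place with __dma_remap(). Reconstruction from the matches plus same-era mainline:

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		/* no linear mapping: create a fresh uncached one */
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		/* lowmem: change the attributes of the linear mapping */
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}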
561 static void __free_from_contiguous(struct device *dev, struct page *page, in __free_from_contiguous() argument
565 if (PageHighMem(page)) in __free_from_contiguous()
568 __dma_remap(page, size, PAGE_KERNEL); in __free_from_contiguous()
570 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); in __free_from_contiguous()
592 #define __free_from_contiguous(dev, page, cpu_addr, size, wv) do { } while (0) argument
598 struct page **ret_page) in __alloc_simple_buffer()
600 struct page *page; in __alloc_simple_buffer() local
601 page = __dma_alloc_buffer(dev, size, gfp); in __alloc_simple_buffer()
602 if (!page) in __alloc_simple_buffer()
605 *ret_page = page; in __alloc_simple_buffer()
606 return page_address(page); in __alloc_simple_buffer()
616 struct page *page = NULL; in __dma_alloc() local
649 addr = __alloc_simple_buffer(dev, size, gfp, &page); in __dma_alloc()
651 addr = __alloc_from_pool(size, &page); in __dma_alloc()
653 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller, want_vaddr); in __dma_alloc()
655 addr = __alloc_from_contiguous(dev, size, prot, &page, caller, want_vaddr); in __dma_alloc()
657 if (page) in __dma_alloc()
658 *handle = pfn_to_dma(dev, page_to_pfn(page)); in __dma_alloc()
660 return want_vaddr ? addr : page; in __dma_alloc()
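
The four `addr = ...` matches at 649-655 are the allocator dispatch. A condensed sketch, with the dma-mask and highmem sanity checks elided; note the gfpflags_allow_blocking() test was spelled !(gfp & __GFP_WAIT) in slightly older trees of this era, so the exact spelling here is an assumption:

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 struct dma_attrs *attrs, const void *caller)
{
	struct page *page = NULL;
	bool want_vaddr;
	void *addr;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);
	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!gfpflags_allow_blocking(gfp))
		addr = __alloc_from_pool(size, &page);
	else if (!dev_get_cma_area(dev))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
					    caller, want_vaddr);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page,
					       caller, want_vaddr);

	if (page)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	/* with no kernel mapping requested, the struct page stands in for it */
	return want_vaddr ? addr : page;
}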
730 struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); in __arm_dma_free() local
739 __dma_free_buffer(page, size); in __arm_dma_free()
745 __dma_free_buffer(page, size); in __arm_dma_free()
751 __free_from_contiguous(dev, page, cpu_addr, size, want_vaddr); in __arm_dma_free()
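
__arm_dma_free mirrors the allocation dispatch: the struct page is recovered from the dma handle, then the same four-way choice decides how to tear the buffer down. Sketch from the matches plus same-era mainline:

static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

	size = PAGE_ALIGN(size);

	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!dev_get_cma_area(dev)) {
		if (want_vaddr)
			__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
	}
}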
771 struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); in arm_dma_get_sgtable() local
778 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in arm_dma_get_sgtable()
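
Because arm_dma buffers are physically contiguous, exporting them as a scatterlist needs exactly one entry. Sketch from same-era mainline:

int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		 void *cpu_addr, dma_addr_t handle, size_t size,
		 struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	/* one physically contiguous segment covers the whole buffer */
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}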
782 static void dma_cache_maint_page(struct page *page, unsigned long offset, in dma_cache_maint_page() argument
789 pfn = page_to_pfn(page) + offset / PAGE_SIZE; in dma_cache_maint_page()
802 page = pfn_to_page(pfn); in dma_cache_maint_page()
804 if (PageHighMem(page)) { in dma_cache_maint_page()
809 vaddr = kmap_atomic(page); in dma_cache_maint_page()
813 vaddr = kmap_high_get(page); in dma_cache_maint_page()
816 kunmap_high(page); in dma_cache_maint_page()
820 vaddr = page_address(page) + offset; in dma_cache_maint_page()
835 static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, in __dma_page_cpu_to_dev() argument
840 dma_cache_maint_page(page, off, size, dir, dmac_map_area); in __dma_page_cpu_to_dev()
842 paddr = page_to_phys(page) + off; in __dma_page_cpu_to_dev()
851 static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, in __dma_page_dev_to_cpu() argument
854 phys_addr_t paddr = page_to_phys(page) + off; in __dma_page_dev_to_cpu()
861 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); in __dma_page_dev_to_cpu()
871 pfn = page_to_pfn(page) + off / PAGE_SIZE; in __dma_page_dev_to_cpu()
878 page = pfn_to_page(pfn++); in __dma_page_dev_to_cpu()
879 set_bit(PG_dcache_clean, &page->flags); in __dma_page_dev_to_cpu()
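
The two directions are deliberately asymmetric: cpu-to-dev cleans inner caches first and then the outer cache, dev-to-cpu invalidates outer-in, and afterwards marks every fully covered page PG_dcache_clean so later flush_dcache_page() calls can be skipped. Reconstruction from the matches plus same-era mainline (dma_cache_maint_page itself, matched at 782-820, does the highmem-aware per-page walk):

static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	/* inner caches first: walk the (possibly highmem) pages */
	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}

static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		/* outer cache first on the way back to the CPU */
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		/* only whole pages can be marked clean */
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}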
1115 static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, in __iommu_alloc_buffer()
1118 struct page **pages; in __iommu_alloc_buffer()
1120 int array_size = count * sizeof(struct page *); in __iommu_alloc_buffer()
1133 struct page *page; in __iommu_alloc_buffer() local
1135 page = dma_alloc_from_contiguous(dev, count, order); in __iommu_alloc_buffer()
1136 if (!page) in __iommu_alloc_buffer()
1139 __dma_clear_buffer(page, size); in __iommu_alloc_buffer()
1142 pages[i] = page + i; in __iommu_alloc_buffer()
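
Unlike the arm_dma path, the IOMMU allocator hands back a struct page * array, since the buffer need not be physically contiguous. A hedged excerpt of the DMA_ATTR_FORCE_CONTIGUOUS/CMA branch these matches come from (the chunk-by-chunk fallback is elided; reconstructed from same-era mainline):

static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	/* the page array itself may be too big for kmalloc */
	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		unsigned long order = get_order(size);
		struct page *page;

		page = dma_alloc_from_contiguous(dev, count, order);
		if (!page)
			goto error;

		__dma_clear_buffer(page, size);

		/* expose the contiguous block through the page array */
		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/* ... non-contiguous fallback: fill pages[] in shrinking chunks ... */

error:
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return NULL;
}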
1200 static int __iommu_free_buffer(struct device *dev, struct page **pages, in __iommu_free_buffer()
1204 int array_size = count * sizeof(struct page *); in __iommu_free_buffer()
1226 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, in __iommu_alloc_remap()
1237 __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) in __iommu_create_mapping()
1289 static struct page **__atomic_get_pages(void *addr) in __atomic_get_pages()
1291 struct page *page; in __atomic_get_pages() local
1295 page = phys_to_page(phys); in __atomic_get_pages()
1297 return (struct page **)page; in __atomic_get_pages()
1300 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) in __iommu_get_pages()
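
Two lookup helpers resolve a CPU address back to its page array. Pool-backed atomic buffers have no real pages[] array, so __atomic_get_pages smuggles the single struct page pointer through the struct page ** return type; the DMA_ATTR_NO_KERNEL_MAPPING case similarly returns cpu_addr itself, which on that path already is the page array. Sketch from same-era mainline (my reading of the cast trick, not stated in the source):

static struct page **__atomic_get_pages(void *addr)
{
	struct page *page;
	phys_addr_t phys;

	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
	page = phys_to_page(phys);

	/* not a real array: the page pointer itself is handed back */
	return (struct page **)page;
}

static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
	struct vm_struct *area;

	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

	/* normal case: the array was stashed in the vmalloc area */
	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}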
1319 struct page *page; in __iommu_alloc_atomic() local
1322 addr = __alloc_from_pool(size, &page); in __iommu_alloc_atomic()
1326 *handle = __iommu_create_mapping(dev, &page, size); in __iommu_alloc_atomic()
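
The IOMMU atomic path reuses the coherent pool for the CPU side and only adds the IOVA mapping on top. Sketch from these matches plus same-era mainline:

static void *__iommu_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *handle)
{
	struct page *page;
	void *addr;

	addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	/* single contiguous page run, so pass &page as a 1-entry array */
	*handle = __iommu_create_mapping(dev, &page, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}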
1348 struct page **pages; in arm_iommu_alloc_attrs()
1397 struct page **pages = __iommu_get_pages(cpu_addr, attrs); in arm_iommu_mmap_attrs()
1431 struct page **pages; in arm_iommu_free_attrs()
1459 struct page **pages = __iommu_get_pages(cpu_addr, attrs); in arm_iommu_get_sgtable()
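
For IOMMU buffers the sgtable export walks the pages[] array instead of assuming contiguity; sg_alloc_table_from_pages() merges physically adjacent pages into fewer entries. Sketch from same-era mainline:

static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}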
1714 static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, in arm_coherent_iommu_map_page() argument
1728 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot); in arm_coherent_iommu_map_page()
1748 static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, in arm_iommu_map_page() argument
1753 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_map_page()
1755 return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); in arm_iommu_map_page()
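
The non-coherent map_page is a thin wrapper: it does the CPU-side cache maintenance and then delegates to the coherent variant, which allocates an IOVA window and points it at the page. Reconstructed from the matches plus same-era mainline (the SKIP_CPU_SYNC guard and error path are filled in):

static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t dma_addr;
	int ret, prot, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	prot = __dma_direction_to_prot(dir);

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
	if (ret < 0)
		goto fail;

	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}

static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}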
1798 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_unmap_page() local
1806 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_unmap_page()
1817 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_cpu() local
1823 __dma_page_dev_to_cpu(page, offset, size, dir); in arm_iommu_sync_single_for_cpu()
1831 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); in arm_iommu_sync_single_for_device() local
1837 __dma_page_cpu_to_dev(page, offset, size, dir); in arm_iommu_sync_single_for_device()
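
The matches at 1798-1837 all recover the page the same way: on the IOMMU path the dma handle is an IOVA, so iommu_iova_to_phys() must translate it back before the ordinary cache-maintenance helpers can run. Sketch of sync_single_for_cpu as the representative (unmap_page and sync_single_for_device differ only in which helper they call), from same-era mainline:

static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
	dma_addr_t iova = handle & PAGE_MASK;
	/* translate the IOVA back to a physical page before maintenance */
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}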