#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

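/*
 * The __xen_dma_* helpers below take the foreign-page (grant-mapped)
 * path; on ARM they are implemented in arch/arm/xen/mm.c. The
 * xen_dma_* wrappers further down pick, per call, between the native
 * dma_ops and these helpers.
 */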
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

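/*
 * Coherent allocations can always go through the native dma_ops: dom0
 * is mapped 1:1, so a buffer that is physically contiguous from
 * Linux's point of view is machine-contiguous as well, which is what
 * the device sees.
 */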
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags,
		struct dma_attrs *attrs)
{
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}

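/* Free a buffer obtained from xen_alloc_coherent_pages() above. */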
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle,
		struct dma_attrs *attrs)
{
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}

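/*
 * Map a page for DMA. dev_addr is the bus (machine) address already
 * assigned to this page; because dom0 is mapped 1:1, it falls inside
 * the page's own xen_pfn range only when the page is local, which is
 * exactly what the "local" test below checks.
 */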
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	unsigned long page_pfn = page_to_xen_pfn(page);
	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
	unsigned long compound_pages =
		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
	bool local = (page_pfn <= dev_pfn) &&
		(dev_pfn - page_pfn < compound_pages);

	/*
	 * Dom0 is mapped 1:1. While a Linux page can span multiple
	 * Xen pages, it cannot contain a mix of local and foreign
	 * Xen pages: if the first xen_pfn == mfn, the page is local,
	 * otherwise it is a foreign page grant-mapped into dom0. If
	 * the page is local we can safely call the native dma_ops
	 * function, otherwise we call the Xen-specific one.
	 */
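	/*
	 * Worked example (hypothetical numbers): with 64KB Linux pages
	 * over 4KB Xen pages, XEN_PFN_PER_PAGE is 16, so an order-0
	 * page whose first xen_pfn is 0x80000 is local iff dev_pfn
	 * falls in [0x80000, 0x80010).
	 */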
	if (local)
		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}

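/*
 * Unmap has only the DMA handle to go on, so the local/foreign
 * decision is made with pfn_valid() on the 1:1 pfn rather than the
 * compound-page range check used in xen_dma_map_page() above.
 */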
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long pfn = PFN_DOWN(handle);
	/*
	 * Dom0 is mapped 1:1. While a Linux page can span multiple
	 * Xen pages, it cannot contain a mix of local and foreign
	 * Xen pages, and because of the 1:1 mapping calling
	 * pfn_valid() on a foreign mfn always returns false. If the
	 * page is local we can safely call the native dma_ops
	 * function, otherwise we call the Xen-specific one.
	 */
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->unmap_page)
			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}

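/*
 * The two sync helpers below use the same pfn_valid() dispatch as
 * xen_dma_unmap_page(): native dma_ops for local pages, __xen_dma_*
 * helpers for foreign grant-mapped pages.
 */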
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);

	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);

	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_device)
			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}

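/*
 * Usage sketch (illustrative only; the expected caller is the
 * swiotlb-xen layer in drivers/xen/swiotlb-xen.c):
 *
 *	xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
 *	... device performs DMA ...
 *	xen_dma_sync_single_for_cpu(dev, dev_addr, size, dir);
 *	xen_dma_unmap_page(dev, dev_addr, size, dir, attrs);
 */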
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */