#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

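/*
 * Xen-specific implementations, used when a page is foreign (a grant
 * mapping from another domain) rather than a local dom0 page.  The
 * inline wrappers below decide whether to call these or the native
 * dma_ops.
 */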
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

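/*
 * Coherent allocations and frees always go through the native dma_ops:
 * dom0 is mapped 1:1, so locally allocated pages can be handed to the
 * device without any Xen-specific handling.
 */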
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags,
		struct dma_attrs *attrs)
{
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}

static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle,
		struct dma_attrs *attrs)
{
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}

static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	bool local = PFN_DOWN(dev_addr) == page_to_pfn(page);
	/* Dom0 is mapped 1:1, so if pfn == mfn the page is local; otherwise
	 * it is a foreign page grant-mapped into dom0. If the page is local
	 * we can safely call the native dma_ops function, otherwise we call
	 * the Xen-specific function. */
	if (local)
		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}

static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long pfn = PFN_DOWN(handle);
	/* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will
	 * always return false. If the page is local we can safely call the
	 * native dma_ops function, otherwise we call the Xen-specific
	 * function. */
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->unmap_page)
			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}

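/*
 * As in xen_dma_unmap_page() above, pfn_valid() distinguishes local
 * pages (synced through the native dma_ops, when implemented) from
 * foreign grant-mapped pages (synced through the Xen-specific helpers).
 */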
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_device)
			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}

#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */