#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

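/*
 * Allocate pages for the swiotlb bounce buffer.  If any RAM lives below
 * the 32-bit boundary, allocate from ZONE_DMA so the buffer stays
 * addressable by devices limited to 32-bit DMA.
 */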
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
	struct memblock_region *reg;
	gfp_t flags = __GFP_NOWARN;

	for_each_memblock(memory, reg) {
		if (reg->base < (phys_addr_t)0xffffffff) {
			flags |= __GFP_DMA;
			break;
		}
	}
	return __get_free_pages(flags, order);
}

enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */

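/*
 * Perform cache maintenance on a DMA buffer by asking the hypervisor to
 * clean or invalidate it via the GNTTABOP_cache_flush grant table op.
 * The buffer is walked one page at a time because a single flush
 * operation cannot cross a page boundary.
 */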
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	struct gnttab_cache_flush cflush;
	unsigned long pfn;
	size_t left = size;

	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;

		/* buffers in highmem or foreign pages cannot cross page
		 * boundaries */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		cflush.op = 0;
		cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
		cflush.offset = offset;
		cflush.length = len;

		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
			cflush.op = GNTTAB_CACHE_INVAL;
		if (op == DMA_MAP) {
			if (dir == DMA_FROM_DEVICE)
				cflush.op = GNTTAB_CACHE_INVAL;
			else
				cflush.op = GNTTAB_CACHE_CLEAN;
		}
		if (cflush.op)
			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

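/*
 * Cache maintenance for the map/unmap and sync paths below is only
 * needed when the device is not cache coherent; callers may also opt
 * out with DMA_ATTR_SKIP_CPU_SYNC.
 */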
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

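/*
 * Bouncing through swiotlb-xen is only required for foreign pages
 * (pfn != mfn) mapped into a non-coherent device when the cache flush
 * hypercall is not available to maintain the caches directly.
 */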
bool xen_arch_need_swiotlb(struct device *dev,
			   unsigned long pfn,
			   unsigned long mfn)
{
	return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
}

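/*
 * Dom0 on ARM is mapped 1:1, so a physically contiguous buffer is
 * already contiguous in machine (bus) address space; no memory exchange
 * with the hypervisor is needed, just report the address back.
 */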
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);

static struct dma_map_ops xen_swiotlb_dma_ops = {
	.mapping_error = xen_swiotlb_dma_mapping_error,
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.set_dma_mask = xen_swiotlb_set_dma_mask,
};

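/*
 * For dom0, install the swiotlb-xen DMA ops and probe whether the
 * hypervisor implements GNTTABOP_cache_flush by issuing a zero-length
 * flush: any return value other than -ENOSYS means it is available.
 */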
int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;
	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);