#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#define DMA_ERROR_CODE	(~0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;

static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (xen_initial_domain())
		return xen_dma_ops;
	else
		return __generic_dma_ops(dev);
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}

#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->set_dma_mask(dev, mask);
}
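
/*
 * Example (illustrative sketch, not part of this header): a driver would
 * typically negotiate its DMA mask at probe time, usually via the
 * dma_set_mask_and_coherent() wrapper from <linux/dma-mapping.h>, which
 * sets both the streaming and coherent masks:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *		...
 *	}
 *
 * "foo_probe" is a hypothetical name used only for illustration.
 */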

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	if (dev)
		pfn -= dev->dma_pfn_offset;
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	unsigned long pfn = __bus_to_pfn(addr);

	if (dev)
		pfn += dev->dma_pfn_offset;

	return pfn;
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	if (dev) {
		unsigned long pfn = dma_to_pfn(dev, addr);

		return phys_to_virt(__pfn_to_phys(pfn));
	}

	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	if (dev)
		return pfn_to_dma(dev, virt_to_pfn(addr));

	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}

#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
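
/*
 * Worked example (hypothetical numbers, for illustration only): on a
 * platform where RAM starts at CPU PFN 0x80000 but the device sees it
 * starting at bus PFN 0x00000, firmware would set
 * dev->dma_pfn_offset = 0x80000. pfn_to_dma() subtracts that offset
 * before converting to a bus address and dma_to_pfn() adds it back, so
 * the two translations round-trip:
 *
 *	pfn_to_dma(dev, 0x80123) == __pfn_to_bus(0x00123)
 *	dma_to_pfn(dev, __pfn_to_bus(0x00123)) == 0x80123
 */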

/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
}
#define dma_max_pfn(dev) dma_max_pfn(dev)

#define arch_setup_dma_ops arch_setup_dma_ops
extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			       struct iommu_ops *iommu, bool coherent);

#define arch_teardown_dma_ops arch_teardown_dma_ops
extern void arch_teardown_dma_ops(struct device *dev);

/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
{
	return dev->archdata.dma_coherent;
}

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	unsigned int offset = paddr & ~PAGE_MASK;
	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	unsigned int offset = dev_addr & ~PAGE_MASK;
	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	u64 limit, mask;

	if (!dev->dma_mask)
		return 0;

	mask = *dev->dma_mask;

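	/*
	 * (mask + 1) & ~mask yields the size of the contiguous low-order
	 * window the mask can address (e.g. 0x100000000 for a 32-bit
	 * mask); for a full 64-bit mask the addition overflows to 0,
	 * which skips the size check below.
	 */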
	limit = (mask + 1) & ~mask;
	if (limit && size > limit)
		return 0;

	if ((addr | (addr + size - 1)) & ~mask)
		return 0;

	return 1;
}

static inline void dma_mark_clean(void *addr, size_t size) { }

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);
	return dma_addr == DMA_ERROR_CODE;
}
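
/*
 * Example (illustrative sketch): every streaming mapping must be checked
 * with dma_mapping_error() before the address is handed to hardware:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 * "buf" and "len" are hypothetical driver-local variables.
 */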

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t handle)
{
}

extern int dma_supported(struct device *dev, u64 mask);

extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);

/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA. This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, struct dma_attrs *attrs);

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;
	BUG_ON(!ops);

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * while this call is executing, and after it returns, are illegal.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, struct dma_attrs *attrs);

#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
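
/*
 * Example (illustrative sketch): a coherent allocate/free round trip as a
 * driver would perform it. "priv" is a hypothetical driver structure.
 *
 *	priv->desc = dma_alloc_coherent(dev, PAGE_SIZE, &priv->desc_dma,
 *					GFP_KERNEL);
 *	if (!priv->desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, priv->desc, priv->desc_dma);
 */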

/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs);
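
/*
 * Example (illustrative sketch): drivers normally reach this through the
 * generic dma_mmap_coherent() wrapper from a file_operations .mmap
 * handler. "foo_mmap" and "priv" are hypothetical names.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(priv->dev, vma, priv->desc,
 *					 priv->desc_dma, PAGE_SIZE);
 *	}
 */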

/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);

/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB
 * (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
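
/*
 * Example (illustrative sketch): platform code registering a device whose
 * DMA window covers only the first 64MB of bus address space. The function
 * name, pool sizes, and threshold are hypothetical.
 *
 *	static int foo_needs_bounce(struct device *dev, dma_addr_t addr,
 *				    size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	dmabounce_register_dev(dev, 2048, 0, foo_needs_bounce);
 */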

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);



/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs);
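
/*
 * Example (illustrative sketch): drivers use these through the generic
 * dma_map_sg()/dma_unmap_sg() wrappers, walking the mapped entries with
 * for_each_sg(). "sgl", "nents", and hw_queue() are hypothetical; a zero
 * return from dma_map_sg() signals failure.
 *
 *	int i, count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	struct scatterlist *sg;
 *
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		hw_queue(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */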

#endif /* __KERNEL__ */
#endif