#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
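
/*
 * Example (illustrative sketch only, not part of this header): a driver
 * performing one streaming DMA transfer on a kmalloc()ed buffer, using
 * the dma_map_single() wrapper defined below.  "dev", "buf" and "len"
 * are hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... program the device with "handle" and wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */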

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
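
/*
 * Example (illustrative sketch only): mapping a scatter-gather list and
 * walking the mapped entries.  "dev" and "sgt" (a previously populated
 * struct sg_table) are hypothetical.  The count returned by dma_map_sg()
 * may be smaller than the count passed in; the returned count is what
 * gets handed to the device, while dma_unmap_sg() takes the original.
 *
 *	struct scatterlist *s;
 *	int i, mapped;
 *
 *	mapped = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (mapped == 0)
 *		return -ENOMEM;
 *	for_each_sg(sgt->sgl, s, mapped, i)
 *		... feed sg_dma_address(s) / sg_dma_len(s) to the device ...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */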

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
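
/*
 * Example (illustrative sketch only): mapping a single pinned page,
 * e.g. one obtained via get_user_pages().  "dev" and "page" are
 * hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... let the device receive into the page, then ...
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
 */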

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
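
/*
 * Example (illustrative sketch only): reusing one long-lived streaming
 * mapping across transfers.  Ownership of the buffer must be handed back
 * to the CPU before it is touched, and back to the device before the
 * next DMA.  "dev", "handle", "buf" and "len" are hypothetical.
 *
 *	... device DMAs into the buffer ...
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU may now safely read (and modify) buf ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... device may DMA into the buffer again ...
 */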

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
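
/*
 * Example (illustrative sketch only): syncing just the portion of a
 * mapped buffer the device actually wrote, e.g. a short packet at the
 * head of a full-page receive buffer.  Names are hypothetical.
 *
 *	dma_sync_single_range_for_cpu(dev, handle, 0, pkt_len,
 *				      DMA_FROM_DEVICE);
 *	... copy or inspect the first pkt_len bytes ...
 *	dma_sync_single_range_for_device(dev, handle, 0, pkt_len,
 *					 DMA_FROM_DEVICE);
 */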
153 
154 static inline void
dma_sync_sg_for_cpu(struct device * dev,struct scatterlist * sg,int nelems,enum dma_data_direction dir)155 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
156 		    int nelems, enum dma_data_direction dir)
157 {
158 	struct dma_map_ops *ops = get_dma_ops(dev);
159 
160 	BUG_ON(!valid_dma_direction(dir));
161 	if (ops->sync_sg_for_cpu)
162 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
163 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
164 }
165 
166 static inline void
dma_sync_sg_for_device(struct device * dev,struct scatterlist * sg,int nelems,enum dma_data_direction dir)167 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
168 		       int nelems, enum dma_data_direction dir)
169 {
170 	struct dma_map_ops *ops = get_dma_ops(dev);
171 
172 	BUG_ON(!valid_dma_direction(dir));
173 	if (ops->sync_sg_for_device)
174 		ops->sync_sg_for_device(dev, sg, nelems, dir);
175 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
176 
177 }
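
/*
 * Example (illustrative sketch only): the scatter-gather analogue of the
 * single-buffer syncs above.  Note that "nelems" must be the count
 * originally passed to dma_map_sg(), not the value it returned.
 *
 *	dma_sync_sg_for_cpu(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 *	... CPU may safely read the buffers ...
 *	dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 */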

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
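
/*
 * Example (illustrative sketch only): exposing a coherent allocation to
 * user space from a driver's ->mmap() file operation, via the
 * dma_mmap_coherent() wrapper defined just below.  "struct mydev" and
 * its fields are hypothetical.
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydev *md = file->private_data;
 *
 *		return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
 *					 md->dma_handle, md->size);
 *	}
 */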

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}
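
/*
 * Example (illustrative sketch only): building an sg_table that
 * describes a coherent allocation, as a dma-buf exporter might.  It
 * uses the dma_get_sgtable() wrapper defined just below; names are
 * hypothetical and error handling is elided.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_handle, size);
 *	if (ret < 0)
 *		return ret;
 *	... hand &sgt to the importer; sg_free_table(&sgt) when done ...
 */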

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)

#endif