/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

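/*
 * Per-buffer state. Depending on the buffer type, either vaddr alone is
 * used (MMAP: vmalloc area; DMABUF: exporter's vmap), or pages/n_pages
 * track pinned user pages, or vma tracks a PFN-mapped user region.
 */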
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct page			**pages;
	struct vm_area_struct		*vma;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	unsigned int			n_pages;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

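/*
 * MMAP buffers: allocate a zeroed, page-aligned area with vmalloc_user()
 * so it can later be remapped to userspace, and take the initial refcount.
 */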
static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size,
			       enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
	if (!buf)
		return NULL;

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	buf->dma_dir = dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	if (!buf->vaddr) {
		pr_debug("vmalloc of size %lu failed\n", buf->size);
		kfree(buf);
		return NULL;
	}

	atomic_inc(&buf->refcount);
	return buf;
}

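/*
 * Drop one reference; free the vmalloc area and the bookkeeping struct
 * once the last user (mmap mapping, dmabuf export, or the queue) is gone.
 */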
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (atomic_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

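/*
 * USERPTR buffers: if the user memory is a contiguous PFN-mapped region
 * (e.g. a framebuffer), it is ioremapped; otherwise the pages are pinned
 * with get_user_pages() and mapped into the kernel with vm_map_ram().
 */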
static void *vb2_vmalloc_get_userptr(void *alloc_ctx, unsigned long vaddr,
				     unsigned long size,
				     enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;
	unsigned long first, last;
	int n_pages, offset;
	struct vm_area_struct *vma;
	dma_addr_t physp;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->dma_dir = dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	vma = find_vma(current->mm, vaddr);
	if (vma && (vma->vm_flags & VM_PFNMAP) && (vma->vm_pgoff)) {
		if (vb2_get_contig_userptr(vaddr, size, &vma, &physp))
			goto fail_pages_array_alloc;
		buf->vma = vma;
		buf->vaddr = (__force void *)ioremap_nocache(physp, size);
		if (!buf->vaddr)
			goto fail_pages_array_alloc;
	} else {
		first = vaddr >> PAGE_SHIFT;
		last  = (vaddr + size - 1) >> PAGE_SHIFT;
		buf->n_pages = last - first + 1;
		buf->pages = kzalloc(buf->n_pages * sizeof(struct page *),
				     GFP_KERNEL);
		if (!buf->pages)
			goto fail_pages_array_alloc;

		/* current->mm->mmap_sem is taken by videobuf2 core */
		n_pages = get_user_pages(current, current->mm,
					 vaddr & PAGE_MASK, buf->n_pages,
					 dma_dir == DMA_FROM_DEVICE,
					 1, /* force */
					 buf->pages, NULL);
		if (n_pages != buf->n_pages)
			goto fail_get_user_pages;

		buf->vaddr = vm_map_ram(buf->pages, buf->n_pages, -1,
					PAGE_KERNEL);
		if (!buf->vaddr)
			goto fail_get_user_pages;
	}

	buf->vaddr += offset;
	return buf;

fail_get_user_pages:
	pr_debug("get_user_pages requested/got: %d/%d\n", buf->n_pages,
		 n_pages);
	while (--n_pages >= 0)
		put_page(buf->pages[n_pages]);
	kfree(buf->pages);

fail_pages_array_alloc:
	kfree(buf);

	return NULL;
}

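/*
 * Undo vb2_vmalloc_get_userptr(): unmap the kernel mapping, mark pinned
 * pages dirty if the device wrote to them, and release them; for the
 * PFN-mapped case, drop the VMA reference and iounmap instead.
 */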
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;

	if (buf->pages) {
		if (vaddr)
			vm_unmap_ram((void *)vaddr, buf->n_pages);
		for (i = 0; i < buf->n_pages; ++i) {
			if (buf->dma_dir == DMA_FROM_DEVICE)
				set_page_dirty_lock(buf->pages[i]);
			put_page(buf->pages[i]);
		}
		kfree(buf->pages);
	} else {
		vb2_put_vma(buf->vma);
		iounmap((__force void __iomem *)buf->vaddr);
	}
	kfree(buf);
}

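/* Return the kernel virtual address of the buffer, if one exists. */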
static void *vb2_vmalloc_vaddr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

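/*
 * mmap() a MMAP buffer into userspace with remap_vmalloc_range() and hook
 * up the common vm_operations so the mapping holds a reference on the
 * buffer for its whole lifetime.
 */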
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vma->vm_flags		|= VM_DONTEXPAND;

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data	= &buf->handler;
	vma->vm_ops		= &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

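/*
 * Per-attachment state: a scatterlist describing the buffer pages and the
 * direction it is currently mapped in (DMA_NONE when unmapped).
 */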
struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

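/*
 * attach: build a scatterlist covering the vmalloc area, translating each
 * kernel virtual page to its struct page with vmalloc_to_page(). The list
 * is DMA-mapped lazily, in the map_dma_buf callback.
 */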
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

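/*
 * detach: unmap the scatterlist if it is still mapped, then free it
 * together with the attachment state.
 */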
static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

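/*
 * map: DMA-map the cached scatterlist for the importing device. A mapping
 * in the requested direction is reused; a stale mapping in a different
 * direction is torn down and redone.
 */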
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;
	int ret;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* map the scatterlist for the client with the new direction */
	ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dma_dir);
	if (ret <= 0) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here; the mapping is cached in the attachment
	 * and released in detach or when the direction changes in map
	 */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	/* the vmalloc area is virtually contiguous, so plain pointer math */
	return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.kmap = vb2_vmalloc_dmabuf_ops_kmap,
	.kmap_atomic = vb2_vmalloc_dmabuf_ops_kmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

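/*
 * Export a vmalloc buffer as a dma-buf. The exported buffer holds a
 * reference on the vb2 buffer, dropped again in the release callback.
 */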
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

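/*
 * Importer side: a DMABUF buffer handed to this allocator is accessed
 * through the CPU mapping provided by the exporter via dma_buf_vmap().
 */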
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	buf->vaddr = dma_buf_vmap(buf->dbuf);

	return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	dma_buf_vunmap(buf->dbuf, buf->vaddr);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;

	if (buf->vaddr)
		dma_buf_vunmap(buf->dbuf, buf->vaddr);

	kfree(buf);
}

static void *vb2_vmalloc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = dma_dir;
	buf->size = size;

	return buf;
}

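/*
 * The allocator is selected by pointing a driver's vb2_queue at these ops
 * before calling vb2_queue_init(), e.g.:
 *
 *	q->mem_ops = &vb2_vmalloc_memops;
 */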
const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc		= vb2_vmalloc_alloc,
	.put		= vb2_vmalloc_put,
	.get_userptr	= vb2_vmalloc_get_userptr,
	.put_userptr	= vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf	= vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf	= vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf	= vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf	= vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf	= vb2_vmalloc_detach_dmabuf,
	.vaddr		= vb2_vmalloc_vaddr,
	.mmap		= vb2_vmalloc_mmap,
	.num_users	= vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");