root/drivers/media/common/videobuf2/videobuf2-vmalloc.c


DEFINITIONS

This source file includes the following definitions:
  1. vb2_vmalloc_alloc
  2. vb2_vmalloc_put
  3. vb2_vmalloc_get_userptr
  4. vb2_vmalloc_put_userptr
  5. vb2_vmalloc_vaddr
  6. vb2_vmalloc_num_users
  7. vb2_vmalloc_mmap
  8. vb2_vmalloc_dmabuf_ops_attach
  9. vb2_vmalloc_dmabuf_ops_detach
  10. vb2_vmalloc_dmabuf_ops_map
  11. vb2_vmalloc_dmabuf_ops_unmap
  12. vb2_vmalloc_dmabuf_ops_release
  13. vb2_vmalloc_dmabuf_ops_kmap
  14. vb2_vmalloc_dmabuf_ops_vmap
  15. vb2_vmalloc_dmabuf_ops_mmap
  16. vb2_vmalloc_get_dmabuf
  17. vb2_vmalloc_map_dmabuf
  18. vb2_vmalloc_unmap_dmabuf
  19. vb2_vmalloc_detach_dmabuf
  20. vb2_vmalloc_attach_dmabuf

/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

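/*
 * Per-buffer state for this allocator. A buffer is backed either by a
 * vmalloc()ed area (MMAP), by pinned user pages (USERPTR), or by an
 * imported dma-buf; only the fields relevant to the backing type are used.
 */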
struct vb2_vmalloc_buf {
        void                            *vaddr;
        struct frame_vector             *vec;
        enum dma_data_direction         dma_dir;
        unsigned long                   size;
        refcount_t                      refcount;
        struct vb2_vmarea_handler       handler;
        struct dma_buf                  *dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

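/*
 * Allocate @size bytes with vmalloc_user() and set up the vm_area handler
 * that lets vb2_common_vm_ops refcount userspace mappings of the buffer.
 */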
static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
                               unsigned long size, enum dma_data_direction dma_dir,
                               gfp_t gfp_flags)
{
        struct vb2_vmalloc_buf *buf;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->size = size;
        buf->vaddr = vmalloc_user(buf->size);
        if (!buf->vaddr) {
                pr_debug("vmalloc of size %lu failed\n", buf->size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        buf->dma_dir = dma_dir;
        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_vmalloc_put;
        buf->handler.arg = buf;

        refcount_set(&buf->refcount, 1);
        return buf;
}

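/*
 * Drop one reference; the vmalloc area and the bookkeeping structure are
 * freed when the last reference goes away.
 */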
static void vb2_vmalloc_put(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (refcount_dec_and_test(&buf->refcount)) {
                vfree(buf->vaddr);
                kfree(buf);
        }
}

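/*
 * Pin the user pages backing [vaddr, vaddr + size) and map them into the
 * kernel. Ranges without struct pages (e.g. VM_PFNMAP mappings) are
 * accepted only if they are physically contiguous, in which case they
 * are ioremap()ed instead.
 */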
static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
                                     unsigned long size,
                                     enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;
        struct frame_vector *vec;
        int n_pages, offset, i;
        int ret = -ENOMEM;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dma_dir = dma_dir;
        offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        vec = vb2_create_framevec(vaddr, size);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_pfnvec_create;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        if (frame_vector_to_pages(vec) < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * We cannot get page pointers for these pfns. Check that the
                 * memory is physically contiguous and use a direct mapping.
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_map;
                buf->vaddr = (__force void *)
                        ioremap_nocache(__pfn_to_phys(nums[0]), size + offset);
        } else {
                buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1,
                                        PAGE_KERNEL);
        }

        if (!buf->vaddr)
                goto fail_map;
        buf->vaddr += offset;
        return buf;

fail_map:
        vb2_destroy_framevec(vec);
fail_pfnvec_create:
        kfree(buf);

        return ERR_PTR(ret);
}

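/*
 * Undo vb2_vmalloc_get_userptr(): drop the kernel mapping, mark the pages
 * dirty if the device may have written to them, and unpin them.
 */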
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
        unsigned int i;
        struct page **pages;
        unsigned int n_pages;

        if (!buf->vec->is_pfns) {
                n_pages = frame_vector_count(buf->vec);
                pages = frame_vector_pages(buf->vec);
                if (vaddr)
                        vm_unmap_ram((void *)vaddr, n_pages);
                if (buf->dma_dir == DMA_FROM_DEVICE ||
                    buf->dma_dir == DMA_BIDIRECTIONAL)
                        for (i = 0; i < n_pages; i++)
                                set_page_dirty_lock(pages[i]);
        } else {
                iounmap((__force void __iomem *)buf->vaddr);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

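/*
 * Return the kernel virtual address of the buffer; this is NULL for an
 * imported dma-buf that has not been mapped via map_dmabuf yet.
 */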
static void *vb2_vmalloc_vaddr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (!buf->vaddr) {
                pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
                return NULL;
        }

        return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        return refcount_read(&buf->refcount);
}

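/*
 * Map the buffer into userspace with remap_vmalloc_range() and install
 * vb2_common_vm_ops so the mapping holds a reference on the buffer.
 */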
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                pr_err("No memory to map\n");
                return -EINVAL;
        }

        ret = remap_vmalloc_range(vma, buf->vaddr, 0);
        if (ret) {
                pr_err("Remapping vmalloc memory, error: %d\n", ret);
                return ret;
        }

        /*
         * Make sure that vm_areas for 2 buffers won't be merged together
         */
        vma->vm_flags           |= VM_DONTEXPAND;

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data    = &buf->handler;
        vma->vm_ops             = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

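/*
 * Build a scatterlist covering the buffer's pages for this attachment.
 * The sg table is only populated here; it is not DMA-mapped until the
 * importer calls map_dma_buf.
 */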
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_vmalloc_attachment *attach;
        struct vb2_vmalloc_buf *buf = dbuf->priv;
        int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
        struct sg_table *sgt;
        struct scatterlist *sg;
        void *vaddr = buf->vaddr;
        int ret;
        int i;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return ret;
        }
        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
                struct page *page = vmalloc_to_page(vaddr);

                if (!page) {
                        sg_free_table(sgt);
                        kfree(attach);
                        return -ENOMEM;
                }
                sg_set_page(sg, page, PAGE_SIZE, 0);
                vaddr += PAGE_SIZE;
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;
        return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

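/*
 * DMA-map the attachment's scatterlist for @dma_dir, reusing the previous
 * mapping if the direction has not changed.
 */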
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dma_dir);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                                dma_dir);
        if (!sgt->nents) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_vmalloc_get_dmabuf */
        vb2_vmalloc_put(dbuf->priv);
}

static void *vb2_vmalloc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
        .attach = vb2_vmalloc_dmabuf_ops_attach,
        .detach = vb2_vmalloc_dmabuf_ops_detach,
        .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
        .map = vb2_vmalloc_dmabuf_ops_kmap,
        .vmap = vb2_vmalloc_dmabuf_ops_vmap,
        .mmap = vb2_vmalloc_dmabuf_ops_mmap,
        .release = vb2_vmalloc_dmabuf_ops_release,
};

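/*
 * Export the buffer as a dma-buf. The exported dma-buf holds its own
 * reference on the buffer, dropped in vb2_vmalloc_dmabuf_ops_release().
 */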
static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_vmalloc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (WARN_ON(!buf->vaddr))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps a reference to the vb2 buffer */
        refcount_inc(&buf->refcount);

        return dbuf;
}
#endif /* CONFIG_HAS_DMA */


/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

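/*
 * Importer side: these vb2_mem_ops callbacks handle dma-bufs received
 * from another exporter, using dma_buf_vmap()/dma_buf_vunmap() to obtain
 * a kernel mapping of the foreign buffer.
 */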
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        buf->vaddr = dma_buf_vmap(buf->dbuf);

        return buf->vaddr ? 0 : -EFAULT;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        dma_buf_vunmap(buf->dbuf, buf->vaddr);
        buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;

        if (buf->vaddr)
                dma_buf_vunmap(buf->dbuf, buf->vaddr);

        kfree(buf);
}

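/*
 * Import a dma-buf: just record the handle and the requested size; the
 * actual kernel mapping is deferred to vb2_vmalloc_map_dmabuf().
 */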
static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
        unsigned long size, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_buf *buf;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dbuf = dbuf;
        buf->dma_dir = dma_dir;
        buf->size = size;

        return buf;
}


const struct vb2_mem_ops vb2_vmalloc_memops = {
        .alloc          = vb2_vmalloc_alloc,
        .put            = vb2_vmalloc_put,
        .get_userptr    = vb2_vmalloc_get_userptr,
        .put_userptr    = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
        .get_dmabuf     = vb2_vmalloc_get_dmabuf,
#endif
        .map_dmabuf     = vb2_vmalloc_map_dmabuf,
        .unmap_dmabuf   = vb2_vmalloc_unmap_dmabuf,
        .attach_dmabuf  = vb2_vmalloc_attach_dmabuf,
        .detach_dmabuf  = vb2_vmalloc_detach_dmabuf,
        .vaddr          = vb2_vmalloc_vaddr,
        .mmap           = vb2_vmalloc_mmap,
        .num_users      = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
