root/drivers/usb/gadget/function/uvc_queue.c

DEFINITIONS

This source file includes the following definitions.
  1. uvc_queue_setup
  2. uvc_buffer_prepare
  3. uvc_buffer_queue
  4. uvcg_queue_init
  5. uvcg_free_buffers
  6. uvcg_alloc_buffers
  7. uvcg_query_buffer
  8. uvcg_queue_buffer
  9. uvcg_dequeue_buffer
  10. uvcg_queue_poll
  11. uvcg_queue_mmap
  12. uvcg_queue_get_unmapped_area
  13. uvcg_queue_cancel
  14. uvcg_queue_enable
  15. uvcg_queue_next_buffer
  16. uvcg_queue_head

// SPDX-License-Identifier: GPL-2.0+
/*
 *      uvc_queue.c  --  USB Video Class driver - Buffers management
 *
 *      Copyright (C) 2005-2010
 *          Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include <media/v4l2-common.h>
#include <media/videobuf2-vmalloc.h>

#include "uvc.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * The video queue is initialized by uvcg_queue_init(). The function performs
 * basic initialization of the uvc_video_queue structure and never fails.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2, and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed by
 * the driver.
 */
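/*
 * The sketch below is illustrative only and not part of the original file.
 * It shows how a caller is expected to use the mutex described above: the
 * same lock handed to uvcg_queue_init() serializes the userspace-facing
 * videobuf2 operations such as uvcg_queue_buffer(). The names
 * example_queue_setup(), example_qbuf() and example_lock are made up for the
 * example; the spinlock side of the scheme is sketched further down, next to
 * uvcg_queue_head().
 */
#if 0  /* illustrative example, not compiled */
static struct uvc_video_queue example_queue;
static struct mutex example_lock;

static int example_queue_setup(void)
{
        mutex_init(&example_lock);

        /* Buffer type chosen for the example. */
        return uvcg_queue_init(&example_queue, V4L2_BUF_TYPE_VIDEO_OUTPUT,
                               &example_lock);
}

static int example_qbuf(struct v4l2_buffer *b)
{
        int ret;

        /* Serialize against other videobuf2 calls on the same queue. */
        mutex_lock(&example_lock);
        ret = uvcg_queue_buffer(&example_queue, b);
        mutex_unlock(&example_lock);

        return ret;
}
#endif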

/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

static int uvc_queue_setup(struct vb2_queue *vq,
                           unsigned int *nbuffers, unsigned int *nplanes,
                           unsigned int sizes[], struct device *alloc_devs[])
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
        struct uvc_video *video = container_of(queue, struct uvc_video, queue);

        if (*nbuffers > UVC_MAX_VIDEO_BUFFERS)
                *nbuffers = UVC_MAX_VIDEO_BUFFERS;

        *nplanes = 1;

        sizes[0] = video->imagesize;

        return 0;
}

static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);

        if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
            vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
                uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
                return -EINVAL;
        }

        if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
                return -ENODEV;

        buf->state = UVC_BUF_STATE_QUEUED;
        buf->mem = vb2_plane_vaddr(vb, 0);
        buf->length = vb2_plane_size(vb, 0);
        if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
                buf->bytesused = 0;
        else
                buf->bytesused = vb2_get_plane_payload(vb, 0);

        return 0;
}

static void uvc_buffer_queue(struct vb2_buffer *vb)
{
        struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);

        if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
                list_add_tail(&buf->queue, &queue->irqqueue);
        } else {
                /* If the device is disconnected, return the buffer to
                 * userspace directly. The next QBUF call will fail with
                 * -ENODEV.
                 */
                buf->state = UVC_BUF_STATE_ERROR;
                vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
        }

        spin_unlock_irqrestore(&queue->irqlock, flags);
}

static const struct vb2_ops uvc_queue_qops = {
        .queue_setup = uvc_queue_setup,
        .buf_prepare = uvc_buffer_prepare,
        .buf_queue = uvc_buffer_queue,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
};

int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
                    struct mutex *lock)
{
        int ret;

        queue->queue.type = type;
        queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
        queue->queue.drv_priv = queue;
        queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
        queue->queue.ops = &uvc_queue_qops;
        queue->queue.lock = lock;
        queue->queue.mem_ops = &vb2_vmalloc_memops;
        queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
                                     | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
        ret = vb2_queue_init(&queue->queue);
        if (ret)
                return ret;

        spin_lock_init(&queue->irqlock);
        INIT_LIST_HEAD(&queue->irqqueue);
        queue->flags = 0;

        return 0;
}

/*
 * Free the video buffers.
 */
void uvcg_free_buffers(struct uvc_video_queue *queue)
{
        vb2_queue_release(&queue->queue);
}

/*
 * Allocate the video buffers.
 */
int uvcg_alloc_buffers(struct uvc_video_queue *queue,
                       struct v4l2_requestbuffers *rb)
{
        int ret;

        ret = vb2_reqbufs(&queue->queue, rb);

        return ret ? ret : rb->count;
}

int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
        return vb2_querybuf(&queue->queue, buf);
}

/*
 * Queue a video buffer.
 *
 * Returns a negative error code on failure. On success, returns 1 if the
 * queue had been paused (uvcg_queue_head() found it empty) and has now been
 * restarted, which the caller typically uses to decide whether to resume
 * video transfers, and 0 otherwise.
 */
int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
        unsigned long flags;
        int ret;

        ret = vb2_qbuf(&queue->queue, NULL, buf);
        if (ret < 0)
                return ret;

        spin_lock_irqsave(&queue->irqlock, flags);
        ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
        queue->flags &= ~UVC_QUEUE_PAUSED;
        spin_unlock_irqrestore(&queue->irqlock, flags);
        return ret;
}
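
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * VIDIOC_QBUF handler using the return value of uvcg_queue_buffer() to
 * decide whether streaming needs to be kicked again after the queue ran
 * dry. The names example_vidioc_qbuf() and example_restart_transfers() are
 * made up.
 */
#if 0  /* illustrative example, not compiled */
static void example_restart_transfers(void)
{
        /* Driver-specific: schedule whatever pumps buffers to the hardware. */
}

static int example_vidioc_qbuf(struct uvc_video_queue *queue,
                               struct v4l2_buffer *b)
{
        int ret;

        ret = uvcg_queue_buffer(queue, b);
        if (ret < 0)
                return ret;

        /* A positive return value means the queue had been paused. */
        if (ret > 0)
                example_restart_transfers();

        return 0;
}
#endif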

/*
 * Dequeue a video buffer. If nonblocking is false, block until a buffer is
 * available.
 */
int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
                        int nonblocking)
{
        return vb2_dqbuf(&queue->queue, buf, nonblocking);
}

/*
 * Poll the video queue.
 *
 * This function implements video queue polling and is intended to be used by
 * the device poll handler.
 */
__poll_t uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
                         poll_table *wait)
{
        return vb2_poll(&queue->queue, file, wait);
}
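
/*
 * Illustrative sketch, not part of the original file: a minimal
 * v4l2_file_operations-style .poll handler built on uvcg_queue_poll(). It
 * assumes, purely for the example, that the queue can be reached through
 * file->private_data; a real driver derives it from its own private
 * structures.
 */
#if 0  /* illustrative example, not compiled */
static __poll_t example_v4l2_poll(struct file *file, poll_table *wait)
{
        struct uvc_video_queue *queue = file->private_data;

        return uvcg_queue_poll(queue, file, wait);
}
#endif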

int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
        return vb2_mmap(&queue->queue, vma);
}

#ifndef CONFIG_MMU
/*
 * Get unmapped area.
 *
 * No-MMU architectures need this function to make mmap() work correctly.
 */
unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue,
                                           unsigned long pgoff)
{
        return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the IRQ queue as erroneous,
 * removes them from the queue and hands them back to videobuf2, potentially
 * waking up any process waiting on a buffer.
 *
 * If the disconnect parameter is set, further calls to uvcg_queue_buffer()
 * will fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
        struct uvc_buffer *buf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        while (!list_empty(&queue->irqqueue)) {
                buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
                                       queue);
                list_del(&buf->queue);
                buf->state = UVC_BUF_STATE_ERROR;
                vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
        }
        /* This must be protected by the irqlock spinlock to avoid race
         * conditions between uvcg_queue_buffer() and the disconnection event
         * that could result in an interruptible wait in uvcg_dequeue_buffer().
         * Do not blindly replace this logic by checking for the
         * UVC_DEV_DISCONNECTED state outside the queue code.
         */
        if (disconnect)
                queue->flags |= UVC_QUEUE_DISCONNECTED;
        spin_unlock_irqrestore(&queue->irqlock, flags);
}
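
/*
 * Illustrative sketch, not part of the original file: how a disconnect
 * handler might use uvcg_queue_cancel(). Passing disconnect = 1 marks the
 * queue as disconnected so that later QBUF calls fail with -ENODEV, while
 * passing 0 only flushes the IRQ queue. The name example_disconnect() is
 * made up.
 */
#if 0  /* illustrative example, not compiled */
static void example_disconnect(struct uvc_video_queue *queue)
{
        /* Safe from interrupt context: only the irq spinlock is taken. */
        uvcg_queue_cancel(queue, 1);
}
#endif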

/*
 * Enable or disable the video buffers queue.
 *
 * The queue must be enabled before starting video acquisition and must be
 * disabled after stopping it. This ensures that the video buffers queue
 * state can be properly initialized before buffers are accessed from the
 * interrupt handler.
 *
 * Enabling the video queue initializes parameters (such as sequence number,
 * sync pattern, ...). If the queue is already enabled, return -EBUSY.
 *
 * Disabling the video queue cancels the queue and removes all buffers from
 * the main queue.
 *
 * This function can't be called from interrupt context. Use
 * uvcg_queue_cancel() instead.
 */
int uvcg_queue_enable(struct uvc_video_queue *queue, int enable)
{
        unsigned long flags;
        int ret = 0;

        if (enable) {
                ret = vb2_streamon(&queue->queue, queue->queue.type);
                if (ret < 0)
                        return ret;

                queue->sequence = 0;
                queue->buf_used = 0;
        } else {
                ret = vb2_streamoff(&queue->queue, queue->queue.type);
                if (ret < 0)
                        return ret;

                spin_lock_irqsave(&queue->irqlock, flags);
                INIT_LIST_HEAD(&queue->irqqueue);

                /*
                 * FIXME: We need to clear the DISCONNECTED flag to ensure that
                 * applications will be able to queue buffers for the next
                 * streaming run. However, clearing it here doesn't guarantee
                 * that the device will be reconnected in the meantime.
                 */
                queue->flags &= ~UVC_QUEUE_DISCONNECTED;
                spin_unlock_irqrestore(&queue->irqlock, flags);
        }

        return ret;
}
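
/*
 * Illustrative sketch, not part of the original file: hypothetical
 * VIDIOC_STREAMON/VIDIOC_STREAMOFF helpers built on uvcg_queue_enable().
 * The names example_streamon() and example_streamoff() are made up.
 */
#if 0  /* illustrative example, not compiled */
static int example_streamon(struct uvc_video_queue *queue)
{
        /* Must not be called from interrupt context. */
        return uvcg_queue_enable(queue, 1);
}

static int example_streamoff(struct uvc_video_queue *queue)
{
        /* Returns all buffers to videobuf2 and resets the IRQ queue. */
        return uvcg_queue_enable(queue, 0);
}
#endif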

/*
 * Complete the current buffer and return the next buffer on the IRQ queue,
 * or NULL if the queue is empty. If the UVC_QUEUE_DROP_INCOMPLETE flag is
 * set, an incomplete buffer (bytesused != length) is reset and reused
 * instead of being returned to userspace.
 *
 * Called with queue->irqlock held.
 */
struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
                                          struct uvc_buffer *buf)
{
        struct uvc_buffer *nextbuf;

        if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
             buf->length != buf->bytesused) {
                buf->state = UVC_BUF_STATE_QUEUED;
                vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
                return buf;
        }

        list_del(&buf->queue);
        if (!list_empty(&queue->irqqueue))
                nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
                                           queue);
        else
                nextbuf = NULL;

        buf->buf.field = V4L2_FIELD_NONE;
        buf->buf.sequence = queue->sequence++;
        buf->buf.vb2_buf.timestamp = ktime_get_ns();

        vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
        vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);

        return nextbuf;
}

/*
 * Return the buffer at the head of the IRQ queue, or NULL if the queue is
 * empty. When the queue is empty it is also marked as paused, so that the
 * next uvcg_queue_buffer() call can report that streaming needs to be
 * restarted.
 */
struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue)
{
        struct uvc_buffer *buf = NULL;

        if (!list_empty(&queue->irqqueue))
                buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
                                       queue);
        else
                queue->flags |= UVC_QUEUE_PAUSED;

        return buf;
}

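/*
 * Illustrative sketch, not part of the original file: the spinlock side of
 * the locking scheme described at the top of the file. A completion path
 * walks the IRQ queue with queue->irqlock held, as required by
 * uvcg_queue_head() and uvcg_queue_next_buffer(). The name
 * example_complete() is made up and the "fill the buffer" step is only a
 * placeholder.
 */
#if 0  /* illustrative example, not compiled */
static void example_complete(struct uvc_video_queue *queue)
{
        struct uvc_buffer *buf;
        unsigned long flags;

        spin_lock_irqsave(&queue->irqlock, flags);
        buf = uvcg_queue_head(queue);
        if (buf) {
                /* ... copy data into buf->mem and set buf->bytesused ... */
                uvcg_queue_next_buffer(queue, buf);
        }
        spin_unlock_irqrestore(&queue->irqlock, flags);
}
#endif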