This source file includes the following definitions:
- __verify_planes_array
- __verify_planes_array_core
- __verify_length
- __init_vb2_v4l2_buffer
- __copy_timestamp
- vb2_warn_zero_bytesused
- vb2_fill_vb2_v4l2_buffer
- vb2_queue_or_prepare_buf
- __fill_v4l2_buffer
- __fill_vb2_buffer
- vb2_find_timestamp
- vb2_querybuf
- fill_buf_caps
- vb2_reqbufs
- vb2_prepare_buf
- vb2_create_bufs
- vb2_qbuf
- vb2_dqbuf
- vb2_streamon
- vb2_streamoff
- vb2_expbuf
- vb2_queue_init
- vb2_queue_release
- vb2_poll
- vb2_queue_is_busy
- vb2_ioctl_reqbufs
- vb2_ioctl_create_bufs
- vb2_ioctl_prepare_buf
- vb2_ioctl_querybuf
- vb2_ioctl_qbuf
- vb2_ioctl_dqbuf
- vb2_ioctl_streamon
- vb2_ioctl_streamoff
- vb2_ioctl_expbuf
- vb2_fop_mmap
- _vb2_fop_release
- vb2_fop_release
- vb2_fop_write
- vb2_fop_read
- vb2_fop_poll
- vb2_fop_get_unmapped_area
- vb2_ops_wait_prepare
- vb2_ops_wait_finish
- vb2_request_validate
- vb2_request_queue
1 /*
2  * videobuf2-v4l2.c - V4L2 driver helper framework
3  *
4  * Copyright (C) 2010 Samsung Electronics
5  *
6  * Author: Pawel Osciak <pawel@osciak.com>
7  *	   Marek Szyprowski
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation.
12  */
13
14
15
16
17 #include <linux/err.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/mm.h>
21 #include <linux/poll.h>
22 #include <linux/slab.h>
23 #include <linux/sched.h>
24 #include <linux/freezer.h>
25 #include <linux/kthread.h>
26
27 #include <media/v4l2-dev.h>
28 #include <media/v4l2-device.h>
29 #include <media/v4l2-fh.h>
30 #include <media/v4l2-event.h>
31 #include <media/v4l2-common.h>
32
33 #include <media/videobuf2-v4l2.h>
34
35 static int debug;
36 module_param(debug, int, 0644);
37
38 #define dprintk(level, fmt, arg...) \
39 do { \
40 if (debug >= level) \
41 pr_info("vb2-v4l2: %s: " fmt, __func__, ## arg); \
42 } while (0)
43
44
45 #define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
46 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
47 V4L2_BUF_FLAG_PREPARED | \
48 V4L2_BUF_FLAG_IN_REQUEST | \
49 V4L2_BUF_FLAG_REQUEST_FD | \
50 V4L2_BUF_FLAG_TIMESTAMP_MASK)
51
52 #define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
53 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
54
55 /*
56  * __verify_planes_array() - verify that the planes array passed in struct
57  * v4l2_buffer from userspace can be safely used
58  */
59 static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
60 {
61 if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
62 return 0;
63
64 /* Is memory for copying plane information present? */
65 if (b->m.planes == NULL) {
66 dprintk(1, "multi-planar buffer passed but planes array not provided\n");
67 return -EINVAL;
68 }
69
70 if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
71 dprintk(1, "incorrect planes array length, expected %d, got %d\n",
72 vb->num_planes, b->length);
73 return -EINVAL;
74 }
75
76 return 0;
77 }
78
79 static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
80 {
81 return __verify_planes_array(vb, pb);
82 }
83
84 /*
85  * __verify_length() - verify that the bytesused value for each plane fits in
86  * the plane length and that the data offset doesn't exceed the bytesused value
87  */
88 static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
89 {
90 unsigned int length;
91 unsigned int bytesused;
92 unsigned int plane;
93
94 if (!V4L2_TYPE_IS_OUTPUT(b->type))
95 return 0;
96
97 if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
98 for (plane = 0; plane < vb->num_planes; ++plane) {
99 length = (b->memory == VB2_MEMORY_USERPTR ||
100 b->memory == VB2_MEMORY_DMABUF)
101 ? b->m.planes[plane].length
102 : vb->planes[plane].length;
103 bytesused = b->m.planes[plane].bytesused
104 ? b->m.planes[plane].bytesused : length;
105
106 if (b->m.planes[plane].bytesused > length)
107 return -EINVAL;
108
109 if (b->m.planes[plane].data_offset > 0 &&
110 b->m.planes[plane].data_offset >= bytesused)
111 return -EINVAL;
112 }
113 } else {
114 length = (b->memory == VB2_MEMORY_USERPTR)
115 ? b->length : vb->planes[0].length;
116
117 if (b->bytesused > length)
118 return -EINVAL;
119 }
120
121 return 0;
122 }
123
124
125
126
127 static void __init_vb2_v4l2_buffer(struct vb2_buffer *vb)
128 {
129 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
130
131 vbuf->request_fd = -1;
132 }
133
134 static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
135 {
136 const struct v4l2_buffer *b = pb;
137 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
138 struct vb2_queue *q = vb->vb2_queue;
139
140 if (q->is_output) {
141 /*
142  * For output buffers copy the timestamp if needed,
143  * and the timecode field and flag if needed.
144  */
145 if (q->copy_timestamp)
146 vb->timestamp = v4l2_timeval_to_ns(&b->timestamp);
147 vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
148 if (b->flags & V4L2_BUF_FLAG_TIMECODE)
149 vbuf->timecode = b->timecode;
150 }
151 }
152
153 static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
154 {
155 static bool check_once;
156
157 if (check_once)
158 return;
159
160 check_once = true;
161
162 pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
163 if (vb->vb2_queue->allow_zero_bytesused)
164 pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
165 else
166 pr_warn("use the actual size instead.\n");
167 }
168
169 static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
170 {
171 struct vb2_queue *q = vb->vb2_queue;
172 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
173 struct vb2_plane *planes = vbuf->planes;
174 unsigned int plane;
175 int ret;
176
177 ret = __verify_length(vb, b);
178 if (ret < 0) {
179 dprintk(1, "plane parameters verification failed: %d\n", ret);
180 return ret;
181 }
182 if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
183 /*
184  * If the format's field is ALTERNATE, then the buffer's field
185  * should be either TOP or BOTTOM, not ALTERNATE, since that
186  * makes no sense: the driver has to know whether the buffer
187  * represents a top or a bottom field in order to program any
188  * DMA correctly. Applications are not supposed to know what is
189  * going on under the hood, so ALTERNATE is rejected here for
190  * output buffers.
191  */
192 dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
193 return -EINVAL;
194 }
195 vbuf->sequence = 0;
196 vbuf->request_fd = -1;
197
198 if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
199 switch (b->memory) {
200 case VB2_MEMORY_USERPTR:
201 for (plane = 0; plane < vb->num_planes; ++plane) {
202 planes[plane].m.userptr =
203 b->m.planes[plane].m.userptr;
204 planes[plane].length =
205 b->m.planes[plane].length;
206 }
207 break;
208 case VB2_MEMORY_DMABUF:
209 for (plane = 0; plane < vb->num_planes; ++plane) {
210 planes[plane].m.fd =
211 b->m.planes[plane].m.fd;
212 planes[plane].length =
213 b->m.planes[plane].length;
214 }
215 break;
216 default:
217 for (plane = 0; plane < vb->num_planes; ++plane) {
218 planes[plane].m.offset =
219 vb->planes[plane].m.offset;
220 planes[plane].length =
221 vb->planes[plane].length;
222 }
223 break;
224 }
225
226
227 if (V4L2_TYPE_IS_OUTPUT(b->type)) {
228 /*
229  * Use num_planes for now; this will have to go up to b->length
230  * once the API starts accepting a variable number of planes.
231  *
232  * If bytesused == 0 for the output buffer, then fall back
233  * to the full buffer size. In that case userspace clearly
234  * never bothered to set it and it is a safe assumption that
235  * they really meant to use the full plane sizes.
236  *
237  * Some drivers, e.g. old codec drivers, use bytesused == 0
238  * as a way to indicate that streaming is finished.
239  * In that case, the driver should use the allow_zero_bytesused
240  * flag to keep old userspace applications working.
241  */
242
243
244 for (plane = 0; plane < vb->num_planes; ++plane) {
245 struct vb2_plane *pdst = &planes[plane];
246 struct v4l2_plane *psrc = &b->m.planes[plane];
247
248 if (psrc->bytesused == 0)
249 vb2_warn_zero_bytesused(vb);
250
251 if (vb->vb2_queue->allow_zero_bytesused)
252 pdst->bytesused = psrc->bytesused;
253 else
254 pdst->bytesused = psrc->bytesused ?
255 psrc->bytesused : pdst->length;
256 pdst->data_offset = psrc->data_offset;
257 }
258 }
259 } else {
260 /*
261  * Single-planar buffers do not use the planes array, so fill in
262  * the relevant v4l2_buffer struct fields instead. Internally the
263  * plane 0 entry of the planes array is used for single-planar
264  * buffers as well, for simplicity.
265  *
266  * If bytesused == 0 for the output buffer, then fall back to the
267  * full buffer size as that is a sensible default.
268  *
269  * Some drivers, e.g. old codec drivers, use bytesused == 0 as a
270  * way to indicate that streaming is finished. In that case, the
271  * driver should use the allow_zero_bytesused flag to keep old
272  * userspace applications working.
273  */
274 switch (b->memory) {
275 case VB2_MEMORY_USERPTR:
276 planes[0].m.userptr = b->m.userptr;
277 planes[0].length = b->length;
278 break;
279 case VB2_MEMORY_DMABUF:
280 planes[0].m.fd = b->m.fd;
281 planes[0].length = b->length;
282 break;
283 default:
284 planes[0].m.offset = vb->planes[0].m.offset;
285 planes[0].length = vb->planes[0].length;
286 break;
287 }
288
289 planes[0].data_offset = 0;
290 if (V4L2_TYPE_IS_OUTPUT(b->type)) {
291 if (b->bytesused == 0)
292 vb2_warn_zero_bytesused(vb);
293
294 if (vb->vb2_queue->allow_zero_bytesused)
295 planes[0].bytesused = b->bytesused;
296 else
297 planes[0].bytesused = b->bytesused ?
298 b->bytesused : planes[0].length;
299 } else
300 planes[0].bytesused = 0;
301
302 }
303
304
305 vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
306 if (!vb->vb2_queue->copy_timestamp || !V4L2_TYPE_IS_OUTPUT(b->type)) {
307 /*
308  * Non-COPY timestamps and non-OUTPUT queues will get their
309  * timestamp and timestamp source flags from the queue, so
310  * drop the source bits supplied by userspace here.
311  */
312 vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
313 }
314
315 if (V4L2_TYPE_IS_OUTPUT(b->type)) {
316 /*
317  * For output buffers mask out the timecode flag:
318  * this will be handled later in vb2_qbuf().
319  * The 'field' is valid metadata for this output buffer
320  * and so it needs to be copied here.
321  */
322 vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
323 vbuf->field = b->field;
324 } else {
325 /* Zero any output buffer flags as this is a capture buffer */
326 vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
327 /* Zero the 'last buffer' flag, as it is a signal from driver to userspace */
328 vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
329 }
330
331 return 0;
332 }
333
334 static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
335 struct v4l2_buffer *b, bool is_prepare,
336 struct media_request **p_req)
337 {
338 const char *opname = is_prepare ? "prepare_buf" : "qbuf";
339 struct media_request *req;
340 struct vb2_v4l2_buffer *vbuf;
341 struct vb2_buffer *vb;
342 int ret;
343
344 if (b->type != q->type) {
345 dprintk(1, "%s: invalid buffer type\n", opname);
346 return -EINVAL;
347 }
348
349 if (b->index >= q->num_buffers) {
350 dprintk(1, "%s: buffer index out of range\n", opname);
351 return -EINVAL;
352 }
353
354 if (q->bufs[b->index] == NULL) {
355 /* Should never happen */
356 dprintk(1, "%s: buffer is NULL\n", opname);
357 return -EINVAL;
358 }
359
360 if (b->memory != q->memory) {
361 dprintk(1, "%s: invalid memory type\n", opname);
362 return -EINVAL;
363 }
364
365 vb = q->bufs[b->index];
366 vbuf = to_vb2_v4l2_buffer(vb);
367 ret = __verify_planes_array(vb, b);
368 if (ret)
369 return ret;
370
371 if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) &&
372 vb->state != VB2_BUF_STATE_DEQUEUED) {
373 dprintk(1, "%s: buffer is not in dequeued state\n", opname);
374 return -EINVAL;
375 }
376
377 if (!vb->prepared) {
378 /* Copy relevant information provided by userspace */
379 memset(vbuf->planes, 0,
380 sizeof(vbuf->planes[0]) * vb->num_planes);
381 ret = vb2_fill_vb2_v4l2_buffer(vb, b);
382 if (ret)
383 return ret;
384 }
385
386 if (is_prepare)
387 return 0;
388
389 if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
390 if (q->requires_requests) {
391 dprintk(1, "%s: queue requires requests\n", opname);
392 return -EBADR;
393 }
394 if (q->uses_requests) {
395 dprintk(1, "%s: queue uses requests\n", opname);
396 return -EBUSY;
397 }
398 return 0;
399 } else if (!q->supports_requests) {
400 dprintk(1, "%s: queue does not support requests\n", opname);
401 return -EBADR;
402 } else if (q->uses_qbuf) {
403 dprintk(1, "%s: queue does not use requests\n", opname);
404 return -EBUSY;
405 }
406
407 /*
408  * For proper locking when queueing a request you need to be able
409  * to serialize the request objects, so the queue lock is required
410  * as well.
411  */
412 if (WARN_ON(!q->lock || !p_req))
413 return -EINVAL;
414
415 /*
416  * Make sure the buf_request_complete op is implemented by the
417  * driver. It is easy to forget this callback, but it is important
418  * when a buffer in a queued request is cancelled.
419  */
420 if (WARN_ON(!q->ops->buf_request_complete))
421 return -EINVAL;
422
423 /*
424  * Output queues must implement buf_out_validate when requests are
425  * used, so that the buffer's 'field' can be validated at QBUF time.
426  */
427 if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
428 q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
429 !q->ops->buf_out_validate))
430 return -EINVAL;
431
432 if (b->request_fd < 0) {
433 dprintk(1, "%s: request_fd < 0\n", opname);
434 return -EINVAL;
435 }
436
437 req = media_request_get_by_fd(mdev, b->request_fd);
438 if (IS_ERR(req)) {
439 dprintk(1, "%s: invalid request_fd\n", opname);
440 return PTR_ERR(req);
441 }
442
443 /*
444  * Early sanity check: this is checked again when the buffer
445  * is bound to the request in vb2_core_qbuf().
446  */
447 if (req->state != MEDIA_REQUEST_STATE_IDLE &&
448 req->state != MEDIA_REQUEST_STATE_UPDATING) {
449 dprintk(1, "%s: request is not idle\n", opname);
450 media_request_put(req);
451 return -EBUSY;
452 }
453
454 *p_req = req;
455 vbuf->request_fd = b->request_fd;
456
457 return 0;
458 }
459
460 /*
461  * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
462  * returned to userspace
463  */
464 static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
465 {
466 struct v4l2_buffer *b = pb;
467 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
468 struct vb2_queue *q = vb->vb2_queue;
469 unsigned int plane;
470
471 /* Copy back data such as timestamp, flags, etc. */
472 b->index = vb->index;
473 b->type = vb->type;
474 b->memory = vb->memory;
475 b->bytesused = 0;
476
477 b->flags = vbuf->flags;
478 b->field = vbuf->field;
479 b->timestamp = ns_to_timeval(vb->timestamp);
480 b->timecode = vbuf->timecode;
481 b->sequence = vbuf->sequence;
482 b->reserved2 = 0;
483 b->request_fd = 0;
484
485 if (q->is_multiplanar) {
486 /*
487  * Fill in plane-related data if userspace provided an array
488  * for it. The plane count and memory were verified earlier.
489  */
490 b->length = vb->num_planes;
491 for (plane = 0; plane < vb->num_planes; ++plane) {
492 struct v4l2_plane *pdst = &b->m.planes[plane];
493 struct vb2_plane *psrc = &vb->planes[plane];
494
495 pdst->bytesused = psrc->bytesused;
496 pdst->length = psrc->length;
497 if (q->memory == VB2_MEMORY_MMAP)
498 pdst->m.mem_offset = psrc->m.offset;
499 else if (q->memory == VB2_MEMORY_USERPTR)
500 pdst->m.userptr = psrc->m.userptr;
501 else if (q->memory == VB2_MEMORY_DMABUF)
502 pdst->m.fd = psrc->m.fd;
503 pdst->data_offset = psrc->data_offset;
504 memset(pdst->reserved, 0, sizeof(pdst->reserved));
505 }
506 } else {
507 /*
508  * Single-planar buffers do not use the planes array, so fill in
509  * the relevant v4l2_buffer struct fields from plane 0 instead.
510  */
511 b->length = vb->planes[0].length;
512 b->bytesused = vb->planes[0].bytesused;
513 if (q->memory == VB2_MEMORY_MMAP)
514 b->m.offset = vb->planes[0].m.offset;
515 else if (q->memory == VB2_MEMORY_USERPTR)
516 b->m.userptr = vb->planes[0].m.userptr;
517 else if (q->memory == VB2_MEMORY_DMABUF)
518 b->m.fd = vb->planes[0].m.fd;
519 }
520
521 /*
522  * Clear any buffer state related flags.
523  */
524 b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
525 b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
526 if (!q->copy_timestamp) {
527 /*
528  * For non-COPY timestamps, drop the timestamp source bits
529  * and obtain the timestamp source from the queue instead.
530  */
531 b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
532 b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
533 }
534
535 switch (vb->state) {
536 case VB2_BUF_STATE_QUEUED:
537 case VB2_BUF_STATE_ACTIVE:
538 b->flags |= V4L2_BUF_FLAG_QUEUED;
539 break;
540 case VB2_BUF_STATE_IN_REQUEST:
541 b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
542 break;
543 case VB2_BUF_STATE_ERROR:
544 b->flags |= V4L2_BUF_FLAG_ERROR;
545 /* fall through */
546 case VB2_BUF_STATE_DONE:
547 b->flags |= V4L2_BUF_FLAG_DONE;
548 break;
549 case VB2_BUF_STATE_PREPARING:
550 case VB2_BUF_STATE_DEQUEUED:
551 /* nothing */
552 break;
553 }
554
555 if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
556 vb->state == VB2_BUF_STATE_IN_REQUEST) &&
557 vb->synced && vb->prepared)
558 b->flags |= V4L2_BUF_FLAG_PREPARED;
559
560 if (vb2_buffer_in_use(q, vb))
561 b->flags |= V4L2_BUF_FLAG_MAPPED;
562 if (vbuf->request_fd >= 0) {
563 b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
564 b->request_fd = vbuf->request_fd;
565 }
566 }
567
568 /*
569  * __fill_vb2_buffer() - fill a vb2_buffer's plane information from the
570  * shadow copy stored in the vb2_v4l2_buffer (which was previously filled
571  * from the userspace v4l2_buffer by vb2_fill_vb2_v4l2_buffer()).
572  */
573 static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
574 {
575 struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
576 unsigned int plane;
577
578 if (!vb->vb2_queue->copy_timestamp)
579 vb->timestamp = 0;
580
581 for (plane = 0; plane < vb->num_planes; ++plane) {
582 if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
583 planes[plane].m = vbuf->planes[plane].m;
584 planes[plane].length = vbuf->planes[plane].length;
585 }
586 planes[plane].bytesused = vbuf->planes[plane].bytesused;
587 planes[plane].data_offset = vbuf->planes[plane].data_offset;
588 }
589 return 0;
590 }
591
592 static const struct vb2_buf_ops v4l2_buf_ops = {
593 .verify_planes_array = __verify_planes_array_core,
594 .init_buffer = __init_vb2_v4l2_buffer,
595 .fill_user_buffer = __fill_v4l2_buffer,
596 .fill_vb2_buffer = __fill_vb2_buffer,
597 .copy_timestamp = __copy_timestamp,
598 };
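/*
 * Note: vb2_queue_init() below installs v4l2_buf_ops in q->buf_ops; this is
 * how the videobuf2 core converts between its own struct vb2_buffer and the
 * V4L2 struct v4l2_buffer representation.
 */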
599
600 int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
601 unsigned int start_idx)
602 {
603 unsigned int i;
604
605 for (i = start_idx; i < q->num_buffers; i++)
606 if (q->bufs[i]->copied_timestamp &&
607 q->bufs[i]->timestamp == timestamp)
608 return i;
609 return -1;
610 }
611 EXPORT_SYMBOL_GPL(vb2_find_timestamp);
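/*
 * Illustrative sketch (not part of this file): a stateless codec driver can
 * use vb2_find_timestamp() to pair an OUTPUT buffer with the CAPTURE buffer
 * carrying the same copied timestamp. The names "cap_q" and "ts" below are
 * hypothetical:
 *
 *	int idx = vb2_find_timestamp(cap_q, ts, 0);
 *	struct vb2_buffer *vb = idx < 0 ? NULL : cap_q->bufs[idx];
 *
 * A negative return value means no buffer with a copied timestamp equal to
 * "ts" was found.
 */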
612
613 /**
614  * vb2_querybuf() - query video buffer information
615  * @q:		videobuf2 queue
616  * @b:		buffer struct passed from userspace to the VIDIOC_QUERYBUF
617  *		handler in the driver
618  *
619  * Should be called from the VIDIOC_QUERYBUF ioctl handler in the driver.
620  * The passed buffer should have been verified.
621  * This function fills the relevant information for userspace.
622  *
623  * The return values from this function are intended to be directly returned
624  * from the VIDIOC_QUERYBUF handler in the driver.
625  */
626 int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
627 {
628 struct vb2_buffer *vb;
629 int ret;
630
631 if (b->type != q->type) {
632 dprintk(1, "wrong buffer type\n");
633 return -EINVAL;
634 }
635
636 if (b->index >= q->num_buffers) {
637 dprintk(1, "buffer index out of range\n");
638 return -EINVAL;
639 }
640 vb = q->bufs[b->index];
641 ret = __verify_planes_array(vb, b);
642 if (!ret)
643 vb2_core_querybuf(q, b->index, b);
644 return ret;
645 }
646 EXPORT_SYMBOL(vb2_querybuf);
647
648 static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
649 {
650 *caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
651 if (q->io_modes & VB2_MMAP)
652 *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
653 if (q->io_modes & VB2_USERPTR)
654 *caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
655 if (q->io_modes & VB2_DMABUF)
656 *caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
657 #ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
658 if (q->supports_requests)
659 *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
660 #endif
661 }
662
663 int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
664 {
665 int ret = vb2_verify_memory_type(q, req->memory, req->type);
666
667 fill_buf_caps(q, &req->capabilities);
668 return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
669 }
670 EXPORT_SYMBOL_GPL(vb2_reqbufs);
671
672 int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
673 struct v4l2_buffer *b)
674 {
675 int ret;
676
677 if (vb2_fileio_is_active(q)) {
678 dprintk(1, "file io in progress\n");
679 return -EBUSY;
680 }
681
682 if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
683 return -EINVAL;
684
685 ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);
686
687 return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
688 }
689 EXPORT_SYMBOL_GPL(vb2_prepare_buf);
690
691 int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
692 {
693 unsigned requested_planes = 1;
694 unsigned requested_sizes[VIDEO_MAX_PLANES];
695 struct v4l2_format *f = &create->format;
696 int ret = vb2_verify_memory_type(q, create->memory, f->type);
697 unsigned i;
698
699 fill_buf_caps(q, &create->capabilities);
700 create->index = q->num_buffers;
701 if (create->count == 0)
702 return ret != -EBUSY ? ret : 0;
703
704 switch (f->type) {
705 case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
706 case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
707 requested_planes = f->fmt.pix_mp.num_planes;
708 if (requested_planes == 0 ||
709 requested_planes > VIDEO_MAX_PLANES)
710 return -EINVAL;
711 for (i = 0; i < requested_planes; i++)
712 requested_sizes[i] =
713 f->fmt.pix_mp.plane_fmt[i].sizeimage;
714 break;
715 case V4L2_BUF_TYPE_VIDEO_CAPTURE:
716 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
717 requested_sizes[0] = f->fmt.pix.sizeimage;
718 break;
719 case V4L2_BUF_TYPE_VBI_CAPTURE:
720 case V4L2_BUF_TYPE_VBI_OUTPUT:
721 requested_sizes[0] = f->fmt.vbi.samples_per_line *
722 (f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
723 break;
724 case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
725 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
726 requested_sizes[0] = f->fmt.sliced.io_size;
727 break;
728 case V4L2_BUF_TYPE_SDR_CAPTURE:
729 case V4L2_BUF_TYPE_SDR_OUTPUT:
730 requested_sizes[0] = f->fmt.sdr.buffersize;
731 break;
732 case V4L2_BUF_TYPE_META_CAPTURE:
733 case V4L2_BUF_TYPE_META_OUTPUT:
734 requested_sizes[0] = f->fmt.meta.buffersize;
735 break;
736 default:
737 return -EINVAL;
738 }
739 for (i = 0; i < requested_planes; i++)
740 if (requested_sizes[i] == 0)
741 return -EINVAL;
742 return ret ? ret : vb2_core_create_bufs(q, create->memory,
743 &create->count, requested_planes, requested_sizes);
744 }
745 EXPORT_SYMBOL_GPL(vb2_create_bufs);
746
747 int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
748 struct v4l2_buffer *b)
749 {
750 struct media_request *req = NULL;
751 int ret;
752
753 if (vb2_fileio_is_active(q)) {
754 dprintk(1, "file io in progress\n");
755 return -EBUSY;
756 }
757
758 ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
759 if (ret)
760 return ret;
761 ret = vb2_core_qbuf(q, b->index, b, req);
762 if (req)
763 media_request_put(req);
764 return ret;
765 }
766 EXPORT_SYMBOL_GPL(vb2_qbuf);
767
768 int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
769 {
770 int ret;
771
772 if (vb2_fileio_is_active(q)) {
773 dprintk(1, "file io in progress\n");
774 return -EBUSY;
775 }
776
777 if (b->type != q->type) {
778 dprintk(1, "invalid buffer type\n");
779 return -EINVAL;
780 }
781
782 ret = vb2_core_dqbuf(q, NULL, b, nonblocking);
783
784 if (!q->is_output &&
785 b->flags & V4L2_BUF_FLAG_DONE &&
786 b->flags & V4L2_BUF_FLAG_LAST)
787 q->last_buffer_dequeued = true;
788
789
790
791
792
793 b->flags &= ~V4L2_BUF_FLAG_DONE;
794
795 return ret;
796 }
797 EXPORT_SYMBOL_GPL(vb2_dqbuf);
798
799 int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
800 {
801 if (vb2_fileio_is_active(q)) {
802 dprintk(1, "file io in progress\n");
803 return -EBUSY;
804 }
805 return vb2_core_streamon(q, type);
806 }
807 EXPORT_SYMBOL_GPL(vb2_streamon);
808
809 int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
810 {
811 if (vb2_fileio_is_active(q)) {
812 dprintk(1, "file io in progress\n");
813 return -EBUSY;
814 }
815 return vb2_core_streamoff(q, type);
816 }
817 EXPORT_SYMBOL_GPL(vb2_streamoff);
818
819 int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
820 {
821 return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
822 eb->plane, eb->flags);
823 }
824 EXPORT_SYMBOL_GPL(vb2_expbuf);
825
826 int vb2_queue_init(struct vb2_queue *q)
827 {
828 /*
829  * Sanity check: the queue and its timestamp flags must be valid.
830  */
831 if (WARN_ON(!q) ||
832 WARN_ON(q->timestamp_flags &
833 ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
834 V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
835 return -EINVAL;
836
837 /* Warn that the driver should choose an appropriate timestamp type */
838 WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
839 V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
840
841 /* The vb2 and V4L2 memory enums must have matching values */
842 if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
843 || WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
844 || WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
845 return -EINVAL;
846
847 if (q->buf_struct_size == 0)
848 q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
849
850 q->buf_ops = &v4l2_buf_ops;
851 q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
852 q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
853 q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
854 == V4L2_BUF_FLAG_TIMESTAMP_COPY;
855
856 /*
857  * For compatibility with older applications: poll() must also check
858  * waiting_for_buffers and return EPOLLERR if no buffers were queued yet.
859  */
860 q->quirk_poll_must_check_waiting_for_buffers = true;
861
862 return vb2_core_queue_init(q);
863 }
864 EXPORT_SYMBOL_GPL(vb2_queue_init);
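/*
 * Illustrative sketch (not part of this file): a typical driver fills in the
 * queue before calling vb2_queue_init(). The values below are examples only;
 * "my_ctx", "my_buffer", "my_vb2_ops" and "my_mutex" are hypothetical names:
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_DMABUF;
 *	q->drv_priv = my_ctx;
 *	q->buf_struct_size = sizeof(struct my_buffer);
 *	q->ops = &my_vb2_ops;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	q->lock = &my_mutex;
 *	ret = vb2_queue_init(q);
 */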
865
866 void vb2_queue_release(struct vb2_queue *q)
867 {
868 vb2_core_queue_release(q);
869 }
870 EXPORT_SYMBOL_GPL(vb2_queue_release);
871
872 __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
873 {
874 struct video_device *vfd = video_devdata(file);
875 __poll_t res;
876
877 res = vb2_core_poll(q, file, wait);
878
879 if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
880 struct v4l2_fh *fh = file->private_data;
881
882 poll_wait(file, &fh->wait, wait);
883 if (v4l2_event_pending(fh))
884 res |= EPOLLPRI;
885 }
886
887 return res;
888 }
889 EXPORT_SYMBOL_GPL(vb2_poll);
890
891 /*
892  * The following functions are not part of the vb2 core API, but are helper
893  * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
894  * and struct vb2_ops. They contain boilerplate code that most if not all
895  * drivers have to do anyway, so it is handy to have them available for use
896  * in drivers.
897  */
898
899
900 static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
901 {
902 return vdev->queue->owner && vdev->queue->owner != file->private_data;
903 }
904
905 /* vb2 ioctl helpers */
906
907 int vb2_ioctl_reqbufs(struct file *file, void *priv,
908 struct v4l2_requestbuffers *p)
909 {
910 struct video_device *vdev = video_devdata(file);
911 int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
912
913 fill_buf_caps(vdev->queue, &p->capabilities);
914 if (res)
915 return res;
916 if (vb2_queue_is_busy(vdev, file))
917 return -EBUSY;
918 res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
919 /* If count == 0, then the owner has released all buffers and is no
920    longer the owner of the queue; otherwise we have a new owner. */
921 if (res == 0)
922 vdev->queue->owner = p->count ? file->private_data : NULL;
923 return res;
924 }
925 EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
926
927 int vb2_ioctl_create_bufs(struct file *file, void *priv,
928 struct v4l2_create_buffers *p)
929 {
930 struct video_device *vdev = video_devdata(file);
931 int res = vb2_verify_memory_type(vdev->queue, p->memory,
932 p->format.type);
933
934 p->index = vdev->queue->num_buffers;
935 fill_buf_caps(vdev->queue, &p->capabilities);
936 /*
937  * If count == 0, then just check if memory and type are valid.
938  * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
939  */
940 if (p->count == 0)
941 return res != -EBUSY ? res : 0;
942 if (res)
943 return res;
944 if (vb2_queue_is_busy(vdev, file))
945 return -EBUSY;
946
947 res = vb2_create_bufs(vdev->queue, p);
948 if (res == 0)
949 vdev->queue->owner = file->private_data;
950 return res;
951 }
952 EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
953
954 int vb2_ioctl_prepare_buf(struct file *file, void *priv,
955 struct v4l2_buffer *p)
956 {
957 struct video_device *vdev = video_devdata(file);
958
959 if (vb2_queue_is_busy(vdev, file))
960 return -EBUSY;
961 return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
962 }
963 EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
964
965 int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
966 {
967 struct video_device *vdev = video_devdata(file);
968
969 /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
970 return vb2_querybuf(vdev->queue, p);
971 }
972 EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
973
974 int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
975 {
976 struct video_device *vdev = video_devdata(file);
977
978 if (vb2_queue_is_busy(vdev, file))
979 return -EBUSY;
980 return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
981 }
982 EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
983
984 int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
985 {
986 struct video_device *vdev = video_devdata(file);
987
988 if (vb2_queue_is_busy(vdev, file))
989 return -EBUSY;
990 return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
991 }
992 EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
993
994 int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
995 {
996 struct video_device *vdev = video_devdata(file);
997
998 if (vb2_queue_is_busy(vdev, file))
999 return -EBUSY;
1000 return vb2_streamon(vdev->queue, i);
1001 }
1002 EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
1003
1004 int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
1005 {
1006 struct video_device *vdev = video_devdata(file);
1007
1008 if (vb2_queue_is_busy(vdev, file))
1009 return -EBUSY;
1010 return vb2_streamoff(vdev->queue, i);
1011 }
1012 EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
1013
1014 int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
1015 {
1016 struct video_device *vdev = video_devdata(file);
1017
1018 if (vb2_queue_is_busy(vdev, file))
1019 return -EBUSY;
1020 return vb2_expbuf(vdev->queue, p);
1021 }
1022 EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
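/*
 * Illustrative sketch (not part of this file): the vb2_ioctl_* helpers above
 * plug directly into a driver's struct v4l2_ioctl_ops. "my_ioctl_ops" is a
 * hypothetical name:
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= vb2_ioctl_reqbufs,
 *		.vidioc_create_bufs	= vb2_ioctl_create_bufs,
 *		.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
 *		.vidioc_querybuf	= vb2_ioctl_querybuf,
 *		.vidioc_qbuf		= vb2_ioctl_qbuf,
 *		.vidioc_dqbuf		= vb2_ioctl_dqbuf,
 *		.vidioc_streamon	= vb2_ioctl_streamon,
 *		.vidioc_streamoff	= vb2_ioctl_streamoff,
 *		.vidioc_expbuf		= vb2_ioctl_expbuf,
 *		...
 *	};
 */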
1023
1024 /* v4l2_file_operations helpers */
1025
1026 int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
1027 {
1028 struct video_device *vdev = video_devdata(file);
1029
1030 return vb2_mmap(vdev->queue, vma);
1031 }
1032 EXPORT_SYMBOL_GPL(vb2_fop_mmap);
1033
1034 int _vb2_fop_release(struct file *file, struct mutex *lock)
1035 {
1036 struct video_device *vdev = video_devdata(file);
1037
1038 if (lock)
1039 mutex_lock(lock);
1040 if (file->private_data == vdev->queue->owner) {
1041 vb2_queue_release(vdev->queue);
1042 vdev->queue->owner = NULL;
1043 }
1044 if (lock)
1045 mutex_unlock(lock);
1046 return v4l2_fh_release(file);
1047 }
1048 EXPORT_SYMBOL_GPL(_vb2_fop_release);
1049
1050 int vb2_fop_release(struct file *file)
1051 {
1052 struct video_device *vdev = video_devdata(file);
1053 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
1054
1055 return _vb2_fop_release(file, lock);
1056 }
1057 EXPORT_SYMBOL_GPL(vb2_fop_release);
1058
1059 ssize_t vb2_fop_write(struct file *file, const char __user *buf,
1060 size_t count, loff_t *ppos)
1061 {
1062 struct video_device *vdev = video_devdata(file);
1063 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
1064 int err = -EBUSY;
1065
1066 if (!(vdev->queue->io_modes & VB2_WRITE))
1067 return -EINVAL;
1068 if (lock && mutex_lock_interruptible(lock))
1069 return -ERESTARTSYS;
1070 if (vb2_queue_is_busy(vdev, file))
1071 goto exit;
1072 err = vb2_write(vdev->queue, buf, count, ppos,
1073 file->f_flags & O_NONBLOCK);
1074 if (vdev->queue->fileio)
1075 vdev->queue->owner = file->private_data;
1076 exit:
1077 if (lock)
1078 mutex_unlock(lock);
1079 return err;
1080 }
1081 EXPORT_SYMBOL_GPL(vb2_fop_write);
1082
1083 ssize_t vb2_fop_read(struct file *file, char __user *buf,
1084 size_t count, loff_t *ppos)
1085 {
1086 struct video_device *vdev = video_devdata(file);
1087 struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
1088 int err = -EBUSY;
1089
1090 if (!(vdev->queue->io_modes & VB2_READ))
1091 return -EINVAL;
1092 if (lock && mutex_lock_interruptible(lock))
1093 return -ERESTARTSYS;
1094 if (vb2_queue_is_busy(vdev, file))
1095 goto exit;
1096 err = vb2_read(vdev->queue, buf, count, ppos,
1097 file->f_flags & O_NONBLOCK);
1098 if (vdev->queue->fileio)
1099 vdev->queue->owner = file->private_data;
1100 exit:
1101 if (lock)
1102 mutex_unlock(lock);
1103 return err;
1104 }
1105 EXPORT_SYMBOL_GPL(vb2_fop_read);
1106
1107 __poll_t vb2_fop_poll(struct file *file, poll_table *wait)
1108 {
1109 struct video_device *vdev = video_devdata(file);
1110 struct vb2_queue *q = vdev->queue;
1111 struct mutex *lock = q->lock ? q->lock : vdev->lock;
1112 __poll_t res;
1113 void *fileio;
1114
1115 /*
1116  * If this helper doesn't know how to lock, then you shouldn't be using
1117  * it but you should write your own.
1118  */
1119 WARN_ON(!lock);
1120
1121 if (lock && mutex_lock_interruptible(lock))
1122 return EPOLLERR;
1123
1124 fileio = q->fileio;
1125
1126 res = vb2_poll(vdev->queue, file, wait);
1127
1128 /* If fileio was started, then we have a new queue owner. */
1129 if (!fileio && q->fileio)
1130 q->owner = file->private_data;
1131 if (lock)
1132 mutex_unlock(lock);
1133 return res;
1134 }
1135 EXPORT_SYMBOL_GPL(vb2_fop_poll);
1136
1137 #ifndef CONFIG_MMU
1138 unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
1139 unsigned long len, unsigned long pgoff, unsigned long flags)
1140 {
1141 struct video_device *vdev = video_devdata(file);
1142
1143 return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
1144 }
1145 EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
1146 #endif
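/*
 * Illustrative sketch (not part of this file): the vb2_fop_* helpers are
 * meant to be used as struct v4l2_file_operations callbacks. "my_fops" is a
 * hypothetical name; v4l2_fh_open and video_ioctl2 are the usual V4L2
 * helpers for the open and ioctl callbacks:
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= v4l2_fh_open,
 *		.release	= vb2_fop_release,
 *		.read		= vb2_fop_read,
 *		.poll		= vb2_fop_poll,
 *		.mmap		= vb2_fop_mmap,
 *		.unlocked_ioctl	= video_ioctl2,
 *	};
 */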
1147
1148 /* vb2_ops helpers. Only use if vq->lock is non-NULL. */
1149
1150 void vb2_ops_wait_prepare(struct vb2_queue *vq)
1151 {
1152 mutex_unlock(vq->lock);
1153 }
1154 EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
1155
1156 void vb2_ops_wait_finish(struct vb2_queue *vq)
1157 {
1158 mutex_lock(vq->lock);
1159 }
1160 EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
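/*
 * Illustrative sketch (not part of this file): drivers that set vq->lock can
 * point their vb2_ops wait callbacks at the helpers above. "my_vb2_ops",
 * "my_queue_setup" and "my_buf_queue" are hypothetical names:
 *
 *	static const struct vb2_ops my_vb2_ops = {
 *		.queue_setup	= my_queue_setup,
 *		.buf_queue	= my_buf_queue,
 *		.wait_prepare	= vb2_ops_wait_prepare,
 *		.wait_finish	= vb2_ops_wait_finish,
 *	};
 */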
1161
1162
1163 /*
1164  * Helpers for struct media_device_ops: vb2_request_validate() prepares
1165  * all objects in a media request (it requires at least one queued
1166  * buffer), and vb2_request_queue() queues them.
1167  */
1168 int vb2_request_validate(struct media_request *req)
1169 {
1170 struct media_request_object *obj;
1171 int ret = 0;
1172
1173 if (!vb2_request_buffer_cnt(req))
1174 return -ENOENT;
1175
1176 list_for_each_entry(obj, &req->objects, list) {
1177 if (!obj->ops->prepare)
1178 continue;
1179
1180 ret = obj->ops->prepare(obj);
1181 if (ret)
1182 break;
1183 }
1184
1185 if (ret) {
1186 list_for_each_entry_continue_reverse(obj, &req->objects, list)
1187 if (obj->ops->unprepare)
1188 obj->ops->unprepare(obj);
1189 return ret;
1190 }
1191 return 0;
1192 }
1193 EXPORT_SYMBOL_GPL(vb2_request_validate);
1194
1195 void vb2_request_queue(struct media_request *req)
1196 {
1197 struct media_request_object *obj, *obj_safe;
1198
1199 /*
1200  * Queue all objects. Note that buffer objects are at the end of the
1201  * objects list, after all other object types. Once buffer objects
1202  * are queued, the driver might delete them immediately (if the driver
1203  * processes the buffer at once), so we have to use
1204  * list_for_each_entry_safe() to handle the case where the object we
1205  * queue is deleted.
1206  */
1207 list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
1208 if (obj->ops->queue)
1209 obj->ops->queue(obj);
1210 }
1211 EXPORT_SYMBOL_GPL(vb2_request_queue);
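/*
 * Illustrative sketch (not part of this file): vb2_request_validate() and
 * vb2_request_queue() are intended to be used as (or called from) the
 * req_validate/req_queue callbacks of struct media_device_ops.
 * "my_media_ops" is a hypothetical name:
 *
 *	static const struct media_device_ops my_media_ops = {
 *		.req_validate	= vb2_request_validate,
 *		.req_queue	= vb2_request_queue,
 *	};
 */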
1212
1213 MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
1214 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
1215 MODULE_LICENSE("GPL");