/*
 * vivid-vid-cap.c - video capture support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>

#include "vivid-core.h"
#include "vivid-vid-common.h"
#include "vivid-kthread-cap.h"
#include "vivid-vid-cap.h"

/* timeperframe: min/max and default */
static const struct v4l2_fract
	tpf_min = {.numerator = 1, .denominator = FPS_MAX},
	tpf_max = {.numerator = FPS_MAX, .denominator = 1},
	tpf_default = {.numerator = 1, .denominator = 30};

static const struct vivid_fmt formats_ovl[] = {
	{
		.name = "RGB565 (LE)",
		.fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
		.vdownsampling = { 1 },
		.bit_depth = { 16 },
		.planes = 1,
		.buffers = 1,
	},
	{
		.name = "XRGB555 (LE)",
		.fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
		.vdownsampling = { 1 },
		.bit_depth = { 16 },
		.planes = 1,
		.buffers = 1,
	},
	{
		.name = "ARGB555 (LE)",
		.fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
		.vdownsampling = { 1 },
		.bit_depth = { 16 },
		.planes = 1,
		.buffers = 1,
	},
};

/* The number of discrete webcam framesizes */
#define VIVID_WEBCAM_SIZES 3
/* The number of discrete webcam frameintervals */
#define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)

/* Sizes must be in increasing order */
static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
	{  320, 180 },
	{  640, 360 },
	{ 1280, 720 },
};

/*
 * Intervals must be in increasing order and there must be twice as many
 * elements in this array as there are in webcam_sizes.
 */
static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
	{  1, 10 },
	{  1, 15 },
	{  1, 25 },
	{  1, 30 },
	{  1, 50 },
	{  1, 60 },
};

static const struct v4l2_discrete_probe webcam_probe = {
	webcam_sizes,
	VIVID_WEBCAM_SIZES
};
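
/*
 * Note (summary of how these tables are used further down in this file):
 * webcam size index i is paired with the first 2 * (VIVID_WEBCAM_SIZES - i)
 * entries of webcam_intervals[], so 320x180 supports all six intervals up
 * to 1/60 s while 1280x720 is limited to 1/10 and 1/15 s.
 */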

static int vid_cap_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
		       unsigned *nbuffers, unsigned *nplanes,
		       unsigned sizes[], void *alloc_ctxs[])
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned h = dev->fmt_cap_rect.height;
	unsigned p;

	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
		/*
		 * You cannot use read() with FIELD_ALTERNATE since the field
		 * information (TOP/BOTTOM) cannot be passed back to the user.
		 */
		if (vb2_fileio_is_active(vq))
			return -EINVAL;
	}

	if (dev->queue_setup_error) {
		/*
		 * Error injection: test what happens if queue_setup() returns
		 * an error.
		 */
		dev->queue_setup_error = false;
		return -EINVAL;
	}
	if (fmt) {
		const struct v4l2_pix_format_mplane *mp;
		struct v4l2_format mp_fmt;
		const struct vivid_fmt *vfmt;

		if (!V4L2_TYPE_IS_MULTIPLANAR(fmt->type)) {
			fmt_sp2mp(fmt, &mp_fmt);
			fmt = &mp_fmt;
		}
		mp = &fmt->fmt.pix_mp;
		/*
		 * Check if the number of planes in the specified format matches
		 * the number of buffers in the current format. You can't mix that.
		 */
		if (mp->num_planes != buffers)
			return -EINVAL;
		vfmt = vivid_get_format(dev, mp->pixelformat);
		for (p = 0; p < buffers; p++) {
			sizes[p] = mp->plane_fmt[p].sizeimage;
			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
					vfmt->data_offset[p])
				return -EINVAL;
		}
	} else {
		for (p = 0; p < buffers; p++)
			sizes[p] = tpg_g_line_width(&dev->tpg, p) * h +
				dev->fmt_cap->data_offset[p];
	}

	if (vq->num_buffers + *nbuffers < 2)
		*nbuffers = 2 - vq->num_buffers;

	*nplanes = buffers;

	/*
	 * videobuf2-vmalloc allocator is context-less so no need to set
	 * alloc_ctxs array.
	 */

	dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
	for (p = 0; p < buffers; p++)
		dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);

	return 0;
}

static int vid_cap_buf_prepare(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size;
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned p;

	dprintk(dev, 1, "%s\n", __func__);

	if (WARN_ON(NULL == dev->fmt_cap))
		return -EINVAL;

	if (dev->buf_prepare_error) {
		/*
		 * Error injection: test what happens if buf_prepare() returns
		 * an error.
		 */
		dev->buf_prepare_error = false;
		return -EINVAL;
	}
	for (p = 0; p < buffers; p++) {
		size = tpg_g_line_width(&dev->tpg, p) * dev->fmt_cap_rect.height +
			dev->fmt_cap->data_offset[p];

		if (vb2_plane_size(vb, p) < size) {
			dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
					__func__, p, vb2_plane_size(vb, p), size);
			return -EINVAL;
		}

		vb2_set_plane_payload(vb, p, size);
		vb->v4l2_planes[p].data_offset = dev->fmt_cap->data_offset[p];
	}

	return 0;
}

static void vid_cap_buf_finish(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_timecode *tc = &vb->v4l2_buf.timecode;
	unsigned fps = 25;
	unsigned seq = vb->v4l2_buf.sequence;

	if (!vivid_is_sdtv_cap(dev))
		return;

	/*
	 * Set the timecode. Rarely used, so it is interesting to
	 * test this.
	 */
	vb->v4l2_buf.flags |= V4L2_BUF_FLAG_TIMECODE;
	if (dev->std_cap & V4L2_STD_525_60)
		fps = 30;
	tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
	tc->flags = 0;
	tc->frames = seq % fps;
	tc->seconds = (seq / fps) % 60;
	tc->minutes = (seq / (60 * fps)) % 60;
	tc->hours = (seq / (60 * 60 * fps)) % 24;
}
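
/*
 * Worked example of the timecode math above, assuming a 625-line standard
 * (fps = 25) and sequence number 1000: frames = 1000 % 25 = 0,
 * seconds = (1000 / 25) % 60 = 40, minutes = hours = 0, i.e. 00:00:40:00.
 */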

static void vid_cap_buf_queue(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct vivid_buffer *buf = container_of(vb, struct vivid_buffer, vb);

	dprintk(dev, 1, "%s\n", __func__);

	spin_lock(&dev->slock);
	list_add_tail(&buf->list, &dev->vid_cap_active);
	spin_unlock(&dev->slock);
}

static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned i;
	int err;

	if (vb2_is_streaming(&dev->vb_vid_out_q))
		dev->can_loop_video = vivid_vid_can_loop(dev);

	if (dev->kthread_vid_cap)
		return 0;

	dev->vid_cap_seq_count = 0;
	dprintk(dev, 1, "%s\n", __func__);
	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
	if (dev->start_streaming_error) {
		dev->start_streaming_error = false;
		err = -EINVAL;
	} else {
		err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
	}
	if (err) {
		struct vivid_buffer *buf, *tmp;

		list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
		}
	}
	return err;
}

/* abort streaming and wait for last buffer */
static void vid_cap_stop_streaming(struct vb2_queue *vq)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);

	dprintk(dev, 1, "%s\n", __func__);
	vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
	dev->can_loop_video = false;
}

const struct vb2_ops vivid_vid_cap_qops = {
	.queue_setup		= vid_cap_queue_setup,
	.buf_prepare		= vid_cap_buf_prepare,
	.buf_finish		= vid_cap_buf_finish,
	.buf_queue		= vid_cap_buf_queue,
	.start_streaming	= vid_cap_start_streaming,
	.stop_streaming		= vid_cap_stop_streaming,
	.wait_prepare		= vb2_ops_wait_prepare,
	.wait_finish		= vb2_ops_wait_finish,
};

/*
 * Determine the 'picture' quality based on the current TV frequency: either
 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
 * signal or NOISE for no signal.
 */
void vivid_update_quality(struct vivid_dev *dev)
{
	unsigned freq_modulus;

	if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) {
		/*
		 * The 'noise' will only be replaced by the actual video
		 * if the output video matches the input video settings.
		 */
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_hdmi_cap(dev) && VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode)) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_sdtv_cap(dev) && VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (!vivid_is_tv_cap(dev)) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
		return;
	}

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just noise.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (freq_modulus > 2 * 16) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
			next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
		return;
	}
	if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
		tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
	else
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
}
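
/*
 * For reference: these tuner frequencies are in the usual V4L2 units of
 * 62.5 kHz (1/16 MHz), so 676 corresponds to 42.25 MHz and 6 * 16 to the
 * 6 MHz channel spacing. E.g. tv_freq == 788 (49.25 MHz) gives
 * freq_modulus = (788 - 676) % 96 = 16, which lands in the COLOR band.
 */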

/*
 * Get the current picture quality and the associated afc value.
 */
static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
{
	unsigned freq_modulus;

	if (afc)
		*afc = 0;
	if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
	    tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
		return tpg_g_quality(&dev->tpg);

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just gray.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (afc)
		*afc = freq_modulus - 1 * 16;
	return TPG_QUAL_GRAY;
}

enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return dev->std_aspect_ratio;

	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_aspect_ratio;

	return TPG_VIDEO_ASPECT_IMAGE;
}

static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return (dev->std_cap & V4L2_STD_525_60) ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	if (vivid_is_hdmi_cap(dev) &&
	    dev->src_rect.width == 720 && dev->src_rect.height <= 576)
		return dev->src_rect.height == 480 ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	return TPG_PIXEL_ASPECT_SQUARE;
}

/*
 * Called whenever the format has to be reset which can occur when
 * changing inputs, standard, timings, etc.
 */
void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
{
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
	unsigned size;

	switch (dev->input_type[dev->input]) {
	case WEBCAM:
	default:
		dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
		dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
		dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
		dev->field_cap = V4L2_FIELD_NONE;
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case TV:
	case SVID:
		dev->field_cap = dev->tv_field_cap;
		dev->src_rect.width = 720;
		if (dev->std_cap & V4L2_STD_525_60) {
			dev->src_rect.height = 480;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
			dev->service_set_cap = V4L2_SLICED_CAPTION_525;
		} else {
			dev->src_rect.height = 576;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
			dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
		}
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case HDMI:
		dev->src_rect.width = bt->width;
		dev->src_rect.height = bt->height;
		size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
		dev->timeperframe_vid_cap = (struct v4l2_fract) {
			size / 100, (u32)bt->pixelclock / 100
		};
		if (bt->interlaced)
			dev->field_cap = V4L2_FIELD_ALTERNATE;
		else
			dev->field_cap = V4L2_FIELD_NONE;

		/*
		 * We can be called from within s_ctrl, in that case we can't
		 * set/get controls. Luckily we don't need to in that case.
		 */
		if (keep_controls || !dev->colorspace)
			break;
		if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
			if (bt->width == 720 && bt->height <= 576)
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			else
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
		} else {
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
		}
		tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
		break;
	}
	vivid_update_quality(dev);
	tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
	dev->crop_cap = dev->src_rect;
	dev->crop_bounds_cap = dev->src_rect;
	dev->compose_cap = dev->crop_cap;
	if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
		dev->compose_cap.height /= 2;
	dev->fmt_cap_rect = dev->compose_cap;
	tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
	tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
	tpg_update_mv_step(&dev->tpg);
}

/* Map the field to something that is valid for the current input */
static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
{
	if (vivid_is_sdtv_cap(dev)) {
		switch (field) {
		case V4L2_FIELD_INTERLACED_TB:
		case V4L2_FIELD_INTERLACED_BT:
		case V4L2_FIELD_SEQ_TB:
		case V4L2_FIELD_SEQ_BT:
		case V4L2_FIELD_TOP:
		case V4L2_FIELD_BOTTOM:
		case V4L2_FIELD_ALTERNATE:
			return field;
		case V4L2_FIELD_INTERLACED:
		default:
			return V4L2_FIELD_INTERLACED;
		}
	}
	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_cap.bt.interlaced ? V4L2_FIELD_ALTERNATE :
			V4L2_FIELD_NONE;
	return V4L2_FIELD_NONE;
}

static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_colorspace(&dev->tpg);
	return dev->colorspace_out;
}

static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_ycbcr_enc(&dev->tpg);
	return dev->ycbcr_enc_out;
}

static unsigned vivid_quantization_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_quantization(&dev->tpg);
	return dev->quantization_out;
}

int vivid_g_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	unsigned p;

	mp->width = dev->fmt_cap_rect.width;
	mp->height = dev->fmt_cap_rect.height;
	mp->field = dev->field_cap;
	mp->pixelformat = dev->fmt_cap->fourcc;
	mp->colorspace = vivid_colorspace_cap(dev);
	mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	mp->quantization = vivid_quantization_cap(dev);
	mp->num_planes = dev->fmt_cap->buffers;
	for (p = 0; p < mp->num_planes; p++) {
		mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
		mp->plane_fmt[p].sizeimage =
			tpg_g_line_width(&dev->tpg, p) * mp->height +
			dev->fmt_cap->data_offset[p];
	}
	return 0;
}

int vivid_try_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	unsigned bytesperline, max_bpl;
	unsigned factor = 1;
	unsigned w, h;
	unsigned p;

	fmt = vivid_get_format(dev, mp->pixelformat);
	if (!fmt) {
		dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
			mp->pixelformat);
		mp->pixelformat = V4L2_PIX_FMT_YUYV;
		fmt = vivid_get_format(dev, mp->pixelformat);
	}

	mp->field = vivid_field_cap(dev, mp->field);
	if (vivid_is_webcam(dev)) {
		const struct v4l2_frmsize_discrete *sz =
			v4l2_find_nearest_format(&webcam_probe, mp->width, mp->height);

		w = sz->width;
		h = sz->height;
	} else if (vivid_is_sdtv_cap(dev)) {
		w = 720;
		h = (dev->std_cap & V4L2_STD_525_60) ? 480 : 576;
	} else {
		w = dev->src_rect.width;
		h = dev->src_rect.height;
	}
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;
	if (vivid_is_webcam(dev) ||
	    (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
		mp->width = w;
		mp->height = h / factor;
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };

		rect_set_min_size(&r, &vivid_min_rect);
		rect_set_max_size(&r, &vivid_max_rect);
		if (dev->has_scaler_cap && !dev->has_compose_cap) {
			struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };

			rect_set_max_size(&r, &max_r);
		} else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
			rect_set_max_size(&r, &dev->src_rect);
		} else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
			rect_set_min_size(&r, &dev->src_rect);
		}
		mp->width = r.width;
		mp->height = r.height / factor;
	}

	/* This driver supports custom bytesperline values */

	mp->num_planes = fmt->buffers;
	for (p = 0; p < mp->num_planes; p++) {
		/* Calculate the minimum supported bytesperline value */
		bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
		/* Calculate the maximum supported bytesperline value */
		max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;

		if (pfmt[p].bytesperline > max_bpl)
			pfmt[p].bytesperline = max_bpl;
		if (pfmt[p].bytesperline < bytesperline)
			pfmt[p].bytesperline = bytesperline;
		pfmt[p].sizeimage = tpg_calc_line_width(&dev->tpg, p, pfmt[p].bytesperline) *
			mp->height + fmt->data_offset[p];
		memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
	}
	mp->colorspace = vivid_colorspace_cap(dev);
	mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	mp->quantization = vivid_quantization_cap(dev);
	memset(mp->reserved, 0, sizeof(mp->reserved));
	return 0;
}
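
/*
 * Example of the bytesperline clamping above: for a 640-wide YUYV request
 * (16 bits per pixel) the minimum bytesperline is 640 * 16 / 8 = 1280;
 * a larger value such as 2048 is kept as line padding, anything smaller
 * is bumped back up to 1280, and sizeimage is recomputed from the result.
 */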

int vivid_s_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	struct vb2_queue *q = &dev->vb_vid_cap_q;
	int ret = vivid_try_fmt_vid_cap(file, priv, f);
	unsigned factor = 1;
	unsigned p;
	unsigned i;

	if (ret < 0)
		return ret;

	if (vb2_is_busy(q)) {
		dprintk(dev, 1, "%s device busy\n", __func__);
		return -EBUSY;
	}

	if (dev->overlay_cap_owner && dev->fb_cap.fmt.pixelformat != mp->pixelformat) {
		dprintk(dev, 1, "overlay is active, can't change pixelformat\n");
		return -EBUSY;
	}

	dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;

	/* Note: the webcam input doesn't support scaling, cropping or composing */

	if (!vivid_is_webcam(dev) &&
	    (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		if (dev->has_scaler_cap) {
			if (dev->has_compose_cap)
				rect_map_inside(compose, &r);
			else
				*compose = r;
			if (dev->has_crop_cap && !dev->has_compose_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					r.width / MAX_ZOOM,
					factor * r.height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					r.width * MAX_ZOOM,
					factor * r.height * MAX_ZOOM
				};

				rect_set_min_size(crop, &min_r);
				rect_set_max_size(crop, &max_r);
				rect_map_inside(crop, &dev->crop_bounds_cap);
			} else if (dev->has_crop_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					compose->width / MAX_ZOOM,
					factor * compose->height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					compose->width * MAX_ZOOM,
					factor * compose->height * MAX_ZOOM
				};

				rect_set_min_size(crop, &min_r);
				rect_set_max_size(crop, &max_r);
				rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap && !dev->has_compose_cap) {
			r.height *= factor;
			rect_set_size_to(crop, &r);
			rect_map_inside(crop, &dev->crop_bounds_cap);
			r = *crop;
			r.height /= factor;
			rect_set_size_to(compose, &r);
		} else if (!dev->has_crop_cap) {
			rect_map_inside(compose, &r);
		} else {
			r.height *= factor;
			rect_set_max_size(crop, &r);
			rect_map_inside(crop, &dev->crop_bounds_cap);
			compose->top *= factor;
			compose->height *= factor;
			rect_set_size_to(compose, crop);
			rect_map_inside(compose, &r);
			compose->top /= factor;
			compose->height /= factor;
		}
	} else if (vivid_is_webcam(dev)) {
		/* Guaranteed to be a match */
		for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
			if (webcam_sizes[i].width == mp->width &&
			    webcam_sizes[i].height == mp->height)
				break;
		dev->webcam_size_idx = i;
		if (dev->webcam_ival_idx >= 2 * (3 - i))
			dev->webcam_ival_idx = 2 * (3 - i) - 1;
		vivid_update_format_cap(dev, false);
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		rect_set_size_to(compose, &r);
		r.height *= factor;
		rect_set_size_to(crop, &r);
	}

	dev->fmt_cap_rect.width = mp->width;
	dev->fmt_cap_rect.height = mp->height;
	tpg_s_buf_height(&dev->tpg, mp->height);
	tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
	for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
		tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
	dev->field_cap = mp->field;
	if (dev->field_cap == V4L2_FIELD_ALTERNATE)
		tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
	else
		tpg_s_field(&dev->tpg, dev->field_cap, false);
	tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
	if (vivid_is_sdtv_cap(dev))
		dev->tv_field_cap = mp->field;
	tpg_update_mv_step(&dev->tpg);
	return 0;
}

int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_g_fmt_vid_cap(file, priv, f);
}

int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_try_fmt_vid_cap(file, priv, f);
}

int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->multiplanar)
		return -ENOTTY;
	return vivid_s_fmt_vid_cap(file, priv, f);
}

int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
}

int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
}

int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;
	return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
}

int vivid_vid_cap_g_selection(struct file *file, void *priv,
			      struct v4l2_selection *sel)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -EINVAL;

	sel->r.left = sel->r.top = 0;
	switch (sel->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->crop_cap;
		break;
	case V4L2_SEL_TGT_CROP_DEFAULT:
	case V4L2_SEL_TGT_CROP_BOUNDS:
		if (!dev->has_crop_cap)
			return -EINVAL;
		sel->r = dev->src_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = vivid_max_rect;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->compose_cap;
		break;
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
		if (!dev->has_compose_cap)
			return -EINVAL;
		sel->r = dev->fmt_cap_rect;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
	int ret;

	if (!dev->has_crop_cap && !dev->has_compose_cap)
		return -ENOTTY;
	if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;
	if (vivid_is_webcam(dev))
		return -EINVAL;

	switch (s->target) {
	case V4L2_SEL_TGT_CROP:
		if (!dev->has_crop_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		rect_set_min_size(&s->r, &vivid_min_rect);
		rect_set_max_size(&s->r, &dev->src_rect);
		rect_map_inside(&s->r, &dev->crop_bounds_cap);
		s->r.top /= factor;
		s->r.height /= factor;
		if (dev->has_scaler_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;
			struct v4l2_rect max_rect = {
				0, 0,
				s->r.width * MAX_ZOOM,
				s->r.height * MAX_ZOOM
			};
			struct v4l2_rect min_rect = {
				0, 0,
				s->r.width / MAX_ZOOM,
				s->r.height / MAX_ZOOM
			};

			rect_set_min_size(&fmt, &min_rect);
			if (!dev->has_compose_cap)
				rect_set_max_size(&fmt, &max_rect);
			if (!rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			if (dev->has_compose_cap) {
				rect_set_min_size(compose, &min_rect);
				rect_set_max_size(compose, &max_rect);
			}
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
		} else if (dev->has_compose_cap) {
			struct v4l2_rect fmt = dev->fmt_cap_rect;

			rect_set_min_size(&fmt, &s->r);
			if (!rect_same_size(&dev->fmt_cap_rect, &fmt) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			dev->fmt_cap_rect = fmt;
			tpg_s_buf_height(&dev->tpg, fmt.height);
			rect_set_size_to(compose, &s->r);
			rect_map_inside(compose, &dev->fmt_cap_rect);
		} else {
			if (!rect_same_size(&s->r, &dev->fmt_cap_rect) &&
			    vb2_is_busy(&dev->vb_vid_cap_q))
				return -EBUSY;
			rect_set_size_to(&dev->fmt_cap_rect, &s->r);
			rect_set_size_to(compose, &s->r);
			rect_map_inside(compose, &dev->fmt_cap_rect);
			tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
		}
		s->r.top *= factor;
		s->r.height *= factor;
		*crop = s->r;
		break;
	case V4L2_SEL_TGT_COMPOSE:
		if (!dev->has_compose_cap)
			return -EINVAL;
		ret = vivid_vid_adjust_sel(s->flags, &s->r);
		if (ret)
			return ret;
		rect_set_min_size(&s->r, &vivid_min_rect);
		rect_set_max_size(&s->r, &dev->fmt_cap_rect);
		if (dev->has_scaler_cap) {
			struct v4l2_rect max_rect = {
				0, 0,
				dev->src_rect.width * MAX_ZOOM,
				(dev->src_rect.height / factor) * MAX_ZOOM
			};

			rect_set_max_size(&s->r, &max_rect);
			if (dev->has_crop_cap) {
				struct v4l2_rect min_rect = {
					0, 0,
					s->r.width / MAX_ZOOM,
					(s->r.height * factor) / MAX_ZOOM
				};
				struct v4l2_rect max_rect = {
					0, 0,
					s->r.width * MAX_ZOOM,
					(s->r.height * factor) * MAX_ZOOM
				};

				rect_set_min_size(crop, &min_rect);
				rect_set_max_size(crop, &max_rect);
				rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap) {
			s->r.top *= factor;
			s->r.height *= factor;
			rect_set_max_size(&s->r, &dev->src_rect);
			rect_set_size_to(crop, &s->r);
			rect_map_inside(crop, &dev->crop_bounds_cap);
			s->r.top /= factor;
			s->r.height /= factor;
		} else {
			rect_set_size_to(&s->r, &dev->src_rect);
			s->r.height /= factor;
		}
		rect_map_inside(&s->r, &dev->fmt_cap_rect);
		if (dev->bitmap_cap && (compose->width != s->r.width ||
					compose->height != s->r.height)) {
			kfree(dev->bitmap_cap);
			dev->bitmap_cap = NULL;
		}
		*compose = s->r;
		break;
	default:
		return -EINVAL;
	}

	tpg_s_crop_compose(&dev->tpg, crop, compose);
	return 0;
}

int vivid_vid_cap_cropcap(struct file *file, void *priv,
			  struct v4l2_cropcap *cap)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return -EINVAL;

	switch (vivid_get_pixel_aspect(dev)) {
	case TPG_PIXEL_ASPECT_NTSC:
		cap->pixelaspect.numerator = 11;
		cap->pixelaspect.denominator = 10;
		break;
	case TPG_PIXEL_ASPECT_PAL:
		cap->pixelaspect.numerator = 54;
		cap->pixelaspect.denominator = 59;
		break;
	case TPG_PIXEL_ASPECT_SQUARE:
		cap->pixelaspect.numerator = 1;
		cap->pixelaspect.denominator = 1;
		break;
	}
	return 0;
}
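
/*
 * The 11/10 and 54/59 fractions above are the conventional y/x pixel aspect
 * ratios for 720-pixel ITU-R BT.601 sampling of 525-line and 625-line video;
 * VIDIOC_CROPCAP reports the pixel aspect as vertical/horizontal.
 */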

int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
			struct v4l2_fmtdesc *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;

	if (dev->multiplanar)
		return -ENOTTY;

	if (f->index >= ARRAY_SIZE(formats_ovl))
		return -EINVAL;

	fmt = &formats_ovl[f->index];

	strlcpy(f->description, fmt->name, sizeof(f->description));
	f->pixelformat = fmt->fourcc;
	return 0;
}

int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct v4l2_rect *compose = &dev->compose_cap;
	struct v4l2_window *win = &f->fmt.win;
	unsigned clipcount = win->clipcount;

	if (dev->multiplanar)
		return -ENOTTY;

	win->w.top = dev->overlay_cap_top;
	win->w.left = dev->overlay_cap_left;
	win->w.width = compose->width;
	win->w.height = compose->height;
	win->field = dev->overlay_cap_field;
	win->clipcount = dev->clipcount_cap;
	if (clipcount > dev->clipcount_cap)
		clipcount = dev->clipcount_cap;
	if (dev->bitmap_cap == NULL)
		win->bitmap = NULL;
	else if (win->bitmap) {
		if (copy_to_user(win->bitmap, dev->bitmap_cap,
				 ((compose->width + 7) / 8) * compose->height))
			return -EFAULT;
	}
	if (clipcount && win->clips) {
		if (copy_to_user(win->clips, dev->clips_cap,
				 clipcount * sizeof(dev->clips_cap[0])))
			return -EFAULT;
	}
	return 0;
}

int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct v4l2_rect *compose = &dev->compose_cap;
	struct v4l2_window *win = &f->fmt.win;
	int i, j;

	if (dev->multiplanar)
		return -ENOTTY;

	win->w.left = clamp_t(int, win->w.left,
			      -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
	win->w.top = clamp_t(int, win->w.top,
			     -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
	win->w.width = compose->width;
	win->w.height = compose->height;
	if (win->field != V4L2_FIELD_BOTTOM && win->field != V4L2_FIELD_TOP)
		win->field = V4L2_FIELD_ANY;
	win->chromakey = 0;
	win->global_alpha = 0;
	if (win->clipcount && !win->clips)
		win->clipcount = 0;
	if (win->clipcount > MAX_CLIPS)
		win->clipcount = MAX_CLIPS;
	if (win->clipcount) {
		if (copy_from_user(dev->try_clips_cap, win->clips,
				   win->clipcount * sizeof(dev->clips_cap[0])))
			return -EFAULT;
		for (i = 0; i < win->clipcount; i++) {
			struct v4l2_rect *r = &dev->try_clips_cap[i].c;

			r->top = clamp_t(s32, r->top, 0, dev->fb_cap.fmt.height - 1);
			r->height = clamp_t(s32, r->height, 1, dev->fb_cap.fmt.height - r->top);
			r->left = clamp_t(u32, r->left, 0, dev->fb_cap.fmt.width - 1);
			r->width = clamp_t(u32, r->width, 1, dev->fb_cap.fmt.width - r->left);
		}
		/*
		 * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small
		 * number and it's typically a one-time deal.
		 */
		for (i = 0; i < win->clipcount - 1; i++) {
			struct v4l2_rect *r1 = &dev->try_clips_cap[i].c;

			for (j = i + 1; j < win->clipcount; j++) {
				struct v4l2_rect *r2 = &dev->try_clips_cap[j].c;

				if (rect_overlap(r1, r2))
					return -EINVAL;
			}
		}
		if (copy_to_user(win->clips, dev->try_clips_cap,
				 win->clipcount * sizeof(dev->clips_cap[0])))
			return -EFAULT;
	}
	return 0;
}

int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct v4l2_rect *compose = &dev->compose_cap;
	struct v4l2_window *win = &f->fmt.win;
	int ret = vidioc_try_fmt_vid_overlay(file, priv, f);
	unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height;
	unsigned clips_size = win->clipcount * sizeof(dev->clips_cap[0]);
	void *new_bitmap = NULL;

	if (ret)
		return ret;

	if (win->bitmap) {
		new_bitmap = vzalloc(bitmap_size);

		if (new_bitmap == NULL)
			return -ENOMEM;
		if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) {
			vfree(new_bitmap);
			return -EFAULT;
		}
	}

	dev->overlay_cap_top = win->w.top;
	dev->overlay_cap_left = win->w.left;
	dev->overlay_cap_field = win->field;
	vfree(dev->bitmap_cap);
	dev->bitmap_cap = new_bitmap;
	dev->clipcount_cap = win->clipcount;
	if (dev->clipcount_cap)
		memcpy(dev->clips_cap, dev->try_clips_cap, clips_size);
	return 0;
}

int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;

	if (i && dev->fb_vbase_cap == NULL)
		return -EINVAL;

	if (i && dev->fb_cap.fmt.pixelformat != dev->fmt_cap->fourcc) {
		dprintk(dev, 1, "mismatch between overlay and video capture pixelformats\n");
		return -EINVAL;
	}

	if (dev->overlay_cap_owner && dev->overlay_cap_owner != fh)
		return -EBUSY;
	dev->overlay_cap_owner = i ? fh : NULL;
	return 0;
}

int vivid_vid_cap_g_fbuf(struct file *file, void *fh,
			 struct v4l2_framebuffer *a)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (dev->multiplanar)
		return -ENOTTY;

	*a = dev->fb_cap;
	a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING |
			V4L2_FBUF_CAP_LIST_CLIPPING;
	a->flags = V4L2_FBUF_FLAG_PRIMARY;
	a->fmt.field = V4L2_FIELD_NONE;
	a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
	a->fmt.priv = 0;
	return 0;
}

int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
			 const struct v4l2_framebuffer *a)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;

	if (dev->multiplanar)
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (dev->overlay_cap_owner)
		return -EBUSY;

	if (a->base == NULL) {
		dev->fb_cap.base = NULL;
		dev->fb_vbase_cap = NULL;
		return 0;
	}

	if (a->fmt.width < 48 || a->fmt.height < 32)
		return -EINVAL;
	fmt = vivid_get_format(dev, a->fmt.pixelformat);
	if (!fmt || !fmt->can_do_overlay)
		return -EINVAL;
	if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
		return -EINVAL;
	if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
		return -EINVAL;

	dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
	dev->fb_cap = *a;
	dev->overlay_cap_left = clamp_t(int, dev->overlay_cap_left,
					-dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
	dev->overlay_cap_top = clamp_t(int, dev->overlay_cap_top,
				       -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
	return 0;
}

static const struct v4l2_audio vivid_audio_inputs[] = {
	{ 0, "TV", V4L2_AUDCAP_STEREO },
	{ 1, "Line-In", V4L2_AUDCAP_STEREO },
};

int vidioc_enum_input(struct file *file, void *priv,
		      struct v4l2_input *inp)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (inp->index >= dev->num_inputs)
		return -EINVAL;

	inp->type = V4L2_INPUT_TYPE_CAMERA;
	switch (dev->input_type[inp->index]) {
	case WEBCAM:
		snprintf(inp->name, sizeof(inp->name), "Webcam %u",
			 dev->input_name_counter[inp->index]);
		inp->capabilities = 0;
		break;
	case TV:
		snprintf(inp->name, sizeof(inp->name), "TV %u",
			 dev->input_name_counter[inp->index]);
		inp->type = V4L2_INPUT_TYPE_TUNER;
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case SVID:
		snprintf(inp->name, sizeof(inp->name), "S-Video %u",
			 dev->input_name_counter[inp->index]);
		inp->std = V4L2_STD_ALL;
		if (dev->has_audio_inputs)
			inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
		inp->capabilities = V4L2_IN_CAP_STD;
		break;
	case HDMI:
		snprintf(inp->name, sizeof(inp->name), "HDMI %u",
			 dev->input_name_counter[inp->index]);
		inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
		if (dev->edid_blocks == 0 ||
		    dev->dv_timings_signal_mode == NO_SIGNAL)
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		else if (dev->dv_timings_signal_mode == NO_LOCK ||
			 dev->dv_timings_signal_mode == OUT_OF_RANGE)
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		break;
	}
	if (dev->sensor_hflip)
		inp->status |= V4L2_IN_ST_HFLIP;
	if (dev->sensor_vflip)
		inp->status |= V4L2_IN_ST_VFLIP;
	if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
		if (dev->std_signal_mode == NO_SIGNAL) {
			inp->status |= V4L2_IN_ST_NO_SIGNAL;
		} else if (dev->std_signal_mode == NO_LOCK) {
			inp->status |= V4L2_IN_ST_NO_H_LOCK;
		} else if (vivid_is_tv_cap(dev)) {
			switch (tpg_g_quality(&dev->tpg)) {
			case TPG_QUAL_GRAY:
				inp->status |= V4L2_IN_ST_COLOR_KILL;
				break;
			case TPG_QUAL_NOISE:
				inp->status |= V4L2_IN_ST_NO_H_LOCK;
				break;
			default:
				break;
			}
		}
	}
	return 0;
}

int vidioc_g_input(struct file *file, void *priv, unsigned *i)
{
	struct vivid_dev *dev = video_drvdata(file);

	*i = dev->input;
	return 0;
}

int vidioc_s_input(struct file *file, void *priv, unsigned i)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
	unsigned brightness;

	if (i >= dev->num_inputs)
		return -EINVAL;

	if (i == dev->input)
		return 0;

	if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
		return -EBUSY;

	dev->input = i;
	dev->vid_cap_dev.tvnorms = 0;
	if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
		dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
		dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
	}
	dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
	vivid_update_format_cap(dev, false);

	if (dev->colorspace) {
		switch (dev->input_type[i]) {
		case WEBCAM:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			break;
		case TV:
		case SVID:
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			break;
		case HDMI:
			if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
				if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
				else
					v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			} else {
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			}
			break;
		}
	}

	/*
	 * Modify the brightness range depending on the input.
	 * This makes it easy to use vivid to test if applications can
	 * handle control range modifications and is also how this is
	 * typically used in practice as different inputs may be hooked
	 * up to different receivers with different control ranges.
	 */
	brightness = 128 * i + dev->input_brightness[i];
	v4l2_ctrl_modify_range(dev->brightness,
			128 * i, 255 + 128 * i, 1, 128 + 128 * i);
	v4l2_ctrl_s_ctrl(dev->brightness, brightness);
	return 0;
}
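
/*
 * Example of the range shift above: selecting input 1 moves the brightness
 * control to the range 128..383 with step 1 and default 256, and the current
 * value becomes 128 + input_brightness[1].
 */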

int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	*vin = vivid_audio_inputs[vin->index];
	return 0;
}

int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	*vin = vivid_audio_inputs[dev->tv_audio_input];
	return 0;
}

int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -EINVAL;
	if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
		return -EINVAL;
	dev->tv_audio_input = vin->index;
	return 0;
}

int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	vf->frequency = dev->tv_freq;
	return 0;
}

int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vf->tuner != 0)
		return -EINVAL;
	dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
	if (vivid_is_tv_cap(dev))
		vivid_update_quality(dev);
	return 0;
}

int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (vt->index != 0)
		return -EINVAL;
	if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
		return -EINVAL;
	dev->tv_audmode = vt->audmode;
	return 0;
}

int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
	struct vivid_dev *dev = video_drvdata(file);
	enum tpg_quality qual;

	if (vt->index != 0)
		return -EINVAL;

	vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
			 V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
	vt->audmode = dev->tv_audmode;
	vt->rangelow = MIN_TV_FREQ;
	vt->rangehigh = MAX_TV_FREQ;
	qual = vivid_get_quality(dev, &vt->afc);
	if (qual == TPG_QUAL_COLOR)
		vt->signal = 0xffff;
	else if (qual == TPG_QUAL_GRAY)
		vt->signal = 0x8000;
	else
		vt->signal = 0;
	if (qual == TPG_QUAL_NOISE) {
		vt->rxsubchans = 0;
	} else if (qual == TPG_QUAL_GRAY) {
		vt->rxsubchans = V4L2_TUNER_SUB_MONO;
	} else {
		unsigned channel_nr = dev->tv_freq / (6 * 16);
		unsigned options = (dev->std_cap & V4L2_STD_NTSC_M) ? 4 : 3;

		switch (channel_nr % options) {
		case 0:
			vt->rxsubchans = V4L2_TUNER_SUB_MONO;
			break;
		case 1:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
			break;
		case 2:
			if (dev->std_cap & V4L2_STD_NTSC_M)
				vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
			else
				vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
			break;
		case 3:
			vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
			break;
		}
	}
	strlcpy(vt->name, "TV Tuner", sizeof(vt->name));
	return 0;
}

/* Must remain in sync with the vivid_ctrl_standard_strings array */
const v4l2_std_id vivid_standard[] = {
	V4L2_STD_NTSC_M,
	V4L2_STD_NTSC_M_JP,
	V4L2_STD_NTSC_M_KR,
	V4L2_STD_NTSC_443,
	V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
	V4L2_STD_PAL_I,
	V4L2_STD_PAL_DK,
	V4L2_STD_PAL_M,
	V4L2_STD_PAL_N,
	V4L2_STD_PAL_Nc,
	V4L2_STD_PAL_60,
	V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
	V4L2_STD_SECAM_DK,
	V4L2_STD_SECAM_L,
	V4L2_STD_SECAM_LC,
	V4L2_STD_UNKNOWN
};

/* Must remain in sync with the vivid_standard array */
const char * const vivid_ctrl_standard_strings[] = {
	"NTSC-M",
	"NTSC-M-JP",
	"NTSC-M-KR",
	"NTSC-443",
	"PAL-BGH",
	"PAL-I",
	"PAL-DK",
	"PAL-M",
	"PAL-N",
	"PAL-Nc",
	"PAL-60",
	"SECAM-BGH",
	"SECAM-DK",
	"SECAM-L",
	"SECAM-Lc",
	NULL,
};

int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_signal_mode == NO_SIGNAL ||
	    dev->std_signal_mode == NO_LOCK) {
		*id = V4L2_STD_UNKNOWN;
		return 0;
	}
	if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
		*id = V4L2_STD_UNKNOWN;
	} else if (dev->std_signal_mode == CURRENT_STD) {
		*id = dev->std_cap;
	} else if (dev->std_signal_mode == SELECTED_STD) {
		*id = dev->query_std;
	} else {
		*id = vivid_standard[dev->query_std_last];
		dev->query_std_last = (dev->query_std_last + 1) % ARRAY_SIZE(vivid_standard);
	}

	return 0;
}

int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_cap == id)
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
		return -EBUSY;
	dev->std_cap = id;
	vivid_update_format_cap(dev, false);
	return 0;
}

static void find_aspect_ratio(u32 width, u32 height,
			      u32 *num, u32 *denom)
{
	if (!(height % 3) && ((height * 4 / 3) == width)) {
		*num = 4;
		*denom = 3;
	} else if (!(height % 9) && ((height * 16 / 9) == width)) {
		*num = 16;
		*denom = 9;
	} else if (!(height % 10) && ((height * 16 / 10) == width)) {
		*num = 16;
		*denom = 10;
	} else if (!(height % 4) && ((height * 5 / 4) == width)) {
		*num = 5;
		*denom = 4;
	} else if (!(height % 9) && ((height * 15 / 9) == width)) {
		*num = 15;
		*denom = 9;
	} else { /* default to 16:9 */
		*num = 16;
		*denom = 9;
	}
}
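
/*
 * For example, 1280x720 satisfies the second test above (720 is divisible
 * by 9 and 720 * 16 / 9 == 1280), so it is reported as 16:9; anything that
 * matches none of the tests falls back to 16:9 as well.
 */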

static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
{
	struct v4l2_bt_timings *bt = &timings->bt;
	u32 total_h_pixel;
	u32 total_v_lines;
	u32 h_freq;

	if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
				   NULL, NULL))
		return false;

	total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
	total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);

	h_freq = (u32)bt->pixelclock / total_h_pixel;

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
		if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync,
				    bt->polarities, timings))
			return true;
	}

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
		struct v4l2_fract aspect_ratio;

		find_aspect_ratio(bt->width, bt->height,
				  &aspect_ratio.numerator,
				  &aspect_ratio.denominator);
		if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
				    bt->polarities, aspect_ratio, timings))
			return true;
	}
	return false;
}

int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
			       struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
				      0, NULL, NULL) &&
	    !valid_cvt_gtf_timings(timings))
		return -EINVAL;

	if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap, 0))
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->dv_timings_cap = *timings;
	vivid_update_format_cap(dev, false);
	return 0;
}

int vidioc_query_dv_timings(struct file *file, void *_fh,
			    struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (dev->dv_timings_signal_mode == NO_SIGNAL ||
	    dev->edid_blocks == 0)
		return -ENOLINK;
	if (dev->dv_timings_signal_mode == NO_LOCK)
		return -ENOLCK;
	if (dev->dv_timings_signal_mode == OUT_OF_RANGE) {
		timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
		return -ERANGE;
	}
	if (dev->dv_timings_signal_mode == CURRENT_DV_TIMINGS) {
		*timings = dev->dv_timings_cap;
	} else if (dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS) {
		*timings = v4l2_dv_timings_presets[dev->query_dv_timings];
	} else {
		*timings = v4l2_dv_timings_presets[dev->query_dv_timings_last];
		dev->query_dv_timings_last = (dev->query_dv_timings_last + 1) %
						dev->query_dv_timings_size;
	}
	return 0;
}

int vidioc_s_edid(struct file *file, void *_fh,
		  struct v4l2_edid *edid)
{
	struct vivid_dev *dev = video_drvdata(file);

	memset(edid->reserved, 0, sizeof(edid->reserved));
	if (edid->pad >= dev->num_inputs)
		return -EINVAL;
	if (dev->input_type[edid->pad] != HDMI || edid->start_block)
		return -EINVAL;
	if (edid->blocks == 0) {
		dev->edid_blocks = 0;
		return 0;
	}
	if (edid->blocks > dev->edid_max_blocks) {
		edid->blocks = dev->edid_max_blocks;
		return -E2BIG;
	}
	dev->edid_blocks = edid->blocks;
	memcpy(dev->edid, edid->edid, edid->blocks * 128);
	return 0;
}

int vidioc_enum_framesizes(struct file *file, void *fh,
			   struct v4l2_frmsizeenum *fsize)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
		return -EINVAL;
	if (vivid_get_format(dev, fsize->pixel_format) == NULL)
		return -EINVAL;
	if (vivid_is_webcam(dev)) {
		if (fsize->index >= ARRAY_SIZE(webcam_sizes))
			return -EINVAL;
		fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
		fsize->discrete = webcam_sizes[fsize->index];
		return 0;
	}
	if (fsize->index)
		return -EINVAL;
	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = MIN_WIDTH;
	fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
	fsize->stepwise.step_width = 2;
	fsize->stepwise.min_height = MIN_HEIGHT;
	fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
	fsize->stepwise.step_height = 2;
	return 0;
}

/* timeperframe is arbitrary and continuous */
int vidioc_enum_frameintervals(struct file *file, void *priv,
			       struct v4l2_frmivalenum *fival)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	int i;

	fmt = vivid_get_format(dev, fival->pixel_format);
	if (!fmt)
		return -EINVAL;

	if (!vivid_is_webcam(dev)) {
		if (fival->index)
			return -EINVAL;
		if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
			return -EINVAL;
		if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
			return -EINVAL;
		fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
		fival->discrete = dev->timeperframe_vid_cap;
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
		if (fival->width == webcam_sizes[i].width &&
		    fival->height == webcam_sizes[i].height)
			break;
	if (i == ARRAY_SIZE(webcam_sizes))
		return -EINVAL;
	if (fival->index >= 2 * (3 - i))
		return -EINVAL;
	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = webcam_intervals[fival->index];
	return 0;
}

int vivid_vid_cap_g_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;

	parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
	parm->parm.capture.readbuffers = 1;
	return 0;
}

#define FRACT_CMP(a, OP, b)	\
	((u64)(a).numerator * (b).denominator OP (u64)(b).numerator * (a).denominator)

int vivid_vid_cap_s_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned ival_sz = 2 * (3 - dev->webcam_size_idx);
	struct v4l2_fract tpf;
	unsigned i;

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;
	if (!vivid_is_webcam(dev))
		return vivid_vid_cap_g_parm(file, priv, parm);

	tpf = parm->parm.capture.timeperframe;

	if (tpf.denominator == 0)
		tpf = webcam_intervals[ival_sz - 1];
	for (i = 0; i < ival_sz; i++)
		if (FRACT_CMP(tpf, >=, webcam_intervals[i]))
			break;
	if (i == ival_sz)
		i = ival_sz - 1;
	dev->webcam_ival_idx = i;
	tpf = webcam_intervals[dev->webcam_ival_idx];
	tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
	tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;

	/* resync the thread's timings */
	dev->cap_seq_resync = true;
	dev->timeperframe_vid_cap = tpf;
	parm->parm.capture.timeperframe = tpf;
	parm->parm.capture.readbuffers = 1;
	return 0;
}