/*
 * Copyright (c) 2011 - 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Samsung EXYNOS5 SoC series G-Scaler driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation, either version 2 of the License,
 * or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/clk.h>

#include <media/v4l2-ioctl.h>

#include "gsc-core.h"

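/*
 * Request the currently running job for this context to stop and wait
 * until the interrupt handler clears GSC_CTX_STOP_REQ or the wait times
 * out.
 */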
static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx)
{
	struct gsc_ctx *curr_ctx;
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	curr_ctx = v4l2_m2m_get_curr_priv(gsc->m2m.m2m_dev);
	if (!gsc_m2m_pending(gsc) || (curr_ctx != ctx))
		return 0;

	gsc_ctx_state_lock_set(GSC_CTX_STOP_REQ, ctx);
	ret = wait_event_timeout(gsc->irq_queue,
			!gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx),
			GSC_SHUTDOWN_TIMEOUT);

	return ret == 0 ? -ETIMEDOUT : ret;
}

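/*
 * Abort the job in progress for the context.  If the stop request timed
 * out or an abort was already pending, finish the job with an error
 * buffer state.
 */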
static void __gsc_m2m_job_abort(struct gsc_ctx *ctx)
{
	int ret;

	ret = gsc_m2m_ctx_stop_req(ctx);
	if ((ret == -ETIMEDOUT) || (ctx->state & GSC_CTX_ABORT)) {
		gsc_ctx_state_lock_clear(GSC_CTX_STOP_REQ | GSC_CTX_ABORT, ctx);
		gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
	}
}

static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct gsc_ctx *ctx = q->drv_priv;
	int ret;

	ret = pm_runtime_get_sync(&ctx->gsc_dev->pdev->dev);
	return ret > 0 ? 0 : ret;
}

static void gsc_m2m_stop_streaming(struct vb2_queue *q)
{
	struct gsc_ctx *ctx = q->drv_priv;

	__gsc_m2m_job_abort(ctx);

	pm_runtime_put(&ctx->gsc_dev->pdev->dev);
}

void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state)
{
	struct vb2_v4l2_buffer *src_vb, *dst_vb;

	if (!ctx || !ctx->m2m_ctx)
		return;

	src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
	dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);

	if (src_vb && dst_vb) {
		dst_vb->timestamp = src_vb->timestamp;
		dst_vb->timecode = src_vb->timecode;
		dst_vb->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
		dst_vb->flags |=
			src_vb->flags
			& V4L2_BUF_FLAG_TSTAMP_SRC_MASK;

		v4l2_m2m_buf_done(src_vb, vb_state);
		v4l2_m2m_buf_done(dst_vb, vb_state);

		v4l2_m2m_job_finish(ctx->gsc_dev->m2m.m2m_dev,
				    ctx->m2m_ctx);
	}
}

static void gsc_m2m_job_abort(void *priv)
{
	__gsc_m2m_job_abort((struct gsc_ctx *)priv);
}

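/* Prepare the DMA addresses of the next source and destination buffers. */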
static int gsc_get_bufs(struct gsc_ctx *ctx)
{
	struct gsc_frame *s_frame, *d_frame;
	struct vb2_v4l2_buffer *src_vb, *dst_vb;
	int ret;

	s_frame = &ctx->s_frame;
	d_frame = &ctx->d_frame;

	src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, &src_vb->vb2_buf, s_frame, &s_frame->addr);
	if (ret)
		return ret;

	dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
	ret = gsc_prepare_addr(ctx, &dst_vb->vb2_buf, d_frame, &d_frame->addr);
	if (ret)
		return ret;

	dst_vb->timestamp = src_vb->timestamp;

	return 0;
}

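/*
 * mem2mem framework callback: program the G-Scaler with the context's
 * buffer addresses and, when the parameters have changed, the full scaler
 * configuration, then kick off processing of one frame.
 */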
static void gsc_m2m_device_run(void *priv)
{
	struct gsc_ctx *ctx = priv;
	struct gsc_dev *gsc;
	unsigned long flags;
	int ret;
	bool is_set = false;

	if (WARN(!ctx, "null hardware context\n"))
		return;

	gsc = ctx->gsc_dev;
	spin_lock_irqsave(&gsc->slock, flags);

	set_bit(ST_M2M_PEND, &gsc->state);

	/* Reconfigure hardware if the context has changed. */
	if (gsc->m2m.ctx != ctx) {
		pr_debug("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p",
				gsc->m2m.ctx, ctx);
		ctx->state |= GSC_PARAMS;
		gsc->m2m.ctx = ctx;
	}

	is_set = ctx->state & GSC_CTX_STOP_REQ;
	if (is_set) {
		ctx->state &= ~GSC_CTX_STOP_REQ;
		ctx->state |= GSC_CTX_ABORT;
		wake_up(&gsc->irq_queue);
		goto put_device;
	}

	ret = gsc_get_bufs(ctx);
	if (ret) {
		pr_err("Wrong address");
		goto put_device;
	}

	gsc_set_prefbuf(gsc, &ctx->s_frame);
	gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM);
	gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM);

	if (ctx->state & GSC_PARAMS) {
		gsc_hw_set_input_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_output_buf_masking(gsc, GSC_M2M_BUF_NUM, false);
		gsc_hw_set_frm_done_irq_mask(gsc, false);
		gsc_hw_set_gsc_irq_enable(gsc, true);

		if (gsc_set_scaler_info(ctx)) {
			pr_err("Scaler setup error");
			goto put_device;
		}

		gsc_hw_set_input_path(ctx);
		gsc_hw_set_in_size(ctx);
		gsc_hw_set_in_image_format(ctx);

		gsc_hw_set_output_path(ctx);
		gsc_hw_set_out_size(ctx);
		gsc_hw_set_out_image_format(ctx);

		gsc_hw_set_prescaler(ctx);
		gsc_hw_set_mainscaler(ctx);
		gsc_hw_set_rotation(ctx);
		gsc_hw_set_global_alpha(ctx);
	}

	/* update shadow registers */
	gsc_hw_set_sfr_update(ctx);

	ctx->state &= ~GSC_PARAMS;
	gsc_hw_enable_control(gsc, true);

	spin_unlock_irqrestore(&gsc->slock, flags);
	return;

put_device:
	ctx->state &= ~GSC_PARAMS;
	spin_unlock_irqrestore(&gsc->slock, flags);
}

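/* Report the plane count, per-plane sizes and allocator contexts for buffer allocation. */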
static int gsc_m2m_queue_setup(struct vb2_queue *vq,
			const void *parg,
			unsigned int *num_buffers, unsigned int *num_planes,
			unsigned int sizes[], void *allocators[])
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vq);
	struct gsc_frame *frame;
	int i;

	frame = ctx_get_frame(ctx, vq->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	if (!frame->fmt)
		return -EINVAL;

	*num_planes = frame->fmt->num_planes;
	for (i = 0; i < frame->fmt->num_planes; i++) {
		sizes[i] = frame->payload[i];
		allocators[i] = ctx->gsc_dev->alloc_ctx;
	}
	return 0;
}

static int gsc_m2m_buf_prepare(struct vb2_buffer *vb)
{
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct gsc_frame *frame;
	int i;

	frame = ctx_get_frame(ctx, vb->vb2_queue->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	if (!V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
		for (i = 0; i < frame->fmt->num_planes; i++)
			vb2_set_plane_payload(vb, i, frame->payload[i]);
	}

	return 0;
}

static void gsc_m2m_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state);

	if (ctx->m2m_ctx)
		v4l2_m2m_buf_queue(ctx->m2m_ctx, vbuf);
}

static struct vb2_ops gsc_m2m_qops = {
	.queue_setup	 = gsc_m2m_queue_setup,
	.buf_prepare	 = gsc_m2m_buf_prepare,
	.buf_queue	 = gsc_m2m_buf_queue,
	.wait_prepare	 = vb2_ops_wait_prepare,
	.wait_finish	 = vb2_ops_wait_finish,
	.stop_streaming	 = gsc_m2m_stop_streaming,
	.start_streaming = gsc_m2m_start_streaming,
};

static int gsc_m2m_querycap(struct file *file, void *fh,
			   struct v4l2_capability *cap)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_dev *gsc = ctx->gsc_dev;

	strlcpy(cap->driver, gsc->pdev->name, sizeof(cap->driver));
	strlcpy(cap->card, gsc->pdev->name, sizeof(cap->card));
	strlcpy(cap->bus_info, "platform", sizeof(cap->bus_info));
	cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE |
		V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE;

	cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
	return 0;
}

static int gsc_m2m_enum_fmt_mplane(struct file *file, void *priv,
				struct v4l2_fmtdesc *f)
{
	return gsc_enum_fmt_mplane(f);
}

static int gsc_m2m_g_fmt_mplane(struct file *file, void *fh,
			     struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	return gsc_g_fmt_mplane(ctx, f);
}

static int gsc_m2m_try_fmt_mplane(struct file *file, void *fh,
				  struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	return gsc_try_fmt_mplane(ctx, f);
}

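/* Apply a validated format to the source or destination frame of the context. */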
static int gsc_m2m_s_fmt_mplane(struct file *file, void *fh,
				 struct v4l2_format *f)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct vb2_queue *vq;
	struct gsc_frame *frame;
	struct v4l2_pix_format_mplane *pix;
	int i, ret = 0;

	ret = gsc_m2m_try_fmt_mplane(file, fh, f);
	if (ret)
		return ret;

	vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);

	if (vb2_is_streaming(vq)) {
		pr_err("queue (%d) busy", f->type);
		return -EBUSY;
	}

	if (V4L2_TYPE_IS_OUTPUT(f->type))
		frame = &ctx->s_frame;
	else
		frame = &ctx->d_frame;

	pix = &f->fmt.pix_mp;
	frame->fmt = find_fmt(&pix->pixelformat, NULL, 0);
	frame->colorspace = pix->colorspace;
	if (!frame->fmt)
		return -EINVAL;

	for (i = 0; i < frame->fmt->num_planes; i++)
		frame->payload[i] = pix->plane_fmt[i].sizeimage;

	gsc_set_frame_size(frame, pix->width, pix->height);

	if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_DST_FMT, ctx);
	else
		gsc_ctx_state_lock_set(GSC_PARAMS | GSC_SRC_FMT, ctx);

	pr_debug("f_w: %d, f_h: %d", frame->f_width, frame->f_height);

	return 0;
}

static int gsc_m2m_reqbufs(struct file *file, void *fh,
			  struct v4l2_requestbuffers *reqbufs)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct gsc_dev *gsc = ctx->gsc_dev;
	u32 max_cnt;

	max_cnt = (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) ?
		gsc->variant->in_buf_cnt : gsc->variant->out_buf_cnt;
	if (reqbufs->count > max_cnt) {
		return -EINVAL;
	} else if (reqbufs->count == 0) {
		if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
			gsc_ctx_state_lock_clear(GSC_SRC_FMT, ctx);
		else
			gsc_ctx_state_lock_clear(GSC_DST_FMT, ctx);
	}

	return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}

static int gsc_m2m_expbuf(struct file *file, void *fh,
				struct v4l2_exportbuffer *eb)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
}

static int gsc_m2m_querybuf(struct file *file, void *fh,
					struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}

static int gsc_m2m_qbuf(struct file *file, void *fh,
			  struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}

static int gsc_m2m_dqbuf(struct file *file, void *fh,
			   struct v4l2_buffer *buf)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}

static int gsc_m2m_streamon(struct file *file, void *fh,
			   enum v4l2_buf_type type)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	/* The source and target color formats need to be set */
	if (V4L2_TYPE_IS_OUTPUT(type)) {
		if (!gsc_ctx_state_is_set(GSC_SRC_FMT, ctx))
			return -EINVAL;
	} else if (!gsc_ctx_state_is_set(GSC_DST_FMT, ctx)) {
		return -EINVAL;
	}

	return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}

static int gsc_m2m_streamoff(struct file *file, void *fh,
			    enum v4l2_buf_type type)
{
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}

/* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. */
static int is_rectangle_enclosed(struct v4l2_rect *a, struct v4l2_rect *b)
{
	if (a->left < b->left || a->top < b->top)
		return 0;

	if (a->left + a->width > b->left + b->width)
		return 0;

	if (a->top + a->height > b->top + b->height)
		return 0;

	return 1;
}

static int gsc_m2m_g_selection(struct file *file, void *fh,
			struct v4l2_selection *s)
{
	struct gsc_frame *frame;
	struct gsc_ctx *ctx = fh_to_ctx(fh);

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
		return -EINVAL;

	frame = ctx_get_frame(ctx, s->type);
	if (IS_ERR(frame))
		return PTR_ERR(frame);

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		s->r.left = 0;
		s->r.top = 0;
		s->r.width = frame->f_width;
		s->r.height = frame->f_height;
		return 0;

	case V4L2_SEL_TGT_COMPOSE:
	case V4L2_SEL_TGT_CROP:
		s->r.left = frame->crop.left;
		s->r.top = frame->crop.top;
		s->r.width = frame->crop.width;
		s->r.height = frame->crop.height;
		return 0;
	}

	return -EINVAL;
}

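/*
 * Set a new selection rectangle on the source or destination frame and
 * verify that the resulting scaling ratio is within the supported range.
 */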
static int gsc_m2m_s_selection(struct file *file, void *fh,
				struct v4l2_selection *s)
{
	struct gsc_frame *frame;
	struct gsc_ctx *ctx = fh_to_ctx(fh);
	struct v4l2_crop cr;
	struct gsc_variant *variant = ctx->gsc_dev->variant;
	int ret;

	cr.type = s->type;
	cr.c = s->r;

	if ((s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) &&
	    (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE))
		return -EINVAL;

	ret = gsc_try_crop(ctx, &cr);
	if (ret)
		return ret;

	if (s->flags & V4L2_SEL_FLAG_LE &&
	    !is_rectangle_enclosed(&cr.c, &s->r))
		return -ERANGE;

	if (s->flags & V4L2_SEL_FLAG_GE &&
	    !is_rectangle_enclosed(&s->r, &cr.c))
		return -ERANGE;

	s->r = cr.c;

	switch (s->target) {
	case V4L2_SEL_TGT_COMPOSE_BOUNDS:
	case V4L2_SEL_TGT_COMPOSE_DEFAULT:
	case V4L2_SEL_TGT_COMPOSE:
		frame = &ctx->s_frame;
		break;

	case V4L2_SEL_TGT_CROP_BOUNDS:
	case V4L2_SEL_TGT_CROP:
	case V4L2_SEL_TGT_CROP_DEFAULT:
		frame = &ctx->d_frame;
		break;

	default:
		return -EINVAL;
	}

	/* Check if the scaling ratio is within the supported range */
	if (gsc_ctx_state_is_set(GSC_DST_FMT | GSC_SRC_FMT, ctx)) {
		if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
			ret = gsc_check_scaler_ratio(variant, cr.c.width,
				cr.c.height, ctx->d_frame.crop.width,
				ctx->d_frame.crop.height,
				ctx->gsc_ctrls.rotate->val, ctx->out_path);
		} else {
			ret = gsc_check_scaler_ratio(variant,
				ctx->s_frame.crop.width,
				ctx->s_frame.crop.height, cr.c.width,
				cr.c.height, ctx->gsc_ctrls.rotate->val,
				ctx->out_path);
		}

		if (ret) {
			pr_err("Out of scaler range");
			return -EINVAL;
		}
	}

	frame->crop = cr.c;

	gsc_ctx_state_lock_set(GSC_PARAMS, ctx);
	return 0;
}

static const struct v4l2_ioctl_ops gsc_m2m_ioctl_ops = {
	.vidioc_querycap		= gsc_m2m_querycap,
	.vidioc_enum_fmt_vid_cap_mplane	= gsc_m2m_enum_fmt_mplane,
	.vidioc_enum_fmt_vid_out_mplane	= gsc_m2m_enum_fmt_mplane,
	.vidioc_g_fmt_vid_cap_mplane	= gsc_m2m_g_fmt_mplane,
	.vidioc_g_fmt_vid_out_mplane	= gsc_m2m_g_fmt_mplane,
	.vidioc_try_fmt_vid_cap_mplane	= gsc_m2m_try_fmt_mplane,
	.vidioc_try_fmt_vid_out_mplane	= gsc_m2m_try_fmt_mplane,
	.vidioc_s_fmt_vid_cap_mplane	= gsc_m2m_s_fmt_mplane,
	.vidioc_s_fmt_vid_out_mplane	= gsc_m2m_s_fmt_mplane,
	.vidioc_reqbufs			= gsc_m2m_reqbufs,
	.vidioc_expbuf			= gsc_m2m_expbuf,
	.vidioc_querybuf		= gsc_m2m_querybuf,
	.vidioc_qbuf			= gsc_m2m_qbuf,
	.vidioc_dqbuf			= gsc_m2m_dqbuf,
	.vidioc_streamon		= gsc_m2m_streamon,
	.vidioc_streamoff		= gsc_m2m_streamoff,
	.vidioc_g_selection		= gsc_m2m_g_selection,
	.vidioc_s_selection		= gsc_m2m_s_selection
};

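/* Initialize the source (output) and destination (capture) vb2 queues for a new context. */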
static int queue_init(void *priv, struct vb2_queue *src_vq,
			struct vb2_queue *dst_vq)
{
	struct gsc_ctx *ctx = priv;
	int ret;

	memset(src_vq, 0, sizeof(*src_vq));
	src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	src_vq->drv_priv = ctx;
	src_vq->ops = &gsc_m2m_qops;
	src_vq->mem_ops = &vb2_dma_contig_memops;
	src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	src_vq->lock = &ctx->gsc_dev->lock;

	ret = vb2_queue_init(src_vq);
	if (ret)
		return ret;

	memset(dst_vq, 0, sizeof(*dst_vq));
	dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	dst_vq->drv_priv = ctx;
	dst_vq->ops = &gsc_m2m_qops;
	dst_vq->mem_ops = &vb2_dma_contig_memops;
	dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
	dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	dst_vq->lock = &ctx->gsc_dev->lock;

	return vb2_queue_init(dst_vq);
}

static int gsc_m2m_open(struct file *file)
{
	struct gsc_dev *gsc = video_drvdata(file);
	struct gsc_ctx *ctx = NULL;
	int ret;

	pr_debug("pid: %d, state: 0x%lx", task_pid_nr(current), gsc->state);

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		ret = -ENOMEM;
		goto unlock;
	}

	v4l2_fh_init(&ctx->fh, gsc->m2m.vfd);
	ret = gsc_ctrls_create(ctx);
	if (ret)
		goto error_fh;

	/* Use a separate control handler per file handle */
	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	ctx->gsc_dev = gsc;
	/* Default color format */
	ctx->s_frame.fmt = get_format(0);
	ctx->d_frame.fmt = get_format(0);
	/* Set up the device context for mem2mem mode. */
	ctx->state = GSC_CTX_M2M;
	ctx->flags = 0;
	ctx->in_path = GSC_DMA;
	ctx->out_path = GSC_DMA;

	ctx->m2m_ctx = v4l2_m2m_ctx_init(gsc->m2m.m2m_dev, ctx, queue_init);
	if (IS_ERR(ctx->m2m_ctx)) {
		pr_err("Failed to initialize m2m context");
		ret = PTR_ERR(ctx->m2m_ctx);
		goto error_ctrls;
	}

	if (gsc->m2m.refcnt++ == 0)
		set_bit(ST_M2M_OPEN, &gsc->state);

	pr_debug("gsc m2m driver is opened, ctx(0x%p)", ctx);

	mutex_unlock(&gsc->lock);
	return 0;

error_ctrls:
	gsc_ctrls_delete(ctx);
error_fh:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
unlock:
	mutex_unlock(&gsc->lock);
	return ret;
}

static int gsc_m2m_release(struct file *file)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;

	pr_debug("pid: %d, state: 0x%lx, refcnt= %d",
		task_pid_nr(current), gsc->state, gsc->m2m.refcnt);

	mutex_lock(&gsc->lock);

	v4l2_m2m_ctx_release(ctx->m2m_ctx);
	gsc_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

	if (--gsc->m2m.refcnt <= 0)
		clear_bit(ST_M2M_OPEN, &gsc->state);
	kfree(ctx);

	mutex_unlock(&gsc->lock);
	return 0;
}

static unsigned int gsc_m2m_poll(struct file *file,
					struct poll_table_struct *wait)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
	mutex_unlock(&gsc->lock);

	return ret;
}

static int gsc_m2m_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
	struct gsc_dev *gsc = ctx->gsc_dev;
	int ret;

	if (mutex_lock_interruptible(&gsc->lock))
		return -ERESTARTSYS;

	ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
	mutex_unlock(&gsc->lock);

	return ret;
}

static const struct v4l2_file_operations gsc_m2m_fops = {
	.owner		= THIS_MODULE,
	.open		= gsc_m2m_open,
	.release	= gsc_m2m_release,
	.poll		= gsc_m2m_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= gsc_m2m_mmap,
};

static struct v4l2_m2m_ops gsc_m2m_ops = {
	.device_run	= gsc_m2m_device_run,
	.job_abort	= gsc_m2m_job_abort,
};

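/* Create the v4l2-m2m device and register the G-Scaler mem2mem video device node. */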
int gsc_register_m2m_device(struct gsc_dev *gsc)
{
	struct platform_device *pdev;
	int ret;

	if (!gsc)
		return -ENODEV;

	pdev = gsc->pdev;

	gsc->vdev.fops		= &gsc_m2m_fops;
	gsc->vdev.ioctl_ops	= &gsc_m2m_ioctl_ops;
	gsc->vdev.release	= video_device_release_empty;
	gsc->vdev.lock		= &gsc->lock;
	gsc->vdev.vfl_dir	= VFL_DIR_M2M;
	gsc->vdev.v4l2_dev	= &gsc->v4l2_dev;
	snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
					GSC_MODULE_NAME, gsc->id);

	video_set_drvdata(&gsc->vdev, gsc);

	gsc->m2m.vfd = &gsc->vdev;
	gsc->m2m.m2m_dev = v4l2_m2m_init(&gsc_m2m_ops);
	if (IS_ERR(gsc->m2m.m2m_dev)) {
		dev_err(&pdev->dev, "failed to initialize v4l2-m2m device\n");
		ret = PTR_ERR(gsc->m2m.m2m_dev);
		goto err_m2m_r1;
	}

	ret = video_register_device(&gsc->vdev, VFL_TYPE_GRABBER, -1);
	if (ret) {
		dev_err(&pdev->dev,
			 "%s(): failed to register video device\n", __func__);
		goto err_m2m_r2;
	}

	pr_debug("gsc m2m driver registered as /dev/video%d", gsc->vdev.num);
	return 0;

err_m2m_r2:
	v4l2_m2m_release(gsc->m2m.m2m_dev);
err_m2m_r1:
	video_device_release(gsc->m2m.vfd);

	return ret;
}

void gsc_unregister_m2m_device(struct gsc_dev *gsc)
{
	if (gsc)
		v4l2_m2m_release(gsc->m2m.m2m_dev);
}