Lines matching refs: m2m_ctx

Cross-reference hits for the m2m_ctx identifier in the V4L2 memory-to-memory helper framework (drivers/media/v4l2-core/v4l2-mem2mem.c). Each entry gives the source file line number, the matching line, and the enclosing function; a trailing "argument" or "local" marks lines where m2m_ctx is declared as a function parameter or a local variable.

70 static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,  in get_queue_ctx()  argument
74 return &m2m_ctx->out_q_ctx; in get_queue_ctx()
76 return &m2m_ctx->cap_q_ctx; in get_queue_ctx()
82 struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_get_vq() argument
87 q_ctx = get_queue_ctx(m2m_ctx, type); in v4l2_m2m_get_vq()
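
get_queue_ctx() and v4l2_m2m_get_vq() map a V4L2 buffer type to one of the two per-context queue contexts: OUTPUT types select out_q_ctx (the source side), everything else selects cap_q_ctx (the destination side). A minimal usage sketch, assuming an m2m_ctx already created by v4l2_m2m_ctx_init() further down this listing:

	/* Fetch the vb2 queue backing the CAPTURE side of this context. */
	struct vb2_queue *dst_q =
		v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);

Most drivers never call this directly; the reqbufs/querybuf/qbuf helpers below resolve the queue from the buffer type themselves.
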
211 void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) in v4l2_m2m_try_schedule() argument
216 m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_try_schedule()
217 dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); in v4l2_m2m_try_schedule()
219 if (!m2m_ctx->out_q_ctx.q.streaming in v4l2_m2m_try_schedule()
220 || !m2m_ctx->cap_q_ctx.q.streaming) { in v4l2_m2m_try_schedule()
228 if (m2m_ctx->job_flags & TRANS_ABORT) { in v4l2_m2m_try_schedule()
234 if (m2m_ctx->job_flags & TRANS_QUEUED) { in v4l2_m2m_try_schedule()
240 spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); in v4l2_m2m_try_schedule()
241 if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue) in v4l2_m2m_try_schedule()
242 && !m2m_ctx->out_q_ctx.buffered) { in v4l2_m2m_try_schedule()
243 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, in v4l2_m2m_try_schedule()
249 spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); in v4l2_m2m_try_schedule()
250 if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue) in v4l2_m2m_try_schedule()
251 && !m2m_ctx->cap_q_ctx.buffered) { in v4l2_m2m_try_schedule()
252 spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, in v4l2_m2m_try_schedule()
254 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, in v4l2_m2m_try_schedule()
260 spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); in v4l2_m2m_try_schedule()
261 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); in v4l2_m2m_try_schedule()
264 && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { in v4l2_m2m_try_schedule()
270 list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue); in v4l2_m2m_try_schedule()
271 m2m_ctx->job_flags |= TRANS_QUEUED; in v4l2_m2m_try_schedule()
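
v4l2_m2m_try_schedule() queues a job only when every precondition holds: both queues are streaming, the context is neither aborting (TRANS_ABORT) nor already queued (TRANS_QUEUED), each ready list is non-empty (or its queue is marked buffered), and the driver's optional job_ready op agrees. Once the job is picked, the core invokes the driver's device_run op. A hedged sketch of those ops; struct my_ctx, struct my_dev and my_hw_start() are hypothetical names, not part of the framework:

	/* Hypothetical per-file-handle driver context used in these sketches. */
	struct my_ctx {
		struct v4l2_fh fh;	/* fh.m2m_ctx is set at open() time */
		struct my_dev *dev;
	};

	static int my_job_ready(void *priv)
	{
		struct my_ctx *ctx = priv;	/* drv_priv from v4l2_m2m_ctx_init() */

		/* e.g. a codec that needs two source buffers queued per run */
		return v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) >= 2;
	}

	static void my_device_run(void *priv)
	{
		struct my_ctx *ctx = priv;
		struct vb2_v4l2_buffer *src, *dst;

		src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
		dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);

		my_hw_start(ctx, src, dst);	/* program and kick the hardware */
	}

	static const struct v4l2_m2m_ops my_m2m_ops = {
		.device_run	= my_device_run,
		.job_ready	= my_job_ready,
		.job_abort	= my_job_abort,	/* sketched after the cancel path below */
	};
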
287 static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx) in v4l2_m2m_cancel_job() argument
292 m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_cancel_job()
295 m2m_ctx->job_flags |= TRANS_ABORT; in v4l2_m2m_cancel_job()
296 if (m2m_ctx->job_flags & TRANS_RUNNING) { in v4l2_m2m_cancel_job()
298 m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); in v4l2_m2m_cancel_job()
299 dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx); in v4l2_m2m_cancel_job()
300 wait_event(m2m_ctx->finished, in v4l2_m2m_cancel_job()
301 !(m2m_ctx->job_flags & TRANS_RUNNING)); in v4l2_m2m_cancel_job()
302 } else if (m2m_ctx->job_flags & TRANS_QUEUED) { in v4l2_m2m_cancel_job()
303 list_del(&m2m_ctx->queue); in v4l2_m2m_cancel_job()
304 m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); in v4l2_m2m_cancel_job()
307 m2m_ctx); in v4l2_m2m_cancel_job()
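
v4l2_m2m_cancel_job() sets TRANS_ABORT, then either waits for a running job (after asking the driver to stop via job_abort) or simply delists a job that was queued but never started. job_abort must therefore not block: it only nudges the hardware, and the normal completion path still calls v4l2_m2m_job_finish(), which clears TRANS_RUNNING and satisfies the wait_event() above. A sketch under the same hypothetical my_ctx assumptions (my_hw_request_stop() is a placeholder):

	static void my_job_abort(void *priv)
	{
		struct my_ctx *ctx = priv;

		/* ask the device to finish early; do not wait for it here */
		my_hw_request_stop(ctx);
	}
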
327 struct v4l2_m2m_ctx *m2m_ctx) in v4l2_m2m_job_finish() argument
332 if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) { in v4l2_m2m_job_finish()
348 v4l2_m2m_try_schedule(m2m_ctx); in v4l2_m2m_job_finish()
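
v4l2_m2m_job_finish() bails out (line 332 above) unless the given context is the one currently owned by the device; drivers must call it exactly once per device_run invocation, typically from the completion interrupt after marking both buffers done. It wakes any waiter on m2m_ctx->finished and re-enters the scheduler for the next queued context. A hedged completion-path sketch, reusing the hypothetical my_ctx/my_dev types:

	static irqreturn_t my_irq_handler(int irq, void *data)
	{
		struct my_dev *dev = data;
		struct my_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
		struct vb2_v4l2_buffer *src, *dst;

		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

		vb2_buffer_done(&src->vb2_buf, VB2_BUF_STATE_DONE);
		vb2_buffer_done(&dst->vb2_buf, VB2_BUF_STATE_DONE);

		/* release the device and let the next queued context run */
		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
		return IRQ_HANDLED;
	}
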
356 int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_reqbufs() argument
362 vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type); in v4l2_m2m_reqbufs()
378 int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_querybuf() argument
385 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_querybuf()
407 int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_qbuf() argument
413 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_qbuf()
416 v4l2_m2m_try_schedule(m2m_ctx); in v4l2_m2m_qbuf()
426 int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_dqbuf() argument
431 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_dqbuf()
440 int v4l2_m2m_prepare_buf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_prepare_buf() argument
446 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_prepare_buf()
449 v4l2_m2m_try_schedule(m2m_ctx); in v4l2_m2m_prepare_buf()
459 int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_create_bufs() argument
464 vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type); in v4l2_m2m_create_bufs()
473 int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_expbuf() argument
478 vq = v4l2_m2m_get_vq(m2m_ctx, eb->type); in v4l2_m2m_expbuf()
485 int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_streamon() argument
491 vq = v4l2_m2m_get_vq(m2m_ctx, type); in v4l2_m2m_streamon()
494 v4l2_m2m_try_schedule(m2m_ctx); in v4l2_m2m_streamon()
503 int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_streamoff() argument
512 v4l2_m2m_cancel_job(m2m_ctx); in v4l2_m2m_streamoff()
514 q_ctx = get_queue_ctx(m2m_ctx, type); in v4l2_m2m_streamoff()
519 m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_streamoff()
522 if (m2m_ctx->job_flags & TRANS_QUEUED) in v4l2_m2m_streamoff()
523 list_del(&m2m_ctx->queue); in v4l2_m2m_streamoff()
524 m2m_ctx->job_flags = 0; in v4l2_m2m_streamoff()
533 if (m2m_dev->curr_ctx == m2m_ctx) { in v4l2_m2m_streamoff()
535 wake_up(&m2m_ctx->finished); in v4l2_m2m_streamoff()
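
The v4l2_m2m_reqbufs() through v4l2_m2m_streamoff() helpers above each resolve the right vb2 queue from the buffer type and forward to the corresponding vb2 call; v4l2_m2m_qbuf(), v4l2_m2m_prepare_buf() and v4l2_m2m_streamon() additionally kick the scheduler via v4l2_m2m_try_schedule(), while v4l2_m2m_streamoff() first cancels any in-flight job and drops the context from the job queue. A driver that needs extra per-ioctl work can still delegate, as in this sketch (the validation hook is hypothetical):

	static int my_vidioc_qbuf(struct file *file, void *priv,
				  struct v4l2_buffer *buf)
	{
		struct v4l2_fh *fh = file->private_data;

		/* driver-specific buffer validation would go here */
		return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf);
	}
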
551 unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_poll() argument
572 src_q = v4l2_m2m_get_src_vq(m2m_ctx); in v4l2_m2m_poll()
573 dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); in v4l2_m2m_poll()
639 int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_mmap() argument
646 vq = v4l2_m2m_get_src_vq(m2m_ctx); in v4l2_m2m_mmap()
648 vq = v4l2_m2m_get_dst_vq(m2m_ctx); in v4l2_m2m_mmap()
706 struct v4l2_m2m_ctx *m2m_ctx; in v4l2_m2m_ctx_init() local
710 m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL); in v4l2_m2m_ctx_init()
711 if (!m2m_ctx) in v4l2_m2m_ctx_init()
714 m2m_ctx->priv = drv_priv; in v4l2_m2m_ctx_init()
715 m2m_ctx->m2m_dev = m2m_dev; in v4l2_m2m_ctx_init()
716 init_waitqueue_head(&m2m_ctx->finished); in v4l2_m2m_ctx_init()
718 out_q_ctx = &m2m_ctx->out_q_ctx; in v4l2_m2m_ctx_init()
719 cap_q_ctx = &m2m_ctx->cap_q_ctx; in v4l2_m2m_ctx_init()
726 INIT_LIST_HEAD(&m2m_ctx->queue); in v4l2_m2m_ctx_init()
738 m2m_ctx->q_lock = out_q_ctx->q.lock; in v4l2_m2m_ctx_init()
740 return m2m_ctx; in v4l2_m2m_ctx_init()
742 kfree(m2m_ctx); in v4l2_m2m_ctx_init()
752 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) in v4l2_m2m_ctx_release() argument
755 v4l2_m2m_cancel_job(m2m_ctx); in v4l2_m2m_ctx_release()
757 vb2_queue_release(&m2m_ctx->cap_q_ctx.q); in v4l2_m2m_ctx_release()
758 vb2_queue_release(&m2m_ctx->out_q_ctx.q); in v4l2_m2m_ctx_release()
760 kfree(m2m_ctx); in v4l2_m2m_ctx_release()
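
v4l2_m2m_ctx_init() allocates the context, runs the driver's queue_init callback to set up both vb2 queues, and records the source queue's lock as q_lock (used by v4l2_m2m_fop_poll() at the end of this listing); v4l2_m2m_ctx_release() cancels any pending job and releases both queues. A hedged open()/release() sketch; all my_* names are hypothetical, and the mem_ops/lock choices are one plausible configuration, not the only one:

	static int my_queue_init(void *priv, struct vb2_queue *src_vq,
				 struct vb2_queue *dst_vq)
	{
		struct my_ctx *ctx = priv;
		int ret;

		src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
		src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
		src_vq->drv_priv = ctx;
		src_vq->ops = &my_vb2_ops;	/* hypothetical vb2_ops */
		src_vq->mem_ops = &vb2_dma_contig_memops;
		/* required by v4l2_m2m_buf_queue(), see below */
		src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
		src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
		src_vq->lock = &ctx->dev->mutex;	/* becomes m2m_ctx->q_lock */
		ret = vb2_queue_init(src_vq);
		if (ret)
			return ret;

		dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		/* mirror the remaining fields for the capture queue, then: */
		return vb2_queue_init(dst_vq);
	}

	static int my_open(struct file *file)
	{
		struct my_dev *dev = video_drvdata(file);
		struct my_ctx *ctx;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		v4l2_fh_init(&ctx->fh, video_devdata(file));
		file->private_data = &ctx->fh;
		ctx->dev = dev;

		ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
						    my_queue_init);
		if (IS_ERR(ctx->fh.m2m_ctx)) {
			int ret = PTR_ERR(ctx->fh.m2m_ctx);

			v4l2_fh_exit(&ctx->fh);
			kfree(ctx);
			return ret;
		}

		v4l2_fh_add(&ctx->fh);
		return 0;
	}

	static int my_release(struct file *file)
	{
		struct my_ctx *ctx = container_of(file->private_data,
						  struct my_ctx, fh);

		v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
		v4l2_fh_del(&ctx->fh);
		v4l2_fh_exit(&ctx->fh);
		kfree(ctx);
		return 0;
	}
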
769 void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_buf_queue() argument
777 q_ctx = get_queue_ctx(m2m_ctx, vbuf->vb2_buf.vb2_queue->type); in v4l2_m2m_buf_queue()
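
v4l2_m2m_buf_queue() appends a buffer to the ready list of whichever queue context matches the buffer's queue type; drivers call it from their vb2 .buf_queue op. It tracks the buffer through a struct v4l2_m2m_buffer, which is why the queue_init sketch above sets buf_struct_size accordingly. A minimal sketch, same hypothetical my_ctx:

	static void my_buf_queue(struct vb2_buffer *vb)
	{
		struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
		struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

		v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
	}
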
795 return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb); in v4l2_m2m_ioctl_reqbufs()
804 return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create); in v4l2_m2m_ioctl_create_bufs()
813 return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_querybuf()
822 return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_qbuf()
831 return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_dqbuf()
840 return v4l2_m2m_prepare_buf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_prepare_buf()
849 return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb); in v4l2_m2m_ioctl_expbuf()
858 return v4l2_m2m_streamon(file, fh->m2m_ctx, type); in v4l2_m2m_ioctl_streamon()
867 return v4l2_m2m_streamoff(file, fh->m2m_ctx, type); in v4l2_m2m_ioctl_streamoff()
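
The v4l2_m2m_ioctl_*() wrappers above assume file->private_data points at a struct v4l2_fh whose m2m_ctx field was set at open() time (as in the earlier sketch). A driver with no per-ioctl extras can plug them straight into its ioctl table; a plausible excerpt:

	static const struct v4l2_ioctl_ops my_ioctl_ops = {
		/* format negotiation stays driver-specific */
		.vidioc_reqbufs		= v4l2_m2m_ioctl_reqbufs,
		.vidioc_create_bufs	= v4l2_m2m_ioctl_create_bufs,
		.vidioc_querybuf	= v4l2_m2m_ioctl_querybuf,
		.vidioc_qbuf		= v4l2_m2m_ioctl_qbuf,
		.vidioc_dqbuf		= v4l2_m2m_ioctl_dqbuf,
		.vidioc_prepare_buf	= v4l2_m2m_ioctl_prepare_buf,
		.vidioc_expbuf		= v4l2_m2m_ioctl_expbuf,
		.vidioc_streamon	= v4l2_m2m_ioctl_streamon,
		.vidioc_streamoff	= v4l2_m2m_ioctl_streamoff,
	};
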
880 return v4l2_m2m_mmap(file, fh->m2m_ctx, vma); in v4l2_m2m_fop_mmap()
887 struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx; in v4l2_m2m_fop_poll() local
890 if (m2m_ctx->q_lock) in v4l2_m2m_fop_poll()
891 mutex_lock(m2m_ctx->q_lock); in v4l2_m2m_fop_poll()
893 ret = v4l2_m2m_poll(file, m2m_ctx, wait); in v4l2_m2m_fop_poll()
895 if (m2m_ctx->q_lock) in v4l2_m2m_fop_poll()
896 mutex_unlock(m2m_ctx->q_lock); in v4l2_m2m_fop_poll()
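
v4l2_m2m_fop_poll() wraps v4l2_m2m_poll() in m2m_ctx->q_lock when one was recorded by v4l2_m2m_ctx_init(), and v4l2_m2m_fop_mmap() lets v4l2_m2m_mmap() route the mapping to the source or destination queue by offset (capture mappings start at DST_QUEUE_OFF_BASE). Together with the helpers above, these give a driver an almost entirely stock file_operations table; a sketch reusing the earlier hypothetical my_open()/my_release():

	static const struct v4l2_file_operations my_fops = {
		.owner		= THIS_MODULE,
		.open		= my_open,
		.release	= my_release,
		.poll		= v4l2_m2m_fop_poll,
		.unlocked_ioctl	= video_ioctl2,
		.mmap		= v4l2_m2m_fop_mmap,
	};
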