Lines matching refs: m2m_ctx

70 static struct v4l2_m2m_queue_ctx *get_queue_ctx(struct v4l2_m2m_ctx *m2m_ctx,  in get_queue_ctx()  argument
74 return &m2m_ctx->out_q_ctx; in get_queue_ctx()
76 return &m2m_ctx->cap_q_ctx; in get_queue_ctx()
82 struct vb2_queue *v4l2_m2m_get_vq(struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_get_vq() argument
87 q_ctx = get_queue_ctx(m2m_ctx, type); in v4l2_m2m_get_vq()
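
get_queue_ctx() and v4l2_m2m_get_vq() map a V4L2 buffer type to the context's queues: OUTPUT types resolve to out_q_ctx, CAPTURE types to cap_q_ctx. A minimal sketch of how a driver might use v4l2_m2m_get_vq() in one of its own handlers; struct my_ctx and my_s_fmt are hypothetical driver names, assumed to carry the m2m_ctx pointer:

    static int my_s_fmt(struct my_ctx *ctx, struct v4l2_format *f)
    {
        struct vb2_queue *vq;

        /* f->type selects out_q_ctx (OUTPUT) or cap_q_ctx (CAPTURE) */
        vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);

        /* refuse to change the format while buffers are allocated */
        if (vb2_is_busy(vq))
            return -EBUSY;

        /* ... store the new format in the driver's per-queue state ... */
        return 0;
    }
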
211 void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx) in v4l2_m2m_try_schedule() argument
216 m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_try_schedule()
217 dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx); in v4l2_m2m_try_schedule()
219 if (!m2m_ctx->out_q_ctx.q.streaming in v4l2_m2m_try_schedule()
220 || !m2m_ctx->cap_q_ctx.q.streaming) { in v4l2_m2m_try_schedule()
228 if (m2m_ctx->job_flags & TRANS_ABORT) { in v4l2_m2m_try_schedule()
234 if (m2m_ctx->job_flags & TRANS_QUEUED) { in v4l2_m2m_try_schedule()
240 spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); in v4l2_m2m_try_schedule()
241 if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue) in v4l2_m2m_try_schedule()
242 && !m2m_ctx->out_q_ctx.buffered) { in v4l2_m2m_try_schedule()
243 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, in v4l2_m2m_try_schedule()
249 spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); in v4l2_m2m_try_schedule()
250 if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue) in v4l2_m2m_try_schedule()
251 && !m2m_ctx->cap_q_ctx.buffered) { in v4l2_m2m_try_schedule()
252 spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, in v4l2_m2m_try_schedule()
254 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, in v4l2_m2m_try_schedule()
260 spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap); in v4l2_m2m_try_schedule()
261 spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out); in v4l2_m2m_try_schedule()
264 && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { in v4l2_m2m_try_schedule()
270 list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue); in v4l2_m2m_try_schedule()
271 m2m_ctx->job_flags |= TRANS_QUEUED; in v4l2_m2m_try_schedule()
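
v4l2_m2m_try_schedule() only queues a job when both queues are streaming, the context is neither aborting (TRANS_ABORT) nor already queued or running, each ready list holds a buffer (unless that queue was marked buffered), and the driver's optional job_ready() callback agrees; the framework later invokes device_run() when the job reaches the head of job_queue. A hedged sketch of the driver-side ops this scheduling model implies; my_job_ready, my_device_run, my_hw_start and struct my_ctx are hypothetical, and my_job_abort is sketched under v4l2_m2m_cancel_job() below:

    static void my_job_abort(void *priv);  /* see the v4l2_m2m_cancel_job() sketch below */

    static int my_job_ready(void *priv)
    {
        struct my_ctx *ctx = priv;

        /* only let the core schedule us once a src/dst pair is ready */
        return v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0 &&
               v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0;
    }

    static void my_device_run(void *priv)
    {
        struct my_ctx *ctx = priv;
        struct vb2_buffer *src, *dst;

        /* peek at (do not remove) the next ready buffers */
        src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
        dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);

        /* program the hardware with src/dst and start it (hypothetical helper) */
        my_hw_start(ctx, src, dst);
    }

    static struct v4l2_m2m_ops my_m2m_ops = {
        .device_run = my_device_run,
        .job_ready  = my_job_ready,
        .job_abort  = my_job_abort,
    };
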
287 static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx) in v4l2_m2m_cancel_job() argument
292 m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_cancel_job()
295 m2m_ctx->job_flags |= TRANS_ABORT; in v4l2_m2m_cancel_job()
296 if (m2m_ctx->job_flags & TRANS_RUNNING) { in v4l2_m2m_cancel_job()
298 m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); in v4l2_m2m_cancel_job()
299 dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx); in v4l2_m2m_cancel_job()
300 wait_event(m2m_ctx->finished, in v4l2_m2m_cancel_job()
301 !(m2m_ctx->job_flags & TRANS_RUNNING)); in v4l2_m2m_cancel_job()
302 } else if (m2m_ctx->job_flags & TRANS_QUEUED) { in v4l2_m2m_cancel_job()
303 list_del(&m2m_ctx->queue); in v4l2_m2m_cancel_job()
304 m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); in v4l2_m2m_cancel_job()
307 m2m_ctx); in v4l2_m2m_cancel_job()
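
v4l2_m2m_cancel_job() sets TRANS_ABORT, and if a transaction is running it calls the driver's job_abort() and sleeps on m2m_ctx->finished until v4l2_m2m_job_finish() clears TRANS_RUNNING; a job that is merely queued is unlinked on the spot. job_abort() therefore must not block: it should only ask the hardware to stop and leave the wake-up to the normal completion path. A sketch, with ctx->aborting as a hypothetical driver flag:

    static void my_job_abort(void *priv)
    {
        struct my_ctx *ctx = priv;

        /*
         * Request that the hardware finish or stop as soon as possible, but
         * do not wait here: the IRQ handler still calls
         * v4l2_m2m_job_finish(), which wakes v4l2_m2m_cancel_job().
         */
        ctx->aborting = 1;
    }
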
327 struct v4l2_m2m_ctx *m2m_ctx) in v4l2_m2m_job_finish() argument
332 if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) { in v4l2_m2m_job_finish()
348 v4l2_m2m_try_schedule(m2m_ctx); in v4l2_m2m_job_finish()
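
v4l2_m2m_job_finish() accepts only the context that owns the currently running job (curr_ctx), releases that job, wakes any waiter in v4l2_m2m_cancel_job() and immediately calls v4l2_m2m_try_schedule() for the next queued context. A sketch of a completion interrupt handler; struct my_dev, struct my_ctx and the dev->curr_ctx bookkeeping are hypothetical:

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
        struct my_dev *dev = data;
        struct my_ctx *ctx = dev->curr_ctx;   /* hypothetical bookkeeping */
        struct vb2_buffer *src, *dst;

        /* ... acknowledge/clear the interrupt in the hardware ... */

        /* take the processed buffers off the ready lists */
        src = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
        dst = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
        vb2_buffer_done(src, VB2_BUF_STATE_DONE);
        vb2_buffer_done(dst, VB2_BUF_STATE_DONE);

        /* releases the job and runs v4l2_m2m_try_schedule() again */
        v4l2_m2m_job_finish(dev->m2m_dev, ctx->m2m_ctx);

        return IRQ_HANDLED;
    }
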
356 int v4l2_m2m_reqbufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_reqbufs() argument
361 vq = v4l2_m2m_get_vq(m2m_ctx, reqbufs->type); in v4l2_m2m_reqbufs()
371 int v4l2_m2m_querybuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_querybuf() argument
378 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_querybuf()
400 int v4l2_m2m_qbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_qbuf() argument
406 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_qbuf()
409 v4l2_m2m_try_schedule(m2m_ctx); in v4l2_m2m_qbuf()
419 int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_dqbuf() argument
424 vq = v4l2_m2m_get_vq(m2m_ctx, buf->type); in v4l2_m2m_dqbuf()
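
v4l2_m2m_qbuf() and v4l2_m2m_dqbuf() resolve the queue from buf->type and forward the request to videobuf2; qbuf additionally calls v4l2_m2m_try_schedule(), since the newly queued buffer may complete a runnable job. A driver that does not use the generic v4l2_m2m_ioctl_* helpers (listed further down) can wrap these calls directly; fh2ctx() and struct my_ctx are hypothetical:

    static int my_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
    {
        struct my_ctx *ctx = fh2ctx(priv);   /* hypothetical fh-to-ctx helper */

        return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
    }

    static int my_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
    {
        struct my_ctx *ctx = fh2ctx(priv);

        return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
    }
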
433 int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_create_bufs() argument
438 vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type); in v4l2_m2m_create_bufs()
447 int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_expbuf() argument
452 vq = v4l2_m2m_get_vq(m2m_ctx, eb->type); in v4l2_m2m_expbuf()
459 int v4l2_m2m_streamon(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_streamon() argument
465 vq = v4l2_m2m_get_vq(m2m_ctx, type); in v4l2_m2m_streamon()
468 v4l2_m2m_try_schedule(m2m_ctx); in v4l2_m2m_streamon()
477 int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_streamoff() argument
486 v4l2_m2m_cancel_job(m2m_ctx); in v4l2_m2m_streamoff()
488 q_ctx = get_queue_ctx(m2m_ctx, type); in v4l2_m2m_streamoff()
493 m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_streamoff()
496 if (m2m_ctx->job_flags & TRANS_QUEUED) in v4l2_m2m_streamoff()
497 list_del(&m2m_ctx->queue); in v4l2_m2m_streamoff()
498 m2m_ctx->job_flags = 0; in v4l2_m2m_streamoff()
507 if (m2m_dev->curr_ctx == m2m_ctx) { in v4l2_m2m_streamoff()
509 wake_up(&m2m_ctx->finished); in v4l2_m2m_streamoff()
525 unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_poll() argument
546 src_q = v4l2_m2m_get_src_vq(m2m_ctx); in v4l2_m2m_poll()
547 dst_q = v4l2_m2m_get_dst_vq(m2m_ctx); in v4l2_m2m_poll()
560 if (m2m_ctx->m2m_dev->m2m_ops->unlock) in v4l2_m2m_poll()
561 m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv); in v4l2_m2m_poll()
562 else if (m2m_ctx->q_lock) in v4l2_m2m_poll()
563 mutex_unlock(m2m_ctx->q_lock); in v4l2_m2m_poll()
570 if (m2m_ctx->m2m_dev->m2m_ops->lock) in v4l2_m2m_poll()
571 m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv); in v4l2_m2m_poll()
572 else if (m2m_ctx->q_lock) { in v4l2_m2m_poll()
573 if (mutex_lock_interruptible(m2m_ctx->q_lock)) { in v4l2_m2m_poll()
612 int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, in v4l2_m2m_mmap() argument
619 vq = v4l2_m2m_get_src_vq(m2m_ctx); in v4l2_m2m_mmap()
621 vq = v4l2_m2m_get_dst_vq(m2m_ctx); in v4l2_m2m_mmap()
679 struct v4l2_m2m_ctx *m2m_ctx; in v4l2_m2m_ctx_init() local
683 m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL); in v4l2_m2m_ctx_init()
684 if (!m2m_ctx) in v4l2_m2m_ctx_init()
687 m2m_ctx->priv = drv_priv; in v4l2_m2m_ctx_init()
688 m2m_ctx->m2m_dev = m2m_dev; in v4l2_m2m_ctx_init()
689 init_waitqueue_head(&m2m_ctx->finished); in v4l2_m2m_ctx_init()
691 out_q_ctx = &m2m_ctx->out_q_ctx; in v4l2_m2m_ctx_init()
692 cap_q_ctx = &m2m_ctx->cap_q_ctx; in v4l2_m2m_ctx_init()
699 INIT_LIST_HEAD(&m2m_ctx->queue); in v4l2_m2m_ctx_init()
711 m2m_ctx->q_lock = out_q_ctx->q.lock; in v4l2_m2m_ctx_init()
713 return m2m_ctx; in v4l2_m2m_ctx_init()
715 kfree(m2m_ctx); in v4l2_m2m_ctx_init()
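
v4l2_m2m_ctx_init() allocates the context, initializes both queue contexts, the job list entry and the 'finished' waitqueue, invokes the driver's queue_init callback to configure the two vb2 queues, and records the output queue's lock as m2m_ctx->q_lock. A hedged sketch of the driver side, typically run from open(); my_queue_init, my_vb2_ops, struct my_ctx/my_dev and dev_mutex are hypothetical, and vb2_dma_contig_memops is just one possible mem_ops choice:

    static int my_queue_init(void *priv, struct vb2_queue *src_vq,
                             struct vb2_queue *dst_vq)
    {
        struct my_ctx *ctx = priv;
        int ret;

        src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
        src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
        src_vq->drv_priv = ctx;
        src_vq->ops = &my_vb2_ops;
        src_vq->mem_ops = &vb2_dma_contig_memops;
        /* required so v4l2_m2m_buf_queue() can embed its list head */
        src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
        /* picked up as m2m_ctx->q_lock for the v4l2_m2m_fop_* helpers */
        src_vq->lock = &ctx->dev->dev_mutex;
        ret = vb2_queue_init(src_vq);
        if (ret)
            return ret;

        dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
        dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
        dst_vq->drv_priv = ctx;
        dst_vq->ops = &my_vb2_ops;
        dst_vq->mem_ops = &vb2_dma_contig_memops;
        dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
        dst_vq->lock = &ctx->dev->dev_mutex;
        return vb2_queue_init(dst_vq);
    }

    /* in the driver's open(), after allocating and v4l2_fh_init()ing ctx: */
    ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, my_queue_init);
    if (IS_ERR(ctx->m2m_ctx))
        return PTR_ERR(ctx->m2m_ctx);
    /* lets the v4l2_m2m_ioctl_* and v4l2_m2m_fop_* helpers find the context */
    ctx->fh.m2m_ctx = ctx->m2m_ctx;
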
725 void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx) in v4l2_m2m_ctx_release() argument
728 v4l2_m2m_cancel_job(m2m_ctx); in v4l2_m2m_ctx_release()
730 vb2_queue_release(&m2m_ctx->cap_q_ctx.q); in v4l2_m2m_ctx_release()
731 vb2_queue_release(&m2m_ctx->out_q_ctx.q); in v4l2_m2m_ctx_release()
733 kfree(m2m_ctx); in v4l2_m2m_ctx_release()
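
v4l2_m2m_ctx_release() cancels any pending or running job (blocking until it completes), releases both vb2 queues and frees the context, so it belongs in the driver's release() before the per-file state goes away; file2ctx() and struct my_ctx are hypothetical:

    static int my_release(struct file *file)
    {
        struct my_ctx *ctx = file2ctx(file);

        v4l2_m2m_ctx_release(ctx->m2m_ctx);
        v4l2_fh_del(&ctx->fh);
        v4l2_fh_exit(&ctx->fh);
        kfree(ctx);

        return 0;
    }
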
742 void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb) in v4l2_m2m_buf_queue() argument
748 q_ctx = get_queue_ctx(m2m_ctx, vb->vb2_queue->type); in v4l2_m2m_buf_queue()
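
v4l2_m2m_buf_queue() appends the buffer to the ready list of the queue context that matches vb->vb2_queue->type; drivers call it from their vb2 .buf_queue operation instead of handing buffers to the hardware directly. A sketch, with my_buf_queue and struct my_ctx hypothetical:

    static void my_buf_queue(struct vb2_buffer *vb)
    {
        struct my_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

        v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
    }
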
766 return v4l2_m2m_reqbufs(file, fh->m2m_ctx, rb); in v4l2_m2m_ioctl_reqbufs()
775 return v4l2_m2m_create_bufs(file, fh->m2m_ctx, create); in v4l2_m2m_ioctl_create_bufs()
784 return v4l2_m2m_querybuf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_querybuf()
793 return v4l2_m2m_qbuf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_qbuf()
802 return v4l2_m2m_dqbuf(file, fh->m2m_ctx, buf); in v4l2_m2m_ioctl_dqbuf()
811 return v4l2_m2m_expbuf(file, fh->m2m_ctx, eb); in v4l2_m2m_ioctl_expbuf()
820 return v4l2_m2m_streamon(file, fh->m2m_ctx, type); in v4l2_m2m_ioctl_streamon()
829 return v4l2_m2m_streamoff(file, fh->m2m_ctx, type); in v4l2_m2m_ioctl_streamoff()
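
The v4l2_m2m_ioctl_* wrappers expect the driver to store the context pointer in its struct v4l2_fh (fh->m2m_ctx, as done in the open() sketch above), so the buffer ioctls can be wired up without per-driver glue; my_ioctl_ops is a hypothetical table and the format handlers remain driver specific:

    static const struct v4l2_ioctl_ops my_ioctl_ops = {
        /* ... format handlers are driver specific ... */
        .vidioc_reqbufs     = v4l2_m2m_ioctl_reqbufs,
        .vidioc_querybuf    = v4l2_m2m_ioctl_querybuf,
        .vidioc_qbuf        = v4l2_m2m_ioctl_qbuf,
        .vidioc_dqbuf       = v4l2_m2m_ioctl_dqbuf,
        .vidioc_expbuf      = v4l2_m2m_ioctl_expbuf,
        .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
        .vidioc_streamon    = v4l2_m2m_ioctl_streamon,
        .vidioc_streamoff   = v4l2_m2m_ioctl_streamoff,
    };
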
841 struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx; in v4l2_m2m_fop_mmap() local
844 if (m2m_ctx->q_lock && mutex_lock_interruptible(m2m_ctx->q_lock)) in v4l2_m2m_fop_mmap()
847 ret = v4l2_m2m_mmap(file, m2m_ctx, vma); in v4l2_m2m_fop_mmap()
849 if (m2m_ctx->q_lock) in v4l2_m2m_fop_mmap()
850 mutex_unlock(m2m_ctx->q_lock); in v4l2_m2m_fop_mmap()
859 struct v4l2_m2m_ctx *m2m_ctx = fh->m2m_ctx; in v4l2_m2m_fop_poll() local
862 if (m2m_ctx->q_lock) in v4l2_m2m_fop_poll()
863 mutex_lock(m2m_ctx->q_lock); in v4l2_m2m_fop_poll()
865 ret = v4l2_m2m_poll(file, m2m_ctx, wait); in v4l2_m2m_fop_poll()
867 if (m2m_ctx->q_lock) in v4l2_m2m_fop_poll()
868 mutex_unlock(m2m_ctx->q_lock); in v4l2_m2m_fop_poll()
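
v4l2_m2m_fop_mmap() and v4l2_m2m_fop_poll() likewise take the context from fh->m2m_ctx and, when the queues set up m2m_ctx->q_lock, serialize on it around the core call, so such a driver can plug them straight into its file operations; my_fops, my_open and my_release are hypothetical:

    static const struct v4l2_file_operations my_fops = {
        .owner          = THIS_MODULE,
        .open           = my_open,            /* sets up fh->m2m_ctx as above */
        .release        = my_release,
        .poll           = v4l2_m2m_fop_poll,
        .unlocked_ioctl = video_ioctl2,
        .mmap           = v4l2_m2m_fop_mmap,
    };
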