Lines matching refs: m2m_dev (v4l2-mem2mem.c)

Each entry gives the source line number, the matching code, and the enclosing function; "argument" marks a match in a parameter list, "local" a local-variable declaration.

147 void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)  in v4l2_m2m_get_curr_priv()  argument
152 spin_lock_irqsave(&m2m_dev->job_spinlock, flags); in v4l2_m2m_get_curr_priv()
153 if (m2m_dev->curr_ctx) in v4l2_m2m_get_curr_priv()
154 ret = m2m_dev->curr_ctx->priv; in v4l2_m2m_get_curr_priv()
155 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_get_curr_priv()
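Taken together, the matches above cover nearly all of v4l2_m2m_get_curr_priv(). A minimal reconstruction follows; the declarations and the return statement are not matched because they do not reference m2m_dev, so they are filled in as assumptions based on the upstream pattern.

        void *v4l2_m2m_get_curr_priv(struct v4l2_m2m_dev *m2m_dev)
        {
                unsigned long flags;
                void *ret = NULL;

                /* job_spinlock protects curr_ctx against a concurrent
                 * v4l2_m2m_job_finish() clearing it under our feet. */
                spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
                if (m2m_dev->curr_ctx)
                        ret = m2m_dev->curr_ctx->priv;
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

                return ret;
        }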
166 static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev) in v4l2_m2m_try_run() argument
170 spin_lock_irqsave(&m2m_dev->job_spinlock, flags); in v4l2_m2m_try_run()
171 if (NULL != m2m_dev->curr_ctx) { in v4l2_m2m_try_run()
172 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_try_run()
177 if (list_empty(&m2m_dev->job_queue)) { in v4l2_m2m_try_run()
178 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_try_run()
183 m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue, in v4l2_m2m_try_run()
185 m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING; in v4l2_m2m_try_run()
186 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_try_run()
188 m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv); in v4l2_m2m_try_run()
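These matches are essentially the whole scheduling core of v4l2_m2m_try_run(): claim the device if it is idle, pick the first queued job, mark it running, then call the driver. A sketch with the unmatched glue filled in; the second argument of list_first_entry() (whose continuation line does not reference m2m_dev) and the debug prints, here replaced by comments, are assumptions.

        static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
        {
                unsigned long flags;

                spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
                if (NULL != m2m_dev->curr_ctx) {
                        /* Another instance is already running. */
                        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                        return;
                }

                if (list_empty(&m2m_dev->job_queue)) {
                        /* No job pending. */
                        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                        return;
                }

                m2m_dev->curr_ctx = list_first_entry(&m2m_dev->job_queue,
                                                     struct v4l2_m2m_ctx, queue);
                m2m_dev->curr_ctx->job_flags |= TRANS_RUNNING;
                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

                /* The driver callback runs without the spinlock held, so it
                 * is free to start the hardware or sleep. */
                m2m_dev->m2m_ops->device_run(m2m_dev->curr_ctx->priv);
        }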
213 struct v4l2_m2m_dev *m2m_dev; in v4l2_m2m_try_schedule() local
216 m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_try_schedule()
225 spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); in v4l2_m2m_try_schedule()
229 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); in v4l2_m2m_try_schedule()
235 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); in v4l2_m2m_try_schedule()
245 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); in v4l2_m2m_try_schedule()
256 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); in v4l2_m2m_try_schedule()
263 if (m2m_dev->m2m_ops->job_ready in v4l2_m2m_try_schedule()
264 && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) { in v4l2_m2m_try_schedule()
265 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); in v4l2_m2m_try_schedule()
270 list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue); in v4l2_m2m_try_schedule()
273 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); in v4l2_m2m_try_schedule()
275 v4l2_m2m_try_run(m2m_dev); in v4l2_m2m_try_schedule()
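The unlock sites at lines 229, 235, 245, 256 and 265 are early exits from v4l2_m2m_try_schedule(). The checks behind 235-256 inspect per-context state (ready-buffer lists and the like) rather than m2m_dev, so they are not matched above and appear below only as a comment; the rest is a hedged reconstruction from the matched lines.

        static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
        {
                struct v4l2_m2m_dev *m2m_dev;
                unsigned long flags_job;

                m2m_dev = m2m_ctx->m2m_dev;

                spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);

                if (m2m_ctx->job_flags & TRANS_QUEUED) {
                        /* Already on the job queue (unlock at line 229). */
                        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                        return;
                }

                /* The exits at lines 235, 245 and 256 back out when the
                 * context has no buffers ready or is otherwise not
                 * schedulable; those checks do not touch m2m_dev and are
                 * only summarized here. */

                if (m2m_dev->m2m_ops->job_ready
                    && (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
                        /* Driver veto: not enough data for a full job yet. */
                        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
                        return;
                }

                list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue);
                m2m_ctx->job_flags |= TRANS_QUEUED;

                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);

                v4l2_m2m_try_run(m2m_dev);
        }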
289 struct v4l2_m2m_dev *m2m_dev; in v4l2_m2m_cancel_job() local
292 m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_cancel_job()
293 spin_lock_irqsave(&m2m_dev->job_spinlock, flags); in v4l2_m2m_cancel_job()
297 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_cancel_job()
298 m2m_dev->m2m_ops->job_abort(m2m_ctx->priv); in v4l2_m2m_cancel_job()
305 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_cancel_job()
310 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_cancel_job()
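The three unlock sites (lines 297, 305 and 310) mark the three outcomes of v4l2_m2m_cancel_job(): the job is running, merely queued, or neither. A reconstruction; the condition lines test m2m_ctx->job_flags and therefore do not reference m2m_dev, so they are assumptions from the upstream pattern.

        static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
        {
                struct v4l2_m2m_dev *m2m_dev;
                unsigned long flags;

                m2m_dev = m2m_ctx->m2m_dev;
                spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
                if (m2m_ctx->job_flags & TRANS_RUNNING) {
                        /* Drop the lock before calling into the driver, then
                         * wait for v4l2_m2m_job_finish() to clear
                         * TRANS_RUNNING and wake us. */
                        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                        m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
                        wait_event(m2m_ctx->finished,
                                   !(m2m_ctx->job_flags & TRANS_RUNNING));
                } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
                        /* Queued but not yet running: just unlink it. */
                        list_del(&m2m_ctx->queue);
                        m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                } else {
                        /* Neither queued nor running: nothing to cancel. */
                        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                }
        }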
326 void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev, in v4l2_m2m_job_finish() argument
331 spin_lock_irqsave(&m2m_dev->job_spinlock, flags); in v4l2_m2m_job_finish()
332 if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) { in v4l2_m2m_job_finish()
333 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_job_finish()
338 list_del(&m2m_dev->curr_ctx->queue); in v4l2_m2m_job_finish()
339 m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING); in v4l2_m2m_job_finish()
340 wake_up(&m2m_dev->curr_ctx->finished); in v4l2_m2m_job_finish()
341 m2m_dev->curr_ctx = NULL; in v4l2_m2m_job_finish()
343 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags); in v4l2_m2m_job_finish()
349 v4l2_m2m_try_run(m2m_dev); in v4l2_m2m_job_finish()
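v4l2_m2m_job_finish() is the completion counterpart to device_run(): it retires the current job and immediately tries to run the next one. The matches show the full critical section; the sketch below assumes the unmatched lines are only declarations and debug output.

        void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
                                 struct v4l2_m2m_ctx *m2m_ctx)
        {
                unsigned long flags;

                spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
                if (!m2m_dev->curr_ctx || m2m_dev->curr_ctx != m2m_ctx) {
                        /* Called by an instance that is not currently
                         * running; ignore. */
                        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
                        return;
                }

                list_del(&m2m_dev->curr_ctx->queue);
                m2m_dev->curr_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
                wake_up(&m2m_dev->curr_ctx->finished);
                m2m_dev->curr_ctx = NULL;

                spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);

                /* With the device slot free, hand the hardware to the next
                 * queued job. */
                v4l2_m2m_try_run(m2m_dev);
        }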
480 struct v4l2_m2m_dev *m2m_dev; in v4l2_m2m_streamoff() local
493 m2m_dev = m2m_ctx->m2m_dev; in v4l2_m2m_streamoff()
494 spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job); in v4l2_m2m_streamoff()
507 if (m2m_dev->curr_ctx == m2m_ctx) { in v4l2_m2m_streamoff()
508 m2m_dev->curr_ctx = NULL; in v4l2_m2m_streamoff()
511 spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job); in v4l2_m2m_streamoff()
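Only a fragment of v4l2_m2m_streamoff() references m2m_dev: taking the job lock and detaching the context if it is the one currently scheduled. The cleanup between lines 494 and 507 works on the context's own buffer queues, so it is compressed to a comment below; the wake_up() is an assumption from the upstream pattern.

        /* Fragment of v4l2_m2m_streamoff(); m2m_ctx and flags_job are
         * declared earlier in the function. */
        m2m_dev = m2m_ctx->m2m_dev;
        spin_lock_irqsave(&m2m_dev->job_spinlock, flags_job);
        /* ... drop this context's ready buffers and its job_queue entry
         * (lines 495-506, no m2m_dev references) ... */
        if (m2m_dev->curr_ctx == m2m_ctx) {
                m2m_dev->curr_ctx = NULL;
                wake_up(&m2m_ctx->finished);    /* assumption */
        }
        spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);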
560 if (m2m_ctx->m2m_dev->m2m_ops->unlock) in v4l2_m2m_poll()
561 m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv); in v4l2_m2m_poll()
570 if (m2m_ctx->m2m_dev->m2m_ops->lock) in v4l2_m2m_poll()
571 m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv); in v4l2_m2m_poll()
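The two pairs of matches in v4l2_m2m_poll() are the optional driver lock hooks: the driver's lock is released before poll sleeps waiting for buffers and retaken afterwards, so a blocked poll() cannot stall other file operations on the device. What sleeps in between depends on the buffer framework in use, so it appears only as a comment.

        /* Fragment of v4l2_m2m_poll(). */
        if (m2m_ctx->m2m_dev->m2m_ops->unlock)
                m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

        /* ... wait for buffers on the source and destination queues ... */

        if (m2m_ctx->m2m_dev->m2m_ops->lock)
                m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);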
636 struct v4l2_m2m_dev *m2m_dev; in v4l2_m2m_init() local
642 m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL); in v4l2_m2m_init()
643 if (!m2m_dev) in v4l2_m2m_init()
646 m2m_dev->curr_ctx = NULL; in v4l2_m2m_init()
647 m2m_dev->m2m_ops = m2m_ops; in v4l2_m2m_init()
648 INIT_LIST_HEAD(&m2m_dev->job_queue); in v4l2_m2m_init()
649 spin_lock_init(&m2m_dev->job_spinlock); in v4l2_m2m_init()
651 return m2m_dev; in v4l2_m2m_init()
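v4l2_m2m_init() is matched almost in full: allocate the device, store the driver's ops table, and set up the job queue and its lock. In the sketch below, the ops validation before the allocation and the exact error returns are assumptions; the matched lines account for everything else.

        struct v4l2_m2m_dev *v4l2_m2m_init(struct v4l2_m2m_ops *m2m_ops)
        {
                struct v4l2_m2m_dev *m2m_dev;

                if (!m2m_ops)                           /* assumption */
                        return ERR_PTR(-EINVAL);

                m2m_dev = kzalloc(sizeof *m2m_dev, GFP_KERNEL);
                if (!m2m_dev)
                        return ERR_PTR(-ENOMEM);        /* assumption */

                m2m_dev->curr_ctx = NULL;
                m2m_dev->m2m_ops = m2m_ops;
                INIT_LIST_HEAD(&m2m_dev->job_queue);
                spin_lock_init(&m2m_dev->job_spinlock);

                return m2m_dev;
        }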
660 void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev) in v4l2_m2m_release() argument
662 kfree(m2m_dev); in v4l2_m2m_release()
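The entire body of v4l2_m2m_release() is the single kfree() at line 662; per-context resources are released separately through the context teardown path, so nothing else needs freeing here.

        void v4l2_m2m_release(struct v4l2_m2m_dev *m2m_dev)
        {
                kfree(m2m_dev);
        }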
675 struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev, in v4l2_m2m_ctx_init() argument
688 m2m_ctx->m2m_dev = m2m_dev; in v4l2_m2m_ctx_init()
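Only the head of v4l2_m2m_ctx_init() and the line that links the new context back to its device (line 688) reference m2m_dev. The full parameter list and the queue setup vary across kernel versions, so the sketch below is an assumption about the overall shape, not the exact upstream signature.

        struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
                        void *drv_priv,         /* hypothetical parameters */
                        int (*queue_init)(void *priv, struct vb2_queue *src_vq,
                                          struct vb2_queue *dst_vq))
        {
                struct v4l2_m2m_ctx *m2m_ctx;

                m2m_ctx = kzalloc(sizeof *m2m_ctx, GFP_KERNEL);
                if (!m2m_ctx)
                        return ERR_PTR(-ENOMEM);

                m2m_ctx->priv = drv_priv;
                m2m_ctx->m2m_dev = m2m_dev;     /* the matched line 688 */
                init_waitqueue_head(&m2m_ctx->finished);

                /* Source/destination queue setup via queue_init() elided;
                 * on failure the context would be freed and an ERR_PTR
                 * returned. */

                return m2m_ctx;
        }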