/*
 * Copyright (C) 2012 Samsung Electronics Co.Ltd
 * Authors:
 *	Eunchul Kim <chulspro.kim@samsung.com>
 *	Jinyoung Jeon <jy0.jeon@samsung.com>
 *	Sangmin Lee <lsmin.lee@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_iommu.h"

/*
 * IPP stands for Image Post Processing and supports image scaler/rotator
 * and input/output DMA operations using FIMC, GSC, Rotator and so on.
 * IPP is an integration driver for hardware blocks with these attributes.
 */

/*
 * TODO
 * 1. expand command control id.
 * 2. integrate property and config.
 * 3. remove the send_event id check routine.
 * 4. compare send_event id if needed.
 * 5. free the subdrv_remove notifier callback list if needed.
 * 6. check subdrv_open for multi-open.
 * 7. implement power and sysmmu control in power_on.
 */

#define get_ipp_context(dev)	platform_get_drvdata(to_platform_device(dev))
#define ipp_is_m2m_cmd(c)	(c == IPP_CMD_M2M)

/* platform device pointer for ipp device. */
static struct platform_device *exynos_drm_ipp_pdev;

/*
 * A structure of event.
 *
 * @base: base of event.
 * @event: ipp event.
 */
struct drm_exynos_ipp_send_event {
	struct drm_pending_event	base;
	struct drm_exynos_ipp_event	event;
};

/*
 * A structure of memory node.
 *
 * @list: list head to memory queue information.
 * @ops_id: id of operations.
 * @prop_id: id of property.
 * @buf_id: id of buffer.
 * @buf_info: gem objects and dma address, size.
 */
struct drm_exynos_ipp_mem_node {
	struct list_head	list;
	enum drm_exynos_ops_id	ops_id;
	u32	prop_id;
	u32	buf_id;
	struct drm_exynos_ipp_buf_info	buf_info;
};

/*
 * A structure of ipp context.
 *
 * @subdrv: prepare initialization using subdrv.
 * @ipp_lock: lock for synchronization of access to ipp_idr.
 * @prop_lock: lock for synchronization of access to prop_idr.
 * @ipp_idr: ipp driver idr.
 * @prop_idr: property idr.
 * @event_workq: event work queue.
 * @cmd_workq: command work queue.
 */
struct ipp_context {
	struct exynos_drm_subdrv	subdrv;
	struct mutex	ipp_lock;
	struct mutex	prop_lock;
	struct idr	ipp_idr;
	struct idr	prop_idr;
	struct workqueue_struct	*event_workq;
	struct workqueue_struct	*cmd_workq;
};

static LIST_HEAD(exynos_drm_ippdrv_list);
static DEFINE_MUTEX(exynos_drm_ippdrv_lock);
static BLOCKING_NOTIFIER_HEAD(exynos_drm_ippnb_list);

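/*
 * Register the single "exynos-drm-ipp" platform device that ipp_driver
 * below binds to; returns -EEXIST if it has already been registered.
 */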
int exynos_platform_device_ipp_register(void)
{
	struct platform_device *pdev;

	if (exynos_drm_ipp_pdev)
		return -EEXIST;

	pdev = platform_device_register_simple("exynos-drm-ipp", -1, NULL, 0);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	exynos_drm_ipp_pdev = pdev;

	return 0;
}

void exynos_platform_device_ipp_unregister(void)
{
	if (exynos_drm_ipp_pdev) {
		platform_device_unregister(exynos_drm_ipp_pdev);
		exynos_drm_ipp_pdev = NULL;
	}
}

int exynos_drm_ippdrv_register(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_add_tail(&ippdrv->drv_list, &exynos_drm_ippdrv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

int exynos_drm_ippdrv_unregister(struct exynos_drm_ippdrv *ippdrv)
{
	mutex_lock(&exynos_drm_ippdrv_lock);
	list_del(&ippdrv->drv_list);
	mutex_unlock(&exynos_drm_ippdrv_lock);

	return 0;
}

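/* allocate a new id (starting from 1) for @obj in @id_idr under @lock */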
static int ipp_create_id(struct idr *id_idr, struct mutex *lock, void *obj)
{
	int ret;

	mutex_lock(lock);
	ret = idr_alloc(id_idr, obj, 1, 0, GFP_KERNEL);
	mutex_unlock(lock);

	return ret;
}

static void ipp_remove_id(struct idr *id_idr, struct mutex *lock, u32 id)
{
	mutex_lock(lock);
	idr_remove(id_idr, id);
	mutex_unlock(lock);
}

static void *ipp_find_obj(struct idr *id_idr, struct mutex *lock, u32 id)
{
	void *obj;

	mutex_lock(lock);
	obj = idr_find(id_idr, id);
	mutex_unlock(lock);

	return obj;
}

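/*
 * A driver can accept a new property only if it is not dedicated to another
 * task and, for non-m2m commands, is not already running (i.e. it must be
 * runtime suspended); the driver's own check_property callback then
 * validates the requested configuration.
 */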
static int ipp_check_driver(struct exynos_drm_ippdrv *ippdrv,
			    struct drm_exynos_ipp_property *property)
{
	if (ippdrv->dedicated || (!ipp_is_m2m_cmd(property->cmd) &&
				  !pm_runtime_suspended(ippdrv->dev)))
		return -EBUSY;

	if (ippdrv->check_property &&
	    ippdrv->check_property(ippdrv->dev, property))
		return -EINVAL;

	return 0;
}

static struct exynos_drm_ippdrv *ipp_find_driver(struct ipp_context *ctx,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ippdrv *ippdrv;
	u32 ipp_id = property->ipp_id;
	int ret;

	if (ipp_id) {
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock, ipp_id);
		if (!ippdrv) {
			DRM_DEBUG("ipp%d driver not found\n", ipp_id);
			return ERR_PTR(-ENODEV);
		}

		ret = ipp_check_driver(ippdrv, property);
		if (ret < 0) {
			DRM_DEBUG("ipp%d driver check error %d\n", ipp_id, ret);
			return ERR_PTR(ret);
		}

		return ippdrv;
	} else {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
			ret = ipp_check_driver(ippdrv, property);
			if (ret == 0)
				return ippdrv;
		}

		DRM_DEBUG("cannot find driver suitable for given property.\n");
	}

	return ERR_PTR(-ENODEV);
}

static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int count = 0;

	DRM_DEBUG_KMS("prop_id[%d]\n", prop_id);

	/*
	 * Search for the ipp driver that owns the given prop_id handle.
	 * The ipp subsystem sometimes needs to find a driver by prop_id,
	 * e.g. for the PAUSE state, queueing buffers and command control.
	 */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);

		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
			if (c_node->property.prop_id == prop_id) {
				mutex_unlock(&ippdrv->cmd_lock);
				return ippdrv;
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}

	return ERR_PTR(-ENODEV);
}

int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_prop_list *prop_list = data;
	struct exynos_drm_ippdrv *ippdrv;
	int count = 0;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!prop_list) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ipp_id[%d]\n", prop_list->ipp_id);

	if (!prop_list->ipp_id) {
		list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list)
			count++;

		/*
		 * Report the number of registered ipp drivers to the user
		 * application: in the first step it queries this count, and
		 * in a second step it queries each driver's capabilities
		 * using the ipp_id.
		 */
		prop_list->count = count;
	} else {
		/*
		 * Report the capabilities of the driver given by ipp_id.
		 * Some devices do not support the writeback or output
		 * interfaces, so the user application uses this ioctl to
		 * detect the correct ipp driver.
		 */
		ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
						prop_list->ipp_id);
		if (!ippdrv) {
			DRM_ERROR("ipp%d driver not found.\n",
					prop_list->ipp_id);
			return -ENODEV;
		}

		*prop_list = ippdrv->prop_list;
	}

	return 0;
}

static void ipp_print_property(struct drm_exynos_ipp_property *property,
		int idx)
{
	struct drm_exynos_ipp_config *config = &property->config[idx];
	struct drm_exynos_pos *pos = &config->pos;
	struct drm_exynos_sz *sz = &config->sz;

	DRM_DEBUG_KMS("prop_id[%d]ops[%s]fmt[0x%x]\n",
		property->prop_id, idx ? "dst" : "src", config->fmt);

	DRM_DEBUG_KMS("pos[%d %d %d %d]sz[%d %d]f[%d]r[%d]\n",
		pos->x, pos->y, pos->w, pos->h,
		sz->hsize, sz->vsize, config->flip, config->degree);
}

static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
{
	struct drm_exynos_ipp_cmd_work *cmd_work;

	cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
	if (!cmd_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&cmd_work->work, ipp_sched_cmd);

	return cmd_work;
}

static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
{
	struct drm_exynos_ipp_event_work *event_work;

	event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
	if (!event_work)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&event_work->work, ipp_sched_event);

	return event_work;
}

int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_property *property = data;
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	u32 prop_id;
	int ret, i;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	prop_id = property->prop_id;

	/*
	 * Log the property requested by the user application;
	 * applications can set various properties.
	 */
	for_each_ipp_ops(i)
		ipp_print_property(property, i);

	/*
	 * If prop_id is not zero, try to update the existing property.
	 */
	if (prop_id) {
		c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock, prop_id);

		if (!c_node || c_node->filp != file) {
			DRM_DEBUG_KMS("prop_id[%d] not found\n", prop_id);
			return -EINVAL;
		}

		if (c_node->state != IPP_STATE_STOP) {
			DRM_DEBUG_KMS("prop_id[%d] not stopped\n", prop_id);
			return -EINVAL;
		}

		c_node->property = *property;

		return 0;
	}

	/* find ipp driver using ipp id */
	ippdrv = ipp_find_driver(ctx, property);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EINVAL;
	}

	/* allocate command node */
	c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
	if (!c_node)
		return -ENOMEM;

	ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node);
	if (ret < 0) {
		DRM_ERROR("failed to create id.\n");
		goto err_clear;
	}
	property->prop_id = ret;

	DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
		property->prop_id, property->cmd, (int)ippdrv);

	/* store property information and ippdrv in private data */
	c_node->property = *property;
	c_node->state = IPP_STATE_IDLE;
	c_node->filp = file;

	c_node->start_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->start_work)) {
		DRM_ERROR("failed to create start work.\n");
		ret = PTR_ERR(c_node->start_work);
		goto err_remove_id;
	}

	c_node->stop_work = ipp_create_cmd_work();
	if (IS_ERR(c_node->stop_work)) {
		DRM_ERROR("failed to create stop work.\n");
		ret = PTR_ERR(c_node->stop_work);
		goto err_free_start;
	}

	c_node->event_work = ipp_create_event_work();
	if (IS_ERR(c_node->event_work)) {
		DRM_ERROR("failed to create event work.\n");
		ret = PTR_ERR(c_node->event_work);
		goto err_free_stop;
	}

	mutex_init(&c_node->lock);
	mutex_init(&c_node->mem_lock);
	mutex_init(&c_node->event_lock);

	init_completion(&c_node->start_complete);
	init_completion(&c_node->stop_complete);

	for_each_ipp_ops(i)
		INIT_LIST_HEAD(&c_node->mem_list[i]);

	INIT_LIST_HEAD(&c_node->event_list);
	mutex_lock(&ippdrv->cmd_lock);
	list_add_tail(&c_node->list, &ippdrv->cmd_list);
	mutex_unlock(&ippdrv->cmd_lock);

	/* mark the driver dedicated for non-m2m commands */
	if (!ipp_is_m2m_cmd(property->cmd))
		ippdrv->dedicated = true;

	return 0;

err_free_stop:
	kfree(c_node->stop_work);
err_free_start:
	kfree(c_node->start_work);
err_remove_id:
	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock, property->prop_id);
err_clear:
	kfree(c_node);
	return ret;
}

static int ipp_validate_mem_node(struct drm_device *drm_dev,
				 struct drm_exynos_ipp_mem_node *m_node,
				 struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_config *ipp_cfg;
	unsigned int num_plane;
	unsigned long min_size, size;
	unsigned int bpp;
	int i;

	/* The property id should already be verified */
	ipp_cfg = &c_node->property.config[m_node->ops_id];
	num_plane = drm_format_num_planes(ipp_cfg->fmt);

	/*
	 * This is a rather simplified validation of a memory node.
	 * It basically verifies the provided gem object handles
	 * and the buffer sizes with respect to the current configuration.
	 * This is not the best that can be done
	 * but it seems more than enough.
	 */
	for (i = 0; i < num_plane; ++i) {
		if (!m_node->buf_info.handles[i]) {
			DRM_ERROR("invalid handle for plane %d\n", i);
			return -EINVAL;
		}
		bpp = drm_format_plane_cpp(ipp_cfg->fmt, i);
		min_size = (ipp_cfg->sz.hsize * ipp_cfg->sz.vsize * bpp) >> 3;
		size = exynos_drm_gem_get_size(drm_dev,
					       m_node->buf_info.handles[i],
					       c_node->filp);
		if (min_size > size) {
			DRM_ERROR("invalid size for plane %d\n", i);
			return -EINVAL;
		}
	}
	return 0;
}

static int ipp_put_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	int i;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid dequeue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* put gem buffer */
	for_each_ipp_planar(i) {
		unsigned long handle = m_node->buf_info.handles[i];
		if (handle)
			exynos_drm_gem_put_dma_addr(drm_dev, handle,
							c_node->filp);
	}

	list_del(&m_node->list);
	kfree(m_node);

	return 0;
}

static struct drm_exynos_ipp_mem_node
		*ipp_get_mem_node(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_buf_info *buf_info;
	int i;

	m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
	if (!m_node)
		return ERR_PTR(-ENOMEM);

	buf_info = &m_node->buf_info;

	/* operations, buffer id */
	m_node->ops_id = qbuf->ops_id;
	m_node->prop_id = qbuf->prop_id;
	m_node->buf_id = qbuf->buf_id;
	INIT_LIST_HEAD(&m_node->list);

	DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
	DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);

	for_each_ipp_planar(i) {
		DRM_DEBUG_KMS("i[%d]handle[0x%x]\n", i, qbuf->handle[i]);

		/* get dma address by handle */
		if (qbuf->handle[i]) {
			dma_addr_t *addr;

			addr = exynos_drm_gem_get_dma_addr(drm_dev,
					qbuf->handle[i], c_node->filp);
			if (IS_ERR(addr)) {
				DRM_ERROR("failed to get addr.\n");
				ipp_put_mem_node(drm_dev, c_node, m_node);
				return ERR_PTR(-EFAULT);
			}

			buf_info->handles[i] = qbuf->handle[i];
			buf_info->base[i] = *addr;
			DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
				      buf_info->base[i], buf_info->handles[i]);
		}
	}

	mutex_lock(&c_node->mem_lock);
	if (ipp_validate_mem_node(drm_dev, m_node, c_node)) {
		ipp_put_mem_node(drm_dev, c_node, m_node);
		mutex_unlock(&c_node->mem_lock);
		return ERR_PTR(-EFAULT);
	}
	list_add_tail(&m_node->list, &c_node->mem_list[qbuf->ops_id]);
	mutex_unlock(&c_node->mem_lock);

	return m_node;
}

static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
			       struct drm_exynos_ipp_cmd_node *c_node, int ops)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;
	struct list_head *head = &c_node->mem_list[ops];

	mutex_lock(&c_node->mem_lock);

	list_for_each_entry_safe(m_node, tm_node, head, list) {
		int ret;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
	}

	mutex_unlock(&c_node->mem_lock);
}

static void ipp_free_event(struct drm_pending_event *event)
{
	kfree(event);
}

static int ipp_get_event(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e;
	unsigned long flags;

	DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e) {
		spin_lock_irqsave(&drm_dev->event_lock, flags);
		c_node->filp->event_space += sizeof(e->event);
		spin_unlock_irqrestore(&drm_dev->event_lock, flags);
		return -ENOMEM;
	}

	/* make event */
	e->event.base.type = DRM_EXYNOS_IPP_EVENT;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = qbuf->user_data;
	e->event.prop_id = qbuf->prop_id;
	e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
	e->base.event = &e->event.base;
	e->base.file_priv = c_node->filp;
	e->base.destroy = ipp_free_event;
	mutex_lock(&c_node->event_lock);
	list_add_tail(&e->base.link, &c_node->event_list);
	mutex_unlock(&c_node->event_lock);

	return 0;
}

static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_send_event *e, *te;
	int count = 0;

	mutex_lock(&c_node->event_lock);
	list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
		DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);

		/*
		 * A NULL qbuf means delete all events: the stop operation
		 * wants to flush the whole event list. Otherwise delete
		 * only the event with the matching buffer id.
		 */
		if (!qbuf) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
		}

		/* compare buffer id */
		if (qbuf && (qbuf->buf_id ==
		    e->event.buf_id[EXYNOS_DRM_OPS_DST])) {
			/* delete list */
			list_del(&e->base.link);
			kfree(e);
			goto out_unlock;
		}
	}

out_unlock:
	mutex_unlock(&c_node->event_lock);
}

static void ipp_clean_cmd_node(struct ipp_context *ctx,
				struct drm_exynos_ipp_cmd_node *c_node)
{
	int i;

	/* cancel works */
	cancel_work_sync(&c_node->start_work->work);
	cancel_work_sync(&c_node->stop_work->work);
	cancel_work_sync(&c_node->event_work->work);

	/* put event */
	ipp_put_event(c_node, NULL);

	for_each_ipp_ops(i)
		ipp_clean_mem_nodes(ctx->subdrv.drm_dev, c_node, i);

	/* delete list */
	list_del(&c_node->list);

	ipp_remove_id(&ctx->prop_idr, &ctx->prop_lock,
			c_node->property.prop_id);

	/* destroy mutex */
	mutex_destroy(&c_node->lock);
	mutex_destroy(&c_node->mem_lock);
	mutex_destroy(&c_node->event_lock);

	/* free command node */
	kfree(c_node->start_work);
	kfree(c_node->stop_work);
	kfree(c_node->event_work);
	kfree(c_node);
}

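/* a command can run only when the memory lists its type needs are non-empty */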
static bool ipp_check_mem_list(struct drm_exynos_ipp_cmd_node *c_node)
{
	switch (c_node->property.cmd) {
	case IPP_CMD_WB:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	case IPP_CMD_OUTPUT:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]);
	case IPP_CMD_M2M:
	default:
		return !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_SRC]) &&
		       !list_empty(&c_node->mem_list[EXYNOS_DRM_OPS_DST]);
	}
}

static struct drm_exynos_ipp_mem_node
		*ipp_find_mem_node(struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct list_head *head;
	int count = 0;

	DRM_DEBUG_KMS("buf_id[%d]\n", qbuf->buf_id);

	/* source/destination memory list */
	head = &c_node->mem_list[qbuf->ops_id];

	/* find memory node from memory list */
	list_for_each_entry(m_node, head, list) {
		DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);

		/* compare buffer id */
		if (m_node->buf_id == qbuf->buf_id)
			return m_node;
	}

	return NULL;
}

static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	int ret = 0;

	DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);

	if (!m_node) {
		DRM_ERROR("invalid queue node.\n");
		return -EFAULT;
	}

	DRM_DEBUG_KMS("ops_id[%d]\n", m_node->ops_id);

	/* get operations callback */
	ops = ippdrv->ops[m_node->ops_id];
	if (!ops) {
		DRM_ERROR("unsupported ops.\n");
		return -EFAULT;
	}

	/* set address and enable irq */
	if (ops->set_addr) {
		ret = ops->set_addr(ippdrv->dev, &m_node->buf_info,
			m_node->buf_id, IPP_BUF_ENQUEUE);
		if (ret) {
			DRM_ERROR("failed to set addr.\n");
			return ret;
		}
	}

	return ret;
}

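/* queue a start/stop work item on the single-threaded command workqueue */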
static void ipp_handle_cmd_work(struct device *dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_work *cmd_work,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct ipp_context *ctx = get_ipp_context(dev);

	cmd_work->ippdrv = ippdrv;
	cmd_work->c_node = c_node;
	queue_work(ctx->cmd_workq, &cmd_work->work);
}

static int ipp_queue_buf_with_run(struct device *dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_mem_node *m_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_property *property;
	struct exynos_drm_ipp_ops *ops;
	int ret;

	ippdrv = ipp_find_drv_by_handle(qbuf->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return -EFAULT;
	}

	ops = ippdrv->ops[qbuf->ops_id];
	if (!ops) {
		DRM_ERROR("failed to get ops.\n");
		return -EFAULT;
	}

	property = &c_node->property;

	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass for invalid state.\n");
		return 0;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		mutex_unlock(&c_node->mem_lock);
		DRM_DEBUG_KMS("empty memory.\n");
		return 0;
	}

	/*
	 * m2m operations are started from queue_buf, once the destination
	 * buffer is set and the clock is enabled; other commands only set
	 * the buffer address here.
	 */
	if (ipp_is_m2m_cmd(property->cmd)) {
		struct drm_exynos_ipp_cmd_work *cmd_work = c_node->start_work;

		cmd_work->ctrl = IPP_CTRL_PLAY;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
	} else {
		ret = ipp_set_mem_node(ippdrv, c_node, m_node);
		if (ret) {
			mutex_unlock(&c_node->mem_lock);
			DRM_ERROR("failed to set m node.\n");
			return ret;
		}
	}
	mutex_unlock(&c_node->mem_lock);

	return 0;
}

static void ipp_clean_queue_buf(struct drm_device *drm_dev,
		struct drm_exynos_ipp_cmd_node *c_node,
		struct drm_exynos_ipp_queue_buf *qbuf)
{
	struct drm_exynos_ipp_mem_node *m_node, *tm_node;

	/* delete list */
	mutex_lock(&c_node->mem_lock);
	list_for_each_entry_safe(m_node, tm_node,
		&c_node->mem_list[qbuf->ops_id], list) {
		if (m_node->buf_id == qbuf->buf_id &&
		    m_node->ops_id == qbuf->ops_id)
			ipp_put_mem_node(drm_dev, c_node, m_node);
	}
	mutex_unlock(&c_node->mem_lock);
}

int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_queue_buf *qbuf = data;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_mem_node *m_node;
	int ret;

	if (!qbuf) {
		DRM_ERROR("invalid buf parameter.\n");
		return -EINVAL;
	}

	if (qbuf->ops_id >= EXYNOS_DRM_OPS_MAX) {
		DRM_ERROR("invalid ops parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]ops_id[%s]buf_id[%d]buf_type[%d]\n",
		qbuf->prop_id, qbuf->ops_id ? "dst" : "src",
		qbuf->buf_id, qbuf->buf_type);

	/* find command node */
	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		qbuf->prop_id);
	if (!c_node || c_node->filp != file) {
		DRM_ERROR("failed to get command node.\n");
		return -ENODEV;
	}

	/* buffer control */
	switch (qbuf->buf_type) {
	case IPP_BUF_ENQUEUE:
		/* get memory node */
		m_node = ipp_get_mem_node(drm_dev, c_node, qbuf);
		if (IS_ERR(m_node)) {
			DRM_ERROR("failed to get m_node.\n");
			return PTR_ERR(m_node);
		}

		/*
		 * For a destination buffer, first get an event for it,
		 * then, in the M2M case, run the command with the
		 * destination buffer if needed.
		 */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST) {
			/* get event for destination buffer */
			ret = ipp_get_event(drm_dev, c_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to get event.\n");
				goto err_clean_node;
			}

			/*
			 * The M2M case runs a play control for streaming;
			 * other cases just set the address and wait.
			 */
			ret = ipp_queue_buf_with_run(dev, c_node, m_node, qbuf);
			if (ret) {
				DRM_ERROR("failed to run command.\n");
				goto err_clean_node;
			}
		}
		break;
	case IPP_BUF_DEQUEUE:
		mutex_lock(&c_node->lock);

		/* put event for destination buffer */
		if (qbuf->ops_id == EXYNOS_DRM_OPS_DST)
			ipp_put_event(c_node, qbuf);

		ipp_clean_queue_buf(drm_dev, c_node, qbuf);

		mutex_unlock(&c_node->lock);
		break;
	default:
		DRM_ERROR("invalid buffer control.\n");
		return -EINVAL;
	}

	return 0;

err_clean_node:
	DRM_ERROR("clean memory nodes.\n");

	ipp_clean_queue_buf(drm_dev, c_node, qbuf);
	return ret;
}

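/* validate the requested control against the current command node state */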
static bool exynos_drm_ipp_check_valid(struct device *dev,
		enum drm_exynos_ipp_ctrl ctrl, enum drm_exynos_ipp_state state)
{
	if (ctrl != IPP_CTRL_PLAY) {
		if (pm_runtime_suspended(dev)) {
			DRM_ERROR("pm:runtime_suspended.\n");
			goto err_status;
		}
	}

	switch (ctrl) {
	case IPP_CTRL_PLAY:
		if (state != IPP_STATE_IDLE)
			goto err_status;
		break;
	case IPP_CTRL_STOP:
		if (state == IPP_STATE_STOP)
			goto err_status;
		break;
	case IPP_CTRL_PAUSE:
		if (state != IPP_STATE_START)
			goto err_status;
		break;
	case IPP_CTRL_RESUME:
		if (state != IPP_STATE_STOP)
			goto err_status;
		break;
	default:
		DRM_ERROR("invalid state.\n");
		goto err_status;
	}

	return true;

err_status:
	DRM_ERROR("invalid status:ctrl[%d]state[%d]\n", ctrl, state);
	return false;
}

int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct device *dev = file_priv->ipp_dev;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_ctrl *cmd_ctrl = data;
	struct drm_exynos_ipp_cmd_work *cmd_work;
	struct drm_exynos_ipp_cmd_node *c_node;

	if (!ctx) {
		DRM_ERROR("invalid context.\n");
		return -EINVAL;
	}

	if (!cmd_ctrl) {
		DRM_ERROR("invalid control parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	ippdrv = ipp_find_drv_by_handle(cmd_ctrl->prop_id);
	if (IS_ERR(ippdrv)) {
		DRM_ERROR("failed to get ipp driver.\n");
		return PTR_ERR(ippdrv);
	}

	c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
		cmd_ctrl->prop_id);
	if (!c_node || c_node->filp != file) {
		DRM_ERROR("invalid command node list.\n");
		return -ENODEV;
	}

	if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
	    c_node->state)) {
		DRM_ERROR("invalid state.\n");
		return -EINVAL;
	}

	switch (cmd_ctrl->ctrl) {
	case IPP_CTRL_PLAY:
		if (pm_runtime_suspended(ippdrv->dev))
			pm_runtime_get_sync(ippdrv->dev);

		c_node->state = IPP_STATE_START;

		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	case IPP_CTRL_STOP:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(300))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		ippdrv->dedicated = false;
		mutex_lock(&ippdrv->cmd_lock);
		ipp_clean_cmd_node(ctx, c_node);

		if (list_empty(&ippdrv->cmd_list))
			pm_runtime_put_sync(ippdrv->dev);
		mutex_unlock(&ippdrv->cmd_lock);
		break;
	case IPP_CTRL_PAUSE:
		cmd_work = c_node->stop_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);

		if (!wait_for_completion_timeout(&c_node->stop_complete,
		    msecs_to_jiffies(200))) {
			DRM_ERROR("timeout stop:prop_id[%d]\n",
				c_node->property.prop_id);
		}

		c_node->state = IPP_STATE_STOP;
		break;
	case IPP_CTRL_RESUME:
		c_node->state = IPP_STATE_START;
		cmd_work = c_node->start_work;
		cmd_work->ctrl = cmd_ctrl->ctrl;
		ipp_handle_cmd_work(dev, ippdrv, cmd_work, c_node);
		break;
	default:
		DRM_ERROR("unsupported control type.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("done ctrl[%d]prop_id[%d]\n",
		cmd_ctrl->ctrl, cmd_ctrl->prop_id);

	return 0;
}

int exynos_drm_ippnb_register(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_unregister(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(
		&exynos_drm_ippnb_list, nb);
}

int exynos_drm_ippnb_send_event(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(
		&exynos_drm_ippnb_list, val, v);
}

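/*
 * Program the hardware from the given property: reset the block, then set
 * format, transform (rotation/flip) and size for both the source and the
 * destination side through the driver's ops callbacks.
 */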
static int ipp_set_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_property *property)
{
	struct exynos_drm_ipp_ops *ops = NULL;
	bool swap = false;
	int ret, i;

	if (!property) {
		DRM_ERROR("invalid property parameter.\n");
		return -EINVAL;
	}

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* reset h/w block */
	if (ippdrv->reset &&
	    ippdrv->reset(ippdrv->dev)) {
		return -EINVAL;
	}

	/* set source/destination operations */
	for_each_ipp_ops(i) {
		struct drm_exynos_ipp_config *config =
			&property->config[i];

		ops = ippdrv->ops[i];
		if (!ops || !config) {
			DRM_ERROR("unsupported ops or config.\n");
			return -EINVAL;
		}

		/* set format */
		if (ops->set_fmt) {
			ret = ops->set_fmt(ippdrv->dev, config->fmt);
			if (ret)
				return ret;
		}

		/* set transform for rotation, flip */
		if (ops->set_transf) {
			ret = ops->set_transf(ippdrv->dev, config->degree,
				config->flip, &swap);
			if (ret)
				return ret;
		}

		/* set size */
		if (ops->set_size) {
			ret = ops->set_size(ippdrv->dev, swap, &config->pos,
				&config->sz);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct list_head *head;
	int ret, i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* store command info in ippdrv */
	ippdrv->c_node = c_node;

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = -ENOMEM;
		goto err_unlock;
	}

	/* set current property in ippdrv */
	ret = ipp_set_property(ippdrv, property);
	if (ret) {
		DRM_ERROR("failed to set property.\n");
		ippdrv->c_node = NULL;
		goto err_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);

			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_WB:
		/* destination memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_DST];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		list_for_each_entry(m_node, head, list) {
			ret = ipp_set_mem_node(ippdrv, c_node, m_node);
			if (ret) {
				DRM_ERROR("failed to set m node.\n");
				goto err_unlock;
			}
		}
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	DRM_DEBUG_KMS("cmd[%d]\n", property->cmd);

	/* start operations */
	if (ippdrv->start) {
		ret = ippdrv->start(ippdrv->dev, property->cmd);
		if (ret) {
			DRM_ERROR("failed to start ops.\n");
			ippdrv->c_node = NULL;
			return ret;
		}
	}

	return 0;

err_unlock:
	mutex_unlock(&c_node->mem_lock);
	ippdrv->c_node = NULL;
	return ret;
}

static int ipp_stop_property(struct drm_device *drm_dev,
		struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node)
{
	struct drm_exynos_ipp_property *property = &c_node->property;
	int i;

	DRM_DEBUG_KMS("prop_id[%d]\n", property->prop_id);

	/* stop operations */
	if (ippdrv->stop)
		ippdrv->stop(ippdrv->dev, property->cmd);

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i)
			ipp_clean_mem_nodes(drm_dev, c_node, i);
		break;
	case IPP_CMD_WB:
		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_DST);
		break;
	case IPP_CMD_OUTPUT:
		ipp_clean_mem_nodes(drm_dev, c_node, EXYNOS_DRM_OPS_SRC);
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		return -EINVAL;
	}

	return 0;
}

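/*
 * Command workqueue handler: executes the queued start/stop control for its
 * command node under c_node->lock; STOP/PAUSE complete stop_complete so
 * that waiters in exynos_drm_ipp_cmd_ctrl() can make progress.
 */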
void ipp_sched_cmd(struct work_struct *work)
{
	struct drm_exynos_ipp_cmd_work *cmd_work =
		container_of(work, struct drm_exynos_ipp_cmd_work, work);
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	struct drm_exynos_ipp_property *property;
	int ret;

	ippdrv = cmd_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("invalid ippdrv list.\n");
		return;
	}

	c_node = cmd_work->c_node;
	if (!c_node) {
		DRM_ERROR("invalid command node list.\n");
		return;
	}

	mutex_lock(&c_node->lock);

	property = &c_node->property;

	switch (cmd_work->ctrl) {
	case IPP_CTRL_PLAY:
	case IPP_CTRL_RESUME:
		ret = ipp_start_property(ippdrv, c_node);
		if (ret) {
			DRM_ERROR("failed to start property:prop_id[%d]\n",
				c_node->property.prop_id);
			goto err_unlock;
		}

		/*
		 * The M2M case performs a single unit of operation on a
		 * queue of multiple buffers, so it has to wait here for
		 * completion of each data transfer.
		 */
		if (ipp_is_m2m_cmd(property->cmd)) {
			if (!wait_for_completion_timeout
			    (&c_node->start_complete, msecs_to_jiffies(200))) {
				DRM_ERROR("timeout event:prop_id[%d]\n",
					c_node->property.prop_id);
				goto err_unlock;
			}
		}
		break;
	case IPP_CTRL_STOP:
	case IPP_CTRL_PAUSE:
		ret = ipp_stop_property(ippdrv->drm_dev, ippdrv,
			c_node);
		if (ret) {
			DRM_ERROR("failed to stop property.\n");
			goto err_unlock;
		}

		complete(&c_node->stop_complete);
		break;
	default:
		DRM_ERROR("unknown control type\n");
		break;
	}

	DRM_DEBUG_KMS("ctrl[%d] done.\n", cmd_work->ctrl);

err_unlock:
	mutex_unlock(&c_node->lock);
}

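/*
 * Completion path: drop the finished memory nodes, stamp the pending event
 * with the current time and buffer ids, then move it onto the file's event
 * list and wake up the user process waiting for it.
 */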
static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
		struct drm_exynos_ipp_cmd_node *c_node, int *buf_id)
{
	struct drm_device *drm_dev = ippdrv->drm_dev;
	struct drm_exynos_ipp_property *property = &c_node->property;
	struct drm_exynos_ipp_mem_node *m_node;
	struct drm_exynos_ipp_queue_buf qbuf;
	struct drm_exynos_ipp_send_event *e;
	struct list_head *head;
	struct timeval now;
	unsigned long flags;
	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
	int ret, i;

	for_each_ipp_ops(i)
		DRM_DEBUG_KMS("%s buf_id[%d]\n", i ? "dst" : "src", buf_id[i]);

	if (!drm_dev) {
		DRM_ERROR("failed to get drm_dev.\n");
		return -EINVAL;
	}

	if (!property) {
		DRM_ERROR("failed to get property.\n");
		return -EINVAL;
	}

	mutex_lock(&c_node->event_lock);
	if (list_empty(&c_node->event_list)) {
		DRM_DEBUG_KMS("event list is empty.\n");
		ret = 0;
		goto err_event_unlock;
	}

	mutex_lock(&c_node->mem_lock);
	if (!ipp_check_mem_list(c_node)) {
		DRM_DEBUG_KMS("empty memory.\n");
		ret = 0;
		goto err_mem_unlock;
	}

	/* check command */
	switch (property->cmd) {
	case IPP_CMD_M2M:
		for_each_ipp_ops(i) {
			/* source/destination memory list */
			head = &c_node->mem_list[i];

			m_node = list_first_entry(head,
				struct drm_exynos_ipp_mem_node, list);

			tbuf_id[i] = m_node->buf_id;
			DRM_DEBUG_KMS("%s buf_id[%d]\n",
				i ? "dst" : "src", tbuf_id[i]);

			ret = ipp_put_mem_node(drm_dev, c_node, m_node);
			if (ret)
				DRM_ERROR("failed to put m_node.\n");
		}
		break;
	case IPP_CMD_WB:
		/* clear buf for finding */
		memset(&qbuf, 0x0, sizeof(qbuf));
		qbuf.ops_id = EXYNOS_DRM_OPS_DST;
		qbuf.buf_id = buf_id[EXYNOS_DRM_OPS_DST];

		/* get memory node entry */
		m_node = ipp_find_mem_node(c_node, &qbuf);
		if (!m_node) {
			DRM_ERROR("empty memory node.\n");
			ret = -ENOMEM;
			goto err_mem_unlock;
		}

		tbuf_id[EXYNOS_DRM_OPS_DST] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	case IPP_CMD_OUTPUT:
		/* source memory list */
		head = &c_node->mem_list[EXYNOS_DRM_OPS_SRC];

		m_node = list_first_entry(head,
			struct drm_exynos_ipp_mem_node, list);

		tbuf_id[EXYNOS_DRM_OPS_SRC] = m_node->buf_id;

		ret = ipp_put_mem_node(drm_dev, c_node, m_node);
		if (ret)
			DRM_ERROR("failed to put m_node.\n");
		break;
	default:
		DRM_ERROR("invalid operations.\n");
		ret = -EINVAL;
		goto err_mem_unlock;
	}
	mutex_unlock(&c_node->mem_lock);

	if (tbuf_id[EXYNOS_DRM_OPS_DST] != buf_id[EXYNOS_DRM_OPS_DST])
		DRM_ERROR("failed to match buf_id[%d %d]prop_id[%d]\n",
			tbuf_id[1], buf_id[1], property->prop_id);

	/*
	 * The command node keeps an event list for destination buffers.
	 * Whenever a destination buffer is enqueued to the mem list, an
	 * event is created and linked to the tail of the event list, so
	 * the first event always belongs to the first enqueued buffer.
	 */
	e = list_first_entry(&c_node->event_list,
		struct drm_exynos_ipp_send_event, base.link);

	do_gettimeofday(&now);
	DRM_DEBUG_KMS("tv_sec[%ld]tv_usec[%ld]\n", now.tv_sec, now.tv_usec);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	e->event.prop_id = property->prop_id;

	/* set buffer ids for source and destination */
	for_each_ipp_ops(i)
		e->event.buf_id[i] = tbuf_id[i];

	spin_lock_irqsave(&drm_dev->event_lock, flags);
	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
	wake_up_interruptible(&e->base.file_priv->event_wait);
	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
	mutex_unlock(&c_node->event_lock);

	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
		property->cmd, property->prop_id, tbuf_id[EXYNOS_DRM_OPS_DST]);

	return 0;

err_mem_unlock:
	mutex_unlock(&c_node->mem_lock);
err_event_unlock:
	mutex_unlock(&c_node->event_lock);
	return ret;
}

void ipp_sched_event(struct work_struct *work)
{
	struct drm_exynos_ipp_event_work *event_work =
		container_of(work, struct drm_exynos_ipp_event_work, work);
	struct exynos_drm_ippdrv *ippdrv;
	struct drm_exynos_ipp_cmd_node *c_node;
	int ret;

	if (!event_work) {
		DRM_ERROR("failed to get event_work.\n");
		return;
	}

	DRM_DEBUG_KMS("buf_id[%d]\n", event_work->buf_id[EXYNOS_DRM_OPS_DST]);

	ippdrv = event_work->ippdrv;
	if (!ippdrv) {
		DRM_ERROR("failed to get ipp driver.\n");
		return;
	}

	c_node = ippdrv->c_node;
	if (!c_node) {
		DRM_ERROR("failed to get command node.\n");
		return;
	}

	/*
	 * IPP synchronizes the command and event threads. If user space
	 * closes the device immediately, synchronize with the command
	 * thread by signalling the completion instead of carrying on
	 * with the operation.
	 */
	if (c_node->state != IPP_STATE_START) {
		DRM_DEBUG_KMS("bypass state[%d]prop_id[%d]\n",
			c_node->state, c_node->property.prop_id);
		goto err_completion;
	}

	ret = ipp_send_event(ippdrv, c_node, event_work->buf_id);
	if (ret) {
		DRM_ERROR("failed to send event.\n");
		goto err_completion;
	}

err_completion:
	if (ipp_is_m2m_cmd(c_node->property.cmd))
		complete(&c_node->start_complete);
}

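/*
 * subdrv probe: bind every registered ippdrv to the drm device, allocate an
 * ipp_id for it and attach the iommu; on failure, unwind the drivers that
 * were already set up.
 */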
static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	struct ipp_context *ctx = get_ipp_context(dev);
	struct exynos_drm_ippdrv *ippdrv;
	int ret, count = 0;

	/* get ipp driver entry */
	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		ippdrv->drm_dev = drm_dev;

		ret = ipp_create_id(&ctx->ipp_idr, &ctx->ipp_lock, ippdrv);
		if (ret < 0) {
			DRM_ERROR("failed to create id.\n");
			goto err;
		}
		ippdrv->prop_list.ipp_id = ret;

		DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
			count++, (int)ippdrv, ret);

		/* store parent device for node */
		ippdrv->parent_dev = dev;

		/* store event work queue and handler */
		ippdrv->event_workq = ctx->event_workq;
		ippdrv->sched_event = ipp_sched_event;
		INIT_LIST_HEAD(&ippdrv->cmd_list);
		mutex_init(&ippdrv->cmd_lock);

		if (is_drm_iommu_supported(drm_dev)) {
			ret = drm_iommu_attach_device(drm_dev, ippdrv->dev);
			if (ret) {
				DRM_ERROR("failed to activate iommu\n");
				goto err;
			}
		}
	}

	return 0;

err:
	/* get ipp driver entry */
	list_for_each_entry_continue_reverse(ippdrv, &exynos_drm_ippdrv_list,
						drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);
	}

	return ret;
}

static void ipp_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
{
	struct exynos_drm_ippdrv *ippdrv, *t;
	struct ipp_context *ctx = get_ipp_context(dev);

	/* get ipp driver entry */
	list_for_each_entry_safe(ippdrv, t, &exynos_drm_ippdrv_list, drv_list) {
		if (is_drm_iommu_supported(drm_dev))
			drm_iommu_detach_device(drm_dev, ippdrv->dev);

		ipp_remove_id(&ctx->ipp_idr, &ctx->ipp_lock,
				ippdrv->prop_list.ipp_id);

		ippdrv->drm_dev = NULL;
		exynos_drm_ippdrv_unregister(ippdrv);
	}
}

static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;

	file_priv->ipp_dev = dev;

	DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);

	return 0;
}

static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev,
		struct drm_file *file)
{
	struct exynos_drm_ippdrv *ippdrv = NULL;
	struct ipp_context *ctx = get_ipp_context(dev);
	struct drm_exynos_ipp_cmd_node *c_node, *tc_node;
	int count = 0;

	list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
		mutex_lock(&ippdrv->cmd_lock);
		list_for_each_entry_safe(c_node, tc_node,
			&ippdrv->cmd_list, list) {
			DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
				count++, (int)ippdrv);

			if (c_node->filp == file) {
				/*
				 * User land has hit an abnormal state: the
				 * process was killed and the file closed,
				 * so the stop cmd ctrl was never called.
				 * Perform the stop operation here instead.
				 */
				if (c_node->state == IPP_STATE_START) {
					ipp_stop_property(drm_dev, ippdrv,
						c_node);
					c_node->state = IPP_STATE_STOP;
				}

				ippdrv->dedicated = false;
				ipp_clean_cmd_node(ctx, c_node);
				if (list_empty(&ippdrv->cmd_list))
					pm_runtime_put_sync(ippdrv->dev);
			}
		}
		mutex_unlock(&ippdrv->cmd_lock);
	}
}

static int ipp_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ipp_context *ctx;
	struct exynos_drm_subdrv *subdrv;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_init(&ctx->ipp_lock);
	mutex_init(&ctx->prop_lock);

	idr_init(&ctx->ipp_idr);
	idr_init(&ctx->prop_idr);

	/*
	 * Create a single thread for ipp events.
	 * IPP provides an event thread for the IPP drivers: a driver
	 * sends its event_work to this thread, and the event thread
	 * delivers the event to the user process.
	 */
	ctx->event_workq = create_singlethread_workqueue("ipp_event");
	if (!ctx->event_workq) {
		dev_err(dev, "failed to create event workqueue\n");
		return -EINVAL;
	}

	/*
	 * Create a single thread for ipp commands.
	 * IPP provides a command thread for user processes: a process
	 * creates a command node with the set property ioctl and sends
	 * start_work to this thread, which then starts the property.
	 */
	ctx->cmd_workq = create_singlethread_workqueue("ipp_cmd");
	if (!ctx->cmd_workq) {
		dev_err(dev, "failed to create cmd workqueue\n");
		ret = -EINVAL;
		goto err_event_workq;
	}

	/* set sub driver information */
	subdrv = &ctx->subdrv;
	subdrv->dev = dev;
	subdrv->probe = ipp_subdrv_probe;
	subdrv->remove = ipp_subdrv_remove;
	subdrv->open = ipp_subdrv_open;
	subdrv->close = ipp_subdrv_close;

	platform_set_drvdata(pdev, ctx);

	ret = exynos_drm_subdrv_register(subdrv);
	if (ret < 0) {
		DRM_ERROR("failed to register drm ipp device.\n");
		goto err_cmd_workq;
	}

	dev_info(dev, "drm ipp registered successfully.\n");

	return 0;

err_cmd_workq:
	destroy_workqueue(ctx->cmd_workq);
err_event_workq:
	destroy_workqueue(ctx->event_workq);
	return ret;
}

static int ipp_remove(struct platform_device *pdev)
{
	struct ipp_context *ctx = platform_get_drvdata(pdev);

	/* unregister sub driver */
	exynos_drm_subdrv_unregister(&ctx->subdrv);

	/* remove and destroy the ipp idrs */
	idr_destroy(&ctx->ipp_idr);
	idr_destroy(&ctx->prop_idr);

	mutex_destroy(&ctx->ipp_lock);
	mutex_destroy(&ctx->prop_lock);

	/* destroy command, event work queue */
	destroy_workqueue(ctx->cmd_workq);
	destroy_workqueue(ctx->event_workq);

	return 0;
}

struct platform_driver ipp_driver = {
	.probe		= ipp_probe,
	.remove		= ipp_remove,
	.driver		= {
		.name	= "exynos-drm-ipp",
		.owner	= THIS_MODULE,
	},
};