/* exynos_drm_dmabuf.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"

#include <linux/dma-buf.h>

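/*
 * Per-attachment bookkeeping: a private clone of the GEM buffer's
 * scatter-gather table plus the direction it was last mapped in, so a
 * repeated map_dma_buf() call with the same direction can return the
 * cached table instead of mapping again.
 */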
struct exynos_drm_dmabuf_attachment {
	struct sg_table sgt;
	enum dma_data_direction dir;
	bool is_mapped;
};

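/* Translate a dma-buf back to the exynos GEM object that exported it. */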
static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
{
	return to_exynos_gem_obj(buf->priv);
}

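/*
 * attach callback: allocate the per-attachment state. The scatter-gather
 * table itself is built lazily on the first map_dma_buf() call.
 */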
static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
					struct device *dev,
					struct dma_buf_attachment *attach)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach;

	exynos_attach = kzalloc(sizeof(*exynos_attach), GFP_KERNEL);
	if (!exynos_attach)
		return -ENOMEM;

	exynos_attach->dir = DMA_NONE;
	attach->priv = exynos_attach;

	return 0;
}

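/*
 * detach callback: undo any DMA mapping still held for this attachment
 * and release the cloned scatter-gather table.
 */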
static void exynos_gem_detach_dma_buf(struct dma_buf *dmabuf,
					struct dma_buf_attachment *attach)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct sg_table *sgt;

	if (!exynos_attach)
		return;

	sgt = &exynos_attach->sgt;

	if (exynos_attach->dir != DMA_NONE)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
				exynos_attach->dir);

	sg_free_table(sgt);
	kfree(exynos_attach);
	attach->priv = NULL;
}

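/*
 * map_dma_buf callback: clone the GEM buffer's scatter-gather table into
 * the attachment-private copy and map it for the importing device. The
 * result is cached, so a second call with the same direction returns the
 * existing mapping.
 */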
static struct sg_table *
		exynos_gem_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
	struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
	struct drm_device *dev = gem_obj->base.dev;
	struct exynos_drm_gem_buf *buf;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int nents, ret;

	/* Return the cached table if already mapped with the same direction. */
	if (exynos_attach->dir == dir && exynos_attach->is_mapped)
		return &exynos_attach->sgt;

	buf = gem_obj->buffer;
	if (!buf) {
		DRM_ERROR("buffer is null.\n");
		return ERR_PTR(-ENOMEM);
	}

	sgt = &exynos_attach->sgt;

	ret = sg_alloc_table(sgt, buf->sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(ret);
	}

	mutex_lock(&dev->struct_mutex);

	/* Copy the exporter's scatterlist entries into the private clone. */
	rd = buf->sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	exynos_attach->is_mapped = true;
	exynos_attach->dir = dir;

	DRM_DEBUG_PRIME("buffer size = 0x%lx\n", buf->size);

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}

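/*
 * unmap_dma_buf callback: intentionally a no-op. The mapping is cached
 * in the attachment and only torn down in exynos_gem_detach_dma_buf().
 */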
static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
						struct sg_table *sgt,
						enum dma_data_direction dir)
{
	/* Nothing to do. */
}

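/*
 * CPU-access hooks (kmap/kunmap and their atomic variants) are not
 * implemented yet; they return NULL or do nothing.
 */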
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
						unsigned long page_num,
						void *addr)
{
	/* TODO */
}

static void *exynos_gem_dmabuf_kmap(struct dma_buf *dma_buf,
					unsigned long page_num)
{
	/* TODO */

	return NULL;
}

static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
					unsigned long page_num, void *addr)
{
	/* TODO */
}

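/* Direct mmap of the dma-buf is not supported by this exporter. */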
static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf,
	struct vm_area_struct *vma)
{
	return -ENOTTY;
}

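/* dma-buf operations for buffers exported by this driver. */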
static const struct dma_buf_ops exynos_dmabuf_ops = {
	.attach			= exynos_gem_attach_dma_buf,
	.detach			= exynos_gem_detach_dma_buf,
	.map_dma_buf		= exynos_gem_map_dma_buf,
	.unmap_dma_buf		= exynos_gem_unmap_dma_buf,
	.kmap			= exynos_gem_dmabuf_kmap,
	.kmap_atomic		= exynos_gem_dmabuf_kmap_atomic,
	.kunmap			= exynos_gem_dmabuf_kunmap,
	.kunmap_atomic		= exynos_gem_dmabuf_kunmap_atomic,
	.mmap			= exynos_gem_dmabuf_mmap,
	.release		= drm_gem_dmabuf_release,
};

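/*
 * Export a GEM object as a dma-buf: fill a dma_buf_export_info with this
 * file's ops and the object's size, then hand it to dma_buf_export().
 */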
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
				struct drm_gem_object *obj, int flags)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &exynos_dmabuf_ops;
	exp_info.size = exynos_gem_obj->base.size;
	exp_info.flags = flags;
	exp_info.priv = obj;

	return dma_buf_export(&exp_info);
}

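/*
 * Import a dma-buf as a GEM object. A buffer that this driver exported
 * itself is short-circuited back to the original GEM object; any foreign
 * buffer is attached, mapped bidirectionally and wrapped in a new exynos
 * GEM object.
 */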
struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
				struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *sgl;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buffer;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &exynos_dmabuf_ops) {
		struct drm_gem_object *obj;

		obj = dma_buf->priv;

		/* is it from our device? */
		if (obj->dev == drm_dev) {
			/*
			 * Importing a dmabuf exported from our own GEM
			 * increases the refcount on the GEM itself instead
			 * of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(obj);
			return obj;
		}
	}

	attach = dma_buf_attach(dma_buf, drm_dev->dev);
	if (IS_ERR(attach))
		return ERR_PTR(-EINVAL);

	get_dma_buf(dma_buf);

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_buf_detach;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		goto err_unmap_attach;
	}

	exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_free_buffer;
	}

	sgl = sgt->sgl;

	buffer->size = dma_buf->size;
	buffer->dma_addr = sg_dma_address(sgl);

	if (sgt->nents == 1) {
		/* always physically contiguous memory if sgt->nents is 1. */
		exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
	} else {
		/*
		 * This case could be CONTIG or NONCONTIG type, but for now
		 * it is set to NONCONTIG.
		 * TODO: find a way for the exporter to notify the importer
		 * of the actual type of its buffer.
		 */
		exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
	}

	exynos_gem_obj->buffer = buffer;
	buffer->sgt = sgt;
	exynos_gem_obj->base.import_attach = attach;

	DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
								buffer->size);

	return &exynos_gem_obj->base;

err_free_buffer:
	kfree(buffer);
err_unmap_attach:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
err_buf_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}

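/*
 * Usage sketch (illustrative, not part of this file): the two PRIME
 * helpers above are hooked into the driver's struct drm_driver, e.g. in
 * exynos_drm_drv.c:
 *
 *	static struct drm_driver exynos_drm_driver = {
 *		...
 *		.gem_prime_export	= exynos_dmabuf_prime_export,
 *		.gem_prime_import	= exynos_dmabuf_prime_import,
 *		...
 *	};
 */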