1/*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 *    Christian König <deathsimple@vodafone.de>
29 */
30
31#include <drm/drmP.h>
32#include "amdgpu.h"
33#include "amdgpu_trace.h"
34
35static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
36				 struct amdgpu_bo_list **result,
37				 int *id)
38{
39	int r;
40
41	*result = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
42	if (!*result)
43		return -ENOMEM;
44
45	mutex_lock(&fpriv->bo_list_lock);
46	r = idr_alloc(&fpriv->bo_list_handles, *result,
47		      1, 0, GFP_KERNEL);
48	if (r < 0) {
49		mutex_unlock(&fpriv->bo_list_lock);
50		kfree(*result);
51		return r;
52	}
53	*id = r;
54
55	mutex_init(&(*result)->lock);
56	(*result)->num_entries = 0;
57	(*result)->array = NULL;
58
59	mutex_lock(&(*result)->lock);
60	mutex_unlock(&fpriv->bo_list_lock);
61
62	return 0;
63}
64
/* Remove the list identified by @id from @fpriv's handle IDR and free it.
 * Safe to call with an unknown id - the function is a no-op then.
 */
static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *list;

	mutex_lock(&fpriv->bo_list_lock);
	list = idr_find(&fpriv->bo_list_handles, id);
	if (list) {
		/* Taking list->lock waits for any concurrent holder
		 * (amdgpu_bo_list_get() returns with it held) to drop it
		 * via amdgpu_bo_list_put() before we remove the handle
		 * and tear the list down.
		 */
		mutex_lock(&list->lock);
		idr_remove(&fpriv->bo_list_handles, id);
		mutex_unlock(&list->lock);
		amdgpu_bo_list_free(list);
	}
	mutex_unlock(&fpriv->bo_list_lock);
}
79
80static int amdgpu_bo_list_set(struct amdgpu_device *adev,
81				     struct drm_file *filp,
82				     struct amdgpu_bo_list *list,
83				     struct drm_amdgpu_bo_list_entry *info,
84				     unsigned num_entries)
85{
86	struct amdgpu_bo_list_entry *array;
87	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
88	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
89	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
90
91	bool has_userptr = false;
92	unsigned i;
93
94	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
95	if (!array)
96		return -ENOMEM;
97	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
98
99	for (i = 0; i < num_entries; ++i) {
100		struct amdgpu_bo_list_entry *entry = &array[i];
101		struct drm_gem_object *gobj;
102
103		gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
104		if (!gobj)
105			goto error_free;
106
107		entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
108		drm_gem_object_unreference_unlocked(gobj);
109		entry->priority = info[i].bo_priority;
110		entry->prefered_domains = entry->robj->initial_domain;
111		entry->allowed_domains = entry->prefered_domains;
112		if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
113			entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
114		if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) {
115			has_userptr = true;
116			entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
117			entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
118		}
119		entry->tv.bo = &entry->robj->tbo;
120		entry->tv.shared = true;
121
122		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
123			gds_obj = entry->robj;
124		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
125			gws_obj = entry->robj;
126		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
127			oa_obj = entry->robj;
128
129		trace_amdgpu_bo_list_set(list, entry->robj);
130	}
131
132	for (i = 0; i < list->num_entries; ++i)
133		amdgpu_bo_unref(&list->array[i].robj);
134
135	drm_free_large(list->array);
136
137	list->gds_obj = gds_obj;
138	list->gws_obj = gws_obj;
139	list->oa_obj = oa_obj;
140	list->has_userptr = has_userptr;
141	list->array = array;
142	list->num_entries = num_entries;
143
144	return 0;
145
146error_free:
147	drm_free_large(array);
148	return -ENOENT;
149}
150
/* Look up the BO list for @id and return it with its lock held, or NULL
 * if the handle is unknown. Release with amdgpu_bo_list_put().
 */
struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	/* The list lock must be acquired while bo_list_lock is still held;
	 * otherwise amdgpu_bo_list_destroy() could free the list between
	 * the idr_find() and the mutex_lock().
	 */
	mutex_lock(&fpriv->bo_list_lock);
	result = idr_find(&fpriv->bo_list_handles, id);
	if (result)
		mutex_lock(&result->lock);
	mutex_unlock(&fpriv->bo_list_lock);
	return result;
}
163
/* Release a list previously returned by amdgpu_bo_list_get(), allowing a
 * pending amdgpu_bo_list_destroy() to proceed.
 */
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
	mutex_unlock(&list->lock);
}
168
169void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
170{
171	unsigned i;
172
173	for (i = 0; i < list->num_entries; ++i)
174		amdgpu_bo_unref(&list->array[i].robj);
175
176	mutex_destroy(&list->lock);
177	drm_free_large(list->array);
178	kfree(list);
179}
180
181int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
182				struct drm_file *filp)
183{
184	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
185
186	struct amdgpu_device *adev = dev->dev_private;
187	struct amdgpu_fpriv *fpriv = filp->driver_priv;
188	union drm_amdgpu_bo_list *args = data;
189	uint32_t handle = args->in.list_handle;
190	const void __user *uptr = (const void*)(long)args->in.bo_info_ptr;
191
192	struct drm_amdgpu_bo_list_entry *info;
193	struct amdgpu_bo_list *list;
194
195	int r;
196
197	info = drm_malloc_ab(args->in.bo_number,
198			     sizeof(struct drm_amdgpu_bo_list_entry));
199	if (!info)
200		return -ENOMEM;
201
202	/* copy the handle array from userspace to a kernel buffer */
203	r = -EFAULT;
204	if (likely(info_size == args->in.bo_info_size)) {
205		unsigned long bytes = args->in.bo_number *
206			args->in.bo_info_size;
207
208		if (copy_from_user(info, uptr, bytes))
209			goto error_free;
210
211	} else {
212		unsigned long bytes = min(args->in.bo_info_size, info_size);
213		unsigned i;
214
215		memset(info, 0, args->in.bo_number * info_size);
216		for (i = 0; i < args->in.bo_number; ++i) {
217			if (copy_from_user(&info[i], uptr, bytes))
218				goto error_free;
219
220			uptr += args->in.bo_info_size;
221		}
222	}
223
224	switch (args->in.operation) {
225	case AMDGPU_BO_LIST_OP_CREATE:
226		r = amdgpu_bo_list_create(fpriv, &list, &handle);
227		if (r)
228			goto error_free;
229
230		r = amdgpu_bo_list_set(adev, filp, list, info,
231					      args->in.bo_number);
232		amdgpu_bo_list_put(list);
233		if (r)
234			goto error_free;
235
236		break;
237
238	case AMDGPU_BO_LIST_OP_DESTROY:
239		amdgpu_bo_list_destroy(fpriv, handle);
240		handle = 0;
241		break;
242
243	case AMDGPU_BO_LIST_OP_UPDATE:
244		r = -ENOENT;
245		list = amdgpu_bo_list_get(fpriv, handle);
246		if (!list)
247			goto error_free;
248
249		r = amdgpu_bo_list_set(adev, filp, list, info,
250					      args->in.bo_number);
251		amdgpu_bo_list_put(list);
252		if (r)
253			goto error_free;
254
255		break;
256
257	default:
258		r = -EINVAL;
259		goto error_free;
260	}
261
262	memset(args, 0, sizeof(*args));
263	args->out.list_handle = handle;
264	drm_free_large(info);
265
266	return 0;
267
268error_free:
269	drm_free_large(info);
270	return r;
271}
272