1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Dave Airlie
23 *          Alon Levy
24 */
25
26#include "qxl_drv.h"
27#include "qxl_object.h"
28
/*
 * TODO: we currently allocate a new gem (qxl_bo) for each request.
 * This is wasteful since bos are page aligned.
 */
33static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
34			   struct drm_file *file_priv)
35{
36	struct qxl_device *qdev = dev->dev_private;
37	struct drm_qxl_alloc *qxl_alloc = data;
38	int ret;
39	struct qxl_bo *qobj;
40	uint32_t handle;
41	u32 domain = QXL_GEM_DOMAIN_VRAM;
42
43	if (qxl_alloc->size == 0) {
44		DRM_ERROR("invalid size %d\n", qxl_alloc->size);
45		return -EINVAL;
46	}
47	ret = qxl_gem_object_create_with_handle(qdev, file_priv,
48						domain,
49						qxl_alloc->size,
50						NULL,
51						&qobj, &handle);
52	if (ret) {
53		DRM_ERROR("%s: failed to create gem ret=%d\n",
54			  __func__, ret);
55		return -ENOMEM;
56	}
57	qxl_alloc->handle = handle;
58	return 0;
59}
60
61static int qxl_map_ioctl(struct drm_device *dev, void *data,
62			 struct drm_file *file_priv)
63{
64	struct qxl_device *qdev = dev->dev_private;
65	struct drm_qxl_map *qxl_map = data;
66
67	return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
68				  &qxl_map->offset);
69}
70
/* Bookkeeping for one relocation entry parsed from userspace. */
struct qxl_reloc_info {
	int type;		/* QXL_RELOC_TYPE_BO or QXL_RELOC_TYPE_SURF */
	struct qxl_bo *dst_bo;	/* bo that receives the patched value */
	uint32_t dst_offset;	/* byte offset of the patch inside dst_bo */
	struct qxl_bo *src_bo;	/* bo whose address / surface id is written */
	int src_offset;		/* byte offset added to src_bo's address */
};
78
/*
 * dst must be validated, i.e. the whole bo must reside in vram or
 * surface ram (right now all bos are in vram).
 * *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
 */
84static void
85apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
86{
87	void *reloc_page;
88	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
89	*(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
90											      info->src_bo,
91											      info->src_offset);
92	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
93}
94
95static void
96apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
97{
98	uint32_t id = 0;
99	void *reloc_page;
100
101	if (info->src_bo && !info->src_bo->is_primary)
102		id = info->src_bo->surface_id;
103
104	reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
105	*(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
106	qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
107}
108
/*
 * Look up @handle in @file_priv's gem namespace and add the backing bo
 * to @release's list of objects.
 *
 * On success the bo is returned still holding the reference taken by
 * the lookup; the caller is responsible for dropping it.  Returns NULL
 * if the handle is unknown or the bo could not be added to the release
 * list (in the latter case the lookup reference is dropped here).
 */
static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
					 struct drm_file *file_priv, uint64_t handle,
					 struct qxl_release *release)
{
	struct drm_gem_object *gobj;
	struct qxl_bo *qobj;
	int ret;

	gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
	if (!gobj)
		return NULL;

	qobj = gem_to_qxl_bo(gobj);

	ret = qxl_release_list_add(release, qobj);
	if (ret) {
		drm_gem_object_unreference_unlocked(gobj);
		return NULL;
	}

	return qobj;
}
132
/*
 * Usage of execbuffer:
 * Relocations need to take into account the full QXLDrawable size.
 * However, the command as passed from user space must *not* contain the
 * initial QXLReleaseInfo struct (the first sizeof(union qxl_release_info)
 * bytes).
 */
139static int qxl_process_single_command(struct qxl_device *qdev,
140				      struct drm_qxl_command *cmd,
141				      struct drm_file *file_priv)
142{
143	struct qxl_reloc_info *reloc_info;
144	int release_type;
145	struct qxl_release *release;
146	struct qxl_bo *cmd_bo;
147	void *fb_cmd;
148	int i, j, ret, num_relocs;
149	int unwritten;
150
151	switch (cmd->type) {
152	case QXL_CMD_DRAW:
153		release_type = QXL_RELEASE_DRAWABLE;
154		break;
155	case QXL_CMD_SURFACE:
156	case QXL_CMD_CURSOR:
157	default:
158		DRM_DEBUG("Only draw commands in execbuffers\n");
159		return -EINVAL;
160		break;
161	}
162
163	if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
164		return -EINVAL;
165
166	if (!access_ok(VERIFY_READ,
167		       (void *)(unsigned long)cmd->command,
168		       cmd->command_size))
169		return -EFAULT;
170
171	reloc_info = kmalloc_array(cmd->relocs_num,
172				   sizeof(struct qxl_reloc_info), GFP_KERNEL);
173	if (!reloc_info)
174		return -ENOMEM;
175
176	ret = qxl_alloc_release_reserved(qdev,
177					 sizeof(union qxl_release_info) +
178					 cmd->command_size,
179					 release_type,
180					 &release,
181					 &cmd_bo);
182	if (ret)
183		goto out_free_reloc;
184
185	/* TODO copy slow path code from i915 */
186	fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
187	unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
188
189	{
190		struct qxl_drawable *draw = fb_cmd;
191		draw->mm_time = qdev->rom->mm_clock;
192	}
193
194	qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
195	if (unwritten) {
196		DRM_ERROR("got unwritten %d\n", unwritten);
197		ret = -EFAULT;
198		goto out_free_release;
199	}
200
201	/* fill out reloc info structs */
202	num_relocs = 0;
203	for (i = 0; i < cmd->relocs_num; ++i) {
204		struct drm_qxl_reloc reloc;
205
206		if (copy_from_user(&reloc,
207				       &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
208				       sizeof(reloc))) {
209			ret = -EFAULT;
210			goto out_free_bos;
211		}
212
213		/* add the bos to the list of bos to validate -
214		   need to validate first then process relocs? */
215		if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
216			DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type);
217
218			ret = -EINVAL;
219			goto out_free_bos;
220		}
221		reloc_info[i].type = reloc.reloc_type;
222
223		if (reloc.dst_handle) {
224			reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
225								  reloc.dst_handle, release);
226			if (!reloc_info[i].dst_bo) {
227				ret = -EINVAL;
228				reloc_info[i].src_bo = NULL;
229				goto out_free_bos;
230			}
231			reloc_info[i].dst_offset = reloc.dst_offset;
232		} else {
233			reloc_info[i].dst_bo = cmd_bo;
234			reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
235		}
236		num_relocs++;
237
238		/* reserve and validate the reloc dst bo */
239		if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
240			reloc_info[i].src_bo =
241				qxlhw_handle_to_bo(qdev, file_priv,
242						   reloc.src_handle, release);
243			if (!reloc_info[i].src_bo) {
244				if (reloc_info[i].dst_bo != cmd_bo)
245					drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base);
246				ret = -EINVAL;
247				goto out_free_bos;
248			}
249			reloc_info[i].src_offset = reloc.src_offset;
250		} else {
251			reloc_info[i].src_bo = NULL;
252			reloc_info[i].src_offset = 0;
253		}
254	}
255
256	/* validate all buffers */
257	ret = qxl_release_reserve_list(release, false);
258	if (ret)
259		goto out_free_bos;
260
261	for (i = 0; i < cmd->relocs_num; ++i) {
262		if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
263			apply_reloc(qdev, &reloc_info[i]);
264		else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
265			apply_surf_reloc(qdev, &reloc_info[i]);
266	}
267
268	ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
269	if (ret)
270		qxl_release_backoff_reserve_list(release);
271	else
272		qxl_release_fence_buffer_objects(release);
273
274out_free_bos:
275	for (j = 0; j < num_relocs; j++) {
276		if (reloc_info[j].dst_bo != cmd_bo)
277			drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
278		if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
279			drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
280	}
281out_free_release:
282	if (ret)
283		qxl_release_free(qdev, release);
284out_free_reloc:
285	kfree(reloc_info);
286	return ret;
287}
288
289static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
290				struct drm_file *file_priv)
291{
292	struct qxl_device *qdev = dev->dev_private;
293	struct drm_qxl_execbuffer *execbuffer = data;
294	struct drm_qxl_command user_cmd;
295	int cmd_num;
296	int ret;
297
298	for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
299
300		struct drm_qxl_command *commands =
301			(struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
302
303		if (copy_from_user(&user_cmd, &commands[cmd_num],
304				       sizeof(user_cmd)))
305			return -EFAULT;
306
307		ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
308		if (ret)
309			return ret;
310	}
311	return 0;
312}
313
314static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
315				 struct drm_file *file)
316{
317	struct qxl_device *qdev = dev->dev_private;
318	struct drm_qxl_update_area *update_area = data;
319	struct qxl_rect area = {.left = update_area->left,
320				.top = update_area->top,
321				.right = update_area->right,
322				.bottom = update_area->bottom};
323	int ret;
324	struct drm_gem_object *gobj = NULL;
325	struct qxl_bo *qobj = NULL;
326
327	if (update_area->left >= update_area->right ||
328	    update_area->top >= update_area->bottom)
329		return -EINVAL;
330
331	gobj = drm_gem_object_lookup(dev, file, update_area->handle);
332	if (gobj == NULL)
333		return -ENOENT;
334
335	qobj = gem_to_qxl_bo(gobj);
336
337	ret = qxl_bo_reserve(qobj, false);
338	if (ret)
339		goto out;
340
341	if (!qobj->pin_count) {
342		qxl_ttm_placement_from_domain(qobj, qobj->type, false);
343		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
344				      true, false);
345		if (unlikely(ret))
346			goto out;
347	}
348
349	ret = qxl_bo_check_id(qdev, qobj);
350	if (ret)
351		goto out2;
352	if (!qobj->surface_id)
353		DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
354	ret = qxl_io_update_area(qdev, qobj, &area);
355
356out2:
357	qxl_bo_unreserve(qobj);
358
359out:
360	drm_gem_object_unreference_unlocked(gobj);
361	return ret;
362}
363
364static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
365		       struct drm_file *file_priv)
366{
367	struct qxl_device *qdev = dev->dev_private;
368	struct drm_qxl_getparam *param = data;
369
370	switch (param->param) {
371	case QXL_PARAM_NUM_SURFACES:
372		param->value = qdev->rom->n_surfaces;
373		break;
374	case QXL_PARAM_MAX_RELOCS:
375		param->value = QXL_MAX_RES;
376		break;
377	default:
378		return -EINVAL;
379	}
380	return 0;
381}
382
383static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
384				  struct drm_file *file_priv)
385{
386	struct qxl_device *qdev = dev->dev_private;
387	struct drm_qxl_clientcap *param = data;
388	int byte, idx;
389
390	byte = param->index / 8;
391	idx = param->index % 8;
392
393	if (qdev->pdev->revision < 4)
394		return -ENOSYS;
395
396	if (byte >= 58)
397		return -ENOSYS;
398
399	if (qdev->rom->client_capabilities[byte] & (1 << idx))
400		return 0;
401	return -ENOSYS;
402}
403
404static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
405				struct drm_file *file)
406{
407	struct qxl_device *qdev = dev->dev_private;
408	struct drm_qxl_alloc_surf *param = data;
409	struct qxl_bo *qobj;
410	int handle;
411	int ret;
412	int size, actual_stride;
413	struct qxl_surface surf;
414
415	/* work out size allocate bo with handle */
416	actual_stride = param->stride < 0 ? -param->stride : param->stride;
417	size = actual_stride * param->height + actual_stride;
418
419	surf.format = param->format;
420	surf.width = param->width;
421	surf.height = param->height;
422	surf.stride = param->stride;
423	surf.data = 0;
424
425	ret = qxl_gem_object_create_with_handle(qdev, file,
426						QXL_GEM_DOMAIN_SURFACE,
427						size,
428						&surf,
429						&qobj, &handle);
430	if (ret) {
431		DRM_ERROR("%s: failed to create gem ret=%d\n",
432			  __func__, ret);
433		return -ENOMEM;
434	} else
435		param->handle = handle;
436	return ret;
437}
438
/* ioctl dispatch table; every entry requires DRM authentication. */
const struct drm_ioctl_desc qxl_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
							DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
							DRM_AUTH|DRM_UNLOCKED),

	DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
			  DRM_AUTH|DRM_UNLOCKED),
};

/* number of entries in the table above, exported to the driver core */
int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
458