/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

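/*
 * Order (log2 of the number of buckets) of the hash table used below to
 * look up resources and buffer objects during command-stream validation.
 */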
#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset, in 32-bit units, into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: The resource does not need a backup buffer allocated
 * at reservation time. The command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Verifier callback for this command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}
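
/*
 * Illustrative use only (the command table itself is defined further
 * down in the file, outside this excerpt): an entry for the surface-copy
 * command could look like
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false),
 *
 * i.e. user-space may submit the command, and the check applies whether
 * or not guest-backed objects are available.
 */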

/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_context_binding_state_transfer
					(val->res, val->staged_bindings);
			}
			kfree(val->staged_bindings);
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, new_backup,
			val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}


/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: If non-NULL on entry, points to a valid struct
 * vmw_resource_val_node pointer on successful return.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context
 * on the validation list.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to a software context used for this command submission.
 * @ctx: Pointer to the context resource.
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state re-emission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_binding *entry;
	int ret = 0;
	struct vmw_resource *res;

	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		res = vmw_resource_reference_unless_doomed(entry->bi.res);
		if (unlikely(res == NULL))
			continue;

		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}
/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}
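
/*
 * A minimal sketch of the relocation round trip, using names from this
 * file: while the stream is parsed, vmw_cmd_res_check() records
 *
 *	vmw_resource_relocation_add(&sw_context->res_relocations, res,
 *				    id_loc - sw_context->buf_start);
 *
 * and once validation has assigned device ids,
 * vmw_resource_relocations_apply() patches cb[rel->offset] with res->id
 * (or with SVGA_3D_CMD_NOP for a NULL resource).
 */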

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
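	/*
	 * GNU ?: extension: yields the (non-zero) value of
	 * capable(CAP_SYS_ADMIN) when it is true, -EINVAL otherwise.
	 */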
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validation node number
 * on return.
 *
 * Returns -EINVAL if the limit on the number of buffer objects per command
 * submission is exceeded.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) bo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->shared = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}
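
/*
 * Note that sw_context->res_ht doubles as the lookup table for buffer
 * objects: resources (in vmw_resource_val_add()) and buffer objects
 * (above) are both hashed by their kernel pointer, so the key spaces
 * do not collide.
 */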

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since VMware's command submission is currently protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources;
 * only a single thread at a time will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct ttm_buffer_object *bo = &res->backup->base;

			ret = vmw_bo_to_validate_list
				(sw_context, bo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}
	}
	return 0;
}


/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_val_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		return ret;

	if (res_type == vmw_res_context && dev_priv->has_mob &&
	    node->first_usage) {

		/*
		 * Put contexts first on the list to be able to exit
		 * list traversal for contexts early.
		 */
		list_del(&node->head);
		list_add(&node->head, &sw_context->resource_list);

		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
		if (unlikely(ret != 0))
			return ret;
		node->staged_bindings =
			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
		if (node->staged_bindings == NULL) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&node->staged_bindings->list);
	}

	if (p_val)
		*p_val = node;

	return 0;
}


/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to a pointer to a resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		dump_stack();
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_context_rebind_all(val->res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		uint32_t cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;

		bi.ctx = ctx_node->res;
		bi.res = res_node ? res_node->res : NULL;
		bi.bt = vmw_ctx_binding_rt;
		bi.i1.rt_type = cmd->body.type;
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and whether another buffer is currently pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct ttm_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

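		/*
		 * Query results are small; reject buffers larger than
		 * four pages as bogus.
		 */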
		if (unlikely(new_query_bo->num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;

	}

	return 0;
}


/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and the old query buffers need to be
 * fenced using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			vmw_bo_pin(dev_priv->dummy_query_bo, true);
			dev_priv->dummy_query_bo_pinned = true;

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				ttm_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations().
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return, will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations().
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
				     NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_begin_gb_query - Validate an SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - Validate an SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

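		/*
		 * Rewrite the legacy command in place as its guest-backed
		 * equivalent (same size, asserted above) and re-validate it.
		 */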
		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_end_gb_query - Validate an SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - Validate an SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - Validate an SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - Validate an SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}


static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo bi;

			bi.ctx = ctx_node->res;
			bi.res = res_node ? res_node->res : NULL;
			bi.bt = vmw_ctx_binding_tex;
			bi.i1.texture_stage = cur_state->stage;
			vmw_context_binding_add(ctx_node->staged_bindings,
						&bi);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	int ret;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_resource_val_node *val_node;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (unlikely(ret != 0))
		return ret;

	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}
1603 
1604 /**
1605  * vmw_cmd_invalidate_gb_surface - Validate an
1606  * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
1607  *
1608  * @dev_priv: Pointer to a device private struct.
1609  * @sw_context: The software context being used for this batch.
1610  * @header: Pointer to the command header in the command stream.
1611  */
1612 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1613 					 struct vmw_sw_context *sw_context,
1614 					 SVGA3dCmdHeader *header)
1615 {
1616 	struct vmw_gb_surface_cmd {
1617 		SVGA3dCmdHeader header;
1618 		SVGA3dCmdInvalidateGBSurface body;
1619 	} *cmd;
1620 
1621 	cmd = container_of(header, struct vmw_gb_surface_cmd, header);
1622 
1623 	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1624 				 user_surface_converter,
1625 				 &cmd->body.sid, NULL);
1626 }
1627 
1629 /**
1630  * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
1631  * command
1632  *
1633  * @dev_priv: Pointer to a device private struct.
1634  * @sw_context: The software context being used for this batch.
1635  * @header: Pointer to the command header in the command stream.
1636  */
1637 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1638 				 struct vmw_sw_context *sw_context,
1639 				 SVGA3dCmdHeader *header)
1640 {
1641 	struct vmw_shader_define_cmd {
1642 		SVGA3dCmdHeader header;
1643 		SVGA3dCmdDefineShader body;
1644 	} *cmd;
1645 	int ret;
1646 	size_t size;
1647 	struct vmw_resource_val_node *val;
1648 
1649 	cmd = container_of(header, struct vmw_shader_define_cmd,
1650 			   header);
1651 
1652 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1653 				user_context_converter, &cmd->body.cid,
1654 				&val);
1655 	if (unlikely(ret != 0))
1656 		return ret;
1657 
1658 	if (unlikely(!dev_priv->has_mob))
1659 		return 0;
1660 
1661 	size = cmd->header.size - sizeof(cmd->body);
1662 	ret = vmw_compat_shader_add(dev_priv,
1663 				    vmw_context_res_man(val->res),
1664 				    cmd->body.shid, cmd + 1,
1665 				    cmd->body.type, size,
1666 				    &sw_context->staged_cmd_res);
1667 	if (unlikely(ret != 0))
1668 		return ret;
1669 
1670 	return vmw_resource_relocation_add(&sw_context->res_relocations,
1671 					   NULL, &cmd->header.id -
1672 					   sw_context->buf_start);
1675 }
1676 
1677 /**
1678  * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
1679  * command
1680  *
1681  * @dev_priv: Pointer to a device private struct.
1682  * @sw_context: The software context being used for this batch.
1683  * @header: Pointer to the command header in the command stream.
1684  */
1685 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1686 				  struct vmw_sw_context *sw_context,
1687 				  SVGA3dCmdHeader *header)
1688 {
1689 	struct vmw_shader_destroy_cmd {
1690 		SVGA3dCmdHeader header;
1691 		SVGA3dCmdDestroyShader body;
1692 	} *cmd;
1693 	int ret;
1694 	struct vmw_resource_val_node *val;
1695 
1696 	cmd = container_of(header, struct vmw_shader_destroy_cmd,
1697 			   header);
1698 
1699 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1700 				user_context_converter, &cmd->body.cid,
1701 				&val);
1702 	if (unlikely(ret != 0))
1703 		return ret;
1704 
1705 	if (unlikely(!dev_priv->has_mob))
1706 		return 0;
1707 
1708 	ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
1709 				       cmd->body.shid,
1710 				       cmd->body.type,
1711 				       &sw_context->staged_cmd_res);
1712 	if (unlikely(ret != 0))
1713 		return ret;
1714 
1715 	return vmw_resource_relocation_add(&sw_context->res_relocations,
1716 					   NULL, &cmd->header.id -
1717 					   sw_context->buf_start);
1720 }
1721 
1722 /**
1723  * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
1724  * command
1725  *
1726  * @dev_priv: Pointer to a device private struct.
1727  * @sw_context: The software context being used for this batch.
1728  * @header: Pointer to the command header in the command stream.
1729  */
1730 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1731 			      struct vmw_sw_context *sw_context,
1732 			      SVGA3dCmdHeader *header)
1733 {
1734 	struct vmw_set_shader_cmd {
1735 		SVGA3dCmdHeader header;
1736 		SVGA3dCmdSetShader body;
1737 	} *cmd;
1738 	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
1739 	struct vmw_ctx_bindinfo bi;
1740 	struct vmw_resource *res = NULL;
1741 	int ret;
1742 
1743 	cmd = container_of(header, struct vmw_set_shader_cmd,
1744 			   header);
1745 
1746 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1747 				user_context_converter, &cmd->body.cid,
1748 				&ctx_node);
1749 	if (unlikely(ret != 0))
1750 		return ret;
1751 
1752 	if (!dev_priv->has_mob)
1753 		return 0;
1754 
1755 	if (cmd->body.shid != SVGA3D_INVALID_ID) {
1756 		res = vmw_compat_shader_lookup
1757 			(vmw_context_res_man(ctx_node->res),
1758 			 cmd->body.shid,
1759 			 cmd->body.type);
1760 
1761 		if (!IS_ERR(res)) {
1762 			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
1763 						    vmw_res_shader,
1764 						    &cmd->body.shid, res,
1765 						    &res_node);
1766 			vmw_resource_unreference(&res);
1767 			if (unlikely(ret != 0))
1768 				return ret;
1769 		}
1770 	}
1771 
1772 	if (!res_node) {
1773 		ret = vmw_cmd_res_check(dev_priv, sw_context,
1774 					vmw_res_shader,
1775 					user_shader_converter,
1776 					&cmd->body.shid, &res_node);
1777 		if (unlikely(ret != 0))
1778 			return ret;
1779 	}
1780 
1781 	bi.ctx = ctx_node->res;
1782 	bi.res = res_node ? res_node->res : NULL;
1783 	bi.bt = vmw_ctx_binding_shader;
1784 	bi.i1.shader_type = cmd->body.type;
1785 	return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
1786 }
1787 
1788 /**
1789  * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
1790  * command
1791  *
1792  * @dev_priv: Pointer to a device private struct.
1793  * @sw_context: The software context being used for this batch.
1794  * @header: Pointer to the command header in the command stream.
1795  */
1796 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
1797 				    struct vmw_sw_context *sw_context,
1798 				    SVGA3dCmdHeader *header)
1799 {
1800 	struct vmw_set_shader_const_cmd {
1801 		SVGA3dCmdHeader header;
1802 		SVGA3dCmdSetShaderConst body;
1803 	} *cmd;
1804 	int ret;
1805 
1806 	cmd = container_of(header, struct vmw_set_shader_const_cmd,
1807 			   header);
1808 
1809 	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1810 				user_context_converter, &cmd->body.cid,
1811 				NULL);
1812 	if (unlikely(ret != 0))
1813 		return ret;
1814 
1815 	if (dev_priv->has_mob)
1816 		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
1817 
1818 	return 0;
1819 }
1820 
1821 /**
1822  * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
1823  * command
1824  *
1825  * @dev_priv: Pointer to a device private struct.
1826  * @sw_context: The software context being used for this batch.
1827  * @header: Pointer to the command header in the command stream.
1828  */
1829 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
1830 				  struct vmw_sw_context *sw_context,
1831 				  SVGA3dCmdHeader *header)
1832 {
1833 	struct vmw_bind_gb_shader_cmd {
1834 		SVGA3dCmdHeader header;
1835 		SVGA3dCmdBindGBShader body;
1836 	} *cmd;
1837 
1838 	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
1839 			   header);
1840 
1841 	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
1842 				     user_shader_converter,
1843 				     &cmd->body.shid, &cmd->body.mobid,
1844 				     cmd->body.offsetInBytes);
1845 }
1846 
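/**
 * vmw_cmd_check_not_3d - Validate a 2D (non-3D) SVGA command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: On input, the number of bytes remaining in the command stream.
 * On successful return, the size in bytes of this command.
 */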
1847 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
1848 				struct vmw_sw_context *sw_context,
1849 				void *buf, uint32_t *size)
1850 {
1851 	uint32_t size_remaining = *size;
1852 	uint32_t cmd_id;
1853 
1854 	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
1855 	switch (cmd_id) {
1856 	case SVGA_CMD_UPDATE:
1857 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
1858 		break;
1859 	case SVGA_CMD_DEFINE_GMRFB:
1860 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
1861 		break;
1862 	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
1863 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
1864 		break;
1865 	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
1866 		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
1867 		break;
1868 	default:
1869 		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
1870 		return -EINVAL;
1871 	}
1872 
1873 	if (*size > size_remaining) {
1874 		DRM_ERROR("Invalid SVGA command (size mismatch): %u.\n",
1875 			  cmd_id);
1876 		return -EINVAL;
1877 	}
1878 
1879 	if (unlikely(!sw_context->kernel)) {
1880 		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
1881 		return -EPERM;
1882 	}
1883 
1884 	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
1885 		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
1886 
1887 	return 0;
1888 }
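
/*
 * Example (illustrative only) of the 2D command layout the verifier
 * above assumes: a 32-bit command id immediately followed by a
 * fixed-size body, hence *size = sizeof(uint32_t) + sizeof(body).
 *
 *	struct {
 *		uint32_t cmd_id;
 *		SVGAFifoCmdUpdate body;
 *	} cmd = {
 *		.cmd_id = SVGA_CMD_UPDATE,
 *		.body = { .x = 0, .y = 0, .width = 640, .height = 480 },
 *	};
 */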
1889 
1890 static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
1891 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
1892 		    false, false, false),
1893 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
1894 		    false, false, false),
1895 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
1896 		    true, false, false),
1897 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
1898 		    true, false, false),
1899 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
1900 		    true, false, false),
1901 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
1902 		    false, false, false),
1903 	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
1904 		    false, false, false),
1905 	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
1906 		    true, false, false),
1907 	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
1908 		    true, false, false),
1909 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
1910 		    true, false, false),
1911 	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
1912 		    &vmw_cmd_set_render_target_check, true, false, false),
1913 	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
1914 		    true, false, false),
1915 	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
1916 		    true, false, false),
1917 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
1918 		    true, false, false),
1919 	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
1920 		    true, false, false),
1921 	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
1922 		    true, false, false),
1923 	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
1924 		    true, false, false),
1925 	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
1926 		    true, false, false),
1927 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
1928 		    false, false, false),
1929 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
1930 		    true, false, false),
1931 	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
1932 		    true, false, false),
1933 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
1934 		    true, false, false),
1935 	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
1936 		    true, false, false),
1937 	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
1938 		    true, false, false),
1939 	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
1940 		    true, false, false),
1941 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
1942 		    true, false, false),
1943 	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
1944 		    true, false, false),
1945 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
1946 		    true, false, false),
1947 	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
1948 		    true, false, false),
1949 	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
1950 		    &vmw_cmd_blt_surf_screen_check, false, false, false),
1951 	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
1952 		    false, false, false),
1953 	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
1954 		    false, false, false),
1955 	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
1956 		    false, false, false),
1957 	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
1958 		    false, false, false),
1959 	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
1960 		    false, false, false),
1961 	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
1962 		    false, false, false),
1963 	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
1964 		    false, false, false),
1965 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
1966 		    false, false, false),
1967 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
1968 		    false, false, false),
1969 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
1970 		    false, false, false),
1971 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
1972 		    false, false, false),
1973 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
1974 		    false, false, false),
1975 	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
1976 		    false, false, false),
1977 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
1978 		    false, false, true),
1979 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
1980 		    false, false, true),
1981 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
1982 		    false, false, true),
1983 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
1984 		    false, false, true),
1985 	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
1986 		    false, false, true),
1987 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
1988 		    false, false, true),
1989 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
1990 		    false, false, true),
1991 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
1992 		    false, false, true),
1993 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
1994 		    true, false, true),
1995 	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
1996 		    false, false, true),
1997 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
1998 		    true, false, true),
1999 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
2000 		    &vmw_cmd_update_gb_surface, true, false, true),
2001 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
2002 		    &vmw_cmd_readback_gb_image, true, false, true),
2003 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
2004 		    &vmw_cmd_readback_gb_surface, true, false, true),
2005 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
2006 		    &vmw_cmd_invalidate_gb_image, true, false, true),
2007 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
2008 		    &vmw_cmd_invalidate_gb_surface, true, false, true),
2009 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
2010 		    false, false, true),
2011 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
2012 		    false, false, true),
2013 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
2014 		    false, false, true),
2015 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
2016 		    false, false, true),
2017 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
2018 		    false, false, true),
2019 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
2020 		    false, false, true),
2021 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
2022 		    true, false, true),
2023 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
2024 		    false, false, true),
2025 	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
2026 		    false, false, false),
2027 	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
2028 		    true, false, true),
2029 	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
2030 		    true, false, true),
2031 	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
2032 		    true, false, true),
2033 	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
2034 		    true, false, true),
2035 	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
2036 		    false, false, true),
2037 	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
2038 		    false, false, true),
2039 	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
2040 		    false, false, true),
2041 	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
2042 		    false, false, true),
2043 	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
2044 		    false, false, true),
2045 	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
2046 		    false, false, true),
2047 	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
2048 		    false, false, true),
2049 	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
2050 		    false, false, true),
2051 	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2052 		    false, false, true),
2053 	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
2054 		    false, false, true),
2055 	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
2056 		    true, false, true)
2057 };
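
/*
 * A minimal sketch of how VMW_CMD_DEF presumably fills the table above
 * (illustration only; the actual macro is defined earlier in this
 * file):
 *
 *	#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_dis, _gb_en)	\
 *		[(_cmd) - SVGA_3D_CMD_BASE] =				\
 *			{ (_func), (_user_allow), (_gb_dis), (_gb_en) }
 *
 * which makes the lookup in vmw_cmd_check() below a direct array index
 * by (cmd_id - SVGA_3D_CMD_BASE).
 */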
2058 
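/**
 * vmw_cmd_check - Validate a single command in the command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command.
 * @size: On input, the number of bytes remaining in the command stream.
 * On successful return, the size in bytes of this command.
 *
 * Looks the command up in vmw_cmd_entries[] and dispatches to the
 * matching verifier after checking size, privilege and guest-backed
 * object constraints.
 */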
2059 static int vmw_cmd_check(struct vmw_private *dev_priv,
2060 			 struct vmw_sw_context *sw_context,
2061 			 void *buf, uint32_t *size)
2062 {
2063 	uint32_t cmd_id;
2064 	uint32_t size_remaining = *size;
2065 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
2066 	int ret;
2067 	const struct vmw_cmd_entry *entry;
2068 	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;
2069 
2070 	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
2071 	/* Handle any non-3D commands. */
2072 	if (unlikely(cmd_id < SVGA_CMD_MAX))
2073 		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
2074 
2076 	cmd_id = le32_to_cpu(header->id);
2077 	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
2078 
2079 	cmd_id -= SVGA_3D_CMD_BASE;
2080 	if (unlikely(*size > size_remaining))
2081 		goto out_invalid;
2082 
2083 	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
2084 		goto out_invalid;
2085 
2086 	entry = &vmw_cmd_entries[cmd_id];
2087 	if (unlikely(!entry->func))
2088 		goto out_invalid;
2089 
2090 	if (unlikely(!entry->user_allow && !sw_context->kernel))
2091 		goto out_privileged;
2092 
2093 	if (unlikely(entry->gb_disable && gb))
2094 		goto out_old;
2095 
2096 	if (unlikely(entry->gb_enable && !gb))
2097 		goto out_new;
2098 
2099 	ret = entry->func(dev_priv, sw_context, header);
2100 	if (unlikely(ret != 0))
2101 		goto out_invalid;
2102 
2103 	return 0;
2104 out_invalid:
2105 	DRM_ERROR("Invalid SVGA3D command: %d\n",
2106 		  cmd_id + SVGA_3D_CMD_BASE);
2107 	return -EINVAL;
2108 out_privileged:
2109 	DRM_ERROR("Privileged SVGA3D command: %d\n",
2110 		  cmd_id + SVGA_3D_CMD_BASE);
2111 	return -EPERM;
2112 out_old:
2113 	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
2114 		  cmd_id + SVGA_3D_CMD_BASE);
2115 	return -EINVAL;
2116 out_new:
2117 	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
2118 		  cmd_id + SVGA_3D_CMD_BASE);
2119 	return -EINVAL;
2120 }
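
/*
 * Illustrative layout of a 3D command as consumed by vmw_cmd_check()
 * above: header->size counts the body only, so the verifier advances
 * by sizeof(SVGA3dCmdHeader) + header->size per command.
 *
 *	struct {
 *		SVGA3dCmdHeader header;		   (id, size)
 *		SVGA3dCmdUpdateGBSurface body;	   (header.size bytes)
 *	} cmd;
 */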
2121 
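/**
 * vmw_cmd_check_all - Validate all commands in a command stream
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command stream.
 * @size: Size of the command stream in bytes.
 */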
2122 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
2123 			     struct vmw_sw_context *sw_context,
2124 			     void *buf,
2125 			     uint32_t size)
2126 {
2127 	int32_t cur_size = size;
2128 	int ret;
2129 
2130 	sw_context->buf_start = buf;
2131 
2132 	while (cur_size > 0) {
2133 		size = cur_size;
2134 		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
2135 		if (unlikely(ret != 0))
2136 			return ret;
2137 		buf = (void *)((unsigned long) buf + size);
2138 		cur_size -= size;
2139 	}
2140 
2141 	if (unlikely(cur_size != 0)) {
2142 		DRM_ERROR("Command verifier out of sync.\n");
2143 		return -EINVAL;
2144 	}
2145 
2146 	return 0;
2147 }
2148 
2149 static void vmw_free_relocations(struct vmw_sw_context *sw_context)
2150 {
2151 	sw_context->cur_reloc = 0;
2152 }
2153 
2154 static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
2155 {
2156 	uint32_t i;
2157 	struct vmw_relocation *reloc;
2158 	struct ttm_validate_buffer *validate;
2159 	struct ttm_buffer_object *bo;
2160 
2161 	for (i = 0; i < sw_context->cur_reloc; ++i) {
2162 		reloc = &sw_context->relocs[i];
2163 		validate = &sw_context->val_bufs[reloc->index].base;
2164 		bo = validate->bo;
2165 		switch (bo->mem.mem_type) {
2166 		case TTM_PL_VRAM:
2167 			reloc->location->offset += bo->offset;
2168 			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
2169 			break;
2170 		case VMW_PL_GMR:
2171 			reloc->location->gmrId = bo->mem.start;
2172 			break;
2173 		case VMW_PL_MOB:
2174 			*reloc->mob_loc = bo->mem.start;
2175 			break;
2176 		default:
2177 			BUG();
2178 		}
2179 	}
2180 	vmw_free_relocations(sw_context);
2181 }
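
/*
 * Illustration of the fixup performed above: command-stream locations
 * initially carry buffer handles, and once validation has placed each
 * buffer, the corresponding relocation patches in the real location.
 * For a buffer that ended up in GMR memory, roughly:
 *
 *	reloc->location->gmrId = bo->mem.start;
 *
 * while a VRAM placement substitutes the special SVGA_GMR_FRAMEBUFFER
 * id and adds the buffer's VRAM offset, and a MOB placement writes the
 * MOB id through reloc->mob_loc instead.
 */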
2182 
2183 /**
2184  * vmw_resource_list_unreference - Free up a resource list and unreference
2185  * all resources referenced by it.
2186  *
2187  * @list: The resource list.
2188  */
2189 static void vmw_resource_list_unreference(struct list_head *list)
2190 {
2191 	struct vmw_resource_val_node *val, *val_next;
2192 
2193 	/*
2194 	 * Drop references to resources held during command submission.
2195 	 */
2196 
2197 	list_for_each_entry_safe(val, val_next, list, head) {
2198 		list_del_init(&val->head);
2199 		vmw_resource_unreference(&val->res);
2200 		kfree(val->staged_bindings);
2202 		kfree(val);
2203 	}
2204 }
2205 
2206 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
2207 {
2208 	struct vmw_validate_buffer *entry, *next;
2209 	struct vmw_resource_val_node *val;
2210 
2211 	/*
2212 	 * Drop references to DMA buffers held during command submission.
2213 	 */
2214 	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
2215 				 base.head) {
2216 		list_del(&entry->base.head);
2217 		ttm_bo_unref(&entry->base.bo);
2218 		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
2219 		sw_context->cur_val_buf--;
2220 	}
2221 	BUG_ON(sw_context->cur_val_buf != 0);
2222 
2223 	list_for_each_entry(val, &sw_context->resource_list, head)
2224 		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
2225 }
2226 
2227 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
2228 				      struct ttm_buffer_object *bo,
2229 				      bool validate_as_mob)
2230 {
2231 	int ret;
2232 
2234 	/*
2235 	 * Don't validate pinned buffers.
2236 	 */
2237 
2238 	if (bo == dev_priv->pinned_bo ||
2239 	    (bo == dev_priv->dummy_query_bo &&
2240 	     dev_priv->dummy_query_bo_pinned))
2241 		return 0;
2242 
2243 	if (validate_as_mob)
2244 		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);
2245 
2246 	/*
2247 	 * Put BO in VRAM if there is space, otherwise as a GMR.
2248 	 * If there is no space in VRAM and GMR ids are all used up,
2249 	 * start evicting GMRs to make room. If the DMA buffer can't be
2250 	 * used as a GMR, this will return -ENOMEM.
2251 	 */
2252 
2253 	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
2254 	if (likely(ret == 0 || ret == -ERESTARTSYS))
2255 		return ret;
2256 
2257 	/*
2258 	 * If that failed, try VRAM again, this time evicting
2259 	 * previous contents.
2260 	 */
2261 
2262 	DRM_INFO("Falling through to VRAM.\n");
2263 	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
2264 	return ret;
2265 }
2266 
2267 static int vmw_validate_buffers(struct vmw_private *dev_priv,
2268 				struct vmw_sw_context *sw_context)
2269 {
2270 	struct vmw_validate_buffer *entry;
2271 	int ret;
2272 
2273 	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
2274 		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
2275 						 entry->validate_as_mob);
2276 		if (unlikely(ret != 0))
2277 			return ret;
2278 	}
2279 	return 0;
2280 }
2281 
2282 static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
2283 				 uint32_t size)
2284 {
2285 	if (likely(sw_context->cmd_bounce_size >= size))
2286 		return 0;
2287 
2288 	if (sw_context->cmd_bounce_size == 0)
2289 		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
2290 
2291 	while (sw_context->cmd_bounce_size < size) {
2292 		sw_context->cmd_bounce_size =
2293 			PAGE_ALIGN(sw_context->cmd_bounce_size +
2294 				   (sw_context->cmd_bounce_size >> 1));
2295 	}
2296 
2297 	vfree(sw_context->cmd_bounce);
2299 
2300 	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
2301 
2302 	if (sw_context->cmd_bounce == NULL) {
2303 		DRM_ERROR("Failed to allocate command bounce buffer.\n");
2304 		sw_context->cmd_bounce_size = 0;
2305 		return -ENOMEM;
2306 	}
2307 
2308 	return 0;
2309 }
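
/*
 * Growth example, assuming VMWGFX_CMD_BOUNCE_INIT_SIZE is 32 KiB (an
 * assumption here; the define lives elsewhere in the driver): a
 * 100 KiB submission grows the bounce buffer
 * 32K -> 48K -> 72K -> 108K, i.e. roughly 1.5x per step after page
 * alignment, so even repeated large submissions reallocate only a
 * logarithmic number of times.
 */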
2310 
2311 /**
2312  * vmw_execbuf_fence_commands - create and submit a command stream fence
2313  *
2314  * Creates a fence object and submits a command stream marker.
2315  * If this fails for some reason, we sync the fifo and return NULL.
2316  * It is then safe to fence buffers with a NULL pointer.
2317  *
2318  * If @p_handle is not NULL, @file_priv must also not be NULL, and a
2319  * user-space handle is created for the fence; otherwise no handle is created.
2320  */
2321 
2322 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
2323 			       struct vmw_private *dev_priv,
2324 			       struct vmw_fence_obj **p_fence,
2325 			       uint32_t *p_handle)
2326 {
2327 	uint32_t sequence;
2328 	int ret;
2329 	bool synced = false;
2330 
2331 	/* p_handle implies file_priv. */
2332 	BUG_ON(p_handle != NULL && file_priv == NULL);
2333 
2334 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
2335 	if (unlikely(ret != 0)) {
2336 		DRM_ERROR("Fence submission error. Syncing.\n");
2337 		synced = true;
2338 	}
2339 
2340 	if (p_handle != NULL)
2341 		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
2342 					    sequence, p_fence, p_handle);
2343 	else
2344 		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
2345 
2346 	if (unlikely(ret != 0 && !synced)) {
2347 		(void) vmw_fallback_wait(dev_priv, false, false,
2348 					 sequence, false,
2349 					 VMW_FENCE_WAIT_TIMEOUT);
2350 		*p_fence = NULL;
2351 	}
2352 
2353 	return 0;
2354 }
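
/*
 * Typical caller pattern (cf. vmw_execbuf_process() below): on fence
 * creation failure the fifo has already been synced, so fencing the
 * validation list with the resulting NULL pointer is safe:
 *
 *	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
 *					 user_fence_rep ? &handle : NULL);
 *	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
 *				    (void *) fence);
 */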
2355 
2356 /**
2357  * vmw_execbuf_copy_fence_user - copy fence object information to
2358  * user-space.
2359  *
2360  * @dev_priv: Pointer to a vmw_private struct.
2361  * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
2362  * @ret: Return value from fence object creation.
2363  * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
2364  * which the information should be copied.
2365  * @fence: Pointer to the fence object.
2366  * @fence_handle: User-space fence handle.
2367  *
2368  * This function copies fence information to user-space. If copying fails,
2369  * the user-space struct drm_vmw_fence_rep::error member is hopefully
2370  * left untouched, and if it was preloaded with -EFAULT by user-space,
2371  * the error will hopefully be detected.
2372  * Also, if copying fails, user-space will be unable to signal the fence
2373  * object, so we wait for it immediately and then unreference the
2374  * user-space reference.
2375  */
2376 void
2377 vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
2378 			    struct vmw_fpriv *vmw_fp,
2379 			    int ret,
2380 			    struct drm_vmw_fence_rep __user *user_fence_rep,
2381 			    struct vmw_fence_obj *fence,
2382 			    uint32_t fence_handle)
2383 {
2384 	struct drm_vmw_fence_rep fence_rep;
2385 
2386 	if (user_fence_rep == NULL)
2387 		return;
2388 
2389 	memset(&fence_rep, 0, sizeof(fence_rep));
2390 
2391 	fence_rep.error = ret;
2392 	if (ret == 0) {
2393 		BUG_ON(fence == NULL);
2394 
2395 		fence_rep.handle = fence_handle;
2396 		fence_rep.seqno = fence->base.seqno;
2397 		vmw_update_seqno(dev_priv, &dev_priv->fifo);
2398 		fence_rep.passed_seqno = dev_priv->last_read_seqno;
2399 	}
2400 
2401 	/*
2402 	 * copy_to_user errors will be detected by user space not
2403 	 * seeing fence_rep::error filled in. Typically
2404 	 * user-space would have pre-set that member to -EFAULT.
2405 	 */
2406 	ret = copy_to_user(user_fence_rep, &fence_rep,
2407 			   sizeof(fence_rep));
2408 
2409 	/*
2410 	 * User-space lost the fence object. We need to sync
2411 	 * and unreference the handle.
2412 	 */
2413 	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
2414 		ttm_ref_object_base_unref(vmw_fp->tfile,
2415 					  fence_handle, TTM_REF_USAGE);
2416 		DRM_ERROR("Fence copy error. Syncing.\n");
2417 		(void) vmw_fence_obj_wait(fence, false, false,
2418 					  VMW_FENCE_WAIT_TIMEOUT);
2419 	}
2420 }
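
/*
 * Hypothetical user-space counterpart illustrating the -EFAULT preload
 * convention described above (a sketch only, not taken from a real
 * client; handle_lost_fence() is a made-up name):
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *	struct drm_vmw_execbuf_arg arg = {
 *		.commands = (uintptr_t)cmd_buf,
 *		.command_size = cmd_size,
 *		.fence_rep = (uintptr_t)&rep,
 *		.version = DRM_VMW_EXECBUF_VERSION,
 *	};
 *
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *	if (rep.error != 0)
 *		handle_lost_fence();
 */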
2421 
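/**
 * vmw_execbuf_process - Validate and submit a command stream
 *
 * @file_priv: The calling file.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space address of the command stream, used when
 * @kernel_commands is NULL.
 * @kernel_commands: Kernel address of the command stream, or NULL to
 * copy @user_commands through the bounce buffer.
 * @command_size: Size of the command stream in bytes.
 * @throttle_us: If non-zero, throttle command submission to this lag in
 * microseconds.
 * @user_fence_rep: Optional user-space address of a struct
 * drm_vmw_fence_rep to which fence information is copied.
 * @out_fence: If non-NULL, the created fence object is handed out here
 * instead of being unreferenced.
 */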
2424 int vmw_execbuf_process(struct drm_file *file_priv,
2425 			struct vmw_private *dev_priv,
2426 			void __user *user_commands,
2427 			void *kernel_commands,
2428 			uint32_t command_size,
2429 			uint64_t throttle_us,
2430 			struct drm_vmw_fence_rep __user *user_fence_rep,
2431 			struct vmw_fence_obj **out_fence)
2432 {
2433 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
2434 	struct vmw_fence_obj *fence = NULL;
2435 	struct vmw_resource *error_resource;
2436 	struct list_head resource_list;
2437 	struct ww_acquire_ctx ticket;
2438 	uint32_t handle;
2439 	void *cmd;
2440 	int ret;
2441 
2442 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
2443 	if (unlikely(ret != 0))
2444 		return -ERESTARTSYS;
2445 
2446 	if (kernel_commands == NULL) {
2447 		sw_context->kernel = false;
2448 
2449 		ret = vmw_resize_cmd_bounce(sw_context, command_size);
2450 		if (unlikely(ret != 0))
2451 			goto out_unlock;
2452 
2454 		ret = copy_from_user(sw_context->cmd_bounce,
2455 				     user_commands, command_size);
2456 
2457 		if (unlikely(ret != 0)) {
2458 			ret = -EFAULT;
2459 			DRM_ERROR("Failed copying commands.\n");
2460 			goto out_unlock;
2461 		}
2462 		kernel_commands = sw_context->cmd_bounce;
2463 	} else
2464 		sw_context->kernel = true;
2465 
2466 	sw_context->fp = vmw_fpriv(file_priv);
2467 	sw_context->cur_reloc = 0;
2468 	sw_context->cur_val_buf = 0;
2469 	INIT_LIST_HEAD(&sw_context->resource_list);
2470 	sw_context->cur_query_bo = dev_priv->pinned_bo;
2471 	sw_context->last_query_ctx = NULL;
2472 	sw_context->needs_post_query_barrier = false;
2473 	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
2474 	INIT_LIST_HEAD(&sw_context->validate_nodes);
2475 	INIT_LIST_HEAD(&sw_context->res_relocations);
2476 	if (!sw_context->res_ht_initialized) {
2477 		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
2478 		if (unlikely(ret != 0))
2479 			goto out_unlock;
2480 		sw_context->res_ht_initialized = true;
2481 	}
2482 	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
2483 
2484 	INIT_LIST_HEAD(&resource_list);
2485 	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
2486 				command_size);
2487 	if (unlikely(ret != 0))
2488 		goto out_err_nores;
2489 
2490 	ret = vmw_resources_reserve(sw_context);
2491 	if (unlikely(ret != 0))
2492 		goto out_err_nores;
2493 
2494 	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
2495 				     true, NULL);
2496 	if (unlikely(ret != 0))
2497 		goto out_err_nores;
2498 
2499 	ret = vmw_validate_buffers(dev_priv, sw_context);
2500 	if (unlikely(ret != 0))
2501 		goto out_err;
2502 
2503 	ret = vmw_resources_validate(sw_context);
2504 	if (unlikely(ret != 0))
2505 		goto out_err;
2506 
2507 	if (throttle_us) {
2508 		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
2509 				   throttle_us);
2510 
2511 		if (unlikely(ret != 0))
2512 			goto out_err;
2513 	}
2514 
2515 	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
2516 	if (unlikely(ret != 0)) {
2517 		ret = -ERESTARTSYS;
2518 		goto out_err;
2519 	}
2520 
2521 	if (dev_priv->has_mob) {
2522 		ret = vmw_rebind_contexts(sw_context);
2523 		if (unlikely(ret != 0))
2524 			goto out_unlock_binding;
2525 	}
2526 
2527 	cmd = vmw_fifo_reserve(dev_priv, command_size);
2528 	if (unlikely(cmd == NULL)) {
2529 		DRM_ERROR("Failed reserving fifo space for commands.\n");
2530 		ret = -ENOMEM;
2531 		goto out_unlock_binding;
2532 	}
2533 
2534 	vmw_apply_relocations(sw_context);
2535 	memcpy(cmd, kernel_commands, command_size);
2536 
2537 	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
2538 	vmw_resource_relocations_free(&sw_context->res_relocations);
2539 
2540 	vmw_fifo_commit(dev_priv, command_size);
2541 	mutex_unlock(&dev_priv->binding_mutex);
2542 
2543 	vmw_query_bo_switch_commit(dev_priv, sw_context);
2544 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
2545 					 &fence,
2546 					 (user_fence_rep) ? &handle : NULL);
2547 	/*
2548 	 * This error is harmless, because if fence submission fails,
2549 	 * vmw_fifo_send_fence will sync. The error will be propagated to
2550 	 * user-space in @user_fence_rep.
2551 	 */
2552 
2553 	if (ret != 0)
2554 		DRM_ERROR("Fence submission error. Syncing.\n");
2555 
2556 	vmw_resource_list_unreserve(&sw_context->resource_list, false);
2557 
2558 	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2559 				    (void *) fence);
2560 
2561 	if (unlikely(dev_priv->pinned_bo != NULL &&
2562 		     !dev_priv->query_cid_valid))
2563 		__vmw_execbuf_release_pinned_bo(dev_priv, fence);
2564 
2565 	vmw_clear_validations(sw_context);
2566 	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
2567 				    user_fence_rep, fence, handle);
2568 
2569 	/* Don't unreference when handing fence out */
2570 	if (unlikely(out_fence != NULL)) {
2571 		*out_fence = fence;
2572 		fence = NULL;
2573 	} else if (likely(fence != NULL)) {
2574 		vmw_fence_obj_unreference(&fence);
2575 	}
2576 
2577 	list_splice_init(&sw_context->resource_list, &resource_list);
2578 	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
2579 	mutex_unlock(&dev_priv->cmdbuf_mutex);
2580 
2581 	/*
2582 	 * Unreference resources outside of the cmdbuf_mutex to
2583 	 * avoid deadlocks in resource destruction paths.
2584 	 */
2585 	vmw_resource_list_unreference(&resource_list);
2586 
2587 	return 0;
2588 
2589 out_unlock_binding:
2590 	mutex_unlock(&dev_priv->binding_mutex);
2591 out_err:
2592 	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
2593 out_err_nores:
2594 	vmw_resource_list_unreserve(&sw_context->resource_list, true);
2595 	vmw_resource_relocations_free(&sw_context->res_relocations);
2596 	vmw_free_relocations(sw_context);
2597 	vmw_clear_validations(sw_context);
2598 	if (unlikely(dev_priv->pinned_bo != NULL &&
2599 		     !dev_priv->query_cid_valid))
2600 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2601 out_unlock:
2602 	list_splice_init(&sw_context->resource_list, &resource_list);
2603 	error_resource = sw_context->error_resource;
2604 	sw_context->error_resource = NULL;
2605 	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
2606 	mutex_unlock(&dev_priv->cmdbuf_mutex);
2607 
2608 	/*
2609 	 * Unreference resources outside of the cmdbuf_mutex to
2610 	 * avoid deadlocks in resource destruction paths.
2611 	 */
2612 	vmw_resource_list_unreference(&resource_list);
2613 	if (unlikely(error_resource != NULL))
2614 		vmw_resource_unreference(&error_resource);
2615 
2616 	return ret;
2617 }
2618 
2619 /**
2620  * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
2621  *
2622  * @dev_priv: The device private structure.
2623  *
2624  * This function is called to idle the fifo and unpin the query buffer
2625  * if the normal way to do this hits an error, which should typically be
2626  * extremely rare.
2627  */
2628 static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
2629 {
2630 	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
2631 
2632 	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
2633 	vmw_bo_pin(dev_priv->pinned_bo, false);
2634 	vmw_bo_pin(dev_priv->dummy_query_bo, false);
2635 	dev_priv->dummy_query_bo_pinned = false;
2636 }
2637 
2639 /**
2640  * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2641  * query bo.
2642  *
2643  * @dev_priv: The device private structure.
2644  * @fence: If non-NULL should point to a struct vmw_fence_obj issued
2645  * _after_ a query barrier that flushes all queries touching the current
2646  * buffer pointed to by @dev_priv->pinned_bo
2647  *
2648  * This function should be used to unpin the pinned query bo, or
2649  * as a query barrier when we need to make sure that all queries have
2650  * finished before the next fifo command. (For example on hardware
2651  * context destructions where the hardware may otherwise leak unfinished
2652  * queries).
2653  *
2654  * This function does not return any failure codes, but makes attempts
2655  * to do safe unpinning in case of errors.
2656  *
2657  * The function will synchronize on the previous query barrier, and will
2658  * thus not finish until that barrier has executed.
2659  *
2660  * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
2661  * before calling this function.
2662  */
2663 void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2664 				     struct vmw_fence_obj *fence)
2665 {
2666 	int ret = 0;
2667 	struct list_head validate_list;
2668 	struct ttm_validate_buffer pinned_val, query_val;
2669 	struct vmw_fence_obj *lfence = NULL;
2670 	struct ww_acquire_ctx ticket;
2671 
2672 	if (dev_priv->pinned_bo == NULL)
2673 		goto out_unlock;
2674 
2675 	INIT_LIST_HEAD(&validate_list);
2676 
2677 	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
2678 	pinned_val.shared = false;
2679 	list_add_tail(&pinned_val.head, &validate_list);
2680 
2681 	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
2682 	query_val.shared = false;
2683 	list_add_tail(&query_val.head, &validate_list);
2684 
2685 	ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
2686 				     false, NULL);
2687 	if (unlikely(ret != 0)) {
2688 		vmw_execbuf_unpin_panic(dev_priv);
2689 		goto out_no_reserve;
2690 	}
2691 
2692 	if (dev_priv->query_cid_valid) {
2693 		BUG_ON(fence != NULL);
2694 		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
2695 		if (unlikely(ret != 0)) {
2696 			vmw_execbuf_unpin_panic(dev_priv);
2697 			goto out_no_emit;
2698 		}
2699 		dev_priv->query_cid_valid = false;
2700 	}
2701 
2702 	vmw_bo_pin(dev_priv->pinned_bo, false);
2703 	vmw_bo_pin(dev_priv->dummy_query_bo, false);
2704 	dev_priv->dummy_query_bo_pinned = false;
2705 
2706 	if (fence == NULL) {
2707 		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
2708 						  NULL);
2709 		fence = lfence;
2710 	}
2711 	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
2712 	if (lfence != NULL)
2713 		vmw_fence_obj_unreference(&lfence);
2714 
2715 	ttm_bo_unref(&query_val.bo);
2716 	ttm_bo_unref(&pinned_val.bo);
2717 	ttm_bo_unref(&dev_priv->pinned_bo);
2718 
2719 out_unlock:
2720 	return;
2721 
2722 out_no_emit:
2723 	ttm_eu_backoff_reservation(&ticket, &validate_list);
2724 out_no_reserve:
2725 	ttm_bo_unref(&query_val.bo);
2726 	ttm_bo_unref(&pinned_val.bo);
2727 	ttm_bo_unref(&dev_priv->pinned_bo);
2728 }
2729 
2730 /**
2731  * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
2732  * query bo.
2733  *
2734  * @dev_priv: The device private structure.
2735  *
2736  * This function should be used to unpin the pinned query bo, or
2737  * as a query barrier when we need to make sure that all queries have
2738  * finished before the next fifo command. (For example on hardware
2739  * context destructions where the hardware may otherwise leak unfinished
2740  * queries).
2741  *
2742  * This function does not return any failure codes, but makes attempts
2743  * to do safe unpinning in case of errors.
2744  *
2745  * The function will synchronize on the previous query barrier, and will
2746  * thus not finish until that barrier has executed.
2747  */
2748 void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
2749 {
2750 	mutex_lock(&dev_priv->cmdbuf_mutex);
2751 	if (dev_priv->query_cid_valid)
2752 		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
2753 	mutex_unlock(&dev_priv->cmdbuf_mutex);
2754 }
2755 
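/**
 * vmw_execbuf_ioctl - ioctl entry point for command stream submission
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_execbuf_arg.
 * @file_priv: The calling file.
 */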
2757 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
2758 		      struct drm_file *file_priv)
2759 {
2760 	struct vmw_private *dev_priv = vmw_priv(dev);
2761 	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
2762 	int ret;
2763 
2764 	/*
2765 	 * This will allow us to extend the ioctl argument while
2766 	 * maintaining backwards compatibility:
2767 	 * We take different code paths depending on the value of
2768 	 * arg->version.
2769 	 */
2770 
2771 	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
2772 		DRM_ERROR("Incorrect execbuf version.\n");
2773 		DRM_ERROR("You're running outdated experimental "
2774 			  "vmwgfx user-space drivers.\n");
2775 		return -EINVAL;
2776 	}
2777 
2778 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
2779 	if (unlikely(ret != 0))
2780 		return ret;
2781 
2782 	ret = vmw_execbuf_process(file_priv, dev_priv,
2783 				  (void __user *)(unsigned long)arg->commands,
2784 				  NULL, arg->command_size, arg->throttle_us,
2785 				  (void __user *)(unsigned long)arg->fence_rep,
2786 				  NULL);
2787 	ttm_read_unlock(&dev_priv->reservation_sem);
2788 	if (unlikely(ret != 0))
2789 		return ret;
2790 
2791 	vmw_kms_cursor_post_execbuf(dev_priv);
2792 
2793 	return 0;
2794 }
2795