/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10

struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};


static uint64_t vmw_user_stream_size;

static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
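
/*
 * A minimal usage sketch (illustration, not part of the driver): the
 * _unless_doomed variant is for lookup paths that may race with the
 * final kref put, where a plain kref_get() on a zero refcount would
 * be a bug. Here @found is a hypothetical pointer obtained from a
 * lookup structure:
 *
 *	struct vmw_resource *res =
 *		vmw_resource_reference_unless_doomed(found);
 *	if (!res)
 *		return NULL;	// object is already being destroyed
 */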

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.shared = false;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		res->hw_destroy(res);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_context_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and
 * set @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
203 
204 /**
205  * vmw_resource_init - initialize a struct vmw_resource
206  *
207  * @dev_priv:       Pointer to a device private struct.
208  * @res:            The struct vmw_resource to initialize.
209  * @obj_type:       Resource object type.
210  * @delay_id:       Boolean whether to defer device id allocation until
211  *                  the first validation.
212  * @res_free:       Resource destructor.
213  * @func:           Resource function table.
214  */
vmw_resource_init(struct vmw_private * dev_priv,struct vmw_resource * res,bool delay_id,void (* res_free)(struct vmw_resource * res),const struct vmw_res_func * func)215 int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
216 		      bool delay_id,
217 		      void (*res_free) (struct vmw_resource *res),
218 		      const struct vmw_res_func *func)
219 {
220 	kref_init(&res->kref);
221 	res->hw_destroy = NULL;
222 	res->res_free = res_free;
223 	res->avail = false;
224 	res->dev_priv = dev_priv;
225 	res->func = func;
226 	INIT_LIST_HEAD(&res->lru_head);
227 	INIT_LIST_HEAD(&res->mob_head);
228 	INIT_LIST_HEAD(&res->binding_head);
229 	res->id = -1;
230 	res->backup = NULL;
231 	res->backup_offset = 0;
232 	res->backup_dirty = false;
233 	res->res_dirty = false;
234 	if (delay_id)
235 		return 0;
236 	else
237 		return vmw_resource_alloc_id(res);
238 }

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activation basically means that the function vmw_resource_lookup
 * will find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
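
/*
 * Typical lifecycle sketch (see vmw_stream_init() below for a real
 * caller): initialize the software state, make the hardware aware of
 * the resource, then activate it so lookups can find it. my_res_free,
 * my_func and my_hw_destroy are hypothetical resource-type specific
 * callbacks:
 *
 *	ret = vmw_resource_init(dev_priv, res, false, my_res_free,
 *				&my_func);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	// ... issue device commands creating the hardware resource ...
 *	vmw_resource_activate(res, my_hw_destroy);
 */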

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
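
/*
 * Usage sketch (assuming a valid @handle obtained from user space):
 * on success the returned resource is refcounted and must be released
 * with vmw_resource_unreference() when the caller is done with it.
 *
 *	struct vmw_resource *res;
 *	int ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *						  user_surface_converter,
 *						  &res);
 *	if (ret == 0) {
 *		// ... use the surface resource ...
 *		vmw_resource_unreference(&res);
 *	}
 */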

/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
	return ret;
}

/**
 * Buffer management.
 */

/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
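
/*
 * Worked example (an illustration, assuming 4 KiB pages and 64-bit
 * pointers): for a 64 KiB user buffer, num_pages = 16 and
 * page_array_size = ttm_round_pot(16 * 8) = 128 bytes. With
 * map_mode == vmw_dma_alloc_coherent, another
 * ttm_round_pot(16 * sizeof(dma_addr_t)) = 128 bytes are added, on
 * top of the rounded user_struct_size and the TTM backend size.
 */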

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, NULL, bo_free);
	return ret;
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;
	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf,
			  struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	if (p_base) {
		*p_base = &user_bo->prime.base;
		kref_get(&(*p_base)->refcount);
	}
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}

/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}

/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
		long lret;

		if (nonblock)
			return reservation_object_test_signaled_rcu(bo->resv, true) ? 0 : -EBUSY;

		lret = reservation_object_wait_timeout_rcu(bo->resv, true, true, MAX_SCHEDULE_TIMEOUT);
		if (!lret)
			return -EBUSY;
		else if (lret < 0)
			return lret;
		return 0;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}

/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}
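
/*
 * Sketch of the expected pairing (driven from the synccpu ioctl
 * below): a blocking grab should eventually be matched by a release
 * with the same flags; otherwise it is cleaned up when @tfile closes.
 *
 *	ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile,
 *					   drm_vmw_synccpu_read |
 *					   drm_vmw_synccpu_write);
 *	// ... CPU accesses the buffer contents ...
 *	ret = vmw_user_dmabuf_synccpu_release(handle, tfile,
 *					      drm_vmw_synccpu_read |
 *					      drm_vmw_synccpu_write);
 */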

/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct ttm_base_object *buffer_base;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
					     &buffer_base);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		ttm_base_object_unref(&buffer_base);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf,
				    NULL);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out,
			   struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	if (p_base)
		*p_base = base;
	else
		ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
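
/*
 * Usage sketch: when @p_base is non-NULL, both returned references
 * must be dropped by the caller; compare the drm_vmw_synccpu_grab
 * case in vmw_user_dmabuf_synccpu_ioctl() above.
 *
 *	struct vmw_dma_buffer *dma_buf;
 *	struct ttm_base_object *base;
 *	ret = vmw_user_dmabuf_lookup(tfile, handle, &dma_buf, &base);
 *	if (ret == 0) {
 *		// ... use the buffer ...
 *		vmw_dmabuf_unreference(&dma_buf);
 *		ttm_base_object_unref(&base);
 *	}
 */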

int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL);
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}


/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf, NULL);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
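
/*
 * Worked example (illustration only): a 1920x1080 dumb buffer at
 * bpp = 32 gives pitch = 1920 * 4 = 7680 bytes and
 * size = 7680 * 1080 = 8294400 bytes; the buffer allocation path
 * then rounds that up to whole pages.
 */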

/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}

/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		lockdep_assert_held(&new_backup->base.resv->lock.base);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	val_buf->shared = false;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @no_backup:      Whether to skip allocation of a backup buffer.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
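
/*
 * Sketch of the typical command-submission flow (a simplification of
 * what the execbuf path does; backup buffer reservation and error
 * handling are omitted):
 *
 *	ret = vmw_resource_reserve(res, false);
 *	// ... reserve and validate res->backup ...
 *	ret = vmw_resource_validate(res);
 *	// ... submit commands referencing the resource ...
 *	vmw_resource_unreserve(res, NULL, 0);
 */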

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.shared = false;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}


/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	val_buf.shared = false;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}

/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		reservation_object_add_excl_fence(bo->resv, &fence->base);
		fence_put(&fence->base);
	} else
		reservation_object_add_excl_fence(bo->resv, &fence->base);
}
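
/*
 * Usage sketch: with @bo reserved, passing a NULL fence makes the
 * function generate one, so fencing a just-validated buffer can be
 * as simple as:
 *
 *	vmw_fence_single_bo(bo, NULL);
 *	ttm_bo_unreserve(bo);
 */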

/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo::res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);

	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;
		val_buf.shared = false;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		(void) ttm_bo_wait(bo, false, false, false);
	}
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}