/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
#include "vmwgfx_binding.h"
#include <ttm/ttm_placement.h>
#include "device_include/svga3d_surfacedefs.h"


/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @prime:          The TTM prime object, whose embedded base object handles
 *                  user-space visibility.
 * @srf:            The surface metadata.
 * @size:           TTM accounting size for the surface.
 * @master:         Master of the creating client. Used for security checks.
 * @backup_base:    The TTM base object of the backup buffer, if any.
 */
struct vmw_user_surface {
	struct ttm_prime_object prime;
	struct vmw_surface srf;
	uint32_t size;
	struct drm_master *master;
	struct ttm_base_object *backup_base;
};

/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:           Surface face.
 * @mip:            Mip level.
 * @bo_offset:      Offset into backing store of this mip level.
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};
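
/*
 * Note: the per-surface offset array is filled in face-major, mip-minor
 * order in vmw_surface_define_ioctl(), matching the order in which
 * vmw_surface_dma_encode() walks the images.
 */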

static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);
static int vmw_gb_surface_create(struct vmw_resource *res);
static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_surface_destroy(struct vmw_resource *res);


static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;


static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};

static const struct vmw_res_func vmw_gb_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed surfaces",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_surface_create,
	.destroy = vmw_gb_surface_destroy,
	.bind = vmw_gb_surface_bind,
	.unbind = vmw_gb_surface_unbind
};
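
/*
 * The two function tables above reflect the two surface flavors the
 * device supports: legacy surfaces are created and destroyed with
 * explicit FIFO commands and shuttle their contents to and from a
 * backup buffer with surface DMA, while guest-backed surfaces live in
 * mobs that are attached and detached with SVGA_3D_CMD_BIND_GB_SURFACE.
 */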

/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};
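
/*
 * One struct vmw_surface_dma is encoded per image (face / mip level);
 * vmw_surface_dma_size() therefore multiplies by srf->num_sizes.
 */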

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};


/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}


/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}


/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = srf->format;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ?  SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}


/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	void *cmd;

	if (res->func->destroy == vmw_gb_surface_destroy) {
		(void) vmw_gb_surface_destroy(res);
		return;
	}

	if (res->id != -1) {

		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: Use an atomic used_memory_size, or a separate
		 * lock, to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		srf = vmw_res_to_srf(res);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
	vmw_fifo_resource_dec(dev_priv);
}

/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one, and optionally
 * create a hw surface.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Allocate an id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 * @bind:           Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 *                       surface validation process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}


/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 *                         surface eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 * @readback:       Whether to read the surface contents back into the
 *                  backup buffer before unbinding.
 * @val_buf:        Pointer to a struct ttm_validate_buffer containing
 *                  information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}

/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 *                          resource eviction process.
 *
 * @res:            Pointer to a struct vmw_resource embedded in a struct
 *                  vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		return -ENOMEM;
	}

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;
}


/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv:       Pointer to a device private struct.
 * @srf:            Pointer to the struct vmw_surface to initialize.
 * @res_free:       Pointer to a resource destructor used to free
 *                  the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
	if (!dev_priv->has_mob)
		vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				(dev_priv->has_mob) ? &vmw_gb_surface_func :
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		if (!dev_priv->has_mob)
			vmw_fifo_resource_dec(dev_priv);
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	INIT_LIST_HEAD(&srf->view_list);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}

/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 *                                user visible surfaces
 *
 * @base:           Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface,
			      prime.base)->srf.res);
}

/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res:            A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	if (user_srf->master)
		drm_master_put(&user_srf->master);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_prime_object_kfree(user_srf, prime);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}

/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 *                                 destructor
 *
 * @p_base:         Pointer to a pointer to a TTM base object
 *                  embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	if (user_srf->backup_base)
		ttm_base_object_unref(&user_srf->backup_base);
	vmw_resource_unreference(&res);
}

/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 *                             the user surface destroy functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 *                            the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS ||
	    num_sizes == 0)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));


	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		DRM_ERROR("Format requested is: %d\n", req->format);
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;
	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 0;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
	res->backup_size = cur_bo_offset;
	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile = NULL;
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	/*
	 * A gb-aware client referencing a shared surface will
	 * expect a backup buffer to be present.
	 */
	if (dev_priv->has_mob && req->shareable) {
		uint32_t backup_handle;

		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
					    res->backup_size,
					    true,
					    &backup_handle,
					    &res->backup,
					    &user_srf->backup_base);
		if (unlikely(ret != 0)) {
			vmw_resource_unreference(&res);
			goto out_unlock;
		}
	}

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->shareable, VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->prime.base.hash.key;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_prime_object_kfree(user_srf, prime);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
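
/*
 * Illustrative user-space call sequence for the define ioctl above
 * (a hedged sketch, assuming libdrm's drmCommandWriteRead() and the
 * argument layout from include/uapi/drm/vmwgfx_drm.h; error handling
 * omitted):
 *
 *	union drm_vmw_surface_create_arg arg = { .req = {
 *		.format = SVGA3D_A8R8G8B8,
 *		.mip_levels = { 1, 0, 0, 0, 0, 0 },
 *		.size_addr = (unsigned long)&size,  // one drm_vmw_size
 *	} };
 *	drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE, &arg, sizeof(arg));
 *	// On success, arg.rep.sid holds the new surface handle.
 */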


static int
vmw_surface_handle_reference(struct vmw_private *dev_priv,
			     struct drm_file *file_priv,
			     uint32_t u_handle,
			     enum drm_vmw_handle_type handle_type,
			     struct ttm_base_object **base_p)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_surface *user_srf;
	uint32_t handle;
	struct ttm_base_object *base;
	int ret;

	if (handle_type == DRM_VMW_HANDLE_PRIME) {
		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
		if (unlikely(ret != 0))
			return ret;
	} else {
		if (unlikely(drm_is_render_client(file_priv))) {
			DRM_ERROR("Render client refused legacy "
				  "surface reference.\n");
			return -EACCES;
		}
		if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
			DRM_ERROR("Locked master refused legacy "
				  "surface reference.\n");
			return -EACCES;
		}

		handle = u_handle;
	}

	ret = -EINVAL;
	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		goto out_no_lookup;
	}

	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
		DRM_ERROR("Referenced object is not a surface.\n");
		goto out_bad_resource;
	}

	if (handle_type != DRM_VMW_HANDLE_PRIME) {
		user_srf = container_of(base, struct vmw_user_surface,
					prime.base);

		/*
		 * Make sure the surface creator has the same
		 * authenticating master.
		 */
		if (drm_is_primary_client(file_priv) &&
		    user_srf->master != file_priv->master) {
			DRM_ERROR("Trying to reference surface outside of"
				  " master domain.\n");
			ret = -EACCES;
			goto out_bad_resource;
		}

		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Could not add a reference to a surface.\n");
			goto out_bad_resource;
		}
	}

	*base_p = base;
	return 0;

out_bad_resource:
	ttm_base_object_unref(&base);
out_no_lookup:
	if (handle_type == DRM_VMW_HANDLE_PRIME)
		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);

	return ret;
}

/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 *                               the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, &srf->base_size,
				   sizeof(srf->base_size));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);
		ret = -EFAULT;
	}

	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_gb_surface_create - Encode a surface _define_ command.
 *
 * @res:        Pointer to a struct vmw_resource embedded in a struct
 *              vmw_surface.
 */
static int vmw_gb_surface_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint32_t cmd_len, cmd_id, submit_len;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface body;
	} *cmd;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBSurface_v2 body;
	} *cmd2;

	if (likely(res->id != -1))
		return 0;

	vmw_fifo_resource_inc(dev_priv);
	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	if (srf->array_size > 0) {
		/* has_dx checked on creation time. */
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
		cmd_len = sizeof(cmd2->body);
		submit_len = sizeof(*cmd2);
	} else {
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;
		cmd_len = sizeof(cmd->body);
		submit_len = sizeof(*cmd);
	}

	cmd = vmw_fifo_reserve(dev_priv, submit_len);
	cmd2 = (typeof(cmd2))cmd;
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	if (srf->array_size > 0) {
		cmd2->header.id = cmd_id;
		cmd2->header.size = cmd_len;
		cmd2->body.sid = srf->res.id;
		cmd2->body.surfaceFlags = srf->flags;
		cmd2->body.format = cpu_to_le32(srf->format);
		cmd2->body.numMipLevels = srf->mip_levels[0];
		cmd2->body.multisampleCount = srf->multisample_count;
		cmd2->body.autogenFilter = srf->autogen_filter;
		cmd2->body.size.width = srf->base_size.width;
		cmd2->body.size.height = srf->base_size.height;
		cmd2->body.size.depth = srf->base_size.depth;
		cmd2->body.arraySize = srf->array_size;
	} else {
		cmd->header.id = cmd_id;
		cmd->header.size = cmd_len;
		cmd->body.sid = srf->res.id;
		cmd->body.surfaceFlags = srf->flags;
		cmd->body.format = cpu_to_le32(srf->format);
		cmd->body.numMipLevels = srf->mip_levels[0];
		cmd->body.multisampleCount = srf->multisample_count;
		cmd->body.autogenFilter = srf->autogen_filter;
		cmd->body.size.width = srf->base_size.width;
		cmd->body.size.height = srf->base_size.height;
		cmd->body.size.depth = srf->base_size.depth;
	}

	vmw_fifo_commit(dev_priv, submit_len);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	vmw_fifo_resource_dec(dev_priv);
	return ret;
}


static int vmw_gb_surface_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd2;
	uint32_t submit_size;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

	cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd1 == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd1->header.size = sizeof(cmd1->body);
	cmd1->body.sid = res->id;
	cmd1->body.mobid = bo->mem.start;
	if (res->backup_dirty) {
		cmd2 = (void *) &cmd1[1];
		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		res->backup_dirty = false;
	}
	vmw_fifo_commit(dev_priv, submit_size);

	return 0;
}

static int vmw_gb_surface_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd2;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd3;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "unbinding.\n");
		return -ENOMEM;
	}

	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.sid = res->id;
		cmd3 = (void *) &cmd1[1];
	} else {
		cmd2 = (void *) cmd;
		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE;
		cmd2->header.size = sizeof(cmd2->body);
		cmd2->body.sid = res->id;
		cmd3 = (void *) &cmd2[1];
	}

	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
	cmd3->header.size = sizeof(cmd3->body);
	cmd3->body.sid = res->id;
	cmd3->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
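
/*
 * Note: on unbind, a readback preserves the surface contents in the
 * backup mob before the mob is detached, whereas an invalidate tells
 * the device the contents may be discarded.
 */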

static int vmw_gb_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBSurface body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	mutex_lock(&dev_priv->binding_mutex);
	vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
	vmw_binding_res_list_scrub(&res->binding_head);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	mutex_unlock(&dev_priv->binding_mutex);
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}


/**
 * vmw_gb_surface_define_ioctl - Ioctl function implementing
 *                               the user surface define functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_gb_surface_create_arg *arg =
	    (union drm_vmw_gb_surface_create_arg *)data;
	struct drm_vmw_gb_surface_create_req *req = &arg->req;
	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint32_t size;
	uint32_t backup_handle;

	if (req->multisample_count != 0)
		return -EINVAL;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	size = vmw_user_surface_size + 128;

	/* Define a surface based on the parameters. */
	ret = vmw_surface_gb_priv_define(dev,
			size,
			req->svga3d_flags,
			req->format,
			req->drm_surface_flags & drm_vmw_surface_flag_scanout,
			req->mip_levels,
			req->multisample_count,
			req->array_size,
			req->base_size,
			&srf);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(srf, struct vmw_user_surface, srf);
	if (drm_is_primary_client(file_priv))
		user_srf->master = drm_master_get(file_priv->master);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	res = &user_srf->srf.res;

	if (req->buffer_handle != SVGA3D_INVALID_ID) {
		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
					     &res->backup,
					     &user_srf->backup_base);
		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
		    res->backup_size) {
			DRM_ERROR("Surface backup buffer is too small.\n");
			vmw_dmabuf_unreference(&res->backup);
			ret = -EINVAL;
			goto out_unlock;
		}
	} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
		ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
					    res->backup_size,
					    req->drm_surface_flags &
					    drm_vmw_surface_flag_shareable,
					    &backup_handle,
					    &res->backup,
					    &user_srf->backup_base);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	tmp = vmw_resource_reference(res);
	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
				    req->drm_surface_flags &
				    drm_vmw_surface_flag_shareable,
				    VMW_RES_SURFACE,
				    &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->handle      = user_srf->prime.base.hash.key;
	rep->backup_size = res->backup_size;
	if (res->backup) {
		rep->buffer_map_handle =
			drm_vma_node_offset_addr(&res->backup->base.vma_node);
		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE;
		rep->buffer_handle = backup_handle;
	} else {
		rep->buffer_map_handle = 0;
		rep->buffer_size = 0;
		rep->buffer_handle = SVGA3D_INVALID_ID;
	}

	vmw_resource_unreference(&res);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
 *                                  the user surface reference functionality.
 *
 * @dev:            Pointer to a struct drm_device.
 * @data:           Pointer to data copied from / to user-space.
 * @file_priv:      Pointer to a drm file private structure.
 */
int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_gb_surface_reference_arg *arg =
	    (union drm_vmw_gb_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	uint32_t backup_handle;
	int ret;

	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
					   req->handle_type, &base);
	if (unlikely(ret != 0))
		return ret;

	user_srf = container_of(base, struct vmw_user_surface, prime.base);
	srf = &user_srf->srf;
	if (srf->res.backup == NULL) {
		DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
		ret = -EINVAL;
		goto out_bad_resource;
	}

	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
	ret = vmw_user_dmabuf_reference(tfile, srf->res.backup,
					&backup_handle);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a GB surface "
			  "backup buffer.\n");
		(void) ttm_ref_object_base_unref(tfile, base->hash.key,
						 TTM_REF_USAGE);
		goto out_bad_resource;
	}

	rep->creq.svga3d_flags = srf->flags;
	rep->creq.format = srf->format;
	rep->creq.mip_levels = srf->mip_levels[0];
	rep->creq.drm_surface_flags = 0;
	rep->creq.multisample_count = srf->multisample_count;
	rep->creq.autogen_filter = srf->autogen_filter;
	rep->creq.array_size = srf->array_size;
	rep->creq.buffer_handle = backup_handle;
	rep->creq.base_size = srf->base_size;
	rep->crep.handle = user_srf->prime.base.hash.key;
	rep->crep.backup_size = srf->res.backup_size;
	rep->crep.buffer_handle = backup_handle;
	rep->crep.buffer_map_handle =
		drm_vma_node_offset_addr(&srf->res.backup->base.vma_node);
	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

/**
 * vmw_surface_gb_priv_define - Define a private GB surface
 *
 * @dev:  Pointer to a struct drm_device
 * @user_accounting_size:  Used to track user-space memory usage; set
 *                         to 0 for kernel mode only memory
 * @svga3d_flags: SVGA3d surface flags for the device
 * @format: requested surface format
 * @for_scanout: true if intended to be used for scanout buffer
 * @num_mip_levels:  number of MIP levels
 * @multisample_count: number of samples per pixel
 * @array_size: Surface array size.
 * @size: width, height, depth of the surface requested
 * @srf_out: allocated surface.  Set to NULL on failure.
 *
 * GB surfaces allocated by this function will not have a user mode handle, and
 * thus will only be visible to vmwgfx.  For optimization reasons the
 * surface may later be given a user mode handle by another function to make
 * it available to user mode drivers.
 */
int vmw_surface_gb_priv_define(struct drm_device *dev,
			       uint32_t user_accounting_size,
			       uint32_t svga3d_flags,
			       SVGA3dSurfaceFormat format,
			       bool for_scanout,
			       uint32_t num_mip_levels,
			       uint32_t multisample_count,
			       uint32_t array_size,
			       struct drm_vmw_size size,
			       struct vmw_surface **srf_out)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	int ret;
	u32 num_layers;

	*srf_out = NULL;

	if (for_scanout) {
		if (!svga3dsurface_is_screen_target_format(format)) {
			DRM_ERROR("Invalid Screen Target surface format.\n");
			return -EINVAL;
		}
	} else {
		const struct svga3d_surface_desc *desc;

		desc = svga3dsurface_get_desc(format);
		if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
			DRM_ERROR("Invalid surface format.\n");
			return -EINVAL;
		}
	}

	/* array_size must be zero for non-DX hosts. */
	if (array_size > 0 && !dev_priv->has_dx) {
		DRM_ERROR("Tried to create DX surface on non-DX host.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   user_accounting_size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	*srf_out = &user_srf->srf;
	user_srf->size = user_accounting_size;
	user_srf->prime.base.shareable = false;
	user_srf->prime.base.tfile     = NULL;

	srf = &user_srf->srf;
	srf->flags             = svga3d_flags;
	srf->format            = format;
	srf->scanout           = for_scanout;
	srf->mip_levels[0]     = num_mip_levels;
	srf->num_sizes         = 1;
	srf->sizes             = NULL;
	srf->offsets           = NULL;
	srf->base_size         = size;
	srf->autogen_filter    = SVGA3D_TEX_FILTER_NONE;
	srf->array_size        = array_size;
	srf->multisample_count = multisample_count;

	if (array_size)
		num_layers = array_size;
	else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP)
		num_layers = SVGA3D_MAX_SURFACE_FACES;
	else
		num_layers = 1;

	srf->res.backup_size   =
		svga3dsurface_get_serialized_size(srf->format,
						  srf->base_size,
						  srf->mip_levels[0],
						  num_layers);

	if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
		srf->res.backup_size += sizeof(SVGA3dDXSOState);

	if (dev_priv->active_display_unit == vmw_du_screen_target &&
	    for_scanout)
		srf->flags |= SVGA3D_SURFACE_SCREENTARGET;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */
	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);

	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;

out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
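
/*
 * Illustrative in-kernel usage of the helper above (a hedged sketch;
 * the flag and format values are examples only, and error handling is
 * omitted). Display code can create a kernel-private, guest-backed
 * surface like this:
 *
 *	struct vmw_surface *srf;
 *	struct drm_vmw_size size = { .width = w, .height = h, .depth = 1 };
 *
 *	ret = vmw_surface_gb_priv_define(dev, 0, // kernel-only accounting
 *					 SVGA3D_SURFACE_SCREENTARGET,
 *					 SVGA3D_X8R8G8B8, true, // scanout
 *					 1, 0, 0, size, &srf);
 */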