/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_

#include "vmwgfx_reg.h"
#include <drm/drmP.h>
#include <drm/vmwgfx_drm.h>
#include <drm/drm_hashtab.h>
#include <linux/suspend.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_lock.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"

#define VMWGFX_DRIVER_DATE "20140704"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 6
#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
#define VMWGFX_MAX_DISPLAYS 16
#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 0

/*
 * Perhaps we should have sysfs entries for these.
 */
#define VMWGFX_NUM_GB_CONTEXT 256
#define VMWGFX_NUM_GB_SHADER 20000
#define VMWGFX_NUM_GB_SURFACE 32768
#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
#define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
			VMWGFX_NUM_GB_SHADER +\
			VMWGFX_NUM_GB_SURFACE +\
			VMWGFX_NUM_GB_SCREEN_TARGET)

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
#define VMW_PL_MOB TTM_PL_PRIV1
#define VMW_PL_FLAG_MOB TTM_PL_FLAG_PRIV1

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4

struct vmw_fpriv {
	struct drm_master *locked_master;
	struct ttm_object_file *tfile;
	struct list_head fence_events;
	bool gb_aware;
};

struct vmw_dma_buffer {
	struct ttm_buffer_object base;
	struct list_head res_list;
};

/**
 * struct vmw_validate_buffer - Carries validation info about buffers.
 *
 * @base: Validation info for TTM.
 * @hash: Hash entry for quick lookup of the TTM buffer object.
 * @validate_as_mob: Whether the buffer should be validated as a MOB.
 *
 * This structure also contains driver-private validation info
 * on top of the info needed by TTM.
 */
struct vmw_validate_buffer {
	struct ttm_validate_buffer base;
	struct drm_hash_item hash;
	bool validate_as_mob;
};

struct vmw_res_func;
struct vmw_resource {
	struct kref kref;
	struct vmw_private *dev_priv;
	int id;
	bool avail;
	unsigned long backup_size;
	bool res_dirty; /* Protected by backup buffer reserved */
	bool backup_dirty; /* Protected by backup buffer reserved */
	struct vmw_dma_buffer *backup;
	unsigned long backup_offset;
	const struct vmw_res_func *func;
	struct list_head lru_head; /* Protected by the resource lock */
	struct list_head mob_head; /* Protected by @backup reserved */
	struct list_head binding_head; /* Protected by binding_mutex */
	void (*res_free) (struct vmw_resource *res);
	void (*hw_destroy) (struct vmw_resource *res);
};


/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_max
};

/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_compat_shader
};

struct vmw_cmdbuf_res_manager;

struct vmw_cursor_snooper {
	struct drm_crtc *crtc;
	size_t age;
	uint32_t *image;
};

struct vmw_framebuffer;
struct vmw_surface_offset;

struct vmw_surface {
	struct vmw_resource res;
	uint32_t flags;
	uint32_t format;
	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	uint32_t num_sizes;
	bool scanout;
	/* TODO: so far just an extra pointer */
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	SVGA3dTextureFilter autogen_filter;
	uint32_t multisample_count;
};

struct vmw_marker_queue {
	struct list_head head;
	u64 lag;
	u64 lag_time;
	spinlock_t lock;
};

struct vmw_fifo_state {
	unsigned long reserved_size;
	__le32 *dynamic_buffer;
	__le32 *static_buffer;
	unsigned long static_buffer_size;
	bool using_bounce_buffer;
	uint32_t capabilities;
	struct mutex fifo_mutex;
	struct rw_semaphore rwsem;
	struct vmw_marker_queue marker_queue;
};

struct vmw_relocation {
	SVGAMobId *mob_loc;
	SVGAGuestPtr *location;
	uint32_t index;
};

/**
 * struct vmw_res_cache_entry - resource information cache entry
 *
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @node: Pointer to the resource's validation metadata, if any.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	bool valid;
	uint32_t handle;
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
};

/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page DMA mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_phys,           /* Use physical page addresses */
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,   /* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,       /* Unmap from DMA just before unbind */
	vmw_dma_map_max
};

/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_regions: Number of regions with device-address contiguous pages
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_regions;
	unsigned long num_pages;
};

/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 * @page: Function to return a pointer to the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
	struct page *(*page)(struct vmw_piter *);
};

/*
 * enum vmw_ctx_binding_type - abstract resource to context binding types
 */
enum vmw_ctx_binding_type {
	vmw_ctx_binding_shader,
	vmw_ctx_binding_rt,
	vmw_ctx_binding_tex,
	vmw_ctx_binding_max
};

/**
 * struct vmw_ctx_bindinfo - structure representing a single context binding
 *
 * @ctx: Pointer to the context structure. NULL means the binding is not
 * active.
 * @res: Non ref-counted pointer to the bound resource.
 * @bt: The binding type.
 * @scrubbed: Whether the binding has been scrubbed from the context.
 * @i1: Union of information needed to unbind.
 */
struct vmw_ctx_bindinfo {
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	enum vmw_ctx_binding_type bt;
	bool scrubbed;
	union {
		SVGA3dShaderType shader_type;
		SVGA3dRenderTargetType rt_type;
		uint32 texture_stage;
	} i1;
};

/**
 * struct vmw_ctx_binding - structure representing a single context binding
 *                        - suitable for tracking in a context
 *
 * @ctx_list: List head for context.
 * @res_list: List head for bound resource.
 * @bi: Binding info
 */
struct vmw_ctx_binding {
	struct list_head ctx_list;
	struct list_head res_list;
	struct vmw_ctx_bindinfo bi;
};


/**
 * struct vmw_ctx_binding_state - context binding state
 *
 * @list: linked list of individual bindings.
 * @render_targets: Render target bindings.
 * @texture_units: Texture units/samplers bindings.
 * @shaders: Shader bindings.
 *
 * Note that this structure also provides storage space for the individual
 * struct vmw_ctx_binding objects, so that no dynamic allocation is needed
 * for individual bindings.
 *
 */
struct vmw_ctx_binding_state {
	struct list_head list;
	struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX];
	struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS];
	struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_MAX];
};

struct vmw_sw_context {
	struct drm_open_hash res_ht;
	bool res_ht_initialized;
	bool kernel; /**< is the call made from the kernel */
	struct vmw_fpriv *fp;
	struct list_head validate_nodes;
	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
	uint32_t cur_reloc;
	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
	uint32_t cur_val_buf;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct list_head resource_list;
	struct ttm_buffer_object *cur_query_bo;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_resource *error_resource;
	struct vmw_ctx_binding_state staged_bindings;
	struct list_head staged_cmd_res;
};

struct vmw_legacy_display;
struct vmw_overlay;

struct vmw_master {
	struct ttm_lock lock;
	struct mutex fb_surf_mutex;
	struct list_head fb_surf;
};

struct vmw_vga_topology_state {
	uint32_t width;
	uint32_t height;
	uint32_t primary;
	uint32_t pos_x;
	uint32_t pos_y;
};

struct vmw_private {
	struct ttm_bo_device bdev;
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;

	struct vmw_fifo_state fifo;

	struct drm_device *dev;
	unsigned long vmw_chipset;
	unsigned int io_start;
	uint32_t vram_start;
	uint32_t vram_size;
	uint32_t prim_bb_mem;
	uint32_t mmio_start;
	uint32_t mmio_size;
	uint32_t fb_max_width;
	uint32_t fb_max_height;
	uint32_t initial_width;
	uint32_t initial_height;
	__le32 __iomem *mmio_virt;
	int mmio_mtrr;
	uint32_t capabilities;
	uint32_t max_gmr_ids;
	uint32_t max_gmr_pages;
	uint32_t max_mob_pages;
	uint32_t max_mob_size;
	uint32_t memory_size;
	bool has_gmr;
	bool has_mob;
	spinlock_t hw_lock;
	spinlock_t cap_lock;

	/*
	 * VGA registers.
	 */

	struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
	uint32_t vga_width;
	uint32_t vga_height;
	uint32_t vga_bpp;
	uint32_t vga_bpl;
	uint32_t vga_pitchlock;

	uint32_t num_displays;

	/*
	 * Framebuffer info.
	 */

	void *fb_info;
	struct vmw_legacy_display *ldu_priv;
	struct vmw_screen_object_display *sou_priv;
	struct vmw_overlay *overlay_priv;

	/*
	 * Context and surface management.
	 */

	rwlock_t resource_lock;
	struct idr res_idr[vmw_res_max];
	/*
	 * Block lastclose from racing with firstopen.
	 */

	struct mutex init_mutex;

	/*
	 * A resource manager for kernel-only surfaces and
	 * contexts.
	 */

	struct ttm_object_device *tdev;

	/*
	 * Fencing and IRQs.
	 */

	atomic_t marker_seq;
	wait_queue_head_t fence_queue;
	wait_queue_head_t fifo_queue;
	spinlock_t waiter_lock;
	int fence_queue_waiters; /* Protected by waiter_lock */
	int goal_queue_waiters; /* Protected by waiter_lock */
	atomic_t fifo_queue_waiters;
	uint32_t last_read_seqno;
	spinlock_t irq_lock;
	struct vmw_fence_manager *fman;
	uint32_t irq_mask;

	/*
	 * Device state
	 */

	uint32_t traces_state;
	uint32_t enable_state;
	uint32_t config_done_state;

	/**
	 * Execbuf
	 */
	/**
	 * Protected by the cmdbuf mutex.
	 */

	struct vmw_sw_context ctx;
	struct mutex cmdbuf_mutex;
	struct mutex binding_mutex;

	/**
	 * Operating mode.
	 */

	bool stealth;
	bool enable_fb;

	/**
	 * Master management.
	 */

	struct vmw_master *active_master;
	struct vmw_master fbdev_master;
	struct notifier_block pm_nb;
	bool suspended;

	struct mutex release_mutex;
	uint32_t num_3d_resources;

	/*
	 * Replace this with an rwsem as soon as we have down_xx_interruptible()
	 */
	struct ttm_lock reservation_sem;

	/*
	 * Query processing. These members
	 * are protected by the cmdbuf mutex.
	 */

	struct ttm_buffer_object *dummy_query_bo;
	struct ttm_buffer_object *pinned_bo;
	uint32_t query_cid;
	uint32_t query_cid_valid;
	bool dummy_query_bo_pinned;

	/*
	 * Surface swapping. The "surface_lru" list is protected by the
	 * resource lock in order to be able to destroy a surface and take
	 * it off the lru atomically. "used_memory_size" is currently
	 * protected by the cmdbuf mutex for simplicity.
	 */

	struct list_head res_lru[vmw_res_max];
	uint32_t used_memory_size;

	/*
	 * DMA mapping stuff.
	 */
	enum vmw_dma_map_mode map_mode;

	/*
	 * Guest Backed stuff
	 */
	struct ttm_buffer_object *otable_bo;
	struct vmw_otable *otables;
};

static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
	return container_of(res, struct vmw_surface, res);
}

static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
	return (struct vmw_private *)dev->dev_private;
}

static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
	return (struct vmw_fpriv *)file_priv->driver_priv;
}

static inline struct vmw_master *vmw_master(struct drm_master *master)
{
	return (struct vmw_master *) master->driver_priv;
}

/*
 * The locking here is fine-grained, so that it is performed once
 * for every read and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}

static inline uint32_t vmw_read(struct vmw_private *dev_priv,
				unsigned int offset)
{
	unsigned long irq_flags;
	u32 val;

	spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
	outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
	val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
	spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);

	return val;
}

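/*
 * A minimal usage sketch (illustration only, not part of the driver API):
 * the device is programmed through an index/value port pair, so register
 * access is just vmw_write()/vmw_read() with an SVGA register index. The
 * version negotiation below mirrors the probe path; SVGA_REG_ID and
 * SVGA_ID_2 come from the SVGA register headers.
 *
 *	uint32_t svga_id;
 *
 *	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
 *	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
 *	if (svga_id != SVGA_ID_2)
 *		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
 */
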
int vmw_3d_resource_inc(struct vmw_private *dev_priv, bool unhide_svga);
void vmw_3d_resource_dec(struct vmw_private *dev_priv, bool hide_svga);

/**
 * GMR utilities - vmwgfx_gmr.c
 */

extern int vmw_gmr_bind(struct vmw_private *dev_priv,
			const struct vmw_sg_table *vsgt,
			unsigned long num_pages,
			int gmr_id);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);

/**
 * Resource utilities - vmwgfx_resource.c
 */
struct vmw_user_resource_conv;

extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t handle,
				  struct vmw_surface **out_surf,
				  struct vmw_dma_buffer **out_buf);
extern int vmw_user_resource_lookup_handle(
	struct vmw_private *dev_priv,
	struct ttm_object_file *tfile,
	uint32_t handle,
	const struct vmw_user_resource_conv *converter,
	struct vmw_resource **p_res);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
			   struct vmw_dma_buffer *vmw_bo,
			   size_t size, struct ttm_placement *placement,
			   bool interruptible,
			   void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
					 struct ttm_object_file *tfile);
extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
				 struct ttm_object_file *tfile,
				 uint32_t size,
				 bool shareable,
				 uint32_t *handle,
				 struct vmw_dma_buffer **p_dma_buf,
				 struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
				     struct vmw_dma_buffer *dma_buf,
				     uint32_t *handle);
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
					 struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
				  uint32_t id, struct vmw_dma_buffer **out,
				  struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
				  struct ttm_object_file *tfile,
				  uint32_t *inout_id,
				  struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
				   struct vmw_dma_buffer *new_backup,
				   unsigned long new_backup_offset);
extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
				     struct ttm_mem_reg *mem);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
				struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);

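/*
 * A hedged sketch of the reserve/validate/unreserve ordering the helpers
 * above imply. The real execbuf path also handles backup buffers,
 * eviction and fencing, so this is illustration only:
 *
 *	int ret;
 *
 *	ret = vmw_resource_reserve(res, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	ret = vmw_resource_validate(res);
 *	vmw_resource_unreserve(res, NULL, 0);
 *	return ret;
 */
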
/**
 * DMA buffer helper routines - vmwgfx_dmabuf.c
 */
extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
				   struct vmw_dma_buffer *bo,
				   struct ttm_placement *placement,
				   bool interruptible);
extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
			      struct vmw_dma_buffer *buf,
			      bool pin, bool interruptible);
extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     bool pin, bool interruptible);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
				       struct vmw_dma_buffer *bo,
				       bool pin, bool interruptible);
extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
			    struct vmw_dma_buffer *bo,
			    bool interruptible);
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
				 SVGAGuestPtr *ptr);
extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);

/**
 * Misc Ioctl functionality - vmwgfx_ioctl.c
 */

extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int vmw_present_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv);
extern unsigned int vmw_fops_poll(struct file *filp,
				  struct poll_table_struct *wait);
extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
			     size_t count, loff_t *offset);

/**
 * Fifo utilities - vmwgfx_fifo.c
 */

extern int vmw_fifo_init(struct vmw_private *dev_priv,
			 struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
			       uint32_t *seqno);
extern void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv,
				      uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
				     uint32_t cid);

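/*
 * Typical FIFO command submission is a reserve/fill/commit sequence.
 * The command layout below is hypothetical; real commands pair an SVGA
 * command id with a body defined by the device headers:
 *
 *	struct {
 *		uint32_t header;
 *		uint32_t body;
 *	} *cmd;
 *
 *	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
 *	if (unlikely(cmd == NULL))
 *		return -ENOMEM;
 *
 *	cmd->header = ...;
 *	cmd->body = ...;
 *	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 */
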
/**
 * TTM glue - vmwgfx_ttm_glue.c
 */

extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);

/**
 * TTM buffer object driver - vmwgfx_buffer.c
 */

extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern void vmw_piter_start(struct vmw_piter *viter,
			    const struct vmw_sg_table *vsgt,
			    unsigned long p_offs);

/**
 * vmw_piter_next - Advance the iterator one page.
 *
 * @viter: Pointer to the iterator to advance.
 *
 * Returns false if past the list of pages, true otherwise.
 */
static inline bool vmw_piter_next(struct vmw_piter *viter)
{
	return viter->next(viter);
}

/**
 * vmw_piter_dma_addr - Return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns the DMA address of the page pointed to by @viter.
 */
static inline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->dma_address(viter);
}

/**
 * vmw_piter_page - Return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * Returns a pointer to the page pointed to by @viter.
 */
static inline struct page *vmw_piter_page(struct vmw_piter *viter)
{
	return viter->page(viter);
}

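/*
 * Illustrative iteration sketch: the iterator starts one step before the
 * first page, so vmw_piter_next() must be called before the first access.
 * This mirrors how the GMR and MOB binding code walks a struct
 * vmw_sg_table, independently of the underlying map_mode:
 *
 *	struct vmw_piter iter;
 *
 *	vmw_piter_start(&iter, vsgt, 0);
 *	while (likely(vmw_piter_next(&iter))) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *
 *		... hand addr to the device ...
 *	}
 */
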
/**
 * Command submission - vmwgfx_execbuf.c
 */

extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
extern int vmw_execbuf_process(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       void __user *user_commands,
			       void *kernel_commands,
			       uint32_t command_size,
			       uint64_t throttle_us,
			       struct drm_vmw_fence_rep __user
			       *user_fence_rep,
			       struct vmw_fence_obj **out_fence);
extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
					    struct vmw_fence_obj *fence);
extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);

extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
				      struct vmw_private *dev_priv,
				      struct vmw_fence_obj **p_fence,
				      uint32_t *p_handle);
extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
					struct vmw_fpriv *vmw_fp,
					int ret,
					struct drm_vmw_fence_rep __user
					*user_fence_rep,
					struct vmw_fence_obj *fence,
					uint32_t fence_handle);

/**
 * IRQs and waiting - vmwgfx_irq.c
 */

extern irqreturn_t vmw_irq_handler(int irq, void *arg);
extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
			  uint32_t seqno, bool interruptible,
			  unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
			     uint32_t seqno);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
			     bool lazy,
			     bool fifo_idle,
			     uint32_t seqno,
			     bool interruptible,
			     unsigned long timeout);
extern void vmw_update_seqno(struct vmw_private *dev_priv,
			     struct vmw_fifo_state *fifo_state);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);

/**
 * Rudimentary fence-like objects currently used only for throttling -
 * vmwgfx_marker.c
 */

extern void vmw_marker_queue_init(struct vmw_marker_queue *queue);
extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue);
extern int vmw_marker_push(struct vmw_marker_queue *queue,
			   uint32_t seqno);
extern int vmw_marker_pull(struct vmw_marker_queue *queue,
			   uint32_t signaled_seqno);
extern int vmw_wait_lag(struct vmw_private *dev_priv,
			struct vmw_marker_queue *queue, uint32_t us);

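/*
 * A hedged throttling sketch: markers are pushed as commands are
 * submitted and pulled as seqnos signal, and vmw_wait_lag() then blocks
 * until the marker queue lag drops below the requested limit
 * (illustration only; throttle_us names a caller-supplied value):
 *
 *	vmw_marker_push(&fifo_state->marker_queue, seqno);
 *	...
 *	ret = vmw_wait_lag(dev_priv, &fifo_state->marker_queue,
 *			   throttle_us);
 */
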
/**
 * Kernel framebuffer - vmwgfx_fb.c
 */

int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);

/**
 * Kernel modesetting - vmwgfx_kms.c
 */

int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header);
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth);
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height);
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
int vmw_enable_vblank(struct drm_device *dev, int crtc);
void vmw_disable_vblank(struct drm_device *dev, int crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid, int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips);
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *clips,
		     uint32_t num_clips);
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);

int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);

int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset);
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle);

/**
 * Overlay control - vmwgfx_overlay.c
 */

int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);

/**
 * GMR ID manager
 */

extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;

/**
 * Prime - vmwgfx_prime.c
 */

extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
extern int vmw_prime_fd_to_handle(struct drm_device *dev,
				  struct drm_file *file_priv,
				  int fd, u32 *handle);
extern int vmw_prime_handle_to_fd(struct drm_device *dev,
				  struct drm_file *file_priv,
				  uint32_t handle, uint32_t flags,
				  int *prime_fd);

/*
 * Memory object (MOB) management - vmwgfx_mob.c
 */
struct vmw_mob;
extern int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob,
			const struct vmw_sg_table *vsgt,
			unsigned long num_data_pages, int32_t mob_id);
extern void vmw_mob_unbind(struct vmw_private *dev_priv,
			   struct vmw_mob *mob);
extern void vmw_mob_destroy(struct vmw_mob *mob);
extern struct vmw_mob *vmw_mob_create(unsigned long data_pages);
extern int vmw_otables_setup(struct vmw_private *dev_priv);
extern void vmw_otables_takedown(struct vmw_private *dev_priv);

/*
 * Context management - vmwgfx_context.c
 */

extern const struct vmw_user_resource_conv *user_context_converter;

extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);

extern int vmw_context_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     int id,
			     struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
				   const struct vmw_ctx_bindinfo *ci);
extern void
vmw_context_binding_state_transfer(struct vmw_resource *res,
				   struct vmw_ctx_binding_state *cbs);
extern void vmw_context_binding_res_list_kill(struct list_head *head);
extern void vmw_context_binding_res_list_scrub(struct list_head *head);
extern int vmw_context_rebind_all(struct vmw_resource *ctx);
extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
extern struct vmw_cmdbuf_res_manager *
vmw_context_res_man(struct vmw_resource *ctx);

/*
 * Surface management - vmwgfx_surface.c
 */

extern const struct vmw_user_resource_conv *user_surface_converter;

extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
					  struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
			     struct ttm_object_file *tfile,
			     uint32_t handle, int *id);
extern int vmw_surface_validate(struct vmw_private *dev_priv,
				struct vmw_surface *srf);

/*
 * Shader management - vmwgfx_shader.c
 */

extern const struct vmw_user_resource_conv *user_shader_converter;

extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int vmw_compat_shader_add(struct vmw_private *dev_priv,
				 struct vmw_cmdbuf_res_manager *man,
				 u32 user_key, const void *bytecode,
				 SVGA3dShaderType shader_type,
				 size_t size,
				 struct list_head *list);
extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
				    u32 user_key, SVGA3dShaderType shader_type,
				    struct list_head *list);
extern struct vmw_resource *
vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
			 u32 user_key, SVGA3dShaderType shader_type);

/*
 * Command buffer managed resources - vmwgfx_cmdbuf_res.c
 */

extern struct vmw_cmdbuf_res_manager *
vmw_cmdbuf_res_man_create(struct vmw_private *dev_priv);
extern void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man);
extern size_t vmw_cmdbuf_res_man_size(void);
extern struct vmw_resource *
vmw_cmdbuf_res_lookup(struct vmw_cmdbuf_res_manager *man,
		      enum vmw_cmdbuf_res_type res_type,
		      u32 user_key);
extern void vmw_cmdbuf_res_revert(struct list_head *list);
extern void vmw_cmdbuf_res_commit(struct list_head *list);
extern int vmw_cmdbuf_res_add(struct vmw_cmdbuf_res_manager *man,
			      enum vmw_cmdbuf_res_type res_type,
			      u32 user_key,
			      struct vmw_resource *res,
			      struct list_head *list);
extern int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
				 enum vmw_cmdbuf_res_type res_type,
				 u32 user_key,
				 struct list_head *list);


/**
 * Inline helper functions
 */

static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
	struct vmw_surface *tmp_srf = *srf;
	struct vmw_resource *res = &tmp_srf->res;
	*srf = NULL;

	vmw_resource_unreference(&res);
}

static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
	(void) vmw_resource_reference(&srf->res);
	return srf;
}

static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
	struct vmw_dma_buffer *tmp_buf = *buf;

	*buf = NULL;
	if (tmp_buf != NULL) {
		struct ttm_buffer_object *bo = &tmp_buf->base;

		ttm_bo_unref(&bo);
	}
}

static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
	if (ttm_bo_reference(&buf->base))
		return buf;
	return NULL;
}

static inline struct ttm_mem_global *vmw_mem_glob(struct vmw_private *dev_priv)
{
	return (struct ttm_mem_global *) dev_priv->mem_global_ref.object;
}
#endif /* _VMWGFX_DRV_H_ */