root/include/uapi/drm/vmwgfx_drm.h

/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_ALLOC_BO             1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_HANDLE_CLOSE         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_GET_3D_CAP           13
#define DRM_VMW_FENCE_WAIT           14
#define DRM_VMW_FENCE_SIGNALED       15
#define DRM_VMW_FENCE_UNREF          16
#define DRM_VMW_FENCE_EVENT          17
#define DRM_VMW_PRESENT              18
#define DRM_VMW_PRESENT_READBACK     19
#define DRM_VMW_UPDATE_LAYOUT        20
#define DRM_VMW_CREATE_SHADER        21
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT   27
#define DRM_VMW_GB_SURFACE_REF_EXT      28

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The FIFO is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Whether the driver supports the overlay ioctl.
 *
 * DRM_VMW_PARAM_SM4_1:
 * SM4_1 support is enabled.
 */

#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS        4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10
#define DRM_VMW_PARAM_SCREEN_TARGET    11
#define DRM_VMW_PARAM_DX               12
#define DRM_VMW_PARAM_HW_CAPS2         13
#define DRM_VMW_PARAM_SM4_1            14

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 */
enum drm_vmw_handle_type {
        DRM_VMW_HANDLE_LEGACY = 0,
        DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
        __u64 value;
        __u32 param;
        __u32 pad64;
};
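
/*
 * Example (illustrative sketch, not part of the ABI): querying whether 3D
 * is available. Assumes an open DRM file descriptor "fd" for the vmwgfx
 * device; drmCommandWriteRead() is the libdrm helper from <xf86drm.h>.
 *
 *      struct drm_vmw_getparam_arg gp_arg = {
 *              .param = DRM_VMW_PARAM_3D,
 *      };
 *      int have_3d = 0;
 *
 *      if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &gp_arg,
 *                              sizeof(gp_arg)) == 0)
 *              have_3d = (gp_arg.value == 1);
 */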

/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion. The context ID can be
 * used directly in the command stream and shows up as the same context ID
 * on the host.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
        __s32 cid;
        __u32 pad64;
};
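
/*
 * Example (illustrative sketch): creating a context, assuming an open
 * vmwgfx DRM fd and the libdrm command helpers from <xf86drm.h>:
 *
 *      struct drm_vmw_context_arg ctx_arg = { 0 };
 *      int ret;
 *
 *      ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_CONTEXT, &ctx_arg,
 *                                sizeof(ctx_arg));
 *
 * On success, ctx_arg.cid holds the device-unique context id; passing the
 * same structure to DRM_VMW_UNREF_CONTEXT via drmCommandWrite() frees it.
 */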

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a device unique context id, and queues a destroy context command
 * for the host. Does not wait for host completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an __u64 for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
        __u32 flags;
        __u32 format;
        __u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
        __u64 size_addr;
        __s32 shareable;
        __s32 scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
        __s32 sid;
        enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width: mip level width
 * @height: mip level height
 * @depth: mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
        __u32 width;
        __u32 height;
        __u32 depth;
        __u32 pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
        struct drm_vmw_surface_arg rep;
        struct drm_vmw_surface_create_req req;
};
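
/*
 * Example (illustrative sketch): creating a one-face, one-mip surface.
 * "format" stands for a host-understood SVGA3d format value from the
 * device headers; unused faces keep their mip_levels[] entries at 0.
 *
 *      struct drm_vmw_size size = {
 *              .width = 256, .height = 256, .depth = 1,
 *      };
 *      union drm_vmw_surface_create_arg arg = { 0 };
 *      __s32 sid = -1;
 *
 *      arg.req.format = format;
 *      arg.req.mip_levels[0] = 1;
 *      arg.req.size_addr = (__u64)(unsigned long)&size;
 *
 *      if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE, &arg,
 *                              sizeof(arg)) == 0)
 *              sid = arg.rep.sid;
 *
 * "sid" may then be used directly in the command stream.
 */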

/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
        struct drm_vmw_surface_create_req rep;
        struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clears a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an __u64.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * __u64.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags.
 * @context_handle: Device unique context ID used for the command submission
 * when @version is 2 or higher.
 * @imported_fence_fd: FD for a fence imported from another device
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

#define DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD (1 << 0)
#define DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD (1 << 1)

struct drm_vmw_execbuf_arg {
        __u64 commands;
        __u32 command_size;
        __u32 throttle_us;
        __u64 fence_rep;
        __u32 version;
        __u32 flags;
        __u32 context_handle;
        __s32 imported_fence_fd;
};
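
/*
 * Example (illustrative sketch, not part of the ABI): submitting a
 * command buffer and requesting a fence back. Assumes "cmd" points to
 * "cmd_size" bytes of valid SVGA command data, and that (__u32)-1
 * (SVGA3D_INVALID_ID in the device headers) denotes "no DX context".
 * drmCommandWrite() is the libdrm helper from <xf86drm.h>.
 *
 *      struct drm_vmw_fence_rep fence_rep = { .error = -EFAULT };
 *      struct drm_vmw_execbuf_arg arg = {
 *              .commands = (__u64)(unsigned long)cmd,
 *              .command_size = cmd_size,
 *              .fence_rep = (__u64)(unsigned long)&fence_rep,
 *              .version = DRM_VMW_EXECBUF_VERSION,
 *              .context_handle = (__u32)-1,
 *      };
 *      int ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *
 * On return, fence_rep.error is triaged as described at
 * struct drm_vmw_fence_rep below.
 */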

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in fifo. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno number processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @fd: FD associated with the fence, -1 if not exported
 * @error: This member should be set to -EFAULT by user-space on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the @seqno member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the @seqno member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
        __u32 handle;
        __u32 mask;
        __u32 seqno;
        __u32 passed_seqno;
        __s32 fd;
        __s32 error;
};
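
/*
 * Example (illustrative sketch): since @seqno and @passed_seqno wrap at
 * 32 bits, user-space should compare them with wrap-safe signed
 * arithmetic rather than directly. The helper name below is hypothetical:
 *
 *      static inline int vmw_seqno_passed(__u32 seqno, __u32 passed_seqno)
 *      {
 *              return (__s32)(passed_seqno - seqno) >= 0;
 *      }
 *
 * A fence whose @seqno satisfies this test against a known @passed_seqno
 * has already signaled the EXEC flag, and no kernel call is needed.
 */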

/*************************************************************************/
/**
 * DRM_VMW_ALLOC_BO
 *
 * Allocate a buffer object that is visible also to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest
 * kernel may translate these and patch up the command stream accordingly.
 * In the future, the offset may be zero at all times, or it may disappear
 * from the interface before it is fixed.
 *
 * The buffer object may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * Buffer objects are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_bo_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_alloc_bo_req {
        __u32 size;
        __u32 pad64;
};
#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req

/**
 * struct drm_vmw_bo_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_BO Ioctl.
 */

struct drm_vmw_bo_rep {
        __u64 map_handle;
        __u32 handle;
        __u32 cur_gmr_id;
        __u32 cur_gmr_offset;
        __u32 pad64;
};
#define drm_vmw_dmabuf_rep drm_vmw_bo_rep

/**
 * union drm_vmw_alloc_bo_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_BO Ioctl.
 */

union drm_vmw_alloc_bo_arg {
        struct drm_vmw_alloc_bo_req req;
        struct drm_vmw_bo_rep rep;
};
#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
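
/*
 * Example (illustrative sketch): allocating a 64 KiB buffer object and
 * mapping it; mmap() comes from <sys/mman.h>. Note that the argument is
 * a union, so arg.req is clobbered by arg.rep on return.
 *
 *      union drm_vmw_alloc_bo_arg arg = { 0 };
 *      void *map = NULL;
 *
 *      arg.req.size = 65536;
 *      if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_BO, &arg,
 *                              sizeof(arg)) == 0)
 *              map = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
 *                         MAP_SHARED, fd, arg.rep.map_handle);
 *
 * arg.rep.handle identifies the buffer for later DRM_VMW_HANDLE_CLOSE.
 */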

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they only read back for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
        __s32 x;
        __s32 y;
        __u32 w;
        __u32 h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control
 * @enabled: If false all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the last two are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
        __u32 stream_id;
        __u32 enabled;

        __u32 flags;
        __u32 color_key;

        __u32 handle;
        __u32 offset;
        __s32 format;
        __u32 size;
        __u32 width;
        __u32 height;
        __u32 pitch[3];

        __u32 pad64;
        struct drm_vmw_rect src;
        struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS       (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
        __u32 flags;
        __u32 crtc_id;
        __s32 xpos;
        __s32 ypos;
        __s32 xhot;
        __s32 yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
        __u32 stream_id;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an __u64
 * @max_size: Max size to copy
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
        __u64 buffer;
        __u32 max_size;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the wait. The wait may time out,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is restarting without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
        __u32 handle;
        __s32 cookie_valid;
        __u64 kernel_cookie;
        __u64 timeout_us;
        __s32 lazy;
        __s32 flags;
        __s32 wait_options;
        __s32 pad64;
};
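
/*
 * Example (illustrative sketch): waiting for command execution and
 * dropping the fence reference in the same call. "handle" is assumed to
 * come from a struct drm_vmw_fence_rep returned by DRM_VMW_EXECBUF:
 *
 *      struct drm_vmw_fence_wait_arg wait_arg = {
 *              .handle = handle,
 *              .timeout_us = 1000000,
 *              .lazy = 1,
 *              .flags = DRM_VMW_FENCE_FLAG_EXEC,
 *              .wait_options = DRM_VMW_WAIT_OPTION_UNREF,
 *      };
 *      int ret = drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT, &wait_arg,
 *                                    sizeof(wait_arg));
 *
 * On a restart after an interrupted wait, the same argument (with
 * @cookie_valid and @kernel_cookie left untouched) is passed in again so
 * that the timeout is computed from the first call.
 */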

/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to DRM_VMW_FENCE_SIGNALED ioctl
 * @signaled: Out: Whether the fence object is signaled.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 * @signaled_flags: Out: Flags signaled.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED ioctl.
 */

struct drm_vmw_fence_signaled_arg {
        __u32 handle;
        __u32 flags;
        __s32 signaled;
        __u32 passed_seqno;
        __u32 signaled_flags;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
        __u32 handle;
        __u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
        struct drm_event base;
        __u64 user_data;
        __u32 tv_sec;
        __u32 tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to __u64 or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: User-space data to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
        __u64 fence_rep;
        __u64 user_data;
        __u32 handle;
        __u32 flags;
};
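
/*
 * Example (illustrative sketch): queueing an event on a fence handle and
 * reading it back from the DRM fd. Events follow the usual DRM model: a
 * struct drm_event header followed by the type-specific payload, here
 * struct drm_vmw_event_fence. process_fence_event() is a hypothetical
 * application callback.
 *
 *      struct drm_vmw_fence_event_arg ev_arg = {
 *              .handle = handle,
 *              .user_data = 0x1234,
 *              .flags = DRM_VMW_FE_FLAG_REQ_TIME,
 *      };
 *      char buf[128];
 *      ssize_t len;
 *
 *      drmCommandWriteRead(fd, DRM_VMW_FENCE_EVENT, &ev_arg,
 *                          sizeof(ev_arg));
 *      len = read(fd, buf, sizeof(buf));
 *      if (len >= (ssize_t)sizeof(struct drm_event)) {
 *              struct drm_event *e = (struct drm_event *)buf;
 *
 *              if (e->type == DRM_VMW_EVENT_FENCE_SIGNALED)
 *                      process_fence_event((struct drm_vmw_event_fence *)e);
 *      }
 */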


/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
        __u32 fb_id;
        __u32 sid;
        __s32 dest_x;
        __s32 dest_y;
        __u64 clips_ptr;
        __u32 num_clips;
        __u32 pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an __u64.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an __u64.
 * If this member is 0, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
        __u32 fb_id;
        __u32 num_clips;
        __u64 clips_ptr;
        __u64 fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an __u64
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
        __u32 num_outputs;
        __u32 pad64;
        __u64 rects;
};
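
/*
 * Example (illustrative sketch): advertising two side-by-side 1024x768
 * outputs:
 *
 *      struct drm_vmw_rect rects[2] = {
 *              { .x = 0,    .y = 0, .w = 1024, .h = 768 },
 *              { .x = 1024, .y = 0, .w = 1024, .h = 768 },
 *      };
 *      struct drm_vmw_update_layout_arg arg = {
 *              .num_outputs = 2,
 *              .rects = (__u64)(unsigned long)rects,
 *      };
 *
 *      int ret = drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT, &arg,
 *                                sizeof(arg));
 */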


/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
        drm_vmw_shader_type_vs = 0,
        drm_vmw_shader_type_ps,
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
        enum drm_vmw_shader_type shader_type;
        __u32 size;
        __u32 buffer_handle;
        __u32 shader_handle;
        __u64 offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreferences a shader
 *
 * Destroys a user-space reference to a shader, optionally destroying
 * the shader itself if no other references remain.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
        __u32 handle;
        __u32 pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable:     Whether the surface is shareable
 * @drm_vmw_surface_flag_scanout:       Whether the surface is a scanout
 *                                      surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 *                                      given.
 */
enum drm_vmw_surface_flags {
        drm_vmw_surface_flag_shareable = (1 << 0),
        drm_vmw_surface_flag_scanout = (1 << 1),
        drm_vmw_surface_flag_create_buffer = (1 << 2)
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags:      SVGA3d surface flags for the device.
 * @format:            SVGA3d format.
 * @mip_levels:        Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter:    Future use. Set to 0.
 * @buffer_handle:     Buffer handle of backup buffer. SVGA3D_INVALID_ID
 *                     if none.
 * @base_size:         Size of the base mip level for all faces.
 * @array_size:        Must be zero for non-DX hardware, and if non-zero
 *                     svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
        __u32 svga3d_flags;
        __u32 format;
        __u32 mip_levels;
        enum drm_vmw_surface_flags drm_surface_flags;
        __u32 multisample_count;
        __u32 autogen_filter;
        __u32 buffer_handle;
        __u32 array_size;
        struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle:            Surface handle.
 * @backup_size:       Size of backup buffers for this surface.
 * @buffer_handle:     Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size:       Actual size of the buffer identified by
 *                     @buffer_handle
 * @buffer_map_handle: Offset into device address space for the buffer
 *                     identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
        __u32 handle;
        __u32 backup_size;
        __u32 buffer_handle;
        __u32 buffer_size;
        __u64 buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
        struct drm_vmw_gb_surface_create_rep rep;
        struct drm_vmw_gb_surface_create_req req;
};
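
/*
 * Example (illustrative sketch): creating a guest-backed surface and
 * letting the kernel allocate the backup buffer. "svga3d_flags" and
 * "format" stand for device values from the SVGA3d headers, and
 * (__u32)-1 corresponds to SVGA3D_INVALID_ID (no buffer supplied).
 *
 *      union drm_vmw_gb_surface_create_arg arg = { 0 };
 *      int ret;
 *
 *      arg.req.svga3d_flags = svga3d_flags;
 *      arg.req.format = format;
 *      arg.req.mip_levels = 1;
 *      arg.req.drm_surface_flags = drm_vmw_surface_flag_create_buffer;
 *      arg.req.buffer_handle = (__u32)-1;
 *      arg.req.base_size.width = 256;
 *      arg.req.base_size.height = 256;
 *      arg.req.base_size.depth = 1;
 *
 *      ret = drmCommandWriteRead(fd, DRM_VMW_GB_SURFACE_CREATE, &arg,
 *                                sizeof(arg));
 *
 * On success, arg.rep.handle holds the surface handle and
 * arg.rep.buffer_handle the kernel-created backup buffer handle.
 */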

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
        struct drm_vmw_gb_surface_create_req creq;
        struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
        struct drm_vmw_gb_surface_ref_rep rep;
        struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only access.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
        drm_vmw_synccpu_read = (1 << 0),
        drm_vmw_synccpu_write = (1 << 1),
        drm_vmw_synccpu_dontblock = (1 << 2),
        drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab:    Grab the buffer for CPU operations
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
        drm_vmw_synccpu_grab,
        drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op:                      The synccpu operation as described above.
 * @handle:                  Handle identifying the buffer object.
 * @flags:                   Flags as described above.
 */
struct drm_vmw_synccpu_arg {
        enum drm_vmw_synccpu_op op;
        enum drm_vmw_synccpu_flags flags;
        __u32 handle;
        __u32 pad64;
};
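
/*
 * Example (illustrative sketch): bracketing CPU writes to a buffer
 * object between a grab and a release, with "bo_handle" obtained from
 * DRM_VMW_ALLOC_BO:
 *
 *      struct drm_vmw_synccpu_arg sync_arg = {
 *              .op = drm_vmw_synccpu_grab,
 *              .flags = drm_vmw_synccpu_write,
 *              .handle = bo_handle,
 *      };
 *
 *      drmCommandWrite(fd, DRM_VMW_SYNCCPU, &sync_arg, sizeof(sync_arg));
 *
 * After writing through the CPU mapping, the grab is released with:
 *
 *      sync_arg.op = drm_vmw_synccpu_release;
 *      drmCommandWrite(fd, DRM_VMW_SYNCCPU, &sync_arg, sizeof(sync_arg));
 */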

/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
        drm_vmw_context_legacy,
        drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
        enum drm_vmw_extended_context req;
        struct drm_vmw_context_arg rep;
};

/*************************************************************************/
/*
 * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
 * underlying resource.
 *
 * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
 * Ioctl.
 */

/**
 * struct drm_vmw_handle_close_arg
 *
 * @handle: Handle to close.
 *
 * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
 */
struct drm_vmw_handle_close_arg {
        __u32 handle;
        __u32 pad64;
};
#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg
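
/*
 * Example (illustrative sketch): closing a buffer object handle obtained
 * from DRM_VMW_ALLOC_BO:
 *
 *      struct drm_vmw_handle_close_arg close_arg = {
 *              .handle = bo_handle,
 *      };
 *
 *      drmCommandWrite(fd, DRM_VMW_HANDLE_CLOSE, &close_arg,
 *                      sizeof(close_arg));
 */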

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 *
 * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding version
 * parameter and 64 bit svga flag.
 */

/**
 * enum drm_vmw_surface_version
 *
 * @drm_vmw_gb_surface_v1: Corresponds to current gb surface format with
 * svga3d surface flags split into 2, upper half and lower half.
 */
enum drm_vmw_surface_version {
        drm_vmw_gb_surface_v1
};

/**
 * struct drm_vmw_gb_surface_create_ext_req
 *
 * @base: Surface create parameters.
 * @version: Version of surface create ioctl.
 * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
 * @multisample_pattern: Multisampling pattern when msaa is supported
 * (an SVGA3dMSPattern value).
 * @quality_level: Precision settings for each sample
 * (an SVGA3dMSQualityLevel value).
 * @must_be_zero: Reserved for future usage.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
struct drm_vmw_gb_surface_create_ext_req {
        struct drm_vmw_gb_surface_create_req base;
        enum drm_vmw_surface_version version;
        __u32 svga3d_flags_upper_32_bits;
        __u32 multisample_pattern;
        __u32 quality_level;
        __u64 must_be_zero;
};

/**
 * union drm_vmw_gb_surface_create_ext_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */
union drm_vmw_gb_surface_create_ext_arg {
        struct drm_vmw_gb_surface_create_rep rep;
        struct drm_vmw_gb_surface_create_ext_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_ext_rep
 *
 * @creq: The data used as input when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_ext_req"
 * @crep: Additional data output when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
 */
struct drm_vmw_gb_surface_ref_ext_rep {
        struct drm_vmw_gb_surface_create_ext_req creq;
        struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_ext_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at
 *       "struct drm_vmw_gb_surface_ref_ext_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
 */
union drm_vmw_gb_surface_reference_ext_arg {
        struct drm_vmw_gb_surface_ref_ext_rep rep;
        struct drm_vmw_surface_arg req;
};

#if defined(__cplusplus)
}
#endif

#endif
