/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

struct vmw_shader {
        struct vmw_resource res;
        SVGA3dShaderType type;
        uint32_t size;
};

struct vmw_user_shader {
        struct ttm_base_object base;
        struct vmw_shader shader;
};

static uint64_t vmw_user_shader_size;
static uint64_t vmw_shader_size;

static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);

static int vmw_gb_shader_create(struct vmw_resource *res);
static int vmw_gb_shader_bind(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                bool readback,
                                struct ttm_validate_buffer *val_buf);
static int vmw_gb_shader_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_shader_conv = {
        .object_type = VMW_RES_SHADER,
        .base_obj_to_res = vmw_user_shader_base_to_res,
        .res_free = vmw_user_shader_free
};

const struct vmw_user_resource_conv *user_shader_converter =
        &user_shader_conv;

static const struct vmw_res_func vmw_gb_shader_func = {
        .res_type = vmw_res_shader,
        .needs_backup = true,
        .may_evict = true,
        .type_name = "guest backed shaders",
        .backup_placement = &vmw_mob_placement,
        .create = vmw_gb_shader_create,
        .destroy = vmw_gb_shader_destroy,
        .bind = vmw_gb_shader_bind,
        .unbind = vmw_gb_shader_unbind
};

/**
 * Shader management:
 */

static inline struct vmw_shader *
vmw_res_to_shader(struct vmw_resource *res)
{
        return container_of(res, struct vmw_shader, res);
}

static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
        (void) vmw_gb_shader_destroy(res);
}

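/**
 * vmw_gb_shader_init - Initialize a guest-backed shader resource.
 *
 * @dev_priv: Pointer to a device private structure.
 * @res: Pointer to the shader resource to initialize.
 * @size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within the backing buffer.
 * @type: Shader type.
 * @byte_code: Optional buffer holding the shader byte code. If non-NULL it
 * is referenced as the resource's backup buffer.
 * @res_free: Destructor for the resource, or NULL to free it with kfree().
 *
 * Returns 0 on success, a negative error code on failure.
 */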
static int vmw_gb_shader_init(struct vmw_private *dev_priv,
                              struct vmw_resource *res,
                              uint32_t size,
                              uint64_t offset,
                              SVGA3dShaderType type,
                              struct vmw_dma_buffer *byte_code,
                              void (*res_free) (struct vmw_resource *res))
{
        struct vmw_shader *shader = vmw_res_to_shader(res);
        int ret;

        ret = vmw_resource_init(dev_priv, res, true,
                                res_free, &vmw_gb_shader_func);

        if (unlikely(ret != 0)) {
                if (res_free)
                        res_free(res);
                else
                        kfree(res);
                return ret;
        }

        res->backup_size = size;
        if (byte_code) {
                res->backup = vmw_dmabuf_reference(byte_code);
                res->backup_offset = offset;
        }
        shader->size = size;
        shader->type = type;

        vmw_resource_activate(res, vmw_hw_shader_destroy);
        return 0;
}

static int vmw_gb_shader_create(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_shader *shader = vmw_res_to_shader(res);
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineGBShader body;
        } *cmd;

        if (likely(res->id != -1))
                return 0;

        ret = vmw_resource_alloc_id(res);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate a shader id.\n");
                goto out_no_id;
        }

        if (unlikely(res->id >= VMWGFX_NUM_GB_SHADER)) {
                ret = -EBUSY;
                goto out_no_fifo;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "creation.\n");
                ret = -ENOMEM;
                goto out_no_fifo;
        }

        cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.type = shader->type;
        cmd->body.sizeInBytes = shader->size;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        (void) vmw_3d_resource_inc(dev_priv, false);

        return 0;

out_no_fifo:
        vmw_resource_release_id(res);
out_no_id:
        return ret;
}

static int vmw_gb_shader_bind(struct vmw_resource *res,
                              struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBShader body;
        } *cmd;
        struct ttm_buffer_object *bo = val_buf->bo;

        BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "binding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.mobid = bo->mem.start;
        cmd->body.offsetInBytes = res->backup_offset;
        res->backup_dirty = false;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        return 0;
}

static int vmw_gb_shader_unbind(struct vmw_resource *res,
                                bool readback,
                                struct ttm_validate_buffer *val_buf)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdBindGBShader body;
        } *cmd;
        struct vmw_fence_obj *fence;

        BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "unbinding.\n");
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        cmd->body.mobid = SVGA3D_INVALID_ID;
        cmd->body.offsetInBytes = 0;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /*
         * Create a fence object and fence the backup buffer.
         */

        (void) vmw_execbuf_fence_commands(NULL, dev_priv,
                                          &fence, NULL);

        vmw_fence_single_bo(val_buf->bo, fence);

        if (likely(fence != NULL))
                vmw_fence_obj_unreference(&fence);

        return 0;
}

static int vmw_gb_shader_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyGBShader body;
        } *cmd;

        if (likely(res->id == -1))
                return 0;

        mutex_lock(&dev_priv->binding_mutex);
        vmw_context_binding_res_list_scrub(&res->binding_head);

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for shader "
                          "destruction.\n");
                mutex_unlock(&dev_priv->binding_mutex);
                return -ENOMEM;
        }

        cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.shid = res->id;
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        mutex_unlock(&dev_priv->binding_mutex);
        vmw_resource_release_id(res);
        vmw_3d_resource_dec(dev_priv, false);

        return 0;
}

/**
 * User-space shader management:
 */

static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base)
{
        return &(container_of(base, struct vmw_user_shader, base)->
                 shader.res);
}

static void vmw_user_shader_free(struct vmw_resource *res)
{
        struct vmw_user_shader *ushader =
                container_of(res, struct vmw_user_shader, shader.res);
        struct vmw_private *dev_priv = res->dev_priv;

        ttm_base_object_kfree(ushader, base);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_user_shader_size);
}

static void vmw_shader_free(struct vmw_resource *res)
{
        struct vmw_shader *shader = vmw_res_to_shader(res);
        struct vmw_private *dev_priv = res->dev_priv;

        kfree(shader);
        ttm_mem_global_free(vmw_mem_glob(dev_priv),
                            vmw_shader_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_shader_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_resource *res = vmw_user_shader_base_to_res(base);

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct drm_vmw_shader_arg *arg = (struct drm_vmw_shader_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->handle,
                                         TTM_REF_USAGE);
}

/**
 * vmw_user_shader_alloc - Allocate a user-space visible shader and register
 * it with a ttm base object.
 *
 * @dev_priv: Pointer to a device private structure.
 * @buffer: Optional buffer holding the shader byte code.
 * @shader_size: Size of the shader byte code in bytes.
 * @offset: Offset of the byte code within @buffer.
 * @shader_type: Shader type.
 * @tfile: The ttm object file through which the base object becomes visible.
 * @handle: If non-NULL, used to return the user-space handle of the shader.
 *
 * Returns 0 on success, a negative error code on failure.
 */
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
                                 struct vmw_dma_buffer *buffer,
                                 size_t shader_size,
                                 size_t offset,
                                 SVGA3dShaderType shader_type,
                                 struct ttm_object_file *tfile,
                                 u32 *handle)
{
        struct vmw_user_shader *ushader;
        struct vmw_resource *res, *tmp;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of shaders anyway.
         */
        if (unlikely(vmw_user_shader_size == 0))
                vmw_user_shader_size =
                        ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_user_shader_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for shader "
                                  "creation.\n");
                goto out;
        }

        ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
        if (unlikely(ushader == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_user_shader_size);
                ret = -ENOMEM;
                goto out;
        }

        res = &ushader->shader.res;
        ushader->base.shareable = false;
        ushader->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */

        ret = vmw_gb_shader_init(dev_priv, res, shader_size,
                                 offset, shader_type, buffer,
                                 vmw_user_shader_free);
        if (unlikely(ret != 0))
                goto out;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &ushader->base, false,
                                   VMW_RES_SHADER,
                                   &vmw_user_shader_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        if (handle)
                *handle = ushader->base.hash.key;
out_err:
        vmw_resource_unreference(&res);
out:
        return ret;
}

struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
                                      struct vmw_dma_buffer *buffer,
                                      size_t shader_size,
                                      size_t offset,
                                      SVGA3dShaderType shader_type)
{
        struct vmw_shader *shader;
        struct vmw_resource *res;
        int ret;

        /*
         * Approximate idr memory usage with 128 bytes. It will be limited
         * by the maximum number of shaders anyway.
         */
        if (unlikely(vmw_shader_size == 0))
                vmw_shader_size =
                        ttm_round_pot(sizeof(struct vmw_shader)) + 128;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
                                   vmw_shader_size,
                                   false, true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for shader "
                                  "creation.\n");
                goto out_err;
        }

        shader = kzalloc(sizeof(*shader), GFP_KERNEL);
        if (unlikely(shader == NULL)) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    vmw_shader_size);
                ret = -ENOMEM;
                goto out_err;
        }

        res = &shader->res;

        /*
         * From here on, the destructor takes over resource freeing.
         */
        ret = vmw_gb_shader_init(dev_priv, res, shader_size,
                                 offset, shader_type, buffer,
                                 vmw_shader_free);

out_err:
        return ret ? ERR_PTR(ret) : res;
}

int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_shader_create_arg *arg =
                (struct drm_vmw_shader_create_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_dma_buffer *buffer = NULL;
        SVGA3dShaderType shader_type;
        int ret;

        if (arg->buffer_handle != SVGA3D_INVALID_ID) {
                ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle,
                                             &buffer, NULL);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not find buffer for shader "
                                  "creation.\n");
                        return ret;
                }

                if ((u64)buffer->base.num_pages * PAGE_SIZE <
                    (u64)arg->size + (u64)arg->offset) {
                        DRM_ERROR("Illegal buffer- or shader size.\n");
                        ret = -EINVAL;
                        goto out_bad_arg;
                }
        }

        switch (arg->shader_type) {
        case drm_vmw_shader_type_vs:
                shader_type = SVGA3D_SHADERTYPE_VS;
                break;
        case drm_vmw_shader_type_ps:
                shader_type = SVGA3D_SHADERTYPE_PS;
                break;
        case drm_vmw_shader_type_gs:
                shader_type = SVGA3D_SHADERTYPE_GS;
                break;
        default:
                DRM_ERROR("Illegal shader type.\n");
                ret = -EINVAL;
                goto out_bad_arg;
        }

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (unlikely(ret != 0))
                goto out_bad_arg;

        ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
                                    shader_type, tfile, &arg->shader_handle);

        ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
        vmw_dmabuf_unreference(&buffer);
        return ret;
}

/**
 * vmw_compat_shader_id_ok - Check whether a compat shader user key and
 * shader type are within valid bounds.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns true if valid, false if not.
 */
static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type)
{
        return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16;
}

/**
 * vmw_compat_shader_key - Compute a hash key suitable for a compat shader.
 *
 * @user_key: User space id of the shader.
 * @shader_type: Shader type.
 *
 * Returns a hash key suitable for a command buffer managed resource
 * manager hash table.
 */
static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type)
{
        return user_key | (shader_type << 20);
}

/**
 * vmw_compat_shader_remove - Stage a compat shader for removal.
 *
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @shader_type: Shader type.
 * @list: Caller's list of staged command buffer resource actions.
 */
int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man,
                             u32 user_key, SVGA3dShaderType shader_type,
                             struct list_head *list)
{
        if (!vmw_compat_shader_id_ok(user_key, shader_type))
                return -EINVAL;

        return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader,
                                     vmw_compat_shader_key(user_key,
                                                           shader_type),
                                     list);
}

/**
 * vmw_compat_shader_add - Create a compat shader and stage it for addition
 * as a command buffer managed resource.
 *
 * @dev_priv: Pointer to a device private structure.
 * @man: Pointer to the compat shader manager identifying the shader namespace.
 * @user_key: The key that is used to identify the shader. The key is
 * unique to the shader type.
 * @bytecode: Pointer to the bytecode of the shader.
 * @shader_type: Shader type.
 * @size: Size of the shader bytecode in bytes.
 * @list: Caller's list of staged command buffer resource actions.
 */
int vmw_compat_shader_add(struct vmw_private *dev_priv,
                          struct vmw_cmdbuf_res_manager *man,
                          u32 user_key, const void *bytecode,
                          SVGA3dShaderType shader_type,
                          size_t size,
                          struct list_head *list)
{
        struct vmw_dma_buffer *buf;
        struct ttm_bo_kmap_obj map;
        bool is_iomem;
        int ret;
        struct vmw_resource *res;

        if (!vmw_compat_shader_id_ok(user_key, shader_type))
                return -EINVAL;

        /* Allocate and pin a DMA buffer */
        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (unlikely(buf == NULL))
                return -ENOMEM;

        ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
                              true, vmw_dmabuf_bo_free);
        if (unlikely(ret != 0))
                goto out;

        ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
        if (unlikely(ret != 0))
                goto no_reserve;

        /* Map and copy shader bytecode. */
        ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
                          &map);
        if (unlikely(ret != 0)) {
                ttm_bo_unreserve(&buf->base);
                goto no_reserve;
        }

        memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
        WARN_ON(is_iomem);

        ttm_bo_kunmap(&map);
        ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
        WARN_ON(ret != 0);
        ttm_bo_unreserve(&buf->base);

        res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto no_reserve;
        }

        ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader,
                                 vmw_compat_shader_key(user_key, shader_type),
                                 res, list);
        vmw_resource_unreference(&res);
no_reserve:
        vmw_dmabuf_unreference(&buf);
out:
        return ret;
}

/**
 * vmw_compat_shader_lookup - Look up a compat shader
 *
 * @man: Pointer to the command buffer managed resource manager identifying
 * the shader namespace.
 * @user_key: The user space id of the shader.
 * @shader_type: The shader type.
 *
 * Returns a refcounted pointer to a struct vmw_resource if the shader
 * was found. An error pointer otherwise.
 */
struct vmw_resource *
vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man,
                         u32 user_key,
                         SVGA3dShaderType shader_type)
{
        if (!vmw_compat_shader_id_ok(user_key, shader_type))
                return ERR_PTR(-EINVAL);

        return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader,
                                     vmw_compat_shader_key(user_key,
                                                           shader_type));
}