root/drivers/gpu/drm/msm/msm_gem_submit.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. submit_create
  2. msm_gem_submit_free
  3. submit_lookup_objects
  4. submit_unlock_unpin_bo
  5. submit_lock_objects
  6. submit_fence_sync
  7. submit_pin_objects
  8. submit_bo
  9. submit_reloc
  10. submit_cleanup
  11. msm_ioctl_gem_submit

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * Copyright (C) 2013 Red Hat
   4  * Author: Rob Clark <robdclark@gmail.com>
   5  */
   6 
   7 #include <linux/file.h>
   8 #include <linux/sync_file.h>
   9 #include <linux/uaccess.h>
  10 
  11 #include <drm/drm_file.h>
  12 
  13 #include "msm_drv.h"
  14 #include "msm_gpu.h"
  15 #include "msm_gem.h"
  16 #include "msm_gpu_trace.h"
  17 
  18 /*
  19  * Cmdstream submission:
  20  */
  21 
  22 /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
  23 #define BO_VALID    0x8000   /* is current addr in cmdstream correct/valid? */
  24 #define BO_LOCKED   0x4000
  25 #define BO_PINNED   0x2000
  26 
  27 static struct msm_gem_submit *submit_create(struct drm_device *dev,
  28                 struct msm_gpu *gpu, struct msm_gem_address_space *aspace,
  29                 struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
  30                 uint32_t nr_cmds)
  31 {
  32         struct msm_gem_submit *submit;
  33         uint64_t sz = struct_size(submit, bos, nr_bos) +
  34                                   ((u64)nr_cmds * sizeof(submit->cmd[0]));
  35 
  36         if (sz > SIZE_MAX)
  37                 return NULL;
  38 
  39         submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
  40         if (!submit)
  41                 return NULL;
  42 
  43         submit->dev = dev;
  44         submit->aspace = aspace;
  45         submit->gpu = gpu;
  46         submit->fence = NULL;
  47         submit->cmd = (void *)&submit->bos[nr_bos];
  48         submit->queue = queue;
  49         submit->ring = gpu->rb[queue->prio];
  50 
  51         /* initially, until copy_from_user() and bo lookup succeeds: */
  52         submit->nr_bos = 0;
  53         submit->nr_cmds = 0;
  54 
  55         INIT_LIST_HEAD(&submit->node);
  56         INIT_LIST_HEAD(&submit->bo_list);
  57         ww_acquire_init(&submit->ticket, &reservation_ww_class);
  58 
  59         return submit;
  60 }
  61 
  62 void msm_gem_submit_free(struct msm_gem_submit *submit)
  63 {
  64         dma_fence_put(submit->fence);
  65         list_del(&submit->node);
  66         put_pid(submit->pid);
  67         msm_submitqueue_put(submit->queue);
  68 
  69         kfree(submit);
  70 }
  71 
/*
 * Copy the bo table in from userspace and resolve each GEM handle to an
 * object.  Runs in two passes: (1) copy_from_user() plus flag validation
 * with no locks held, then (2) bulk handle->object lookup under a single
 * table_lock, taking a reference on each object and linking it onto
 * submit->bo_list.  On return, submit->nr_bos records how many bos[]
 * entries actually hold object references (and therefore need cleanup).
 */
static int submit_lookup_objects(struct msm_gem_submit *submit,
		struct drm_msm_gem_submit *args, struct drm_file *file)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_msm_gem_submit_bo submit_bo;
		void __user *userptr =
			u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));

		/* make sure we don't have garbage flags, in case we hit
		 * error path before flags is initialized:
		 */
		submit->bos[i].flags = 0;

		if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
			ret = -EFAULT;
			/* no object references taken yet, so nothing to clean: */
			i = 0;
			goto out;
		}

/* at least one of READ and/or WRITE flags should be set: */
#define MANDATORY_FLAGS (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)

		if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
			!(submit_bo.flags & MANDATORY_FLAGS)) {
			DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
			ret = -EINVAL;
			/* likewise: no references taken yet, report zero bos */
			i = 0;
			goto out;
		}

		submit->bos[i].handle = submit_bo.handle;
		submit->bos[i].flags = submit_bo.flags;
		/* in validate_objects() we figure out if this is true: */
		submit->bos[i].iova  = submit_bo.presumed;
	}

	spin_lock(&file->table_lock);

	for (i = 0; i < args->nr_bos; i++) {
		struct drm_gem_object *obj;
		struct msm_gem_object *msm_obj;

		/* normally use drm_gem_object_lookup(), but for bulk lookup
		 * all under single table_lock just hit object_idr directly:
		 */
		obj = idr_find(&file->object_idr, submit->bos[i].handle);
		if (!obj) {
			DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		msm_obj = to_msm_bo(obj);

		/* an object may appear at most once per submit: */
		if (!list_empty(&msm_obj->submit_entry)) {
			DRM_ERROR("handle %u at index %u already on submit list\n",
					submit->bos[i].handle, i);
			ret = -EINVAL;
			goto out_unlock;
		}

		drm_gem_object_get(obj);

		submit->bos[i].obj = msm_obj;

		list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
	}

out_unlock:
	spin_unlock(&file->table_lock);

out:
	/* record how far we got, so cleanup only touches valid entries: */
	submit->nr_bos = i;

	return ret;
}
 151 
 152 static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
 153                 int i, bool backoff)
 154 {
 155         struct msm_gem_object *msm_obj = submit->bos[i].obj;
 156 
 157         if (submit->bos[i].flags & BO_PINNED)
 158                 msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
 159 
 160         if (submit->bos[i].flags & BO_LOCKED)
 161                 ww_mutex_unlock(&msm_obj->base.resv->lock);
 162 
 163         if (backoff && !(submit->bos[i].flags & BO_VALID))
 164                 submit->bos[i].iova = 0;
 165 
 166         submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
 167 }
 168 
/* This is where we make sure all the bo's are reserved and pin'd:
 *
 * Takes every bo's reservation (ww_mutex) under submit->ticket.  On a
 * -EDEADLK seqno race, everything locked so far is backed off, the
 * contended bo is slow-locked, and the whole loop is retried.
 */
static int submit_lock_objects(struct msm_gem_submit *submit)
{
	int contended, slow_locked = -1, i, ret = 0;

retry:
	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		/* once the retry loop reaches the slow-locked bo (already
		 * holding BO_LOCKED), stop tracking it separately:
		 */
		if (slow_locked == i)
			slow_locked = -1;

		/* remember which bo we were on, in case the lock attempt
		 * below loses the ww-mutex seqno race (-EDEADLK):
		 */
		contended = i;

		if (!(submit->bos[i].flags & BO_LOCKED)) {
			ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
					&submit->ticket);
			if (ret)
				goto fail;
			submit->bos[i].flags |= BO_LOCKED;
		}
	}

	ww_acquire_done(&submit->ticket);

	return 0;

fail:
	/* back off: unlock everything locked so far (indices 0..i)... */
	for (; i >= 0; i--)
		submit_unlock_unpin_bo(submit, i, true);

	/* ...plus the slow-locked bo from a previous round, if it lies
	 * beyond i and so wasn't covered by the loop above:
	 */
	if (slow_locked > 0)
		submit_unlock_unpin_bo(submit, slow_locked, true);

	if (ret == -EDEADLK) {
		struct msm_gem_object *msm_obj = submit->bos[contended].obj;
		/* we lost out in a seqno race, lock and retry.. */
		ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
				&submit->ticket);
		if (!ret) {
			submit->bos[contended].flags |= BO_LOCKED;
			slow_locked = contended;
			goto retry;
		}
	}

	return ret;
}
 217 
 218 static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
 219 {
 220         int i, ret = 0;
 221 
 222         for (i = 0; i < submit->nr_bos; i++) {
 223                 struct msm_gem_object *msm_obj = submit->bos[i].obj;
 224                 bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE;
 225 
 226                 if (!write) {
 227                         /* NOTE: _reserve_shared() must happen before
 228                          * _add_shared_fence(), which makes this a slightly
 229                          * strange place to call it.  OTOH this is a
 230                          * convenient can-fail point to hook it in.
 231                          */
 232                         ret = dma_resv_reserve_shared(msm_obj->base.resv,
 233                                                                 1);
 234                         if (ret)
 235                                 return ret;
 236                 }
 237 
 238                 if (no_implicit)
 239                         continue;
 240 
 241                 ret = msm_gem_sync_object(&msm_obj->base, submit->ring->fctx,
 242                         write);
 243                 if (ret)
 244                         break;
 245         }
 246 
 247         return ret;
 248 }
 249 
 250 static int submit_pin_objects(struct msm_gem_submit *submit)
 251 {
 252         int i, ret = 0;
 253 
 254         submit->valid = true;
 255 
 256         for (i = 0; i < submit->nr_bos; i++) {
 257                 struct msm_gem_object *msm_obj = submit->bos[i].obj;
 258                 uint64_t iova;
 259 
 260                 /* if locking succeeded, pin bo: */
 261                 ret = msm_gem_get_and_pin_iova(&msm_obj->base,
 262                                 submit->aspace, &iova);
 263 
 264                 if (ret)
 265                         break;
 266 
 267                 submit->bos[i].flags |= BO_PINNED;
 268 
 269                 if (iova == submit->bos[i].iova) {
 270                         submit->bos[i].flags |= BO_VALID;
 271                 } else {
 272                         submit->bos[i].iova = iova;
 273                         /* iova changed, so address in cmdstream is not valid: */
 274                         submit->bos[i].flags &= ~BO_VALID;
 275                         submit->valid = false;
 276                 }
 277         }
 278 
 279         return ret;
 280 }
 281 
 282 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
 283                 struct msm_gem_object **obj, uint64_t *iova, bool *valid)
 284 {
 285         if (idx >= submit->nr_bos) {
 286                 DRM_ERROR("invalid buffer index: %u (out of %u)\n",
 287                                 idx, submit->nr_bos);
 288                 return -EINVAL;
 289         }
 290 
 291         if (obj)
 292                 *obj = submit->bos[idx].obj;
 293         if (iova)
 294                 *iova = submit->bos[idx].iova;
 295         if (valid)
 296                 *valid = !!(submit->bos[idx].flags & BO_VALID);
 297 
 298         return 0;
 299 }
 300 
/* process the reloc's and patch up the cmdstream as needed:
 *
 * Each reloc names a bo (reloc_idx) and a dword offset in the cmdstream
 * bo; the patched value is that bo's iova adjusted by reloc_offset/shift
 * and OR'd with submit_reloc.or.  Relocs must be sorted by ascending
 * submit_offset.  Bos already at their presumed iova are skipped.
 */
static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
		uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
{
	uint32_t i, last_offset = 0;
	uint32_t *ptr;
	int ret = 0;

	if (!nr_relocs)
		return 0;

	if (offset % 4) {
		DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
		return -EINVAL;
	}

	/* For now, just map the entire thing.  Eventually we probably
	 * to do it page-by-page, w/ kmap() if not vmap()d..
	 */
	ptr = msm_gem_get_vaddr(&obj->base);

	if (IS_ERR(ptr)) {
		ret = PTR_ERR(ptr);
		DBG("failed to map: %d", ret);
		return ret;
	}

	for (i = 0; i < nr_relocs; i++) {
		struct drm_msm_gem_submit_reloc submit_reloc;
		void __user *userptr =
			u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
		uint32_t off;
		uint64_t iova;
		bool valid;

		if (copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc))) {
			ret = -EFAULT;
			goto out;
		}

		if (submit_reloc.submit_offset % 4) {
			DRM_ERROR("non-aligned reloc offset: %u\n",
					submit_reloc.submit_offset);
			ret = -EINVAL;
			goto out;
		}

		/* offset in dwords: */
		off = submit_reloc.submit_offset / 4;

		/* relocs must stay in bounds and be sorted by offset: */
		if ((off >= (obj->base.size / 4)) ||
				(off < last_offset)) {
			DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
			ret = -EINVAL;
			goto out;
		}

		ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
		if (ret)
			goto out;

		/* bo is already at the address the cmdstream presumes: */
		if (valid)
			continue;

		iova += submit_reloc.reloc_offset;

		/* a negative shift means the iova is shifted right: */
		if (submit_reloc.shift < 0)
			iova >>= -submit_reloc.shift;
		else
			iova <<= submit_reloc.shift;

		ptr[off] = iova | submit_reloc.or;

		last_offset = off;
	}

out:
	msm_gem_put_vaddr(&obj->base);

	return ret;
}
 382 
 383 static void submit_cleanup(struct msm_gem_submit *submit)
 384 {
 385         unsigned i;
 386 
 387         for (i = 0; i < submit->nr_bos; i++) {
 388                 struct msm_gem_object *msm_obj = submit->bos[i].obj;
 389                 submit_unlock_unpin_bo(submit, i, false);
 390                 list_del_init(&msm_obj->submit_entry);
 391                 drm_gem_object_put(&msm_obj->base);
 392         }
 393 
 394         ww_acquire_fini(&submit->ticket);
 395 }
 396 
 397 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 398                 struct drm_file *file)
 399 {
 400         static atomic_t ident = ATOMIC_INIT(0);
 401         struct msm_drm_private *priv = dev->dev_private;
 402         struct drm_msm_gem_submit *args = data;
 403         struct msm_file_private *ctx = file->driver_priv;
 404         struct msm_gem_submit *submit;
 405         struct msm_gpu *gpu = priv->gpu;
 406         struct sync_file *sync_file = NULL;
 407         struct msm_gpu_submitqueue *queue;
 408         struct msm_ringbuffer *ring;
 409         int out_fence_fd = -1;
 410         struct pid *pid = get_pid(task_pid(current));
 411         unsigned i;
 412         int ret, submitid;
 413         if (!gpu)
 414                 return -ENXIO;
 415 
 416         /* for now, we just have 3d pipe.. eventually this would need to
 417          * be more clever to dispatch to appropriate gpu module:
 418          */
 419         if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
 420                 return -EINVAL;
 421 
 422         if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
 423                 return -EINVAL;
 424 
 425         if (args->flags & MSM_SUBMIT_SUDO) {
 426                 if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
 427                     !capable(CAP_SYS_RAWIO))
 428                         return -EINVAL;
 429         }
 430 
 431         queue = msm_submitqueue_get(ctx, args->queueid);
 432         if (!queue)
 433                 return -ENOENT;
 434 
 435         /* Get a unique identifier for the submission for logging purposes */
 436         submitid = atomic_inc_return(&ident) - 1;
 437 
 438         ring = gpu->rb[queue->prio];
 439         trace_msm_gpu_submit(pid_nr(pid), ring->id, submitid,
 440                 args->nr_bos, args->nr_cmds);
 441 
 442         if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
 443                 struct dma_fence *in_fence;
 444 
 445                 in_fence = sync_file_get_fence(args->fence_fd);
 446 
 447                 if (!in_fence)
 448                         return -EINVAL;
 449 
 450                 /*
 451                  * Wait if the fence is from a foreign context, or if the fence
 452                  * array contains any fence from a foreign context.
 453                  */
 454                 ret = 0;
 455                 if (!dma_fence_match_context(in_fence, ring->fctx->context))
 456                         ret = dma_fence_wait(in_fence, true);
 457 
 458                 dma_fence_put(in_fence);
 459                 if (ret)
 460                         return ret;
 461         }
 462 
 463         ret = mutex_lock_interruptible(&dev->struct_mutex);
 464         if (ret)
 465                 return ret;
 466 
 467         if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
 468                 out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
 469                 if (out_fence_fd < 0) {
 470                         ret = out_fence_fd;
 471                         goto out_unlock;
 472                 }
 473         }
 474 
 475         submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos,
 476                 args->nr_cmds);
 477         if (!submit) {
 478                 ret = -ENOMEM;
 479                 goto out_unlock;
 480         }
 481 
 482         submit->pid = pid;
 483         submit->ident = submitid;
 484 
 485         if (args->flags & MSM_SUBMIT_SUDO)
 486                 submit->in_rb = true;
 487 
 488         ret = submit_lookup_objects(submit, args, file);
 489         if (ret)
 490                 goto out;
 491 
 492         ret = submit_lock_objects(submit);
 493         if (ret)
 494                 goto out;
 495 
 496         ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT));
 497         if (ret)
 498                 goto out;
 499 
 500         ret = submit_pin_objects(submit);
 501         if (ret)
 502                 goto out;
 503 
 504         for (i = 0; i < args->nr_cmds; i++) {
 505                 struct drm_msm_gem_submit_cmd submit_cmd;
 506                 void __user *userptr =
 507                         u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
 508                 struct msm_gem_object *msm_obj;
 509                 uint64_t iova;
 510 
 511                 ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
 512                 if (ret) {
 513                         ret = -EFAULT;
 514                         goto out;
 515                 }
 516 
 517                 /* validate input from userspace: */
 518                 switch (submit_cmd.type) {
 519                 case MSM_SUBMIT_CMD_BUF:
 520                 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
 521                 case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
 522                         break;
 523                 default:
 524                         DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
 525                         ret = -EINVAL;
 526                         goto out;
 527                 }
 528 
 529                 ret = submit_bo(submit, submit_cmd.submit_idx,
 530                                 &msm_obj, &iova, NULL);
 531                 if (ret)
 532                         goto out;
 533 
 534                 if (submit_cmd.size % 4) {
 535                         DRM_ERROR("non-aligned cmdstream buffer size: %u\n",
 536                                         submit_cmd.size);
 537                         ret = -EINVAL;
 538                         goto out;
 539                 }
 540 
 541                 if (!submit_cmd.size ||
 542                         ((submit_cmd.size + submit_cmd.submit_offset) >
 543                                 msm_obj->base.size)) {
 544                         DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
 545                         ret = -EINVAL;
 546                         goto out;
 547                 }
 548 
 549                 submit->cmd[i].type = submit_cmd.type;
 550                 submit->cmd[i].size = submit_cmd.size / 4;
 551                 submit->cmd[i].iova = iova + submit_cmd.submit_offset;
 552                 submit->cmd[i].idx  = submit_cmd.submit_idx;
 553 
 554                 if (submit->valid)
 555                         continue;
 556 
 557                 ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
 558                                 submit_cmd.nr_relocs, submit_cmd.relocs);
 559                 if (ret)
 560                         goto out;
 561         }
 562 
 563         submit->nr_cmds = i;
 564 
 565         submit->fence = msm_fence_alloc(ring->fctx);
 566         if (IS_ERR(submit->fence)) {
 567                 ret = PTR_ERR(submit->fence);
 568                 submit->fence = NULL;
 569                 goto out;
 570         }
 571 
 572         if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
 573                 sync_file = sync_file_create(submit->fence);
 574                 if (!sync_file) {
 575                         ret = -ENOMEM;
 576                         goto out;
 577                 }
 578         }
 579 
 580         msm_gpu_submit(gpu, submit, ctx);
 581 
 582         args->fence = submit->fence->seqno;
 583 
 584         if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
 585                 fd_install(out_fence_fd, sync_file->file);
 586                 args->fence_fd = out_fence_fd;
 587         }
 588 
 589 out:
 590         submit_cleanup(submit);
 591         if (ret)
 592                 msm_gem_submit_free(submit);
 593 out_unlock:
 594         if (ret && (out_fence_fd >= 0))
 595                 put_unused_fd(out_fence_fd);
 596         mutex_unlock(&dev->struct_mutex);
 597         return ret;
 598 }

/* [<][>][^][v][top][bottom][index][help] */