root/drivers/gpu/drm/ttm/ttm_bo_vm.c

DEFINITIONS

This source file includes the following definitions.
  1. ttm_bo_vm_fault_idle
  2. ttm_bo_io_mem_pfn
  3. ttm_bo_vm_fault
  4. ttm_bo_vm_open
  5. ttm_bo_vm_close
  6. ttm_bo_vm_access_kmap
  7. ttm_bo_vm_access
  8. ttm_bo_vm_lookup
  9. ttm_bo_mmap
  10. ttm_fbdev_mmap

/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/pfn_t.h>
#include <linux/rbtree.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/mem_encrypt.h>

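/*
 * Number of PTEs set up per fault: the faulting page plus up to
 * TTM_BO_VM_NUM_PREFAULT - 1 speculatively prefaulted pages after it.
 */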
#define TTM_BO_VM_NUM_PREFAULT 16

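/*
 * ttm_bo_vm_fault_idle - make sure a pending pipelined move of @bo has
 * finished before the fault handler sets up PTEs.
 *
 * Returns 0 once bo->moving has signaled (or was never set), VM_FAULT_RETRY
 * if the fault should be retried, VM_FAULT_NOPAGE if an interruptible wait
 * was interrupted, or VM_FAULT_SIGBUS on other wait errors.  In the
 * retry-and-wait case (FAULT_FLAG_ALLOW_RETRY without
 * FAULT_FLAG_RETRY_NOWAIT) mmap_sem is dropped and the reservation is
 * unlocked here, so the caller must not unlock it again.
 */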
static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                struct vm_fault *vmf)
{
        vm_fault_t ret = 0;
        int err = 0;

        if (likely(!bo->moving))
                goto out_unlock;

        /*
         * Quick non-stalling check for idle.
         */
        if (dma_fence_is_signaled(bo->moving))
                goto out_clear;

        /*
         * If possible, avoid waiting for GPU with mmap_sem
         * held.
         */
        if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                ret = VM_FAULT_RETRY;
                if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
                        goto out_unlock;

                ttm_bo_get(bo);
                up_read(&vmf->vma->vm_mm->mmap_sem);
                (void) dma_fence_wait(bo->moving, true);
                dma_resv_unlock(bo->base.resv);
                ttm_bo_put(bo);
                goto out_unlock;
        }

        /*
         * Ordinary wait.
         */
        err = dma_fence_wait(bo->moving, true);
        if (unlikely(err != 0)) {
                ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
                        VM_FAULT_NOPAGE;
                goto out_unlock;
        }

out_clear:
        dma_fence_put(bo->moving);
        bo->moving = NULL;

out_unlock:
        return ret;
}

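/*
 * ttm_bo_io_mem_pfn - return the pfn backing @page_offset of a BO placed in
 * I/O (bus) memory.  Drivers may override the calculation with the
 * io_mem_pfn() hook; the default derives it from the bus base address plus
 * the BO's offset within the aperture.
 */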
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
                                       unsigned long page_offset)
{
        struct ttm_bo_device *bdev = bo->bdev;

        if (bdev->driver->io_mem_pfn)
                return bdev->driver->io_mem_pfn(bo, page_offset);

        return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
                + page_offset;
}

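/*
 * ttm_bo_vm_fault - .fault handler installed in ttm_bo_vm_ops.
 *
 * Tries to reserve the BO without blocking (retrying the fault if that
 * fails), refuses imported SG objects, calls the driver's
 * fault_reserve_notify() hook, waits for any pipelined move to finish,
 * reserves the I/O aperture and then inserts PTEs for the faulting page
 * and up to TTM_BO_VM_NUM_PREFAULT - 1 pages after it.
 */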
static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
            vma->vm_private_data;
        struct ttm_bo_device *bdev = bo->bdev;
        unsigned long page_offset;
        unsigned long page_last;
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
        int err;
        int i;
        vm_fault_t ret = VM_FAULT_NOPAGE;
        unsigned long address = vmf->address;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];
        struct vm_area_struct cvma;

        /*
         * Work around locking order reversal in fault / nopfn
         * between mmap_sem and bo_reserve: Perform a trylock operation
         * for reserve, and if it fails, retry the fault after waiting
         * for the buffer to become unreserved.
         */
        if (unlikely(!dma_resv_trylock(bo->base.resv))) {
                if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
                        if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                                ttm_bo_get(bo);
                                up_read(&vmf->vma->vm_mm->mmap_sem);
                                (void) ttm_bo_wait_unreserved(bo);
                                ttm_bo_put(bo);
                        }

                        return VM_FAULT_RETRY;
                }

                /*
                 * If we'd want to change locking order to
                 * mmap_sem -> bo::reserve, we'd use a blocking reserve here
                 * instead of retrying the fault...
                 */
                return VM_FAULT_NOPAGE;
        }

        /*
         * Refuse to fault imported pages. This should be handled
         * (if at all) by redirecting mmap to the exporter.
         */
        if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
                ret = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        if (bdev->driver->fault_reserve_notify) {
                struct dma_fence *moving = dma_fence_get(bo->moving);

                err = bdev->driver->fault_reserve_notify(bo);
                switch (err) {
                case 0:
                        break;
                case -EBUSY:
                case -ERESTARTSYS:
                        ret = VM_FAULT_NOPAGE;
                        goto out_unlock;
                default:
                        ret = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }

                if (bo->moving != moving) {
                        spin_lock(&bdev->glob->lru_lock);
                        ttm_bo_move_to_lru_tail(bo, NULL);
                        spin_unlock(&bdev->glob->lru_lock);
                }
                dma_fence_put(moving);
        }

        /*
         * Wait for buffer data in transit, due to a pipelined
         * move.
         */
        ret = ttm_bo_vm_fault_idle(bo, vmf);
        if (unlikely(ret != 0)) {
                if (ret == VM_FAULT_RETRY &&
                    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
                        /* The BO has already been unreserved. */
                        return ret;
                }

                goto out_unlock;
        }

        err = ttm_mem_io_lock(man, true);
        if (unlikely(err != 0)) {
                ret = VM_FAULT_NOPAGE;
                goto out_unlock;
        }
        err = ttm_mem_io_reserve_vm(bo);
        if (unlikely(err != 0)) {
                ret = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
                vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
        page_last = vma_pages(vma) + vma->vm_pgoff -
                drm_vma_node_start(&bo->base.vma_node);

        if (unlikely(page_offset >= bo->num_pages)) {
                ret = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }

        /*
         * Make a local vma copy to modify the page_prot member
         * and vm_flags if necessary. The vma parameter is protected
         * by mmap_sem in write mode.
         */
        cvma = *vma;
        cvma.vm_page_prot = vm_get_page_prot(cvma.vm_flags);

        if (bo->mem.bus.is_iomem) {
                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                cvma.vm_page_prot);
        } else {
                struct ttm_operation_ctx ctx = {
                        .interruptible = false,
                        .no_wait_gpu = false,
                        .flags = TTM_OPT_FLAG_FORCE_ALLOC
                };

                ttm = bo->ttm;
                cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
                                                cvma.vm_page_prot);

                /* Allocate all pages at once, most common usage */
                if (ttm_tt_populate(ttm, &ctx)) {
                        ret = VM_FAULT_OOM;
                        goto out_io_unlock;
                }
        }

        /*
         * Speculatively prefault a number of pages. Only error on
         * first page.
         */
        for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
                if (bo->mem.bus.is_iomem) {
                        /* Iomem should not be marked encrypted */
                        cvma.vm_page_prot = pgprot_decrypted(cvma.vm_page_prot);
                        pfn = ttm_bo_io_mem_pfn(bo, page_offset);
                } else {
                        page = ttm->pages[page_offset];
                        if (unlikely(!page && i == 0)) {
                                ret = VM_FAULT_OOM;
                                goto out_io_unlock;
                        } else if (unlikely(!page)) {
                                break;
                        }
                        page->index = drm_vma_node_start(&bo->base.vma_node) +
                                page_offset;
                        pfn = page_to_pfn(page);
                }

                if (vma->vm_flags & VM_MIXEDMAP)
                        ret = vmf_insert_mixed(&cvma, address,
                                        __pfn_to_pfn_t(pfn, PFN_DEV));
                else
                        ret = vmf_insert_pfn(&cvma, address, pfn);

                /* Never error on prefaulted PTEs */
                if (unlikely((ret & VM_FAULT_ERROR))) {
                        if (i == 0)
                                goto out_io_unlock;
                        else
                                break;
                }

                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
                        break;
        }
        ret = VM_FAULT_NOPAGE;
out_io_unlock:
        ttm_mem_io_unlock(man);
out_unlock:
        dma_resv_unlock(bo->base.resv);
        return ret;
}

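/*
 * ttm_bo_vm_open - VMA open callback.  Called when a mapping is duplicated
 * (e.g. on fork or VMA split); each VMA holds its own reference on the BO.
 */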
static void ttm_bo_vm_open(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo =
            (struct ttm_buffer_object *)vma->vm_private_data;

        WARN_ON(bo->bdev->dev_mapping != vma->vm_file->f_mapping);

        ttm_bo_get(bo);
}

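/*
 * ttm_bo_vm_close - VMA close callback.  Drops the reference taken for the
 * VMA and clears vm_private_data.
 */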
static void ttm_bo_vm_close(struct vm_area_struct *vma)
{
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data;

        ttm_bo_put(bo);
        vma->vm_private_data = NULL;
}

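/*
 * ttm_bo_vm_access_kmap - copy @len bytes at byte @offset between @buf and
 * the BO's backing pages, kmapping one page at a time.  Returns @len on
 * success or a negative error code from ttm_bo_kmap().
 */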
static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
                                 unsigned long offset,
                                 uint8_t *buf, int len, int write)
{
        unsigned long page = offset >> PAGE_SHIFT;
        unsigned long bytes_left = len;
        int ret;

        /* Copy a page at a time, that way no extra virtual address
         * mapping is needed
         */
        offset -= page << PAGE_SHIFT;
        do {
                unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
                struct ttm_bo_kmap_obj map;
                void *ptr;
                bool is_iomem;

                ret = ttm_bo_kmap(bo, page, 1, &map);
                if (ret)
                        return ret;

                ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
                WARN_ON_ONCE(is_iomem);
                if (write)
                        memcpy(ptr, buf, bytes);
                else
                        memcpy(buf, ptr, bytes);
                ttm_bo_kunmap(&map);

                page++;
                buf += bytes;
                bytes_left -= bytes;
                offset = 0;
        } while (bytes_left);

        return len;
}

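/*
 * ttm_bo_vm_access - .access callback, used by access_process_vm() (ptrace,
 * /proc/<pid>/mem) to read or write a TTM mapping.  Reserves the BO, swaps
 * system-memory buffers back in if needed, copies through a kmap for
 * system/TT placements and otherwise defers to the driver's access_memory()
 * hook, if any.
 */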
static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
                            void *buf, int len, int write)
{
        unsigned long offset = (addr) - vma->vm_start;
        struct ttm_buffer_object *bo = vma->vm_private_data;
        int ret;

        if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
                return -EIO;

        ret = ttm_bo_reserve(bo, true, false, NULL);
        if (ret)
                return ret;

        switch (bo->mem.mem_type) {
        case TTM_PL_SYSTEM:
                if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
                        ret = ttm_tt_swapin(bo->ttm);
                        /* Don't return with the reservation still held;
                         * break out to the ttm_bo_unreserve() below instead.
                         */
                        if (unlikely(ret != 0))
                                break;
                }
                /* fall through */
        case TTM_PL_TT:
                ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
                break;
        default:
                if (bo->bdev->driver->access_memory)
                        ret = bo->bdev->driver->access_memory(
                                bo, offset, buf, len, write);
                else
                        ret = -EIO;
        }

        ttm_bo_unreserve(bo);

        return ret;
}

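/*
 * VM operations installed on TTM BO mappings by ttm_bo_mmap() and
 * ttm_fbdev_mmap().
 */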
static const struct vm_operations_struct ttm_bo_vm_ops = {
        .fault = ttm_bo_vm_fault,
        .open = ttm_bo_vm_open,
        .close = ttm_bo_vm_close,
        .access = ttm_bo_vm_access
};

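/*
 * ttm_bo_vm_lookup - translate an mmap offset (in pages) into the buffer
 * object whose address-space node covers [offset, offset + pages).
 * Returns a referenced BO, or NULL if none is found or the object is
 * already on its way to destruction (ttm_bo_get_unless_zero() failed).
 */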
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
                                                  unsigned long offset,
                                                  unsigned long pages)
{
        struct drm_vma_offset_node *node;
        struct ttm_buffer_object *bo = NULL;

        drm_vma_offset_lock_lookup(&bdev->vma_manager);

        node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
        if (likely(node)) {
                bo = container_of(node, struct ttm_buffer_object,
                                  base.vma_node);
                bo = ttm_bo_get_unless_zero(bo);
        }

        drm_vma_offset_unlock_lookup(&bdev->vma_manager);

        if (!bo)
                pr_err("Could not find buffer object to map\n");

        return bo;
}

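/*
 * ttm_bo_mmap - mmap helper for TTM drivers.  Resolves vma->vm_pgoff to a
 * buffer object on @bdev, checks permissions through the driver's
 * verify_access() hook and wires up ttm_bo_vm_ops.  Returns 0 on success
 * or a negative error code.
 */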
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
                struct ttm_bo_device *bdev)
{
        struct ttm_bo_driver *driver;
        struct ttm_buffer_object *bo;
        int ret;

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
                return -EINVAL;

        bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
        if (unlikely(!bo))
                return -EINVAL;

        driver = bo->bdev->driver;
        if (unlikely(!driver->verify_access)) {
                ret = -EPERM;
                goto out_unref;
        }
        ret = driver->verify_access(bo, filp);
        if (unlikely(ret != 0))
                goto out_unref;

        vma->vm_ops = &ttm_bo_vm_ops;

        /*
         * Note: We're transferring the bo reference to
         * vma->vm_private_data here.
         */

        vma->vm_private_data = bo;

        /*
         * We'd like to use VM_PFNMAP on shared mappings, where
         * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
         * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
         * bad for performance. Until that has been sorted out, use
         * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
         */
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        return 0;
out_unref:
        ttm_bo_put(bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mmap);

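/*
 * ttm_fbdev_mmap - set up a user mapping of @bo for fbdev emulation, where
 * the whole object is mapped at pgoff 0.  Takes a reference on the BO that
 * is handed over to the VMA and released by ttm_bo_vm_close().
 */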
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
        if (vma->vm_pgoff != 0)
                return -EACCES;

        ttm_bo_get(bo);

        vma->vm_ops = &ttm_bo_vm_ops;
        vma->vm_private_data = bo;
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_flags |= VM_IO | VM_DONTEXPAND;
        return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
