drivers/gpu/drm/radeon/radeon_ttm.c


DEFINITIONS

This source file includes the following definitions:
  1. radeon_get_rdev
  2. radeon_invalidate_caches
  3. radeon_init_mem_type
  4. radeon_evict_flags
  5. radeon_verify_access
  6. radeon_move_null
  7. radeon_move_blit
  8. radeon_move_vram_ram
  9. radeon_move_ram_vram
  10. radeon_bo_move
  11. radeon_ttm_io_mem_reserve
  12. radeon_ttm_io_mem_free
  13. radeon_ttm_tt_pin_userptr
  14. radeon_ttm_tt_unpin_userptr
  15. radeon_ttm_backend_bind
  16. radeon_ttm_backend_unbind
  17. radeon_ttm_backend_destroy
  18. radeon_ttm_tt_create
  19. radeon_ttm_tt_to_gtt
  20. radeon_ttm_tt_populate
  21. radeon_ttm_tt_unpopulate
  22. radeon_ttm_tt_set_userptr
  23. radeon_ttm_tt_has_userptr
  24. radeon_ttm_tt_is_readonly
  25. radeon_ttm_init
  26. radeon_ttm_fini
  27. radeon_ttm_set_active_vram_size
  28. radeon_ttm_fault
  29. radeon_mmap
  30. radeon_mm_dump_table
  31. radeon_ttm_vram_open
  32. radeon_ttm_vram_read
  33. radeon_ttm_gtt_open
  34. radeon_ttm_gtt_read
  35. radeon_ttm_debugfs_init
  36. radeon_ttm_debugfs_fini

/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */

#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>

#include <drm/drm_agpsupport.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_pci.h>
#include <drm/drm_prime.h>
#include <drm/radeon_drm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_page_alloc.h>
#include <drm/ttm/ttm_placement.h>

#include "radeon_reg.h"
#include "radeon.h"

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);

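/*
 * radeon_get_rdev - recover the radeon_device behind a TTM bo device
 *
 * The ttm_bo_device handed to the TTM callbacks is embedded in struct
 * radeon_mman, which itself is embedded in struct radeon_device, so two
 * container_of() steps take us from the TTM handle back to the driver
 * instance.
 */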
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
        struct radeon_mman *mman;
        struct radeon_device *rdev;

        mman = container_of(bdev, struct radeon_mman, bdev);
        rdev = container_of(mman, struct radeon_device, mman);
        return rdev;
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        return 0;
}

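/*
 * radeon_init_mem_type - describe the memory domains to TTM
 *
 * Fills in the manager, GPU offset, caching options and flags for the
 * SYSTEM, TT (GART) and VRAM placements. On AGP boards the TT domain is
 * backed by the AGP aperture instead of the on-chip GART, which limits
 * caching to uncached or write-combined.
 */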
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                                struct ttm_mem_type_manager *man)
{
        struct radeon_device *rdev;

        rdev = radeon_get_rdev(bdev);

        switch (type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_TT:
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = rdev->mc.gtt_start;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if IS_ENABLED(CONFIG_AGP)
                if (rdev->flags & RADEON_IS_AGP) {
                        if (!rdev->ddev->agp) {
                                DRM_ERROR("AGP is not enabled for memory type %u\n",
                                          (unsigned)type);
                                return -EINVAL;
                        }
                        if (!rdev->ddev->agp->cant_use_aperture)
                                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED |
                                                 TTM_PL_FLAG_WC;
                        man->default_caching = TTM_PL_FLAG_WC;
                }
#endif
                break;
        case TTM_PL_VRAM:
                /* "On-card" video ram */
                man->func = &ttm_bo_manager_func;
                man->gpu_offset = rdev->mc.vram_start;
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

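/*
 * radeon_evict_flags - choose where to evict a buffer object to
 *
 * A BO evicted from CPU-visible VRAM is first offered the CPU-invisible
 * part of VRAM, with GTT as the only "busy" placement, so it can leave
 * the visible window without pushing other BOs out of VRAM entirely.
 * Everything else falls back to GTT or plain system memory.
 */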
static void radeon_evict_flags(struct ttm_buffer_object *bo,
                               struct ttm_placement *placement)
{
        static const struct ttm_place placements = {
                .fpfn = 0,
                .lpfn = 0,
                .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
        };

        struct radeon_bo *rbo;

        if (!radeon_ttm_bo_is_radeon_bo(bo)) {
                placement->placement = &placements;
                placement->busy_placement = &placements;
                placement->num_placement = 1;
                placement->num_busy_placement = 1;
                return;
        }
        rbo = container_of(bo, struct radeon_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
                else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
                         bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
                        unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
                        int i;

                        /* Try evicting to the CPU inaccessible part of VRAM
                         * first, but only set GTT as busy placement, so this
                         * BO will be evicted to GTT rather than causing other
                         * BOs to be evicted from VRAM
                         */
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
                                                         RADEON_GEM_DOMAIN_GTT);
                        rbo->placement.num_busy_placement = 0;
                        for (i = 0; i < rbo->placement.num_placement; i++) {
                                if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
                                        if (rbo->placements[i].fpfn < fpfn)
                                                rbo->placements[i].fpfn = fpfn;
                                } else {
                                        rbo->placement.busy_placement =
                                                &rbo->placements[i];
                                        rbo->placement.num_busy_placement = 1;
                                }
                        }
                } else
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
                break;
        case TTM_PL_TT:
        default:
                radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
        }
        *placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);

        if (radeon_ttm_tt_has_userptr(bo->ttm))
                return -EPERM;
        return drm_vma_node_verify_access(&rbo->tbo.base.vma_node,
                                          filp->private_data);
}

static void radeon_move_null(struct ttm_buffer_object *bo,
                             struct ttm_mem_reg *new_mem)
{
        struct ttm_mem_reg *old_mem = &bo->mem;

        BUG_ON(old_mem->mm_node != NULL);
        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
}

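/*
 * radeon_move_blit - copy a buffer object with the GPU
 *
 * Converts the page-based mem->start offsets of both placements into GPU
 * addresses (VRAM or GART start plus byte offset) and schedules an
 * asynchronous copy on the current copy ring. The resulting fence is
 * passed to ttm_bo_move_accel_cleanup() so the old backing store is only
 * released once the blit has completed.
 */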
static int radeon_move_blit(struct ttm_buffer_object *bo,
                        bool evict, bool no_wait_gpu,
                        struct ttm_mem_reg *new_mem,
                        struct ttm_mem_reg *old_mem)
{
        struct radeon_device *rdev;
        uint64_t old_start, new_start;
        struct radeon_fence *fence;
        unsigned num_pages;
        int r, ridx;

        rdev = radeon_get_rdev(bo->bdev);
        ridx = radeon_copy_ring_index(rdev);
        old_start = (u64)old_mem->start << PAGE_SHIFT;
        new_start = (u64)new_mem->start << PAGE_SHIFT;

        switch (old_mem->mem_type) {
        case TTM_PL_VRAM:
                old_start += rdev->mc.vram_start;
                break;
        case TTM_PL_TT:
                old_start += rdev->mc.gtt_start;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
                return -EINVAL;
        }
        switch (new_mem->mem_type) {
        case TTM_PL_VRAM:
                new_start += rdev->mc.vram_start;
                break;
        case TTM_PL_TT:
                new_start += rdev->mc.gtt_start;
                break;
        default:
                DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
                return -EINVAL;
        }
        if (!rdev->ring[ridx].ready) {
                DRM_ERROR("Trying to move memory with ring turned off.\n");
                return -EINVAL;
        }

        BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

        num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
        fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv);
        if (IS_ERR(fence))
                return PTR_ERR(fence);

        r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
        radeon_fence_unref(&fence);
        return r;
}

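/*
 * radeon_move_vram_ram - VRAM to system move through a GTT bounce buffer
 *
 * The GPU cannot blit into unbound system pages, so the BO is first
 * given a GTT placement, blitted there, and then moved on to its final
 * system placement with ttm_bo_move_ttm(). radeon_move_ram_vram() below
 * is the mirror image of this path.
 */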
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
                                bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
{
        struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        struct ttm_place placements;
        struct ttm_placement placement;
        int r;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements.fpfn = 0;
        placements.lpfn = 0;
        placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
        if (unlikely(r)) {
                return r;
        }

        r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
        if (unlikely(r)) {
                goto out_cleanup;
        }

        r = ttm_tt_bind(bo->ttm, &tmp_mem, &ctx);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = ttm_bo_move_ttm(bo, &ctx, new_mem);
out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
}

static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
                                bool evict, bool interruptible,
                                bool no_wait_gpu,
                                struct ttm_mem_reg *new_mem)
{
        struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct ttm_mem_reg tmp_mem;
        struct ttm_placement placement;
        struct ttm_place placements;
        int r;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        placement.num_placement = 1;
        placement.placement = &placements;
        placement.num_busy_placement = 1;
        placement.busy_placement = &placements;
        placements.fpfn = 0;
        placements.lpfn = 0;
        placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
        r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
        if (unlikely(r)) {
                return r;
        }
        r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
        r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
        if (unlikely(r)) {
                goto out_cleanup;
        }
out_cleanup:
        ttm_bo_mem_put(bo, &tmp_mem);
        return r;
}

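/*
 * radeon_bo_move - top-level TTM move callback
 *
 * Handles the trivial cases inline (no backing store yet, TT<->SYSTEM
 * where rebinding the GART is enough), routes VRAM<->SYSTEM moves
 * through the bounce-buffer helpers above, and falls back to
 * ttm_bo_move_memcpy() whenever no copy ring is available or the
 * accelerated move fails.
 */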
static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
                          struct ttm_operation_ctx *ctx,
                          struct ttm_mem_reg *new_mem)
{
        struct radeon_device *rdev;
        struct radeon_bo *rbo;
        struct ttm_mem_reg *old_mem = &bo->mem;
        int r;

        r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
        if (r)
                return r;

        /* Can't move a pinned BO */
        rbo = container_of(bo, struct radeon_bo, tbo);
        if (WARN_ON_ONCE(rbo->pin_count > 0))
                return -EINVAL;

        rdev = radeon_get_rdev(bo->bdev);
        if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                radeon_move_null(bo, new_mem);
                return 0;
        }
        if ((old_mem->mem_type == TTM_PL_TT &&
             new_mem->mem_type == TTM_PL_SYSTEM) ||
            (old_mem->mem_type == TTM_PL_SYSTEM &&
             new_mem->mem_type == TTM_PL_TT)) {
                /* bind is enough */
                radeon_move_null(bo, new_mem);
                return 0;
        }
        if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
            rdev->asic->copy.copy == NULL) {
                /* use memcpy */
                goto memcpy;
        }

        if (old_mem->mem_type == TTM_PL_VRAM &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                r = radeon_move_vram_ram(bo, evict, ctx->interruptible,
                                         ctx->no_wait_gpu, new_mem);
        } else if (old_mem->mem_type == TTM_PL_SYSTEM &&
                   new_mem->mem_type == TTM_PL_VRAM) {
                r = radeon_move_ram_vram(bo, evict, ctx->interruptible,
                                         ctx->no_wait_gpu, new_mem);
        } else {
                r = radeon_move_blit(bo, evict, ctx->no_wait_gpu,
                                     new_mem, old_mem);
        }

        if (r) {
memcpy:
                r = ttm_bo_move_memcpy(bo, ctx, new_mem);
                if (r) {
                        return r;
                }
        }

        /* update statistics */
        atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
        return 0;
}

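/*
 * radeon_ttm_io_mem_reserve - set up the bus address of a placement
 *
 * Fills in mem->bus so TTM can map the region from the CPU: nothing for
 * system memory, the AGP aperture for TT on AGP systems, and the PCI
 * aperture for VRAM, rejecting anything beyond the CPU-visible part.
 * The Alpha-specific block pre-ioremaps the range because bus.base is
 * rewritten to a hose-relative address there.
 */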
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct radeon_device *rdev = radeon_get_rdev(bdev);

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* system memory */
                return 0;
        case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
                if (rdev->flags & RADEON_IS_AGP) {
                        /* RADEON_IS_AGP is set only if AGP is active */
                        mem->bus.offset = mem->start << PAGE_SHIFT;
                        mem->bus.base = rdev->mc.agp_base;
                        mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
                }
#endif
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
                /* check if it's visible */
                if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
                        return -EINVAL;
                mem->bus.base = rdev->mc.aper_base;
                mem->bus.is_iomem = true;
#ifdef __alpha__
                /*
                 * Alpha: use bus.addr to hold the ioremap() return,
                 * so we can modify bus.base below.
                 */
                if (mem->placement & TTM_PL_FLAG_WC)
                        mem->bus.addr =
                                ioremap_wc(mem->bus.base + mem->bus.offset,
                                           mem->bus.size);
                else
                        mem->bus.addr =
                                ioremap_nocache(mem->bus.base + mem->bus.offset,
                                                mem->bus.size);
                if (!mem->bus.addr)
                        return -ENOMEM;

                /*
                 * Alpha: Use just the bus offset plus
                 * the hose/domain memory base for bus.base.
                 * It then can be used to build PTEs for VRAM
                 * access, as done in ttm_bo_vm_fault().
                 */
                mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
                        rdev->ddev->hose->dense_mem_base;
#endif
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
        struct ttm_dma_tt               ttm;
        struct radeon_device            *rdev;
        u64                             offset;

        uint64_t                        userptr;
        struct mm_struct                *usermm;
        uint32_t                        userflags;
};

/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
        struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
        struct radeon_ttm_tt *gtt = (void *)ttm;
        unsigned pinned = 0, nents;
        int r;

        int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

        if (current->mm != gtt->usermm)
                return -EPERM;

        if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
                /*
                 * Check that we only pin down anonymous memory
                 * to prevent problems with writeback.
                 */
                unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
                struct vm_area_struct *vma;

                vma = find_vma(gtt->usermm, gtt->userptr);
                if (!vma || vma->vm_file || vma->vm_end < end)
                        return -EPERM;
        }

        do {
                unsigned num_pages = ttm->num_pages - pinned;
                uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
                struct page **pages = ttm->pages + pinned;

                r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0,
                                   pages, NULL);
                if (r < 0)
                        goto release_pages;

                pinned += r;

        } while (pinned < ttm->num_pages);

        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
                                      ttm->num_pages << PAGE_SHIFT,
                                      GFP_KERNEL);
        if (r)
                goto release_sg;

        r = -ENOMEM;
        nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
        if (nents != ttm->sg->nents)
                goto release_sg;

        drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                         gtt->ttm.dma_address, ttm->num_pages);

        return 0;

release_sg:
        kfree(ttm->sg);

release_pages:
        release_pages(ttm->pages, pinned);
        return r;
}

static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
        struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
        struct radeon_ttm_tt *gtt = (void *)ttm;
        struct sg_page_iter sg_iter;

        int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
                DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

        /* double check that we don't free the table twice */
        if (!ttm->sg->sgl)
                return;

        /* free the sg table and pages again */
        dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

        for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
                struct page *page = sg_page_iter_page(&sg_iter);

                if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
                        set_page_dirty(page);

                mark_page_accessed(page);
                put_page(page);
        }

        sg_free_table(ttm->sg);
}

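/*
 * radeon_ttm_backend_bind - insert a TT object into the GART
 *
 * Userptr BOs have their pages pinned first and are entered read-only;
 * everything else keeps write access, and CPU-cached pages additionally
 * get the snooped attribute.
 */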
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
                                   struct ttm_mem_reg *bo_mem)
{
        struct radeon_ttm_tt *gtt = (void *)ttm;
        uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
                RADEON_GART_PAGE_WRITE;
        int r;

        if (gtt->userptr) {
                r = radeon_ttm_tt_pin_userptr(ttm);
                if (r)
                        return r;
                flags &= ~RADEON_GART_PAGE_WRITE;
        }

        gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
        if (!ttm->num_pages) {
                WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
                     ttm->num_pages, bo_mem, ttm);
        }
        if (ttm->caching_state == tt_cached)
                flags |= RADEON_GART_PAGE_SNOOP;
        r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
                             ttm->pages, gtt->ttm.dma_address, flags);
        if (r) {
                DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
                          ttm->num_pages, (unsigned)gtt->offset);
                return r;
        }
        return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = (void *)ttm;

        radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);

        if (gtt->userptr)
                radeon_ttm_tt_unpin_userptr(ttm);

        return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = (void *)ttm;

        ttm_dma_tt_fini(&gtt->ttm);
        kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
        .bind = &radeon_ttm_backend_bind,
        .unbind = &radeon_ttm_backend_unbind,
        .destroy = &radeon_ttm_backend_destroy,
};

static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
                                           uint32_t page_flags)
{
        struct radeon_device *rdev;
        struct radeon_ttm_tt *gtt;

        rdev = radeon_get_rdev(bo->bdev);
#if IS_ENABLED(CONFIG_AGP)
        if (rdev->flags & RADEON_IS_AGP) {
                return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
                                         page_flags);
        }
#endif

        gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
        if (gtt == NULL) {
                return NULL;
        }
        gtt->ttm.ttm.func = &radeon_backend_func;
        gtt->rdev = rdev;
        if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
                kfree(gtt);
                return NULL;
        }
        return &gtt->ttm.ttm;
}

static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
{
        if (!ttm || ttm->func != &radeon_backend_func)
                return NULL;
        return (struct radeon_ttm_tt *)ttm;
}

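/*
 * radeon_ttm_tt_populate - allocate backing pages for a TT object
 *
 * Userptr objects only need an empty sg_table here (their pages are
 * pinned at bind time), dma-buf imports already carry their pages in
 * ttm->sg, AGP uses its own allocator, and everything else is fed from
 * the swiotlb-aware DMA pool or the regular TTM page pool.
 */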
static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
                                  struct ttm_operation_ctx *ctx)
{
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
        struct radeon_device *rdev;
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (gtt && gtt->userptr) {
                ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
                if (!ttm->sg)
                        return -ENOMEM;

                ttm->page_flags |= TTM_PAGE_FLAG_SG;
                ttm->state = tt_unbound;
                return 0;
        }

        if (slave && ttm->sg) {
                drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
                                                 gtt->ttm.dma_address, ttm->num_pages);
                ttm->state = tt_unbound;
                return 0;
        }

        rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
        if (rdev->flags & RADEON_IS_AGP) {
                return ttm_agp_tt_populate(ttm, ctx);
        }
#endif

#ifdef CONFIG_SWIOTLB
        if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
                return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
        }
#endif

        return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
        struct radeon_device *rdev;
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
        bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

        if (gtt && gtt->userptr) {
                kfree(ttm->sg);
                ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
                return;
        }

        if (slave)
                return;

        rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
        if (rdev->flags & RADEON_IS_AGP) {
                ttm_agp_tt_unpopulate(ttm);
                return;
        }
#endif

#ifdef CONFIG_SWIOTLB
        if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
                ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
                return;
        }
#endif

        ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
}

int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
                              uint32_t flags)
{
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

        if (gtt == NULL)
                return -EINVAL;

        gtt->userptr = addr;
        gtt->usermm = current->mm;
        gtt->userflags = flags;
        return 0;
}

bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

        if (gtt == NULL)
                return false;

        return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
        struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

        if (gtt == NULL)
                return false;

        return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

static struct ttm_bo_driver radeon_bo_driver = {
        .ttm_tt_create = &radeon_ttm_tt_create,
        .ttm_tt_populate = &radeon_ttm_tt_populate,
        .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
        .invalidate_caches = &radeon_invalidate_caches,
        .init_mem_type = &radeon_init_mem_type,
        .eviction_valuable = ttm_bo_eviction_valuable,
        .evict_flags = &radeon_evict_flags,
        .move = &radeon_bo_move,
        .verify_access = &radeon_verify_access,
        .move_notify = &radeon_bo_move_notify,
        .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
        .io_mem_reserve = &radeon_ttm_io_mem_reserve,
        .io_mem_free = &radeon_ttm_io_mem_free,
};

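/*
 * radeon_ttm_init - bring up the TTM memory managers
 *
 * Initializes the bo device and the VRAM and GTT heaps, initially limits
 * the active VRAM size to the CPU-visible part (only lpfn is affected,
 * per the comment below), pins the 256KB stolen_vga_memory buffer in
 * VRAM, and registers the debugfs files.
 */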
int radeon_ttm_init(struct radeon_device *rdev)
{
        int r;

        /* No other users of the address space, so set it to 0 */
        r = ttm_bo_device_init(&rdev->mman.bdev,
                               &radeon_bo_driver,
                               rdev->ddev->anon_inode->i_mapping,
                               dma_addressing_limited(&rdev->pdev->dev));
        if (r) {
                DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
                return r;
        }
        rdev->mman.initialized = true;
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
                           rdev->mc.real_vram_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing VRAM heap.\n");
                return r;
        }
        /* Change the size here instead of the init above so only lpfn is affected */
        radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

        r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM, 0, NULL,
                             NULL, &rdev->stolen_vga_memory);
        if (r) {
                return r;
        }
        r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
        if (r)
                return r;
        r = radeon_bo_pin(rdev->stolen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
        radeon_bo_unreserve(rdev->stolen_vga_memory);
        if (r) {
                radeon_bo_unref(&rdev->stolen_vga_memory);
                return r;
        }
        DRM_INFO("radeon: %uM of VRAM memory ready\n",
                 (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));
        r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
                           rdev->mc.gtt_size >> PAGE_SHIFT);
        if (r) {
                DRM_ERROR("Failed initializing GTT heap.\n");
                return r;
        }
        DRM_INFO("radeon: %uM of GTT memory ready.\n",
                 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

        r = radeon_ttm_debugfs_init(rdev);
        if (r) {
                DRM_ERROR("Failed to init debugfs\n");
                return r;
        }
        return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
        int r;

        if (!rdev->mman.initialized)
                return;
        radeon_ttm_debugfs_fini(rdev);
        if (rdev->stolen_vga_memory) {
                r = radeon_bo_reserve(rdev->stolen_vga_memory, false);
                if (r == 0) {
                        radeon_bo_unpin(rdev->stolen_vga_memory);
                        radeon_bo_unreserve(rdev->stolen_vga_memory);
                }
                radeon_bo_unref(&rdev->stolen_vga_memory);
        }
        ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
        ttm_bo_device_release(&rdev->mman.bdev);
        radeon_gart_fini(rdev);
        rdev->mman.initialized = false;
        DRM_INFO("radeon: ttm finalized\n");
}

/*
 * This should only be called at bootup or when userspace
 * isn't running.
 */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
        struct ttm_mem_type_manager *man;

        if (!rdev->mman.initialized)
                return;

        man = &rdev->mman.bdev.man[TTM_PL_VRAM];
        /* this just adjusts TTM size idea, which sets lpfn to the correct value */
        man->size = size >> PAGE_SHIFT;
}

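/*
 * CPU fault handling: radeon_mmap() lets TTM set up the VMA, then
 * installs a copy of TTM's vm_ops whose fault handler takes
 * pm.mclk_lock around the real TTM fault, so memory reclocking cannot
 * race with CPU access to a buffer object.
 */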
static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo;
        struct radeon_device *rdev;
        vm_fault_t ret;

        bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
        if (bo == NULL) {
                return VM_FAULT_NOPAGE;
        }
        rdev = radeon_get_rdev(bo->bdev);
        down_read(&rdev->pm.mclk_lock);
        ret = ttm_vm_ops->fault(vmf);
        up_read(&rdev->pm.mclk_lock);
        return ret;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
        int r;
        struct drm_file *file_priv = filp->private_data;
        struct radeon_device *rdev = file_priv->minor->dev->dev_private;

        if (rdev == NULL) {
                return -EINVAL;
        }
        r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
        if (unlikely(r != 0)) {
                return r;
        }
        if (unlikely(ttm_vm_ops == NULL)) {
                ttm_vm_ops = vma->vm_ops;
                radeon_ttm_vm_ops = *ttm_vm_ops;
                radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
        }
        vma->vm_ops = &radeon_ttm_vm_ops;
        return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *)m->private;
        unsigned ttm_pl = *(int *)node->info_ent->data;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        struct ttm_mem_type_manager *man = &rdev->mman.bdev.man[ttm_pl];
        struct drm_printer p = drm_seq_file_printer(m);

        man->func->debug(man, &p);
        return 0;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
        {"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
        {"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
        {"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
        {"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

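/*
 * debugfs access to VRAM: reads go 4 bytes at a time through the
 * MM_INDEX/MM_DATA indirect register pair (with bit 31 of MM_INDEX set,
 * and EVERGREEN_MM_INDEX_HI carrying the upper offset bits on Evergreen
 * and newer), so even VRAM beyond the CPU-visible aperture can be
 * inspected.
 */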
static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
        struct radeon_device *rdev = inode->i_private;

        i_size_write(inode, rdev->mc.mc_vram_size);
        filep->private_data = inode->i_private;
        return 0;
}

static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
                                    size_t size, loff_t *pos)
{
        struct radeon_device *rdev = f->private_data;
        ssize_t result = 0;
        int r;

        if (size & 0x3 || *pos & 0x3)
                return -EINVAL;

        while (size) {
                unsigned long flags;
                uint32_t value;

                if (*pos >= rdev->mc.mc_vram_size)
                        return result;

                spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
                WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
                if (rdev->family >= CHIP_CEDAR)
                        WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
                value = RREG32(RADEON_MM_DATA);
                spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

                r = put_user(value, (uint32_t __user *)buf);
                if (r)
                        return r;

                result += 4;
                buf += 4;
                *pos += 4;
                size -= 4;
        }

        return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
        .owner = THIS_MODULE,
        .open = radeon_ttm_vram_open,
        .read = radeon_ttm_vram_read,
        .llseek = default_llseek
};

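/*
 * debugfs access to GTT: walks the GART backing pages and copies them
 * out via kmap(); unbound GART pages read back as zeroes through
 * clear_user().
 */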
static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
        struct radeon_device *rdev = inode->i_private;

        i_size_write(inode, rdev->mc.gtt_size);
        filep->private_data = inode->i_private;
        return 0;
}

static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
                                   size_t size, loff_t *pos)
{
        struct radeon_device *rdev = f->private_data;
        ssize_t result = 0;
        int r;

        while (size) {
                loff_t p = *pos / PAGE_SIZE;
                unsigned off = *pos & ~PAGE_MASK;
                size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
                struct page *page;
                void *ptr;

                if (p >= rdev->gart.num_cpu_pages)
                        return result;

                page = rdev->gart.pages[p];
                if (page) {
                        ptr = kmap(page);
                        ptr += off;

                        r = copy_to_user(buf, ptr, cur_size);
                        kunmap(rdev->gart.pages[p]);
                } else
                        r = clear_user(buf, cur_size);

                if (r)
                        return -EFAULT;

                result += cur_size;
                buf += cur_size;
                *pos += cur_size;
                size -= cur_size;
        }

        return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
        .owner = THIS_MODULE,
        .open = radeon_ttm_gtt_open,
        .read = radeon_ttm_gtt_read,
        .llseek = default_llseek
};

#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned count;

        struct drm_minor *minor = rdev->ddev->primary;
        struct dentry *root = minor->debugfs_root;

        rdev->mman.vram = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO,
                                              root, rdev,
                                              &radeon_ttm_vram_fops);

        rdev->mman.gtt = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO,
                                             root, rdev, &radeon_ttm_gtt_fops);

        count = ARRAY_SIZE(radeon_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
        if (!(rdev->need_swiotlb && swiotlb_nr_tbl()))
                --count;
#endif

        return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else
        return 0;
#endif
}

static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        debugfs_remove(rdev->mman.vram);
        rdev->mman.vram = NULL;

        debugfs_remove(rdev->mman.gtt);
        rdev->mman.gtt = NULL;
#endif
}
