root/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c


DEFINITIONS

This source file includes the following definitions.
  1. amdgpu_bo_subtract_pin_size
  2. amdgpu_bo_destroy
  3. amdgpu_bo_is_amdgpu_bo
  4. amdgpu_bo_placement_from_domain
  5. amdgpu_bo_create_reserved
  6. amdgpu_bo_create_kernel
  7. amdgpu_bo_create_kernel_at
  8. amdgpu_bo_free_kernel
  9. amdgpu_bo_validate_size
  10. amdgpu_bo_support_uswc
  11. amdgpu_bo_do_create
  12. amdgpu_bo_create_shadow
  13. amdgpu_bo_create
  14. amdgpu_bo_validate
  15. amdgpu_bo_restore_shadow
  16. amdgpu_bo_kmap
  17. amdgpu_bo_kptr
  18. amdgpu_bo_kunmap
  19. amdgpu_bo_ref
  20. amdgpu_bo_unref
  21. amdgpu_bo_pin_restricted
  22. amdgpu_bo_pin
  23. amdgpu_bo_unpin
  24. amdgpu_bo_evict_vram
  25. amdgpu_bo_init
  26. amdgpu_bo_late_init
  27. amdgpu_bo_fini
  28. amdgpu_bo_fbdev_mmap
  29. amdgpu_bo_set_tiling_flags
  30. amdgpu_bo_get_tiling_flags
  31. amdgpu_bo_set_metadata
  32. amdgpu_bo_get_metadata
  33. amdgpu_bo_move_notify
  34. amdgpu_bo_release_notify
  35. amdgpu_bo_fault_reserve_notify
  36. amdgpu_bo_fence
  37. amdgpu_bo_sync_wait
  38. amdgpu_bo_gpu_offset
  39. amdgpu_bo_get_preferred_pin_domain

   1 /*
   2  * Copyright 2009 Jerome Glisse.
   3  * All Rights Reserved.
   4  *
   5  * Permission is hereby granted, free of charge, to any person obtaining a
   6  * copy of this software and associated documentation files (the
   7  * "Software"), to deal in the Software without restriction, including
   8  * without limitation the rights to use, copy, modify, merge, publish,
   9  * distribute, sub license, and/or sell copies of the Software, and to
  10  * permit persons to whom the Software is furnished to do so, subject to
  11  * the following conditions:
  12  *
  13  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  14  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  16  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  17  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  18  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  19  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  20  *
  21  * The above copyright notice and this permission notice (including the
  22  * next paragraph) shall be included in all copies or substantial portions
  23  * of the Software.
  24  *
  25  */
  26 /*
  27  * Authors:
  28  *    Jerome Glisse <glisse@freedesktop.org>
  29  *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
  30  *    Dave Airlie
  31  */
  32 #include <linux/list.h>
  33 #include <linux/slab.h>
  34 
  35 #include <drm/amdgpu_drm.h>
  36 #include <drm/drm_cache.h>
  37 #include "amdgpu.h"
  38 #include "amdgpu_trace.h"
  39 #include "amdgpu_amdkfd.h"
  40 
  41 /**
  42  * DOC: amdgpu_object
  43  *
  44  * This defines the interfaces to operate on an &amdgpu_bo buffer object, which
  45  * represents memory used by the driver (VRAM, system memory, etc.). The driver
  46  * provides DRM/GEM APIs to userspace. DRM/GEM APIs then use these interfaces
  47  * to create/destroy/set up buffer objects, which are then managed by the kernel
  48  * TTM memory manager.
  49  * The interfaces are also used internally by kernel clients, including gfx,
  50  * uvd, etc., for kernel-managed allocations used by the GPU.
  51  *
  52  */
  53 
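/*
 * Example usage (illustrative sketch, not taken from this file): a typical
 * kernel-internal client allocates a small pinned and mapped BO with
 * amdgpu_bo_create_kernel() and releases it with amdgpu_bo_free_kernel().
 * The calling context (an IP-block init function holding a valid adev) is
 * assumed:
 *
 *	struct amdgpu_bo *bo = NULL;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_ptr);
 *	if (r)
 *		return r;
 *
 *	memset(cpu_ptr, 0, PAGE_SIZE);
 *	... use gpu_addr on the GPU side ...
 *
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_ptr);
 */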
  54 /**
  55  * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
  56  *
  57  * @bo: &amdgpu_bo buffer object
  58  *
  59  * This function is called when a BO stops being pinned, and updates the
  60  * &amdgpu_device pin_size values accordingly.
  61  */
  62 static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
  63 {
  64         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
  65 
  66         if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
  67                 atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
  68                 atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
  69                              &adev->visible_pin_size);
  70         } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
  71                 atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
  72         }
  73 }
  74 
  75 static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
  76 {
  77         struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
  78         struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
  79 
  80         if (bo->pin_count > 0)
  81                 amdgpu_bo_subtract_pin_size(bo);
  82 
  83         amdgpu_bo_kunmap(bo);
  84 
  85         if (bo->tbo.base.import_attach)
  86                 drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
  87         drm_gem_object_release(&bo->tbo.base);
  88         /* in case amdgpu_device_recover_vram got a NULL bo->parent */
  89         if (!list_empty(&bo->shadow_list)) {
  90                 mutex_lock(&adev->shadow_list_lock);
  91                 list_del_init(&bo->shadow_list);
  92                 mutex_unlock(&adev->shadow_list_lock);
  93         }
  94         amdgpu_bo_unref(&bo->parent);
  95 
  96         kfree(bo->metadata);
  97         kfree(bo);
  98 }
  99 
 100 /**
 101  * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
 102  * @bo: buffer object to be checked
 103  *
 104  * Uses destroy function associated with the object to determine if this is
 105  * an &amdgpu_bo.
 106  *
 107  * Returns:
 108  * true if the object belongs to &amdgpu_bo, false if not.
 109  */
 110 bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 111 {
 112         if (bo->destroy == &amdgpu_bo_destroy)
 113                 return true;
 114         return false;
 115 }
 116 
 117 /**
 118  * amdgpu_bo_placement_from_domain - set buffer's placement
 119  * @abo: &amdgpu_bo buffer object whose placement is to be set
 120  * @domain: requested domain
 121  *
 122  * Sets buffer's placement according to requested domain and the buffer's
 123  * flags.
 124  */
 125 void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 126 {
 127         struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
 128         struct ttm_placement *placement = &abo->placement;
 129         struct ttm_place *places = abo->placements;
 130         u64 flags = abo->flags;
 131         u32 c = 0;
 132 
 133         if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
 134                 unsigned visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
 135 
 136                 places[c].fpfn = 0;
 137                 places[c].lpfn = 0;
 138                 places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 139                         TTM_PL_FLAG_VRAM;
 140 
 141                 if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
 142                         places[c].lpfn = visible_pfn;
 143                 else
 144                         places[c].flags |= TTM_PL_FLAG_TOPDOWN;
 145 
 146                 if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
 147                         places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
 148                 c++;
 149         }
 150 
 151         if (domain & AMDGPU_GEM_DOMAIN_GTT) {
 152                 places[c].fpfn = 0;
 153                 places[c].lpfn = 0;
 154                 places[c].flags = TTM_PL_FLAG_TT;
 155                 if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 156                         places[c].flags |= TTM_PL_FLAG_WC |
 157                                 TTM_PL_FLAG_UNCACHED;
 158                 else
 159                         places[c].flags |= TTM_PL_FLAG_CACHED;
 160                 c++;
 161         }
 162 
 163         if (domain & AMDGPU_GEM_DOMAIN_CPU) {
 164                 places[c].fpfn = 0;
 165                 places[c].lpfn = 0;
 166                 places[c].flags = TTM_PL_FLAG_SYSTEM;
 167                 if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 168                         places[c].flags |= TTM_PL_FLAG_WC |
 169                                 TTM_PL_FLAG_UNCACHED;
 170                 else
 171                         places[c].flags |= TTM_PL_FLAG_CACHED;
 172                 c++;
 173         }
 174 
 175         if (domain & AMDGPU_GEM_DOMAIN_GDS) {
 176                 places[c].fpfn = 0;
 177                 places[c].lpfn = 0;
 178                 places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GDS;
 179                 c++;
 180         }
 181 
 182         if (domain & AMDGPU_GEM_DOMAIN_GWS) {
 183                 places[c].fpfn = 0;
 184                 places[c].lpfn = 0;
 185                 places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_GWS;
 186                 c++;
 187         }
 188 
 189         if (domain & AMDGPU_GEM_DOMAIN_OA) {
 190                 places[c].fpfn = 0;
 191                 places[c].lpfn = 0;
 192                 places[c].flags = TTM_PL_FLAG_UNCACHED | AMDGPU_PL_FLAG_OA;
 193                 c++;
 194         }
 195 
 196         if (!c) {
 197                 places[c].fpfn = 0;
 198                 places[c].lpfn = 0;
 199                 places[c].flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
 200                 c++;
 201         }
 202 
 203         BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
 204 
 205         placement->num_placement = c;
 206         placement->placement = places;
 207 
 208         placement->num_busy_placement = c;
 209         placement->busy_placement = places;
 210 }
 211 
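/*
 * Example usage (illustrative sketch, not taken from this file): callers such
 * as amdgpu_bo_validate() below pair amdgpu_bo_placement_from_domain() with
 * ttm_bo_validate() to (re)place an already reserved BO; bo is assumed to be
 * a reserved &amdgpu_bo provided by the caller:
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *	int r;
 *
 *	amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 */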
 212 /**
 213  * amdgpu_bo_create_reserved - create reserved BO for kernel use
 214  *
 215  * @adev: amdgpu device object
 216  * @size: size for the new BO
 217  * @align: alignment for the new BO
 218  * @domain: where to place it
 219  * @bo_ptr: used to initialize BOs in structures
 220  * @gpu_addr: GPU addr of the pinned BO
 221  * @cpu_addr: optional CPU address mapping
 222  *
 223  * Allocates and pins a BO for kernel internal use, and returns it still
 224  * reserved.
 225  *
 226  * Note: a new BO is only created if *bo_ptr points to NULL.
 227  *
 228  * Returns:
 229  * 0 on success, negative error code otherwise.
 230  */
 231 int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
 232                               unsigned long size, int align,
 233                               u32 domain, struct amdgpu_bo **bo_ptr,
 234                               u64 *gpu_addr, void **cpu_addr)
 235 {
 236         struct amdgpu_bo_param bp;
 237         bool free = false;
 238         int r;
 239 
 240         if (!size) {
 241                 amdgpu_bo_unref(bo_ptr);
 242                 return 0;
 243         }
 244 
 245         memset(&bp, 0, sizeof(bp));
 246         bp.size = size;
 247         bp.byte_align = align;
 248         bp.domain = domain;
 249         bp.flags = cpu_addr ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED
 250                 : AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 251         bp.flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 252         bp.type = ttm_bo_type_kernel;
 253         bp.resv = NULL;
 254 
 255         if (!*bo_ptr) {
 256                 r = amdgpu_bo_create(adev, &bp, bo_ptr);
 257                 if (r) {
 258                         dev_err(adev->dev, "(%d) failed to allocate kernel bo\n",
 259                                 r);
 260                         return r;
 261                 }
 262                 free = true;
 263         }
 264 
 265         r = amdgpu_bo_reserve(*bo_ptr, false);
 266         if (r) {
 267                 dev_err(adev->dev, "(%d) failed to reserve kernel bo\n", r);
 268                 goto error_free;
 269         }
 270 
 271         r = amdgpu_bo_pin(*bo_ptr, domain);
 272         if (r) {
 273                 dev_err(adev->dev, "(%d) kernel bo pin failed\n", r);
 274                 goto error_unreserve;
 275         }
 276 
 277         r = amdgpu_ttm_alloc_gart(&(*bo_ptr)->tbo);
 278         if (r) {
 279                 dev_err(adev->dev, "%p bind failed\n", *bo_ptr);
 280                 goto error_unpin;
 281         }
 282 
 283         if (gpu_addr)
 284                 *gpu_addr = amdgpu_bo_gpu_offset(*bo_ptr);
 285 
 286         if (cpu_addr) {
 287                 r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
 288                 if (r) {
 289                         dev_err(adev->dev, "(%d) kernel bo map failed\n", r);
 290                         goto error_unpin;
 291                 }
 292         }
 293 
 294         return 0;
 295 
 296 error_unpin:
 297         amdgpu_bo_unpin(*bo_ptr);
 298 error_unreserve:
 299         amdgpu_bo_unreserve(*bo_ptr);
 300 
 301 error_free:
 302         if (free)
 303                 amdgpu_bo_unref(bo_ptr);
 304 
 305         return r;
 306 }
 307 
 308 /**
 309  * amdgpu_bo_create_kernel - create BO for kernel use
 310  *
 311  * @adev: amdgpu device object
 312  * @size: size for the new BO
 313  * @align: alignment for the new BO
 314  * @domain: where to place it
 315  * @bo_ptr:  used to initialize BOs in structures
 316  * @gpu_addr: GPU addr of the pinned BO
 317  * @cpu_addr: optional CPU address mapping
 318  *
 319  * Allocates and pins a BO for kernel internal use.
 320  *
 321  * Note: a new BO is only created if *bo_ptr points to NULL.
 322  *
 323  * Returns:
 324  * 0 on success, negative error code otherwise.
 325  */
 326 int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 327                             unsigned long size, int align,
 328                             u32 domain, struct amdgpu_bo **bo_ptr,
 329                             u64 *gpu_addr, void **cpu_addr)
 330 {
 331         int r;
 332 
 333         r = amdgpu_bo_create_reserved(adev, size, align, domain, bo_ptr,
 334                                       gpu_addr, cpu_addr);
 335 
 336         if (r)
 337                 return r;
 338 
 339         if (*bo_ptr)
 340                 amdgpu_bo_unreserve(*bo_ptr);
 341 
 342         return 0;
 343 }
 344 
 345 /**
 346  * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
 347  *
 348  * @adev: amdgpu device object
 349  * @offset: offset of the BO
 350  * @size: size of the BO
 351  * @domain: where to place it
 352  * @bo_ptr:  used to initialize BOs in structures
 353  * @cpu_addr: optional CPU address mapping
 354  *
 355  * Creates a kernel BO at a specific offset in the address space of the domain.
 356  *
 357  * Returns:
 358  * 0 on success, negative error code otherwise.
 359  */
 360 int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
 361                                uint64_t offset, uint64_t size, uint32_t domain,
 362                                struct amdgpu_bo **bo_ptr, void **cpu_addr)
 363 {
 364         struct ttm_operation_ctx ctx = { false, false };
 365         unsigned int i;
 366         int r;
 367 
 368         offset &= PAGE_MASK;
 369         size = ALIGN(size, PAGE_SIZE);
 370 
 371         r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
 372                                       NULL, NULL);
 373         if (r)
 374                 return r;
 375 
 376         /*
 377          * Remove the original mem node and create a new one at the requested
 378          * position.
 379          */
 380         for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
 381                 (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
 382                 (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
 383         }
 384 
 385         ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
 386         r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
 387                              &(*bo_ptr)->tbo.mem, &ctx);
 388         if (r)
 389                 goto error;
 390 
 391         if (cpu_addr) {
 392                 r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
 393                 if (r)
 394                         goto error;
 395         }
 396 
 397         amdgpu_bo_unreserve(*bo_ptr);
 398         return 0;
 399 
 400 error:
 401         amdgpu_bo_unreserve(*bo_ptr);
 402         amdgpu_bo_unref(bo_ptr);
 403         return r;
 404 }
 405 
 406 /**
 407  * amdgpu_bo_free_kernel - free BO for kernel use
 408  *
 409  * @bo: amdgpu BO to free
 410  * @gpu_addr: pointer to where the BO's GPU memory space address was stored
 411  * @cpu_addr: pointer to where the BO's CPU memory space address was stored
 412  *
 413  * Unmaps and unpins a BO for kernel internal use.
 414  */
 415 void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
 416                            void **cpu_addr)
 417 {
 418         if (*bo == NULL)
 419                 return;
 420 
 421         if (likely(amdgpu_bo_reserve(*bo, true) == 0)) {
 422                 if (cpu_addr)
 423                         amdgpu_bo_kunmap(*bo);
 424 
 425                 amdgpu_bo_unpin(*bo);
 426                 amdgpu_bo_unreserve(*bo);
 427         }
 428         amdgpu_bo_unref(bo);
 429 
 430         if (gpu_addr)
 431                 *gpu_addr = 0;
 432 
 433         if (cpu_addr)
 434                 *cpu_addr = NULL;
 435 }
 436 
 437 /* Validate that the BO size fits within the total memory of the requested domain */
 438 static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
 439                                           unsigned long size, u32 domain)
 440 {
 441         struct ttm_mem_type_manager *man = NULL;
 442 
 443         /*
 444          * If GTT is part of the requested domains, the check must succeed
 445          * to allow falling back to GTT.
 446          */
 447         if (domain & AMDGPU_GEM_DOMAIN_GTT) {
 448                 man = &adev->mman.bdev.man[TTM_PL_TT];
 449 
 450                 if (size < (man->size << PAGE_SHIFT))
 451                         return true;
 452                 else
 453                         goto fail;
 454         }
 455 
 456         if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
 457                 man = &adev->mman.bdev.man[TTM_PL_VRAM];
 458 
 459                 if (size < (man->size << PAGE_SHIFT))
 460                         return true;
 461                 else
 462                         goto fail;
 463         }
 464 
 465 
 466         /* TODO add more domains checks, such as AMDGPU_GEM_DOMAIN_CPU */
 467         return true;
 468 
 469 fail:
 470         DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
 471                   man->size << PAGE_SHIFT);
 472         return false;
 473 }
 474 
 475 bool amdgpu_bo_support_uswc(u64 bo_flags)
 476 {
 477 
 478 #ifdef CONFIG_X86_32
 479         /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
 480          * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
 481          */
 482         return false;
 483 #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
 484         /* Don't try to enable write-combining when it can't work, or things
 485          * may be slow
 486          * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
 487          */
 488 
 489 #ifndef CONFIG_COMPILE_TEST
 490 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
 491          thanks to write-combining
 492 #endif
 493 
 494         if (bo_flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
 495                 DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
 496                               "better performance thanks to write-combining\n");
 497         return false;
 498 #else
 499         /* For architectures that don't support WC memory,
 500          * mask out the WC flag from the BO
 501          */
 502         if (!drm_arch_can_wc_memory())
 503                 return false;
 504 
 505         return true;
 506 #endif
 507 }
 508 
 509 static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 510                                struct amdgpu_bo_param *bp,
 511                                struct amdgpu_bo **bo_ptr)
 512 {
 513         struct ttm_operation_ctx ctx = {
 514                 .interruptible = (bp->type != ttm_bo_type_kernel),
 515                 .no_wait_gpu = false,
 516                 .resv = bp->resv,
 517                 .flags = bp->type != ttm_bo_type_kernel ?
 518                         TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
 519         };
 520         struct amdgpu_bo *bo;
 521         unsigned long page_align, size = bp->size;
 522         size_t acc_size;
 523         int r;
 524 
 525         /* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
 526         if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
 527                 /* GWS and OA don't need any alignment. */
 528                 page_align = bp->byte_align;
 529                 size <<= PAGE_SHIFT;
 530         } else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) {
 531                 /* Both size and alignment must be a multiple of 4. */
 532                 page_align = ALIGN(bp->byte_align, 4);
 533                 size = ALIGN(size, 4) << PAGE_SHIFT;
 534         } else {
 535                 /* Memory should be aligned at least to a page size. */
 536                 page_align = ALIGN(bp->byte_align, PAGE_SIZE) >> PAGE_SHIFT;
 537                 size = ALIGN(size, PAGE_SIZE);
 538         }
 539 
 540         if (!amdgpu_bo_validate_size(adev, size, bp->domain))
 541                 return -ENOMEM;
 542 
 543         *bo_ptr = NULL;
 544 
 545         acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
 546                                        sizeof(struct amdgpu_bo));
 547 
 548         bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
 549         if (bo == NULL)
 550                 return -ENOMEM;
 551         drm_gem_private_object_init(adev->ddev, &bo->tbo.base, size);
 552         INIT_LIST_HEAD(&bo->shadow_list);
 553         bo->vm_bo = NULL;
 554         bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
 555                 bp->domain;
 556         bo->allowed_domains = bo->preferred_domains;
 557         if (bp->type != ttm_bo_type_kernel &&
 558             bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
 559                 bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 560 
 561         bo->flags = bp->flags;
 562 
 563         if (!amdgpu_bo_support_uswc(bo->flags))
 564                 bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 565 
 566         bo->tbo.bdev = &adev->mman.bdev;
 567         if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
 568                           AMDGPU_GEM_DOMAIN_GDS))
 569                 amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
 570         else
 571                 amdgpu_bo_placement_from_domain(bo, bp->domain);
 572         if (bp->type == ttm_bo_type_kernel)
 573                 bo->tbo.priority = 1;
 574 
 575         r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
 576                                  &bo->placement, page_align, &ctx, acc_size,
 577                                  NULL, bp->resv, &amdgpu_bo_destroy);
 578         if (unlikely(r != 0))
 579                 return r;
 580 
 581         if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
 582             bo->tbo.mem.mem_type == TTM_PL_VRAM &&
 583             bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
 584                 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
 585                                              ctx.bytes_moved);
 586         else
 587                 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 588 
 589         if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
 590             bo->tbo.mem.placement & TTM_PL_FLAG_VRAM) {
 591                 struct dma_fence *fence;
 592 
 593                 r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
 594                 if (unlikely(r))
 595                         goto fail_unreserve;
 596 
 597                 amdgpu_bo_fence(bo, fence, false);
 598                 dma_fence_put(bo->tbo.moving);
 599                 bo->tbo.moving = dma_fence_get(fence);
 600                 dma_fence_put(fence);
 601         }
 602         if (!bp->resv)
 603                 amdgpu_bo_unreserve(bo);
 604         *bo_ptr = bo;
 605 
 606         trace_amdgpu_bo_create(bo);
 607 
 608         /* Treat CPU_ACCESS_REQUIRED only as a hint if given by UMD */
 609         if (bp->type == ttm_bo_type_device)
 610                 bo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 611 
 612         return 0;
 613 
 614 fail_unreserve:
 615         if (!bp->resv)
 616                 dma_resv_unlock(bo->tbo.base.resv);
 617         amdgpu_bo_unref(&bo);
 618         return r;
 619 }
 620 
 621 static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 622                                    unsigned long size,
 623                                    struct amdgpu_bo *bo)
 624 {
 625         struct amdgpu_bo_param bp;
 626         int r;
 627 
 628         if (bo->shadow)
 629                 return 0;
 630 
 631         memset(&bp, 0, sizeof(bp));
 632         bp.size = size;
 633         bp.domain = AMDGPU_GEM_DOMAIN_GTT;
 634         bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
 635                 AMDGPU_GEM_CREATE_SHADOW;
 636         bp.type = ttm_bo_type_kernel;
 637         bp.resv = bo->tbo.base.resv;
 638 
 639         r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
 640         if (!r) {
 641                 bo->shadow->parent = amdgpu_bo_ref(bo);
 642                 mutex_lock(&adev->shadow_list_lock);
 643                 list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
 644                 mutex_unlock(&adev->shadow_list_lock);
 645         }
 646 
 647         return r;
 648 }
 649 
 650 /**
 651  * amdgpu_bo_create - create an &amdgpu_bo buffer object
 652  * @adev: amdgpu device object
 653  * @bp: parameters to be used for the buffer object
 654  * @bo_ptr: pointer to the buffer object pointer
 655  *
 656  * Creates an &amdgpu_bo buffer object and, if requested, also creates a
 657  * shadow object.
 658  * The shadow object is used to back up the original buffer object, and is
 659  * always placed in GTT.
 660  *
 661  * Returns:
 662  * 0 for success or a negative error code on failure.
 663  */
 664 int amdgpu_bo_create(struct amdgpu_device *adev,
 665                      struct amdgpu_bo_param *bp,
 666                      struct amdgpu_bo **bo_ptr)
 667 {
 668         u64 flags = bp->flags;
 669         int r;
 670 
 671         bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
 672         r = amdgpu_bo_do_create(adev, bp, bo_ptr);
 673         if (r)
 674                 return r;
 675 
 676         if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
 677                 if (!bp->resv)
 678                         WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
 679                                                         NULL));
 680 
 681                 r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
 682 
 683                 if (!bp->resv)
 684                         dma_resv_unlock((*bo_ptr)->tbo.base.resv);
 685 
 686                 if (r)
 687                         amdgpu_bo_unref(bo_ptr);
 688         }
 689 
 690         return r;
 691 }
 692 
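/*
 * Example usage (illustrative sketch, not taken from this file): callers fill
 * in a struct amdgpu_bo_param and hand it to amdgpu_bo_create(); the size,
 * domain and flags below are made up for illustration:
 *
 *	struct amdgpu_bo_param bp;
 *	struct amdgpu_bo *bo;
 *	int r;
 *
 *	memset(&bp, 0, sizeof(bp));
 *	bp.size = PAGE_SIZE;
 *	bp.byte_align = PAGE_SIZE;
 *	bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
 *	bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 *	bp.type = ttm_bo_type_device;
 *	bp.resv = NULL;
 *
 *	r = amdgpu_bo_create(adev, &bp, &bo);
 *	if (r)
 *		return r;
 */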
 693 /**
 694  * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
 695  * @bo: pointer to the buffer object
 696  *
 697  * Sets the placement according to the domain and changes the placement and
 698  * caching policy of the buffer object accordingly.
 699  * This is used for validating shadow BOs.  It calls ttm_bo_validate() to
 700  * make sure the buffer is resident where it needs to be.
 701  *
 702  * Returns:
 703  * 0 for success or a negative error code on failure.
 704  */
 705 int amdgpu_bo_validate(struct amdgpu_bo *bo)
 706 {
 707         struct ttm_operation_ctx ctx = { false, false };
 708         uint32_t domain;
 709         int r;
 710 
 711         if (bo->pin_count)
 712                 return 0;
 713 
 714         domain = bo->preferred_domains;
 715 
 716 retry:
 717         amdgpu_bo_placement_from_domain(bo, domain);
 718         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 719         if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 720                 domain = bo->allowed_domains;
 721                 goto retry;
 722         }
 723 
 724         return r;
 725 }
 726 
 727 /**
 728  * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
 729  *
 730  * @shadow: &amdgpu_bo shadow to be restored
 731  * @fence: dma_fence associated with the operation
 732  *
 733  * Copies a buffer object's shadow content back to the object.
 734  * This is used for recovering a buffer from its shadow in case of a gpu
 735  * reset where vram context may be lost.
 736  *
 737  * Returns:
 738  * 0 for success or a negative error code on failure.
 739  */
 740 int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
 741 
 742 {
 743         struct amdgpu_device *adev = amdgpu_ttm_adev(shadow->tbo.bdev);
 744         struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 745         uint64_t shadow_addr, parent_addr;
 746 
 747         shadow_addr = amdgpu_bo_gpu_offset(shadow);
 748         parent_addr = amdgpu_bo_gpu_offset(shadow->parent);
 749 
 750         return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
 751                                   amdgpu_bo_size(shadow), NULL, fence,
 752                                   true, false);
 753 }
 754 
 755 /**
 756  * amdgpu_bo_kmap - map an &amdgpu_bo buffer object
 757  * @bo: &amdgpu_bo buffer object to be mapped
 758  * @ptr: kernel virtual address to be returned
 759  *
 760  * Calls ttm_bo_kmap() to set up the kernel virtual mapping; calls
 761  * amdgpu_bo_kptr() to get the kernel virtual address.
 762  *
 763  * Returns:
 764  * 0 for success or a negative error code on failure.
 765  */
 766 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 767 {
 768         void *kptr;
 769         long r;
 770 
 771         if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
 772                 return -EPERM;
 773 
 774         kptr = amdgpu_bo_kptr(bo);
 775         if (kptr) {
 776                 if (ptr)
 777                         *ptr = kptr;
 778                 return 0;
 779         }
 780 
 781         r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
 782                                                 MAX_SCHEDULE_TIMEOUT);
 783         if (r < 0)
 784                 return r;
 785 
 786         r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
 787         if (r)
 788                 return r;
 789 
 790         if (ptr)
 791                 *ptr = amdgpu_bo_kptr(bo);
 792 
 793         return 0;
 794 }
 795 
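/*
 * Example usage (illustrative sketch, not taken from this file):
 * amdgpu_bo_kmap()/amdgpu_bo_kunmap() are typically called with the BO
 * reserved, as in amdgpu_bo_create_reserved() above; bo, data and size are
 * assumed to be provided by the caller:
 *
 *	void *ptr;
 *	int r;
 *
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	if (r)
 *		return r;
 *
 *	memcpy(ptr, data, size);
 *	amdgpu_bo_kunmap(bo);
 */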
 796 /**
 797  * amdgpu_bo_kptr - returns a kernel virtual address of the buffer object
 798  * @bo: &amdgpu_bo buffer object
 799  *
 800  * Calls ttm_kmap_obj_virtual() to get the kernel virtual address
 801  *
 802  * Returns:
 803  * the virtual address of a buffer object area.
 804  */
 805 void *amdgpu_bo_kptr(struct amdgpu_bo *bo)
 806 {
 807         bool is_iomem;
 808 
 809         return ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
 810 }
 811 
 812 /**
 813  * amdgpu_bo_kunmap - unmap an &amdgpu_bo buffer object
 814  * @bo: &amdgpu_bo buffer object to be unmapped
 815  *
 816  * Unmaps a kernel map set up by amdgpu_bo_kmap().
 817  */
 818 void amdgpu_bo_kunmap(struct amdgpu_bo *bo)
 819 {
 820         if (bo->kmap.bo)
 821                 ttm_bo_kunmap(&bo->kmap);
 822 }
 823 
 824 /**
 825  * amdgpu_bo_ref - reference an &amdgpu_bo buffer object
 826  * @bo: &amdgpu_bo buffer object
 827  *
 828  * References the contained &ttm_buffer_object.
 829  *
 830  * Returns:
 831  * a refcounted pointer to the &amdgpu_bo buffer object.
 832  */
 833 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
 834 {
 835         if (bo == NULL)
 836                 return NULL;
 837 
 838         ttm_bo_get(&bo->tbo);
 839         return bo;
 840 }
 841 
 842 /**
 843  * amdgpu_bo_unref - unreference an &amdgpu_bo buffer object
 844  * @bo: &amdgpu_bo buffer object
 845  *
 846  * Unreferences the contained &ttm_buffer_object and clears the pointer.
 847  */
 848 void amdgpu_bo_unref(struct amdgpu_bo **bo)
 849 {
 850         struct ttm_buffer_object *tbo;
 851 
 852         if ((*bo) == NULL)
 853                 return;
 854 
 855         tbo = &((*bo)->tbo);
 856         ttm_bo_put(tbo);
 857         *bo = NULL;
 858 }
 859 
 860 /**
 861  * amdgpu_bo_pin_restricted - pin an &amdgpu_bo buffer object
 862  * @bo: &amdgpu_bo buffer object to be pinned
 863  * @domain: domain to be pinned to
 864  * @min_offset: the start of requested address range
 865  * @max_offset: the end of requested address range
 866  *
 867  * Pins the buffer object according to requested domain and address range. If
 868  * the memory is unbound gart memory, binds the pages into gart table. Adjusts
 869  * pin_count and pin_size accordingly.
 870  *
 871  * Pinning means to lock pages in memory along with keeping them at a fixed
 872  * offset. It is required when a buffer can not be moved, for example, when
 873  * a display buffer is being scanned out.
 874  *
 875  * Compared with amdgpu_bo_pin(), this function gives more flexibility on
 876  * where to pin a buffer if there are specific restrictions on where a buffer
 877  * must be located.
 878  *
 879  * Returns:
 880  * 0 for success or a negative error code on failure.
 881  */
 882 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 883                              u64 min_offset, u64 max_offset)
 884 {
 885         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 886         struct ttm_operation_ctx ctx = { false, false };
 887         int r, i;
 888 
 889         if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
 890                 return -EPERM;
 891 
 892         if (WARN_ON_ONCE(min_offset > max_offset))
 893                 return -EINVAL;
 894 
 895         /* A shared bo cannot be migrated to VRAM */
 896         if (bo->prime_shared_count) {
 897                 if (domain & AMDGPU_GEM_DOMAIN_GTT)
 898                         domain = AMDGPU_GEM_DOMAIN_GTT;
 899                 else
 900                         return -EINVAL;
 901         }
 902 
 903         /* This assumes only APU display buffers are pinned with (VRAM|GTT).
 904          * See function amdgpu_display_supported_domains()
 905          */
 906         domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
 907 
 908         if (bo->pin_count) {
 909                 uint32_t mem_type = bo->tbo.mem.mem_type;
 910 
 911                 if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
 912                         return -EINVAL;
 913 
 914                 bo->pin_count++;
 915 
 916                 if (max_offset != 0) {
 917                         u64 domain_start = bo->tbo.bdev->man[mem_type].gpu_offset;
 918                         WARN_ON_ONCE(max_offset <
 919                                      (amdgpu_bo_gpu_offset(bo) - domain_start));
 920                 }
 921 
 922                 return 0;
 923         }
 924 
 925         bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 926         /* force to pin into visible video ram */
 927         if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
 928                 bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 929         amdgpu_bo_placement_from_domain(bo, domain);
 930         for (i = 0; i < bo->placement.num_placement; i++) {
 931                 unsigned fpfn, lpfn;
 932 
 933                 fpfn = min_offset >> PAGE_SHIFT;
 934                 lpfn = max_offset >> PAGE_SHIFT;
 935 
 936                 if (fpfn > bo->placements[i].fpfn)
 937                         bo->placements[i].fpfn = fpfn;
 938                 if (!bo->placements[i].lpfn ||
 939                     (lpfn && lpfn < bo->placements[i].lpfn))
 940                         bo->placements[i].lpfn = lpfn;
 941                 bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 942         }
 943 
 944         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 945         if (unlikely(r)) {
 946                 dev_err(adev->dev, "%p pin failed\n", bo);
 947                 goto error;
 948         }
 949 
 950         bo->pin_count = 1;
 951 
 952         domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 953         if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 954                 atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
 955                 atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
 956                              &adev->visible_pin_size);
 957         } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 958                 atomic64_add(amdgpu_bo_size(bo), &adev->gart_pin_size);
 959         }
 960 
 961 error:
 962         return r;
 963 }
 964 
 965 /**
 966  * amdgpu_bo_pin - pin an &amdgpu_bo buffer object
 967  * @bo: &amdgpu_bo buffer object to be pinned
 968  * @domain: domain to be pinned to
 969  *
 970  * A simple wrapper to amdgpu_bo_pin_restricted().
 971  * Provides a simpler API for buffers that do not have any strict restrictions
 972  * on where a buffer must be located.
 973  *
 974  * Returns:
 975  * 0 for success or a negative error code on failure.
 976  */
 977 int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
 978 {
 979         return amdgpu_bo_pin_restricted(bo, domain, 0, 0);
 980 }
 981 
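/*
 * Example usage (illustrative sketch, not taken from this file): pinning a
 * BO, e.g. for scanout, is done with the reservation held, and the GPU
 * offset is queried while still reserved; bo is assumed to be a valid
 * &amdgpu_bo:
 *
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *
 *	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
 *	if (r) {
 *		amdgpu_bo_unreserve(bo);
 *		return r;
 *	}
 *	gpu_addr = amdgpu_bo_gpu_offset(bo);
 *	amdgpu_bo_unreserve(bo);
 *
 * and later, when the buffer no longer needs to stay resident:
 *
 *	amdgpu_bo_reserve(bo, true);
 *	amdgpu_bo_unpin(bo);
 *	amdgpu_bo_unreserve(bo);
 */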
 982 /**
 983  * amdgpu_bo_unpin - unpin an &amdgpu_bo buffer object
 984  * @bo: &amdgpu_bo buffer object to be unpinned
 985  *
 986  * Decreases the pin_count, and clears the flags if pin_count reaches 0.
 987  * Changes placement and pin size accordingly.
 988  *
 989  * Returns:
 990  * 0 for success or a negative error code on failure.
 991  */
 992 int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 993 {
 994         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 995         struct ttm_operation_ctx ctx = { false, false };
 996         int r, i;
 997 
 998         if (WARN_ON_ONCE(!bo->pin_count)) {
 999                 dev_warn(adev->dev, "%p unpin not necessary\n", bo);
1000                 return 0;
1001         }
1002         bo->pin_count--;
1003         if (bo->pin_count)
1004                 return 0;
1005 
1006         amdgpu_bo_subtract_pin_size(bo);
1007 
1008         for (i = 0; i < bo->placement.num_placement; i++) {
1009                 bo->placements[i].lpfn = 0;
1010                 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
1011         }
1012         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
1013         if (unlikely(r))
1014                 dev_err(adev->dev, "%p validate failed for unpin\n", bo);
1015 
1016         return r;
1017 }
1018 
1019 /**
1020  * amdgpu_bo_evict_vram - evict VRAM buffers
1021  * @adev: amdgpu device object
1022  *
1023  * Evicts all VRAM buffers on the lru list of the memory type.
1024  * Mainly used for evicting vram at suspend time.
1025  *
1026  * Returns:
1027  * 0 for success or a negative error code on failure.
1028  */
1029 int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
1030 {
1031         /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
1032 #ifndef CONFIG_HIBERNATION
1033         if (adev->flags & AMD_IS_APU) {
1034                 /* Useless to evict on IGP chips */
1035                 return 0;
1036         }
1037 #endif
1038         return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
1039 }
1040 
1041 static const char *amdgpu_vram_names[] = {
1042         "UNKNOWN",
1043         "GDDR1",
1044         "DDR2",
1045         "GDDR3",
1046         "GDDR4",
1047         "GDDR5",
1048         "HBM",
1049         "DDR3",
1050         "DDR4",
1051         "GDDR6",
1052 };
1053 
1054 /**
1055  * amdgpu_bo_init - initialize memory manager
1056  * @adev: amdgpu device object
1057  *
1058  * Calls amdgpu_ttm_init() to initialize amdgpu memory manager.
1059  *
1060  * Returns:
1061  * 0 for success or a negative error code on failure.
1062  */
1063 int amdgpu_bo_init(struct amdgpu_device *adev)
1064 {
1065         /* reserve PAT memory space to WC for VRAM */
1066         arch_io_reserve_memtype_wc(adev->gmc.aper_base,
1067                                    adev->gmc.aper_size);
1068 
1069         /* Add an MTRR for the VRAM */
1070         adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
1071                                               adev->gmc.aper_size);
1072         DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
1073                  adev->gmc.mc_vram_size >> 20,
1074                  (unsigned long long)adev->gmc.aper_size >> 20);
1075         DRM_INFO("RAM width %dbits %s\n",
1076                  adev->gmc.vram_width, amdgpu_vram_names[adev->gmc.vram_type]);
1077         return amdgpu_ttm_init(adev);
1078 }
1079 
1080 /**
1081  * amdgpu_bo_late_init - late init
1082  * @adev: amdgpu device object
1083  *
1084  * Calls amdgpu_ttm_late_init() to free resources used earlier during
1085  * initialization.
1086  *
1087  * Returns:
1088  * 0 for success or a negative error code on failure.
1089  */
1090 int amdgpu_bo_late_init(struct amdgpu_device *adev)
1091 {
1092         amdgpu_ttm_late_init(adev);
1093 
1094         return 0;
1095 }
1096 
1097 /**
1098  * amdgpu_bo_fini - tear down memory manager
1099  * @adev: amdgpu device object
1100  *
1101  * Reverses amdgpu_bo_init() to tear down memory manager.
1102  */
1103 void amdgpu_bo_fini(struct amdgpu_device *adev)
1104 {
1105         amdgpu_ttm_fini(adev);
1106         arch_phys_wc_del(adev->gmc.vram_mtrr);
1107         arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
1108 }
1109 
1110 /**
1111  * amdgpu_bo_fbdev_mmap - mmap fbdev memory
1112  * @bo: &amdgpu_bo buffer object
1113  * @vma: vma as input from the fbdev mmap method
1114  *
1115  * Calls ttm_fbdev_mmap() to mmap fbdev memory if it is backed by a bo.
1116  *
1117  * Returns:
1118  * 0 for success or a negative error code on failure.
1119  */
1120 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
1121                              struct vm_area_struct *vma)
1122 {
1123         return ttm_fbdev_mmap(vma, &bo->tbo);
1124 }
1125 
1126 /**
1127  * amdgpu_bo_set_tiling_flags - set tiling flags
1128  * @bo: &amdgpu_bo buffer object
1129  * @tiling_flags: new flags
1130  *
1131  * Sets buffer object's tiling flags with the new one. Used by GEM ioctl or
1132  * kernel driver to set the tiling flags on a buffer.
1133  *
1134  * Returns:
1135  * 0 for success or a negative error code on failure.
1136  */
1137 int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
1138 {
1139         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1140 
1141         if (adev->family <= AMDGPU_FAMILY_CZ &&
1142             AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT) > 6)
1143                 return -EINVAL;
1144 
1145         bo->tiling_flags = tiling_flags;
1146         return 0;
1147 }
1148 
1149 /**
1150  * amdgpu_bo_get_tiling_flags - get tiling flags
1151  * @bo: &amdgpu_bo buffer object
1152  * @tiling_flags: returned flags
1153  *
1154  * Gets buffer object's tiling flags. Used by GEM ioctl or kernel driver to
1155  * get the tiling flags of a buffer.
1156  */
1157 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
1158 {
1159         dma_resv_assert_held(bo->tbo.base.resv);
1160 
1161         if (tiling_flags)
1162                 *tiling_flags = bo->tiling_flags;
1163 }
1164 
1165 /**
1166  * amdgpu_bo_set_metadata - set metadata
1167  * @bo: &amdgpu_bo buffer object
1168  * @metadata: new metadata
1169  * @metadata_size: size of the new metadata
1170  * @flags: flags of the new metadata
1171  *
1172  * Sets buffer object's metadata, its size and flags.
1173  * Used via GEM ioctl.
1174  *
1175  * Returns:
1176  * 0 for success or a negative error code on failure.
1177  */
1178 int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
1179                             uint32_t metadata_size, uint64_t flags)
1180 {
1181         void *buffer;
1182 
1183         if (!metadata_size) {
1184                 if (bo->metadata_size) {
1185                         kfree(bo->metadata);
1186                         bo->metadata = NULL;
1187                         bo->metadata_size = 0;
1188                 }
1189                 return 0;
1190         }
1191 
1192         if (metadata == NULL)
1193                 return -EINVAL;
1194 
1195         buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
1196         if (buffer == NULL)
1197                 return -ENOMEM;
1198 
1199         kfree(bo->metadata);
1200         bo->metadata_flags = flags;
1201         bo->metadata = buffer;
1202         bo->metadata_size = metadata_size;
1203 
1204         return 0;
1205 }
1206 
1207 /**
1208  * amdgpu_bo_get_metadata - get metadata
1209  * @bo: &amdgpu_bo buffer object
1210  * @buffer: returned metadata
1211  * @buffer_size: size of the buffer
1212  * @metadata_size: size of the returned metadata
1213  * @flags: flags of the returned metadata
1214  *
1215  * Gets buffer object's metadata, its size and flags. buffer_size shall not be
1216  * less than metadata_size.
1217  * Used via GEM ioctl.
1218  *
1219  * Returns:
1220  * 0 for success or a negative error code on failure.
1221  */
1222 int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
1223                            size_t buffer_size, uint32_t *metadata_size,
1224                            uint64_t *flags)
1225 {
1226         if (!buffer && !metadata_size)
1227                 return -EINVAL;
1228 
1229         if (buffer) {
1230                 if (buffer_size < bo->metadata_size)
1231                         return -EINVAL;
1232 
1233                 if (bo->metadata_size)
1234                         memcpy(buffer, bo->metadata, bo->metadata_size);
1235         }
1236 
1237         if (metadata_size)
1238                 *metadata_size = bo->metadata_size;
1239         if (flags)
1240                 *flags = bo->metadata_flags;
1241 
1242         return 0;
1243 }
1244 
1245 /**
1246  * amdgpu_bo_move_notify - notification about a memory move
1247  * @bo: pointer to a buffer object
1248  * @evict: if this move is evicting the buffer from the graphics address space
1249  * @new_mem: new information of the buffer object
1250  *
1251  * Marks the corresponding &amdgpu_bo buffer object as invalid; it also
1252  * performs bookkeeping.
1253  * TTM driver callback which is called when ttm moves a buffer.
1254  */
1255 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
1256                            bool evict,
1257                            struct ttm_mem_reg *new_mem)
1258 {
1259         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1260         struct amdgpu_bo *abo;
1261         struct ttm_mem_reg *old_mem = &bo->mem;
1262 
1263         if (!amdgpu_bo_is_amdgpu_bo(bo))
1264                 return;
1265 
1266         abo = ttm_to_amdgpu_bo(bo);
1267         amdgpu_vm_bo_invalidate(adev, abo, evict);
1268 
1269         amdgpu_bo_kunmap(abo);
1270 
1271         /* remember the eviction */
1272         if (evict)
1273                 atomic64_inc(&adev->num_evictions);
1274 
1275         /* update statistics */
1276         if (!new_mem)
1277                 return;
1278 
1279         /* move_notify is called before move happens */
1280         trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
1281 }
1282 
1283 /**
1284  * amdgpu_bo_release_notify - notification about a BO being released
1285  * @bo: pointer to a buffer object
1286  *
1287  * Wipes VRAM buffers whose contents should not be leaked before the
1288  * memory is released.
1289  */
1290 void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
1291 {
1292         struct dma_fence *fence = NULL;
1293         struct amdgpu_bo *abo;
1294         int r;
1295 
1296         if (!amdgpu_bo_is_amdgpu_bo(bo))
1297                 return;
1298 
1299         abo = ttm_to_amdgpu_bo(bo);
1300 
1301         if (abo->kfd_bo)
1302                 amdgpu_amdkfd_unreserve_memory_limit(abo);
1303 
1304         if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
1305             !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
1306                 return;
1307 
1308         dma_resv_lock(bo->base.resv, NULL);
1309 
1310         r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
1311         if (!WARN_ON(r)) {
1312                 amdgpu_bo_fence(abo, fence, false);
1313                 dma_fence_put(fence);
1314         }
1315 
1316         dma_resv_unlock(bo->base.resv);
1317 }
1318 
1319 /**
1320  * amdgpu_bo_fault_reserve_notify - notification about a memory fault
1321  * @bo: pointer to a buffer object
1322  *
1323  * Notifies the driver we are taking a fault on this BO and have reserved it;
1324  * it also performs bookkeeping.
1325  * TTM driver callback for dealing with vm faults.
1326  *
1327  * Returns:
1328  * 0 for success or a negative error code on failure.
1329  */
1330 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
1331 {
1332         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
1333         struct ttm_operation_ctx ctx = { false, false };
1334         struct amdgpu_bo *abo;
1335         unsigned long offset, size;
1336         int r;
1337 
1338         if (!amdgpu_bo_is_amdgpu_bo(bo))
1339                 return 0;
1340 
1341         abo = ttm_to_amdgpu_bo(bo);
1342 
1343         /* Remember that this BO was accessed by the CPU */
1344         abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
1345 
1346         if (bo->mem.mem_type != TTM_PL_VRAM)
1347                 return 0;
1348 
1349         size = bo->mem.num_pages << PAGE_SHIFT;
1350         offset = bo->mem.start << PAGE_SHIFT;
1351         if ((offset + size) <= adev->gmc.visible_vram_size)
1352                 return 0;
1353 
1354         /* Can't move a pinned BO to visible VRAM */
1355         if (abo->pin_count > 0)
1356                 return -EINVAL;
1357 
1358         /* hurrah the memory is not visible ! */
1359         atomic64_inc(&adev->num_vram_cpu_page_faults);
1360         amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
1361                                         AMDGPU_GEM_DOMAIN_GTT);
1362 
1363         /* Avoid costly evictions; only set GTT as a busy placement */
1364         abo->placement.num_busy_placement = 1;
1365         abo->placement.busy_placement = &abo->placements[1];
1366 
1367         r = ttm_bo_validate(bo, &abo->placement, &ctx);
1368         if (unlikely(r != 0))
1369                 return r;
1370 
1371         offset = bo->mem.start << PAGE_SHIFT;
1372         /* this should never happen */
1373         if (bo->mem.mem_type == TTM_PL_VRAM &&
1374             (offset + size) > adev->gmc.visible_vram_size)
1375                 return -EINVAL;
1376 
1377         return 0;
1378 }
1379 
1380 /**
1381  * amdgpu_bo_fence - add fence to buffer object
1382  *
1383  * @bo: buffer object in question
1384  * @fence: fence to add
1385  * @shared: true if fence should be added shared
1386  *
1387  */
1388 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
1389                      bool shared)
1390 {
1391         struct dma_resv *resv = bo->tbo.base.resv;
1392 
1393         if (shared)
1394                 dma_resv_add_shared_fence(resv, fence);
1395         else
1396                 dma_resv_add_excl_fence(resv, fence);
1397 }
1398 
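/*
 * Example usage (illustrative sketch, not taken from this file): the
 * clear-on-create path in amdgpu_bo_do_create() above shows the usual pattern
 * of attaching the fence of a GPU operation to a BO and then dropping the
 * local fence reference:
 *
 *	struct dma_fence *fence;
 *	int r;
 *
 *	r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
 *	if (!r) {
 *		amdgpu_bo_fence(bo, fence, false);
 *		dma_fence_put(fence);
 *	}
 */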
1399 /**
1400  * amdgpu_bo_sync_wait - Wait for BO reservation fences
1401  *
1402  * @bo: buffer object
1403  * @owner: fence owner
1404  * @intr: Whether the wait is interruptible
1405  *
1406  * Returns:
1407  * 0 on success, errno otherwise.
1408  */
1409 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
1410 {
1411         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
1412         struct amdgpu_sync sync;
1413         int r;
1414 
1415         amdgpu_sync_create(&sync);
1416         amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
1417         r = amdgpu_sync_wait(&sync, intr);
1418         amdgpu_sync_free(&sync);
1419 
1420         return r;
1421 }
1422 
1423 /**
1424  * amdgpu_bo_gpu_offset - return GPU offset of bo
1425  * @bo: amdgpu object for which we query the offset
1426  *
1427  * Note: the object should either be pinned or reserved when calling this
1428  * function; it might be useful to add a check for this for debugging.
1429  *
1430  * Returns:
1431  * current GPU offset of the object.
1432  */
1433 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
1434 {
1435         WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
1436         WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
1437                      !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
1438         WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
1439         WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
1440                      !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
1441 
1442         return amdgpu_gmc_sign_extend(bo->tbo.offset);
1443 }
1444 
1445 /**
1446  * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
1447  * @adev: amdgpu device object
1448  * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
1449  *
1450  * Returns:
1451  * Which of the allowed domains is preferred for pinning the BO for scanout.
1452  */
1453 uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
1454                                             uint32_t domain)
1455 {
1456         if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
1457                 domain = AMDGPU_GEM_DOMAIN_VRAM;
1458                 if (adev->gmc.real_vram_size <= AMDGPU_SG_THRESHOLD)
1459                         domain = AMDGPU_GEM_DOMAIN_GTT;
1460         }
1461         return domain;
1462 }
