root/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. ttm_to_amdgpu_bo
  2. amdgpu_mem_type_to_domain
  3. amdgpu_bo_reserve
  4. amdgpu_bo_unreserve
  5. amdgpu_bo_size
  6. amdgpu_bo_ngpu_pages
  7. amdgpu_bo_gpu_page_alignment
  8. amdgpu_bo_mmap_offset
  9. amdgpu_bo_in_cpu_visible_vram
  10. amdgpu_bo_explicit_sync
  11. amdgpu_sa_bo_gpu_addr
  12. amdgpu_sa_bo_cpu_addr

   1 /*
   2  * Copyright 2008 Advanced Micro Devices, Inc.
   3  * Copyright 2008 Red Hat Inc.
   4  * Copyright 2009 Jerome Glisse.
   5  *
   6  * Permission is hereby granted, free of charge, to any person obtaining a
   7  * copy of this software and associated documentation files (the "Software"),
   8  * to deal in the Software without restriction, including without limitation
   9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10  * and/or sell copies of the Software, and to permit persons to whom the
  11  * Software is furnished to do so, subject to the following conditions:
  12  *
  13  * The above copyright notice and this permission notice shall be included in
  14  * all copies or substantial portions of the Software.
  15  *
  16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22  * OTHER DEALINGS IN THE SOFTWARE.
  23  *
  24  * Authors: Dave Airlie
  25  *          Alex Deucher
  26  *          Jerome Glisse
  27  */
  28 #ifndef __AMDGPU_OBJECT_H__
  29 #define __AMDGPU_OBJECT_H__
  30 
  31 #include <drm/amdgpu_drm.h>
  32 #include "amdgpu.h"
  33 
  34 #define AMDGPU_BO_INVALID_OFFSET        LONG_MAX
  35 #define AMDGPU_BO_MAX_PLACEMENTS        3
  36 
/* Parameters describing a buffer-object allocation request */
struct amdgpu_bo_param {
	/* requested size in bytes */
	unsigned long			size;
	/* CPU-side alignment in bytes (0 = default) */
	int				byte_align;
	/* initial placement domain(s), AMDGPU_GEM_DOMAIN_* bitmask */
	u32				domain;
	/* domains the BO may later migrate to */
	u32				preferred_domain;
	/* AMDGPU_GEM_CREATE_* creation flags */
	u64				flags;
	/* kernel vs. device/user BO, see enum ttm_bo_type */
	enum ttm_bo_type		type;
	/* optional reservation object to share; NULL = allocate one */
	struct dma_resv *resv;
};
  46 
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	/* owning bo_va this mapping belongs to */
	struct amdgpu_bo_va		*bo_va;
	/* entry on the bo_va's valids/invalids list */
	struct list_head		list;
	/* interval-tree node; __subtree_last is maintained by the tree */
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	/* offset into the BO where this mapping begins */
	uint64_t			offset;
	/* AMDGPU_PTE_* flags for the mapping */
	uint64_t			flags;
};
  58 
/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	/* common BO-in-VM bookkeeping (bo, vm, status list) */
	struct amdgpu_vm_bo_base        base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence	        *last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	/* BO lives on a remote XGMI-connected device */
	bool				is_xgmi;
};
  78 
/* amdgpu buffer object, wrapping a TTM buffer object */
struct amdgpu_bo {
	/* Protected by tbo.reserved */
	/* domains requested at creation time */
	u32				preferred_domains;
	/* domains the BO is allowed to fall back to */
	u32				allowed_domains;
	/* placement array referenced by @placement */
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	/* embedded TTM BO; lifetime of this struct follows tbo refcount */
	struct ttm_buffer_object	tbo;
	/* cached kernel mapping, see amdgpu_bo_kmap() */
	struct ttm_bo_kmap_obj		kmap;
	/* AMDGPU_GEM_CREATE_* flags */
	u64				flags;
	unsigned			pin_count;
	u64				tiling_flags;
	u64				metadata_flags;
	/* opaque per-BO metadata blob set via amdgpu_bo_set_metadata() */
	void				*metadata;
	u32				metadata_size;
	/* number of dma-buf exports/imports sharing this BO */
	unsigned			prime_shared_count;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;
	/* shadow copy in GTT used for VM page-table recovery */
	struct amdgpu_bo		*shadow;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	/* MMU-notifier registration for userptr BOs */
	struct amdgpu_mn		*mn;

	/* mutually exclusive uses: userptr BOs vs. shadow BOs */
	union {
		struct list_head	mn_list;
		struct list_head	shadow_list;
	};

	/* backref for KFD-owned BOs, NULL otherwise */
	struct kgd_mem                  *kfd_bo;
};
 110 
 111 static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
 112 {
 113         return container_of(tbo, struct amdgpu_bo, tbo);
 114 }
 115 
 116 /**
 117  * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 118  * @mem_type:   ttm memory type
 119  *
 120  * Returns corresponding domain of the ttm mem_type
 121  */
 122 static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
 123 {
 124         switch (mem_type) {
 125         case TTM_PL_VRAM:
 126                 return AMDGPU_GEM_DOMAIN_VRAM;
 127         case TTM_PL_TT:
 128                 return AMDGPU_GEM_DOMAIN_GTT;
 129         case TTM_PL_SYSTEM:
 130                 return AMDGPU_GEM_DOMAIN_CPU;
 131         case AMDGPU_PL_GDS:
 132                 return AMDGPU_GEM_DOMAIN_GDS;
 133         case AMDGPU_PL_GWS:
 134                 return AMDGPU_GEM_DOMAIN_GWS;
 135         case AMDGPU_PL_OA:
 136                 return AMDGPU_GEM_DOMAIN_OA;
 137         default:
 138                 break;
 139         }
 140         return 0;
 141 }
 142 
 143 /**
 144  * amdgpu_bo_reserve - reserve bo
 145  * @bo:         bo structure
 146  * @no_intr:    don't return -ERESTARTSYS on pending signal
 147  *
 148  * Returns:
 149  * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 150  * a signal. Release all buffer reservations and return to user-space.
 151  */
 152 static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
 153 {
 154         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 155         int r;
 156 
 157         r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
 158         if (unlikely(r != 0)) {
 159                 if (r != -ERESTARTSYS)
 160                         dev_err(adev->dev, "%p reserve failed\n", bo);
 161                 return r;
 162         }
 163         return 0;
 164 }
 165 
/**
 * amdgpu_bo_unreserve - drop the reservation taken by amdgpu_bo_reserve()
 * @bo: bo structure
 */
static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
 170 
 171 static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
 172 {
 173         return bo->tbo.num_pages << PAGE_SHIFT;
 174 }
 175 
 176 static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
 177 {
 178         return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
 179 }
 180 
 181 static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
 182 {
 183         return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
 184 }
 185 
/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	/* offset comes from the DRM VMA manager node embedded in the GEM base */
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
 196 
/**
 * amdgpu_bo_in_cpu_visible_vram - check if BO is (partly) in visible VRAM
 */
static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	/* first page frame number that is NOT CPU-visible */
	unsigned fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
	struct drm_mm_node *node = bo->tbo.mem.mm_node;
	unsigned long pages_left;

	/* only VRAM placements can be CPU-visible VRAM */
	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
		return false;

	/* VRAM allocations may be split into several drm_mm nodes; the BO
	 * counts as visible if ANY node starts below the visible boundary.
	 * NOTE(review): assumes mm_node is a contiguous array covering
	 * exactly num_pages — matches how the VRAM manager allocates.
	 */
	for (pages_left = bo->tbo.mem.num_pages; pages_left;
	     pages_left -= node->size, node++)
		if (node->start < fpfn)
			return true;

	return false;
}
 217 
 218 /**
 219  * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 220  */
 221 static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
 222 {
 223         return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
 224 }
 225 
 226 bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 227 void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 228 
 229 int amdgpu_bo_create(struct amdgpu_device *adev,
 230                      struct amdgpu_bo_param *bp,
 231                      struct amdgpu_bo **bo_ptr);
 232 int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
 233                               unsigned long size, int align,
 234                               u32 domain, struct amdgpu_bo **bo_ptr,
 235                               u64 *gpu_addr, void **cpu_addr);
 236 int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
 237                             unsigned long size, int align,
 238                             u32 domain, struct amdgpu_bo **bo_ptr,
 239                             u64 *gpu_addr, void **cpu_addr);
 240 int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
 241                                uint64_t offset, uint64_t size, uint32_t domain,
 242                                struct amdgpu_bo **bo_ptr, void **cpu_addr);
 243 void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
 244                            void **cpu_addr);
 245 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
 246 void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
 247 void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
 248 struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
 249 void amdgpu_bo_unref(struct amdgpu_bo **bo);
 250 int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
 251 int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 252                              u64 min_offset, u64 max_offset);
 253 int amdgpu_bo_unpin(struct amdgpu_bo *bo);
 254 int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
 255 int amdgpu_bo_init(struct amdgpu_device *adev);
 256 int amdgpu_bo_late_init(struct amdgpu_device *adev);
 257 void amdgpu_bo_fini(struct amdgpu_device *adev);
 258 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
 259                                 struct vm_area_struct *vma);
 260 int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
 261 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
 262 int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
 263                             uint32_t metadata_size, uint64_t flags);
 264 int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 265                            size_t buffer_size, uint32_t *metadata_size,
 266                            uint64_t *flags);
 267 void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 268                            bool evict,
 269                            struct ttm_mem_reg *new_mem);
 270 void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
 271 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 272 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 273                      bool shared);
 274 int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
 275 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
 276 int amdgpu_bo_validate(struct amdgpu_bo *bo);
 277 int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
 278                              struct dma_fence **fence);
 279 uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
 280                                             uint32_t domain);
 281 
 282 /*
 283  * sub allocation
 284  */
 285 
 286 static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
 287 {
 288         return sa_bo->manager->gpu_addr + sa_bo->soffset;
 289 }
 290 
 291 static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
 292 {
 293         return sa_bo->manager->cpu_ptr + sa_bo->soffset;
 294 }
 295 
 296 int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
 297                                      struct amdgpu_sa_manager *sa_manager,
 298                                      unsigned size, u32 align, u32 domain);
 299 void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
 300                                       struct amdgpu_sa_manager *sa_manager);
 301 int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
 302                                       struct amdgpu_sa_manager *sa_manager);
 303 int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 304                      struct amdgpu_sa_bo **sa_bo,
 305                      unsigned size, unsigned align);
 306 void amdgpu_sa_bo_free(struct amdgpu_device *adev,
 307                               struct amdgpu_sa_bo **sa_bo,
 308                               struct dma_fence *fence);
 309 #if defined(CONFIG_DEBUG_FS)
 310 void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 311                                          struct seq_file *m);
 312 #endif
 313 
 314 bool amdgpu_bo_support_uswc(u64 bo_flags);
 315 
 316 
 317 #endif

/* [<][>][^][v][top][bottom][index][help] */