drivers/gpu/drm/i915/gem/i915_gem_object.h

DEFINITIONS

This source file includes the following definitions.
  1. i915_gem_object_lookup_rcu
  2. i915_gem_object_lookup
  3. i915_gem_object_get
  4. i915_gem_object_put
  5. i915_gem_object_lock
  6. i915_gem_object_lock_interruptible
  7. i915_gem_object_unlock
  8. i915_gem_object_set_readonly
  9. i915_gem_object_is_readonly
  10. i915_gem_object_has_struct_page
  11. i915_gem_object_is_shrinkable
  12. i915_gem_object_is_proxy
  13. i915_gem_object_never_bind_ggtt
  14. i915_gem_object_needs_async_cancel
  15. i915_gem_object_is_framebuffer
  16. i915_gem_object_get_tiling
  17. i915_gem_object_is_tiled
  18. i915_gem_object_get_stride
  19. i915_gem_tile_height
  20. i915_gem_object_get_tile_height
  21. i915_gem_object_get_tile_row_size
  22. i915_gem_object_pin_pages
  23. i915_gem_object_has_pages
  24. __i915_gem_object_pin_pages
  25. i915_gem_object_has_pinned_pages
  26. __i915_gem_object_unpin_pages
  27. i915_gem_object_unpin_pages
  28. i915_gem_object_flush_map
  29. i915_gem_object_unpin_map
  30. i915_gem_object_finish_access
  31. i915_gem_object_last_write_engine
  32. cpu_write_needs_clflush
  33. __start_cpu_write

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include <drm/i915_drm.h>

#include "i915_gem_object_types.h"

#include "i915_gem_gtt.h"

void i915_gem_init__objects(struct drm_i915_private *i915);

struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size);
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
                                       const void *data, size_t size);

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                     struct sg_table *pages,
                                     bool needs_clflush);

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
        WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
        return idr_find(&file->object_idr, handle);
}

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
        struct drm_i915_gem_object *obj;

        rcu_read_lock();
        obj = i915_gem_object_lookup_rcu(file, handle);
        if (obj && !kref_get_unless_zero(&obj->base.refcount))
                obj = NULL;
        rcu_read_unlock();

        return obj;
}
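
/*
 * A minimal lookup sketch (illustrative only; "file" and "handle" stand
 * for whatever ioctl arguments the caller has): a successful
 * i915_gem_object_lookup() takes its own reference via
 * kref_get_unless_zero() under the RCU read lock, so it must always be
 * balanced by i915_gem_object_put().
 *
 *      struct drm_i915_gem_object *obj;
 *
 *      obj = i915_gem_object_lookup(file, handle);
 *      if (!obj)
 *              return -ENOENT;
 *
 *      ... operate on obj while holding the reference ...
 *
 *      i915_gem_object_put(obj);
 */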

__deprecated
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
        drm_gem_object_get(&obj->base);
        return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
        __drm_gem_object_put(&obj->base);
}

#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
        dma_resv_lock(obj->base.resv, NULL);
}

static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
        return dma_resv_lock_interruptible(obj->base.resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
        dma_resv_unlock(obj->base.resv);
}
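
/*
 * Locking sketch: the helpers above wrap the object's dma_resv lock. A
 * typical interruptible section in process context, assuming the caller
 * already holds a reference on obj, is:
 *
 *      int err;
 *
 *      err = i915_gem_object_lock_interruptible(obj);
 *      if (err)
 *              return err;
 *
 *      assert_object_held(obj);
 *      ... touch state guarded by obj->base.resv ...
 *      i915_gem_object_unlock(obj);
 *
 * where err is -EINTR if a signal arrived while waiting for the lock.
 */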

struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
                                  struct dma_fence *fence);

static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
        obj->base.vma_node.readonly = true;
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
        return obj->base.vma_node.readonly;
}

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
}

static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
}

static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
        return obj->ops->flags & I915_GEM_OBJECT_ASYNC_CANCEL;
}

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
        return READ_ONCE(obj->frontbuffer);
}

static inline unsigned int
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
{
        return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
{
        return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
        GEM_BUG_ON(!tiling);
        return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
        return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
        return (i915_gem_object_get_stride(obj) *
                i915_gem_object_get_tile_height(obj));
}
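
/*
 * Worked example for the tiling helpers above: tile height is 8 rows for
 * X-tiling and 32 rows for Y-tiling, so with a hypothetical 512 byte
 * stride:
 *
 *      X-tiled: i915_gem_object_get_tile_row_size() = 512 * 8  = 4096 bytes
 *      Y-tiled: i915_gem_object_get_tile_row_size() = 512 * 32 = 16384 bytes
 */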

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
                               unsigned int tiling, unsigned int stride);

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
                         unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
                               unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
                                    unsigned long n,
                                    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
                                unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                 struct sg_table *pages,
                                 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        might_lock(&obj->mm.lock);

        if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
                return 0;

        return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
        return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));

        atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
        return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

        atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_unpin_pages(obj);
}
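
/*
 * Page-pinning sketch, assuming a sleepable context and an obj the caller
 * already holds a reference on: a nonzero pin count keeps the backing
 * store resident, so the shrinker cannot reap it until the matching unpin.
 *
 *      struct page *page;
 *      int err;
 *
 *      err = i915_gem_object_pin_pages(obj);
 *      if (err)
 *              return err;
 *
 *      page = i915_gem_object_get_page(obj, 0);
 *      ... access the first backing page ...
 *
 *      i915_gem_object_unpin_pages(obj);
 */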

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
        I915_MM_NORMAL = 0,
        I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
};

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
                                enum i915_mm_subclass subclass);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);

enum i915_map_type {
        I915_MAP_WB = 0,
        I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
        I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
        I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                                           enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
}
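
/*
 * CPU-mapping sketch, assuming a cacheable (WB-mappable) object and
 * caller-supplied "data"/"len": pin the pages and take a linear kernel
 * mapping, write through it, flush what was dirtied, then drop the pin.
 *
 *      void *vaddr;
 *
 *      vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *      if (IS_ERR(vaddr))
 *              return PTR_ERR(vaddr);
 *
 *      memcpy(vaddr, data, len);
 *      __i915_gem_object_flush_map(obj, 0, len);
 *      i915_gem_object_unpin_map(obj);
 */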

void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
                                   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
                                 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
                                  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE  BIT(0)
#define CLFLUSH_AFTER   BIT(1)
#define CLFLUSH_FLAGS   (CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
        i915_gem_object_unpin_pages(obj);
        i915_gem_object_unlock(obj);
}
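
/*
 * CPU-access sketch, assuming a shmem-backed object and some mapping of
 * its pages ("vaddr"/"len" are placeholders): the prepare helpers lock
 * the object, pin its pages and report which cache flushes the caller
 * owes; i915_gem_object_finish_access() then drops both the pin and the
 * lock.
 *
 *      unsigned int needs_clflush;
 *      int err;
 *
 *      err = i915_gem_object_prepare_write(obj, &needs_clflush);
 *      if (err)
 *              return err;
 *
 *      if (needs_clflush & CLFLUSH_BEFORE)
 *              drm_clflush_virt_range(vaddr, len);
 *      ... write to the pages ...
 *      if (needs_clflush & CLFLUSH_AFTER)
 *              drm_clflush_virt_range(vaddr, len);
 *
 *      i915_gem_object_finish_access(obj);
 */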

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
        struct intel_engine_cs *engine = NULL;
        struct dma_fence *fence;

        rcu_read_lock();
        fence = dma_resv_get_excl_rcu(obj->base.resv);
        rcu_read_unlock();

        if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
                engine = to_request(fence)->engine;
        dma_fence_put(fence);

        return engine;
}

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
                                         unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     const struct i915_ggtt_view *view,
                                     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
        if (obj->cache_dirty)
                return false;

        if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
                return true;

        return obj->pin_global; /* currently in use by HW, keep flushed */
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        if (cpu_write_needs_clflush(obj))
                obj->cache_dirty = true;
}
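
/*
 * Reading the two helpers above together: __start_cpu_write() moves the
 * object into the CPU read and write domains, and cpu_write_needs_clflush()
 * decides whether those CPU writes must later be clflushed before the GPU
 * samples the pages. For example, an object that is not coherent for CPU
 * writes (no I915_BO_CACHE_COHERENT_FOR_WRITE), or one pinned global and so
 * in use by the hardware, is marked cache_dirty so the flush happens; an
 * object already marked cache_dirty returns false since a flush is pending
 * anyway.
 */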

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
                         unsigned int flags,
                         long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
                                  unsigned int flags,
                                  const struct i915_sched_attr *attr);
#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)

#endif
