root/include/drm/ttm/ttm_bo_driver.h


DEFINITIONS

This source file includes the following definitions.
  1. ttm_flag_masked
  2. __ttm_bo_reserve
  3. ttm_bo_reserve
  4. ttm_bo_reserve_slowpath
  5. ttm_bo_unreserve

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#ifndef _TTM_BO_DRIVER_H_
#define _TTM_BO_DRIVER_H_

#include <drm/drm_mm.h>
#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/dma-resv.h>

#include "ttm_bo_api.h"
#include "ttm_memory.h"
#include "ttm_module.h"
#include "ttm_placement.h"
#include "ttm_tt.h"

#define TTM_MAX_BO_PRIORITY     4U

#define TTM_MEMTYPE_FLAG_FIXED         (1 << 0) /* Fixed (on-card) PCI memory */
#define TTM_MEMTYPE_FLAG_MAPPABLE      (1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA           (1 << 3) /* Can't map aperture */

struct ttm_mem_type_manager;

struct ttm_mem_type_manager_func {
        /**
         * struct ttm_mem_type_manager member init
         *
         * @man: Pointer to a memory type manager.
         * @p_size: Implementation dependent, but typically the size of the
         * range to be managed in pages.
         *
         * Called to initialize a private range manager. The function is
         * expected to initialize the man::priv member.
         * Returns 0 on success, negative error code on failure.
         */
        int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);

        /**
         * struct ttm_mem_type_manager member takedown
         *
         * @man: Pointer to a memory type manager.
         *
         * Called to undo the setup done in init. All allocated resources
         * should be freed.
         */
        int  (*takedown)(struct ttm_mem_type_manager *man);

        /**
         * struct ttm_mem_type_manager member get_node
         *
         * @man: Pointer to a memory type manager.
         * @bo: Pointer to the buffer object we're allocating space for.
         * @place: Placement details.
         * @mem: Pointer to a struct ttm_mem_reg to be filled in.
         *
         * This function should allocate space in the memory type managed
         * by @man. Placement details, if applicable, are given by @place.
         * If successful, @mem::mm_node should be set to a non-null value,
         * @mem::start should be set to a value identifying the beginning
         * of the range allocated, and the function should return zero.
         * If the memory region could not accommodate the buffer object,
         * @mem::mm_node should be set to NULL, and the function should
         * return 0.
         * If a system error occurred, preventing the request from being
         * fulfilled, the function should return a negative error code.
         *
         * Note that @mem::mm_node will only be dereferenced by
         * struct ttm_mem_type_manager functions and optionally by the driver,
         * which has knowledge of the underlying type.
         *
         * This function may not be called from within atomic context, so
         * an implementation can and must use either a mutex or a spinlock to
         * protect any data structures managing the space.
         */
        int  (*get_node)(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         const struct ttm_place *place,
                         struct ttm_mem_reg *mem);

        /**
         * struct ttm_mem_type_manager member put_node
         *
         * @man: Pointer to a memory type manager.
         * @mem: Pointer to a struct ttm_mem_reg whose resources should be
         * freed.
         *
         * This function frees memory type resources previously allocated
         * and that are identified by @mem::mm_node and @mem::start. May not
         * be called from within atomic context.
         */
        void (*put_node)(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem);

        /**
         * struct ttm_mem_type_manager member debug
         *
         * @man: Pointer to a memory type manager.
         * @printer: Pointer to a struct drm_printer to receive the output.
         *
         * This function is called to print out the state of the memory
         * type manager to aid debugging of out-of-memory conditions.
         * It may not be called from within atomic context.
         */
        void (*debug)(struct ttm_mem_type_manager *man,
                      struct drm_printer *printer);
};
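
/*
 * Example (illustrative sketch, not part of the TTM API): a minimal
 * range manager backed by drm_mm, loosely modeled on the generic
 * ttm_bo_manager_func. The my_range_manager type and my_get_node()
 * names are hypothetical, and exact drm_mm details may differ by
 * kernel version.
 *
 *   struct my_range_manager {
 *           struct drm_mm mm;
 *           spinlock_t lock;
 *   };
 *
 *   static int my_get_node(struct ttm_mem_type_manager *man,
 *                          struct ttm_buffer_object *bo,
 *                          const struct ttm_place *place,
 *                          struct ttm_mem_reg *mem)
 *   {
 *           struct my_range_manager *rman = man->priv;
 *           struct drm_mm_node *node;
 *           int ret;
 *
 *           node = kzalloc(sizeof(*node), GFP_KERNEL);
 *           if (!node)
 *                   return -ENOMEM;
 *
 *           spin_lock(&rman->lock);
 *           ret = drm_mm_insert_node_in_range(&rman->mm, node,
 *                                             mem->num_pages,
 *                                             mem->page_alignment, 0,
 *                                             place->fpfn,
 *                                             place->lpfn ?: man->size,
 *                                             DRM_MM_INSERT_BEST);
 *           spin_unlock(&rman->lock);
 *
 *           if (ret) {
 *                   kfree(node);
 *                   (-ENOSPC means "doesn't fit": leave mm_node NULL
 *                    and return 0, per the get_node contract above)
 *                   return ret == -ENOSPC ? 0 : ret;
 *           }
 *           mem->mm_node = node;
 *           mem->start = node->start;
 *           return 0;
 *   }
 */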

/**
 * struct ttm_mem_type_manager
 *
 * @has_type: The memory type has been initialized.
 * @use_type: The memory type is enabled.
 * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
 * managed by this memory type.
 * @gpu_offset: If used, the GPU offset of the first managed page of
 * fixed memory or the first managed location in an aperture.
 * @size: Size of the managed region.
 * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
 * as defined in ttm_placement_common.h
 * @default_caching: The default caching policy used for a buffer object
 * placed in this memory type if the user doesn't provide one.
 * @func: structure pointer implementing the range manager. See above
 * @priv: Driver private closure for @func.
 * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
 * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
 * reserved by the TTM vm system.
 * @io_reserve_lru: Optional lru list for unreserving io mem regions.
 * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
 * static information. bdev::driver::io_mem_free is never used.
 * @move_lock: lock for move fence
 * @lru: The lru list for this memory type.
 * @move: The fence of the last pipelined move operation.
 *
 * This structure is used to identify and manage memory types for a device.
 * It's set up by the ttm_bo_driver::init_mem_type method.
 */

struct ttm_mem_type_manager {
        struct ttm_bo_device *bdev;

        /*
         * No protection. Constant from start.
         */

        bool has_type;
        bool use_type;
        uint32_t flags;
        uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
        uint64_t size;
        uint32_t available_caching;
        uint32_t default_caching;
        const struct ttm_mem_type_manager_func *func;
        void *priv;
        struct mutex io_reserve_mutex;
        bool use_io_reserve_lru;
        bool io_reserve_fastpath;
        spinlock_t move_lock;

        /*
         * Protected by @io_reserve_mutex:
         */

        struct list_head io_reserve_lru;

        /*
         * Protected by the global->lru_lock.
         */

        struct list_head lru[TTM_MAX_BO_PRIORITY];

        /*
         * Protected by @move_lock.
         */
        struct dma_fence *move;
};

/**
 * struct ttm_bo_driver
 *
 * @ttm_tt_create: Callback to create a struct ttm_tt.
 * @invalidate_caches: Callback to invalidate read caches when a buffer object
 * has been evicted.
 * @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
 * structure.
 * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
 * @move: Callback for a driver to hook in accelerated functions to
 * move a buffer.
 * If set to NULL, a potentially slow memcpy() move is used.
 */

struct ttm_bo_driver {
        /**
         * ttm_tt_create
         *
         * @bo: The buffer object to create the ttm for.
         * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
         *
         * Create a struct ttm_tt to back data with system memory pages.
         * No pages are actually allocated.
         * Returns:
         * NULL: Out of memory.
         */
        struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
                                        uint32_t page_flags);

        /**
         * ttm_tt_populate
         *
         * @ttm: The struct ttm_tt to contain the backing pages.
         * @ctx: TTM operation context for this population attempt.
         *
         * Allocate all backing pages.
         * Returns:
         * -ENOMEM: Out of memory.
         */
        int (*ttm_tt_populate)(struct ttm_tt *ttm,
                        struct ttm_operation_ctx *ctx);

        /**
         * ttm_tt_unpopulate
         *
         * @ttm: The struct ttm_tt to contain the backing pages.
         *
         * Free all backing pages.
         */
        void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);

        /**
         * struct ttm_bo_driver member invalidate_caches
         *
         * @bdev: the buffer object device.
         * @flags: new placement of the rebound buffer object.
         *
         * A previously evicted buffer has been rebound in a
         * potentially new location. Tell the driver that it might
         * consider invalidating read (texture) caches on the next command
         * submission as a consequence.
         */

        int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
        int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
                             struct ttm_mem_type_manager *man);

        /**
         * struct ttm_bo_driver member eviction_valuable
         *
         * @bo: the buffer object to be evicted
         * @place: placement we need room for
         *
         * Check with the driver if it is valuable to evict a BO to make room
         * for a certain placement.
         */
        bool (*eviction_valuable)(struct ttm_buffer_object *bo,
                                  const struct ttm_place *place);
        /**
         * struct ttm_bo_driver member evict_flags:
         *
         * @bo: the buffer object to be evicted
         *
         * Return the bo flags for a buffer which is not mapped to the hardware.
         * These will be placed in proposed_flags so that when the move is
         * finished, they'll end up in bo->mem.flags
         */

        void (*evict_flags)(struct ttm_buffer_object *bo,
                            struct ttm_placement *placement);

        /**
         * struct ttm_bo_driver member move:
         *
         * @bo: the buffer to move
         * @evict: whether this motion is evicting the buffer from
         * the graphics address space
         * @ctx: context for this move with parameters
         * @new_mem: the new memory region receiving the buffer
         *
         * Move a buffer between two memory regions.
         */
        int (*move)(struct ttm_buffer_object *bo, bool evict,
                    struct ttm_operation_ctx *ctx,
                    struct ttm_mem_reg *new_mem);

        /**
         * struct ttm_bo_driver member verify_access
         *
         * @bo: Pointer to a buffer object.
         * @filp: Pointer to a struct file trying to access the object.
         *
         * Called from the map / write / read methods to verify that the
         * caller is permitted to access the buffer object.
         * This member may be set to NULL, which will refuse this kind of
         * access for all buffer objects.
         * This function should return 0 if access is granted, -EPERM otherwise.
         */
        int (*verify_access)(struct ttm_buffer_object *bo,
                             struct file *filp);

        /**
         * Hook to notify driver about a driver move so it
         * can do tiling things and book-keeping.
         *
         * @evict: whether this move is evicting the buffer from the graphics
         * address space
         */
        void (*move_notify)(struct ttm_buffer_object *bo,
                            bool evict,
                            struct ttm_mem_reg *new_mem);
        /* notify the driver we are taking a fault on this BO
         * and have reserved it */
        int (*fault_reserve_notify)(struct ttm_buffer_object *bo);

        /**
         * notify the driver that we're about to swap out this bo
         */
        void (*swap_notify)(struct ttm_buffer_object *bo);

        /**
         * Driver callback when mapping IO memory (for bo_move_memcpy
         * for instance). TTM will take care to call io_mem_free whenever
         * the mapping is not used anymore. io_mem_reserve & io_mem_free
         * are balanced.
         */
        int (*io_mem_reserve)(struct ttm_bo_device *bdev,
                              struct ttm_mem_reg *mem);
        void (*io_mem_free)(struct ttm_bo_device *bdev,
                            struct ttm_mem_reg *mem);

        /**
         * Return the pfn for a given page_offset inside the BO.
         *
         * @bo: the BO to look up the pfn for
         * @page_offset: the offset to look up
         */
        unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
                                    unsigned long page_offset);

        /**
         * Read/write memory buffers for ptrace access
         *
         * @bo: the BO to access
         * @offset: the offset from the start of the BO
         * @buf: pointer to source/destination buffer
         * @len: number of bytes to copy
         * @write: whether to read (0) from or write (non-0) to BO
         *
         * If successful, this function should return the number of
         * bytes copied, -EIO otherwise. If the number of bytes
         * returned is < len, the function may be called again with
         * the remainder of the buffer to copy.
         */
        int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
                             void *buf, int len, int write);

        /**
         * struct ttm_bo_driver member del_from_lru_notify
         *
         * @bo: the buffer object deleted from lru
         *
         * notify driver that a BO was deleted from LRU.
         */
        void (*del_from_lru_notify)(struct ttm_buffer_object *bo);

        /**
         * Notify the driver that we're about to release a BO
         *
         * @bo: BO that is about to be released
         *
         * Gives the driver a chance to do any cleanup, including
         * adding fences that may force a delayed delete
         */
        void (*release_notify)(struct ttm_buffer_object *bo);
};
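
/*
 * Example (sketch): drivers typically provide a single static instance
 * of this vtable. The mydrv_* callbacks are hypothetical; ttm_pool_*
 * and ttm_bo_eviction_valuable are the stock TTM helpers that can be
 * used where no driver-specific behavior is needed.
 *
 *   static struct ttm_bo_driver mydrv_bo_driver = {
 *           .ttm_tt_create = mydrv_ttm_tt_create,
 *           .ttm_tt_populate = ttm_pool_populate,
 *           .ttm_tt_unpopulate = ttm_pool_unpopulate,
 *           .init_mem_type = mydrv_init_mem_type,
 *           .eviction_valuable = ttm_bo_eviction_valuable,
 *           .evict_flags = mydrv_evict_flags,
 *           .move = mydrv_bo_move,
 *           .verify_access = mydrv_verify_access,
 *           .io_mem_reserve = mydrv_io_mem_reserve,
 *           .io_mem_free = mydrv_io_mem_free,
 *   };
 */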

/**
 * struct ttm_bo_global - Buffer object driver global data.
 *
 * @mem_glob: Pointer to a struct ttm_mem_global object for accounting.
 * @dummy_read_page: Pointer to a dummy page used for mapping requests
 * of unpopulated pages.
 * @lru_lock: Spinlock protecting the bo subsystem lru lists.
 * @device_list: List of buffer object devices, protected by ttm_global_mutex.
 * @swap_lru: Lru list of buffer objects used for swapping.
 * @bo_count: Number of buffer objects in the system.
 */

extern struct ttm_bo_global {

        /**
         * Constant after init.
         */

        struct kobject kobj;
        struct ttm_mem_global *mem_glob;
        struct page *dummy_read_page;
        spinlock_t lru_lock;

        /**
         * Protected by ttm_global_mutex.
         */
        struct list_head device_list;

        /**
         * Protected by the lru_lock.
         */
        struct list_head swap_lru[TTM_MAX_BO_PRIORITY];

        /**
         * Internal protection.
         */
        atomic_t bo_count;
} ttm_bo_glob;


#define TTM_NUM_MEM_TYPES 8

/**
 * struct ttm_bo_device - Buffer object driver device-specific data.
 *
 * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
 * @man: An array of mem_type_managers.
 * @vma_manager: Address space manager
 * @ddestroy: List of delayed-destroy buffer objects, protected (together
 * with the buffer+device lru lists) by the global lru_lock.
 * @dev_mapping: A pointer to the struct address_space representing the
 * device address space.
 * @wq: Work queue structure for the delayed delete workqueue.
 * @no_retry: Don't retry allocation if it fails
 *
 */

struct ttm_bo_device {

        /*
         * Constant after bo device init / atomic.
         */
        struct list_head device_list;
        struct ttm_bo_global *glob;
        struct ttm_bo_driver *driver;
        struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];

        /*
         * Protected by internal locks.
         */
        struct drm_vma_offset_manager vma_manager;

        /*
         * Protected by the global->lru_lock.
         */
        struct list_head ddestroy;

        /*
         * Protected by load / firstopen / lastclose / unload sync.
         */

        struct address_space *dev_mapping;

        /*
         * Internal protection.
         */

        struct delayed_work wq;

        bool need_dma32;

        bool no_retry;
};

/**
 * struct ttm_lru_bulk_move_pos
 *
 * @first: first BO in the bulk move range
 * @last: last BO in the bulk move range
 *
 * Positions for a lru bulk move.
 */
struct ttm_lru_bulk_move_pos {
        struct ttm_buffer_object *first;
        struct ttm_buffer_object *last;
};

/**
 * struct ttm_lru_bulk_move
 *
 * @tt: first/last lru entry for BOs in the TT domain
 * @vram: first/last lru entry for BOs in the VRAM domain
 * @swap: first/last lru entry for BOs on the swap list
 *
 * Helper structure for bulk moves on the LRU list.
 */
struct ttm_lru_bulk_move {
        struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
        struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
        struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
};
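
/*
 * Example (sketch): batching LRU updates for many reserved BOs with a
 * bulk move, assuming the ttm_bo_move_to_lru_tail() and
 * ttm_bo_bulk_move_lru_tail() helpers declared in ttm_bo_api.h; the
 * vm_bos list is hypothetical.
 *
 *   struct ttm_lru_bulk_move bulk;
 *
 *   memset(&bulk, 0, sizeof(bulk));
 *   spin_lock(&glob->lru_lock);
 *   list_for_each_entry(bo, &vm_bos, lru)
 *           ttm_bo_move_to_lru_tail(bo, &bulk);
 *   ttm_bo_bulk_move_lru_tail(&bulk);
 *   spin_unlock(&glob->lru_lock);
 */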

/**
 * ttm_flag_masked
 *
 * @old: Pointer to the result and original value.
 * @new: New value of bits.
 * @mask: Mask of bits to change.
 *
 * Convenience function to change a number of bits identified by a mask.
 */

static inline uint32_t
ttm_flag_masked(uint32_t *old, uint32_t new, uint32_t mask)
{
        *old ^= (*old ^ new) & mask;
        return *old;
}
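
/*
 * Example: replace only the caching bits of a placement mask, leaving
 * the memory-domain bits untouched:
 *
 *   uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED;
 *
 *   ttm_flag_masked(&flags, TTM_PL_FLAG_WC, TTM_PL_MASK_CACHING);
 *   (flags is now TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC)
 */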

/*
 * ttm_bo.c
 */

/**
 * ttm_mem_reg_is_pci
 *
 * @bdev: Pointer to a struct ttm_bo_device.
 * @mem: A valid struct ttm_mem_reg.
 *
 * Returns true if the memory described by @mem is PCI memory,
 * false otherwise.
 */
bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem);

/**
 * ttm_bo_mem_space
 *
 * @bo: Pointer to a struct ttm_buffer_object, the data of which
 * we want to allocate space for.
 * @placement: Proposed new placement for the buffer object.
 * @mem: A struct ttm_mem_reg to be filled in.
 * @ctx: Operation context; controls interruptible sleeps and whether
 * to return immediately if the GPU is busy.
 *
 * Allocate memory space for the buffer object pointed to by @bo, using
 * the placement flags in @placement, potentially evicting other idle
 * buffer objects.
 * This function may sleep while waiting for space to become available.
 * Returns:
 * -EBUSY: No space available (only if no_wait_gpu is set in @ctx).
 * -ENOMEM: Could not allocate memory for the buffer object, either due to
 * fragmentation or concurrent allocators.
 * -ERESTARTSYS: An interruptible sleep was interrupted by a signal.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement,
                     struct ttm_mem_reg *mem,
                     struct ttm_operation_ctx *ctx);
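
/*
 * Example (sketch): finding space for a BO during validation. The
 * placement variable is assumed to have been filled in by the driver's
 * evict_flags() or by the caller.
 *
 *   struct ttm_operation_ctx ctx = { .interruptible = true,
 *                                    .no_wait_gpu = false };
 *   struct ttm_mem_reg mem;
 *   int ret;
 *
 *   mem.num_pages = bo->num_pages;
 *   mem.size = mem.num_pages << PAGE_SHIFT;
 *   mem.page_alignment = bo->mem.page_alignment;
 *
 *   ret = ttm_bo_mem_space(bo, &placement, &mem, &ctx);
 *   if (unlikely(ret))
 *           return ret;    (no space, -ENOMEM or -ERESTARTSYS)
 */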

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem);
void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem);

int ttm_bo_device_release(struct ttm_bo_device *bdev);

/**
 * ttm_bo_device_init
 *
 * @bdev: A pointer to a struct ttm_bo_device to initialize.
 * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
 * @mapping: The address space to use for this bo.
 * @need_dma32: Restrict page allocations to the DMA32 zone when true.
 *
 * Initializes a struct ttm_bo_device:
 * Returns:
 * !0: Failure.
 */
int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_driver *driver,
                       struct address_space *mapping,
                       bool need_dma32);

/**
 * ttm_bo_unmap_virtual
 *
 * @bo: tear down the virtual mappings for this BO
 */
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);

/**
 * ttm_bo_unmap_virtual_locked
 *
 * @bo: tear down the virtual mappings for this BO
 *
 * The caller must take ttm_mem_io_lock before calling this function.
 */
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);

void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);

/**
 * __ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Will not remove reserved buffers from the lru lists.
 * Otherwise identical to ttm_bo_reserve.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if @ticket is non-NULL).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
 */
static inline int __ttm_bo_reserve(struct ttm_buffer_object *bo,
                                   bool interruptible, bool no_wait,
                                   struct ww_acquire_ctx *ticket)
{
        int ret = 0;

        if (no_wait) {
                bool success;
                if (WARN_ON(ticket))
                        return -EBUSY;

                success = dma_resv_trylock(bo->base.resv);
                return success ? 0 : -EBUSY;
        }

        if (interruptible)
                ret = dma_resv_lock_interruptible(bo->base.resv, ticket);
        else
                ret = dma_resv_lock(bo->base.resv, ticket);
        if (ret == -EINTR)
                return -ERESTARTSYS;
        return ret;
}

/**
 * ttm_bo_reserve:
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
 * @ticket: ticket used to acquire the ww_mutex.
 *
 * Locks a buffer object for validation (or prevents other processes from
 * locking it for validation) and removes it from lru lists, while taking
 * a number of measures to prevent deadlocks.
 *
 * Deadlocks may occur when two processes try to reserve multiple buffers in
 * different order, either by will or as a result of a buffer being evicted
 * to make room for a buffer already reserved. (Buffers are reserved before
 * they are evicted.) The following algorithm prevents such deadlocks from
 * occurring:
 * Processes attempting to reserve multiple buffers other than for eviction
 * (typically execbuf) should first obtain a unique reservation ticket and
 * call this function with a non-NULL @ticket. If, upon call of this
 * function, the buffer object is already reserved, the ticket's stamp is
 * compared against that of the process currently holding the reservation,
 * and if the caller's ticket is the younger one, the function returns
 * -EDEADLK. Otherwise it sleeps waiting for the buffer to become
 * unreserved, after which it retries reserving.
 * The caller should, when receiving an -EDEADLK error,
 * release all its buffer reservations, wait for @bo to become unreserved,
 * and then rerun the validation with the same ticket. This procedure
 * will always guarantee that the process with the oldest ticket
 * will eventually succeed, preventing both deadlocks and starvation.
 *
 * Returns:
 * -EDEADLK: The reservation may cause a deadlock.
 * Release all buffer reservations, wait for @bo to become unreserved and
 * try again. (only if @ticket is non-NULL).
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 * -EBUSY: The function needed to sleep, but @no_wait was true
 * -EALREADY: Bo already reserved using @ticket. This error code will only
 * be returned if @ticket is non-NULL.
 */
static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
                                 bool interruptible, bool no_wait,
                                 struct ww_acquire_ctx *ticket)
{
        int ret;

        WARN_ON(!kref_read(&bo->kref));

        ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
        if (likely(ret == 0))
                ttm_bo_del_sub_from_lru(bo);

        return ret;
}

/**
 * ttm_bo_reserve_slowpath:
 * @bo: A pointer to a struct ttm_buffer_object.
 * @interruptible: Sleep interruptible if waiting.
 * @ticket: Ticket (ww_acquire_ctx) used for this acquisition.
 *
 * This is called after ttm_bo_reserve returns -EDEADLK and we backed off
 * from all our other reservations. Because there are no other reservations
 * held by us, this function cannot deadlock any more.
 */
static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
                                          bool interruptible,
                                          struct ww_acquire_ctx *ticket)
{
        int ret = 0;

        WARN_ON(!kref_read(&bo->kref));

        if (interruptible)
                ret = dma_resv_lock_slow_interruptible(bo->base.resv,
                                                       ticket);
        else
                dma_resv_lock_slow(bo->base.resv, ticket);

        if (likely(ret == 0))
                ttm_bo_del_sub_from_lru(bo);
        else if (ret == -EINTR)
                ret = -ERESTARTSYS;

        return ret;
}

/**
 * ttm_bo_unreserve
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Unreserve a previous reservation of @bo.
 */
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        spin_lock(&bo->bdev->glob->lru_lock);
        if (list_empty(&bo->lru))
                ttm_bo_add_to_lru(bo);
        else
                ttm_bo_move_to_lru_tail(bo, NULL);
        spin_unlock(&bo->bdev->glob->lru_lock);
        dma_resv_unlock(bo->base.resv);
}
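
/*
 * Example (sketch): the reserve / backoff / slowpath pattern described
 * above, shown for one buffer of a multi-buffer operation. Releasing
 * the other reservations on -EDEADLK is elided; reservation_ww_class is
 * the ww_class used by dma-resv.
 *
 *   struct ww_acquire_ctx ticket;
 *   int ret;
 *
 *   ww_acquire_init(&ticket, &reservation_ww_class);
 *
 *   ret = ttm_bo_reserve(bo, true, false, &ticket);
 *   if (ret == -EDEADLK) {
 *           (release all other buffer reservations here)
 *           ret = ttm_bo_reserve_slowpath(bo, true, &ticket);
 *   }
 *   if (!ret) {
 *           (validate and use the buffer)
 *           ttm_bo_unreserve(bo);
 *   }
 *   ww_acquire_fini(&ticket);
 */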

/*
 * ttm_bo_util.c
 */

int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                       struct ttm_mem_reg *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
                     struct ttm_mem_reg *mem);
/**
 * ttm_bo_move_ttm
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: Operation context; controls interruptible sleeps and whether
 * to return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Optimized move function for a buffer object with both old and
 * new placement backed by a TTM. The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                    struct ttm_operation_ctx *ctx,
                    struct ttm_mem_reg *new_mem);
/**
 * ttm_bo_move_memcpy
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @ctx: Operation context; controls interruptible sleeps and whether
 * to return immediately if the GPU is busy.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Fallback move function for a mappable buffer object in mappable memory.
 * The function will, if successful,
 * free any old aperture space, and set (@new_mem)->mm_node to NULL,
 * and update the (@bo)->mem placement flags. If unsuccessful, the old
 * data remains untouched, and it's up to the caller to free the
 * memory space indicated by @new_mem.
 * Returns:
 * !0: Failure.
 */

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_free_old_node
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Utility function to free an old placement after a successful move.
 */
void ttm_bo_free_old_node(struct ttm_buffer_object *bo);

/**
 * ttm_bo_move_accel_cleanup.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Accelerated move function to be called when an accelerated move
 * has been scheduled. The function will create a new temporary buffer object
 * representing the old placement, and put the sync object on both buffer
 * objects. After that the newly created buffer object is unref'd to be
 * destroyed when the move is complete. This will help pipeline
 * buffer moves.
 */
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
                              struct dma_fence *fence, bool evict,
                              struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_pipeline_move.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @fence: A fence object that signals when moving is complete.
 * @evict: This is an evict move. Don't return until the buffer is idle.
 * @new_mem: struct ttm_mem_reg indicating where to move.
 *
 * Function for pipelining accelerated moves. Either free the memory
 * immediately or hang it on a temporary buffer object.
 */
int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
                         struct dma_fence *fence, bool evict,
                         struct ttm_mem_reg *new_mem);

/**
 * ttm_bo_pipeline_gutting.
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 *
 * Pipelined gutting of a BO: strip it of its backing store.
 */
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);

/**
 * ttm_io_prot
 *
 * @caching_flags: Caching flags, TTM_PL_FLAG_XX.
 * @tmp: Page protection flag for a normal, cached mapping.
 *
 * Utility function that returns the pgprot_t that should be used for
 * setting up a PTE with the caching model indicated by @caching_flags.
 */
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
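
/*
 * Example: the memcpy move path picks up PTE protection for a kernel
 * mapping of a BO like this (sketch):
 *
 *   pgprot_t prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
 */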

extern const struct ttm_mem_type_manager_func ttm_bo_manager_func;

#endif
