root/drivers/gpu/drm/i915/i915_gem_evict.c


DEFINITIONS

This source file includes the following definitions:
  1. I915_SELFTEST_DECLARE
  2. mark_free
  3. i915_gem_evict_something
  4. i915_gem_evict_for_node
  5. i915_gem_evict_vm

   1 /*
   2  * Copyright © 2008-2010 Intel Corporation
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice (including the next
  12  * paragraph) shall be included in all copies or substantial portions of the
  13  * Software.
  14  *
  15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21  * IN THE SOFTWARE.
  22  *
  23  * Authors:
  24  *    Eric Anholt <eric@anholt.net>
  25  *    Chris Wilson <chris@chris-wilson.co.uk>
  26  *
  27  */
  28 
  29 #include <drm/i915_drm.h>
  30 
  31 #include "gem/i915_gem_context.h"
  32 
  33 #include "i915_drv.h"
  34 #include "i915_trace.h"
  35 
  36 I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
  37         bool fail_if_busy:1;
  38 } igt_evict_ctl;)
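
/*
 * Editor's illustrative sketch (not part of the upstream file): with
 * CONFIG_DRM_I915_SELFTEST enabled, a selftest can set fail_if_busy so that
 * i915_gem_evict_something() bails out with -EBUSY instead of stalling on a
 * busy GGTT.  provoke_ggtt_eviction() below is a hypothetical stand-in for
 * whatever fills the GGTT and forces a rebind; the real consumer lives in
 * selftests/i915_gem_evict.c.
 *
 *	static int igt_example_fail_if_busy(struct drm_i915_private *i915)
 *	{
 *		int err;
 *
 *		igt_evict_ctl.fail_if_busy = true;
 *		err = provoke_ggtt_eviction(i915);	// hypothetical helper
 *		igt_evict_ctl.fail_if_busy = false;
 *
 *		return err == -EBUSY ? 0 : -EINVAL;
 *	}
 */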
  39 
  40 static int ggtt_flush(struct drm_i915_private *i915)
  41 {
  42         /*
  43          * Not everything in the GGTT is tracked via vma (otherwise we
  44          * could evict as required with minimal stalling) so we are forced
  45          * to idle the GPU and explicitly retire outstanding requests in
  46          * the hopes that we can then remove contexts and the like only
  47          * bound by their active reference.
  48          */
  49         return i915_gem_wait_for_idle(i915,
  50                                       I915_WAIT_INTERRUPTIBLE |
  51                                       I915_WAIT_LOCKED,
  52                                       MAX_SCHEDULE_TIMEOUT);
  53 }
  54 
  55 static bool
  56 mark_free(struct drm_mm_scan *scan,
  57           struct i915_vma *vma,
  58           unsigned int flags,
  59           struct list_head *unwind)
  60 {
  61         if (i915_vma_is_pinned(vma))
  62                 return false;
  63 
  64         list_add(&vma->evict_link, unwind);
  65         return drm_mm_scan_add_block(scan, &vma->node);
  66 }
  67 
  68 /**
  69  * i915_gem_evict_something - Evict vmas to make room for binding a new one
  70  * @vm: address space to evict from
  71  * @min_size: size of the desired free space
  72  * @alignment: alignment constraint of the desired free space
  73  * @cache_level: cache_level for the desired space
  74  * @start: start (inclusive) of the range from which to evict objects
  75  * @end: end (exclusive) of the range from which to evict objects
  76  * @flags: additional flags to control the eviction algorithm
  77  *
  78  * This function will try to evict vmas until a free space satisfying the
  79  * requirements is found. Callers must check first whether any such hole exists
  80  * already before calling this function.
  81  *
  82  * This function is used by the object/vma binding code.
  83  *
  84  * Since this function is only used to free up virtual address space, it
  85  * ignores only pinned vmas, and not objects whose backing storage itself is
  86  * pinned. Hence obj->pages_pin_count does not protect against eviction.
  87  *
  88  * To clarify: This is for freeing up virtual address space, not for freeing
  89  * memory in e.g. the shrinker.
  90  */
  91 int
  92 i915_gem_evict_something(struct i915_address_space *vm,
  93                          u64 min_size, u64 alignment,
  94                          unsigned cache_level,
  95                          u64 start, u64 end,
  96                          unsigned flags)
  97 {
  98         struct drm_i915_private *dev_priv = vm->i915;
  99         struct drm_mm_scan scan;
 100         struct list_head eviction_list;
 101         struct i915_vma *vma, *next;
 102         struct drm_mm_node *node;
 103         enum drm_mm_insert_mode mode;
 104         struct i915_vma *active;
 105         int ret;
 106 
 107         lockdep_assert_held(&vm->i915->drm.struct_mutex);
 108         trace_i915_gem_evict(vm, min_size, alignment, flags);
 109 
 110         /*
 111          * The goal is to evict objects and amalgamate space in rough LRU order.
 112          * Since both active and inactive objects reside on the same list,
 113          * in a mix of creation and last scanned order, as we process the list
 114          * we sort it into inactive/active, which keeps the active portion
 115          * in a rough MRU order.
 116          *
 117          * The retirement sequence is thus:
 118          *   1. Inactive objects (already retired, random order)
 119          *   2. Active objects (will stall on unbinding, oldest scanned first)
 120          */
 121         mode = DRM_MM_INSERT_BEST;
 122         if (flags & PIN_HIGH)
 123                 mode = DRM_MM_INSERT_HIGH;
 124         if (flags & PIN_MAPPABLE)
 125                 mode = DRM_MM_INSERT_LOW;
 126         drm_mm_scan_init_with_range(&scan, &vm->mm,
 127                                     min_size, alignment, cache_level,
 128                                     start, end, mode);
 129 
 130         /*
 131          * Retire before we search the active list. Although we have
 132          * reasonable accuracy in our retirement lists, we may have
 133          * a stray pin (preventing eviction) that can only be resolved by
 134          * retiring.
 135          */
 136         if (!(flags & PIN_NONBLOCK))
 137                 i915_retire_requests(dev_priv);
 138 
 139 search_again:
 140         active = NULL;
 141         INIT_LIST_HEAD(&eviction_list);
 142         list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
 143                 /*
 144                  * We keep this list in a rough least-recently scanned order
 145                  * of active elements (inactive elements are cheap to reap).
 146                  * New entries are added to the end, and we move anything we
 147                  * scan to the end. The assumption is that the working set
 148                  * of applications is either steady state (and thanks to the
 149                  * userspace bo cache it almost always is) or volatile and
 150                  * frequently replaced after a frame, which are self-evicting!
 151                  * Given that assumption, the MRU order of the scan list is
 152                  * fairly static, and keeping it in least-recently scan order
 153                  * is suitable.
 154                  *
 155                  * To notice when we complete one full cycle, we record the
 156                  * first active element seen, before moving it to the tail.
 157                  */
 158                 if (i915_vma_is_active(vma)) {
 159                         if (vma == active) {
 160                                 if (flags & PIN_NONBLOCK)
 161                                         break;
 162 
 163                                 active = ERR_PTR(-EAGAIN);
 164                         }
 165 
 166                         if (active != ERR_PTR(-EAGAIN)) {
 167                                 if (!active)
 168                                         active = vma;
 169 
 170                                 list_move_tail(&vma->vm_link, &vm->bound_list);
 171                                 continue;
 172                         }
 173                 }
 174 
 175                 if (mark_free(&scan, vma, flags, &eviction_list))
 176                         goto found;
 177         }
 178 
 179         /* Nothing found, clean up and bail out! */
 180         list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 181                 ret = drm_mm_scan_remove_block(&scan, &vma->node);
 182                 BUG_ON(ret);
 183         }
 184 
 185         /*
 186          * Can we unpin some objects such as idle hw contents,
 187          * or pending flips? But since only the GGTT has global entries
 188          * such as scanouts, ringbuffers and contexts, we can skip the
 189          * purge when inspecting per-process local address spaces.
 190          */
 191         if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
 192                 return -ENOSPC;
 193 
 194         /*
 195          * Not everything in the GGTT is tracked via VMA using
 196          * i915_vma_move_to_active(), otherwise we could evict as required
 197          * with minimal stalling. Instead we are forced to idle the GPU and
 198          * explicitly retire outstanding requests which will then remove
 199          * the pinning for active objects such as contexts and ring,
 200          * enabling us to evict them on the next iteration.
 201          *
 202          * To ensure that all user contexts are evictable, we perform
 203          * a switch to the perma-pinned kernel context. This also gives us
 204          * a termination condition: when the last retired context is the
 205          * kernel's, there is nothing more we can evict.
 206          */
 207         if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy))
 208                 return -EBUSY;
 209 
 210         ret = ggtt_flush(dev_priv);
 211         if (ret)
 212                 return ret;
 213 
 214         cond_resched();
 215 
 216         flags |= PIN_NONBLOCK;
 217         goto search_again;
 218 
 219 found:
 220         /* drm_mm doesn't allow any other operations while
 221          * scanning, therefore store to-be-evicted objects on a
 222          * temporary list and take a reference for all before
 223          * calling unbind (which may remove the active reference
 224          * of any of our objects, thus corrupting the list).
 225          */
 226         list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 227                 if (drm_mm_scan_remove_block(&scan, &vma->node))
 228                         __i915_vma_pin(vma);
 229                 else
 230                         list_del(&vma->evict_link);
 231         }
 232 
 233         /* Unbinding will emit any required flushes */
 234         ret = 0;
 235         list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 236                 __i915_vma_unpin(vma);
 237                 if (ret == 0)
 238                         ret = i915_vma_unbind(vma);
 239         }
 240 
 241         while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) {
 242                 vma = container_of(node, struct i915_vma, node);
 243                 ret = i915_vma_unbind(vma);
 244         }
 245 
 246         return ret;
 247 }
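
/*
 * Editor's illustrative sketch (not part of the upstream file): a typical
 * caller, such as the GTT insertion path, first looks for a free hole itself
 * and only falls back to eviction on -ENOSPC, retrying the insertion once
 * room has been made.  The function name below is hypothetical.
 *
 *	static int example_insert_with_eviction(struct i915_address_space *vm,
 *						struct drm_mm_node *node,
 *						u64 size, u64 alignment,
 *						unsigned long color,
 *						u64 start, u64 end,
 *						unsigned int flags)
 *	{
 *		int err;
 *
 *		err = drm_mm_insert_node_in_range(&vm->mm, node,
 *						  size, alignment, color,
 *						  start, end,
 *						  DRM_MM_INSERT_BEST);
 *		if (err != -ENOSPC)
 *			return err;
 *
 *		err = i915_gem_evict_something(vm, size, alignment, color,
 *					       start, end, flags);
 *		if (err)
 *			return err;
 *
 *		return drm_mm_insert_node_in_range(&vm->mm, node,
 *						   size, alignment, color,
 *						   start, end,
 *						   DRM_MM_INSERT_BEST);
 *	}
 */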
 248 
 249 /**
 250  * i915_gem_evict_for_node - Evict vmas to make room for binding a new one
 251  * @vm: address space to evict from
 252  * @target: range (and color) to evict for
 253  * @flags: additional flags to control the eviction algorithm
 254  *
 255  * This function will try to evict vmas that overlap the target node.
 256  *
 257  * To clarify: This is for freeing up virtual address space, not for freeing
 258  * memory in e.g. the shrinker.
 259  */
 260 int i915_gem_evict_for_node(struct i915_address_space *vm,
 261                             struct drm_mm_node *target,
 262                             unsigned int flags)
 263 {
 264         LIST_HEAD(eviction_list);
 265         struct drm_mm_node *node;
 266         u64 start = target->start;
 267         u64 end = start + target->size;
 268         struct i915_vma *vma, *next;
 269         bool check_color;
 270         int ret = 0;
 271 
 272         lockdep_assert_held(&vm->i915->drm.struct_mutex);
 273         GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
 274         GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
 275 
 276         trace_i915_gem_evict_node(vm, target, flags);
 277 
 278         /* Retire before we search the active list. Although we have
 279          * reasonable accuracy in our retirement lists, we may have
 280          * a stray pin (preventing eviction) that can only be resolved by
 281          * retiring.
 282          */
 283         if (!(flags & PIN_NONBLOCK))
 284                 i915_retire_requests(vm->i915);
 285 
 286         check_color = vm->mm.color_adjust;
 287         if (check_color) {
 288                 /* Expand search to cover neighbouring guard pages (or lack!) */
 289                 if (start)
 290                         start -= I915_GTT_PAGE_SIZE;
 291 
 292                 /* Always look at the page afterwards to avoid the end-of-GTT */
 293                 end += I915_GTT_PAGE_SIZE;
 294         }
 295         GEM_BUG_ON(start >= end);
 296 
 297         drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
 298                 /* If we find any non-objects (!vma), we cannot evict them */
 299                 if (node->color == I915_COLOR_UNEVICTABLE) {
 300                         ret = -ENOSPC;
 301                         break;
 302                 }
 303 
 304                 GEM_BUG_ON(!node->allocated);
 305                 vma = container_of(node, typeof(*vma), node);
 306 
 307                 /* If we are using coloring to insert guard pages between
 308                  * different cache domains within the address space, we have
 309                  * to check whether the objects on either side of our range
 310                  * abut and conflict. If they are in conflict, then we evict
 311                  * those as well to make room for our guard pages.
 312                  */
 313                 if (check_color) {
 314                         if (node->start + node->size == target->start) {
 315                                 if (node->color == target->color)
 316                                         continue;
 317                         }
 318                         if (node->start == target->start + target->size) {
 319                                 if (node->color == target->color)
 320                                         continue;
 321                         }
 322                 }
 323 
 324                 if (flags & PIN_NONBLOCK &&
 325                     (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
 326                         ret = -ENOSPC;
 327                         break;
 328                 }
 329 
 330                 /* Overlap of objects in the same batch? */
 331                 if (i915_vma_is_pinned(vma)) {
 332                         ret = -ENOSPC;
 333                         if (vma->exec_flags &&
 334                             *vma->exec_flags & EXEC_OBJECT_PINNED)
 335                                 ret = -EINVAL;
 336                         break;
 337                 }
 338 
 339                 /* Never show fear in the face of dragons!
 340                  *
 341                  * We cannot directly remove this node from within this
 342                  * iterator and as with i915_gem_evict_something() we employ
 343                  * the vma pin_count in order to prevent the action of
 344                  * unbinding one vma from freeing (by dropping its active
 345                  * reference) another in our eviction list.
 346                  */
 347                 __i915_vma_pin(vma);
 348                 list_add(&vma->evict_link, &eviction_list);
 349         }
 350 
 351         list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 352                 __i915_vma_unpin(vma);
 353                 if (ret == 0)
 354                         ret = i915_vma_unbind(vma);
 355         }
 356 
 357         return ret;
 358 }
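
/*
 * Editor's illustrative sketch (not part of the upstream file): the typical
 * caller is the fixed-offset ("softpin") reservation path, which fills in
 * node->start, node->size and node->color, tries to reserve that exact range,
 * and on -ENOSPC evicts whatever overlaps before retrying.  The function name
 * below is hypothetical.
 *
 *	static int example_reserve_with_eviction(struct i915_address_space *vm,
 *						 struct drm_mm_node *node,
 *						 unsigned int flags)
 *	{
 *		int err;
 *
 *		err = drm_mm_reserve_node(&vm->mm, node);
 *		if (err != -ENOSPC)
 *			return err;
 *
 *		err = i915_gem_evict_for_node(vm, node, flags);
 *		if (err)
 *			return err;
 *
 *		return drm_mm_reserve_node(&vm->mm, node);
 *	}
 */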
 359 
 360 /**
 361  * i915_gem_evict_vm - Evict all idle vmas from a vm
 362  * @vm: Address space to cleanse
 363  *
 364  * This function evicts all vmas from a vm.
 365  *
 366  * This is used by the execbuf code as a last-ditch effort to defragment the
 367  * address space.
 368  *
 369  * To clarify: This is for freeing up virtual address space, not for freeing
 370  * memory in e.g. the shrinker.
 371  */
 372 int i915_gem_evict_vm(struct i915_address_space *vm)
 373 {
 374         struct list_head eviction_list;
 375         struct i915_vma *vma, *next;
 376         int ret;
 377 
 378         lockdep_assert_held(&vm->i915->drm.struct_mutex);
 379         trace_i915_gem_evict_vm(vm);
 380 
 381         /* Switch back to the default context in order to unpin
 382          * the existing context objects. However, such objects only
 383          * pin themselves inside the global GTT and performing the
 384          * switch otherwise is ineffective.
 385          */
 386         if (i915_is_ggtt(vm)) {
 387                 ret = ggtt_flush(vm->i915);
 388                 if (ret)
 389                         return ret;
 390         }
 391 
 392         INIT_LIST_HEAD(&eviction_list);
 393         mutex_lock(&vm->mutex);
 394         list_for_each_entry(vma, &vm->bound_list, vm_link) {
 395                 if (i915_vma_is_pinned(vma))
 396                         continue;
 397 
 398                 __i915_vma_pin(vma);
 399                 list_add(&vma->evict_link, &eviction_list);
 400         }
 401         mutex_unlock(&vm->mutex);
 402 
 403         ret = 0;
 404         list_for_each_entry_safe(vma, next, &eviction_list, evict_link) {
 405                 __i915_vma_unpin(vma);
 406                 if (ret == 0)
 407                         ret = i915_vma_unbind(vma);
 408         }
 409         return ret;
 410 }
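
/*
 * Editor's illustrative sketch (not part of the upstream file): execbuf uses
 * this as a last resort when per-object eviction cannot make its reservation
 * pass succeed: throw out every idle, unpinned vma and run the binding pass
 * again.  bind_all_vmas() below is a hypothetical stand-in for that pass.
 *
 *	static int example_defragment_and_retry(struct i915_address_space *vm)
 *	{
 *		int err;
 *
 *		err = bind_all_vmas(vm);	// hypothetical binding pass
 *		if (err != -ENOSPC)
 *			return err;
 *
 *		err = i915_gem_evict_vm(vm);
 *		if (err)
 *			return err;
 *
 *		return bind_all_vmas(vm);
 *	}
 */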
 411 
 412 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 413 #include "selftests/i915_gem_evict.c"
 414 #endif
