drivers/gpu/drm/i915/i915_active.c


DEFINITIONS

This source file includes the following definitions:
  1. node_from_active
  2. is_barrier
  3. barrier_to_ll
  4. __barrier_to_engine
  5. barrier_to_engine
  6. barrier_from_ll
  7. active_debug_hint
  8. debug_active_init
  9. debug_active_activate
  10. debug_active_deactivate
  11. debug_active_fini
  12. debug_active_assert
  13. debug_active_init
  14. debug_active_activate
  15. debug_active_deactivate
  16. debug_active_fini
  17. debug_active_assert
  18. __active_retire
  19. active_retire
  20. node_retire
  21. node_retire_nolock
  22. active_instance
  23. __i915_active_init
  24. ____active_del_barrier
  25. __active_del_barrier
  26. i915_active_ref
  27. i915_active_acquire
  28. i915_active_release
  29. __active_ungrab
  30. i915_active_trygrab
  31. i915_active_ungrab
  32. i915_active_wait
  33. i915_request_await_active_request
  34. i915_request_await_active
  35. i915_active_fini
  36. is_idle_barrier
  37. reuse_idle_barrier
  38. i915_active_acquire_preallocate_barrier
  39. i915_active_acquire_barrier
  40. i915_request_add_active_barriers
  41. i915_active_request_set
  42. i915_active_retire_noop
  43. i915_global_active_shrink
  44. i915_global_active_exit
  45. i915_global_active_init

   1 /*
   2  * SPDX-License-Identifier: MIT
   3  *
   4  * Copyright © 2019 Intel Corporation
   5  */
   6 
   7 #include <linux/debugobjects.h>
   8 
   9 #include "gt/intel_engine_pm.h"
  10 
  11 #include "i915_drv.h"
  12 #include "i915_active.h"
  13 #include "i915_globals.h"
  14 
  15 #define BKL(ref) (&(ref)->i915->drm.struct_mutex)
  16 
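      /*
       * Typical usage (illustrative sketch only; "obj", "on_active" and
       * "on_retire" are placeholders for the caller's own bookkeeping and
       * callbacks, not symbols defined in this file):
       *
       *      i915_active_init(i915, &obj->active, on_active, on_retire);
       *      ...
       *      err = i915_active_ref(&obj->active, rq->timeline, rq);
       *      ...
       *      err = i915_active_wait(&obj->active);
       *      i915_active_fini(&obj->active);
       *
       * i915_active_init() is the wrapper declared in i915_active.h around
       * __i915_active_init() below. The active/retire callbacks bracket the
       * busy period tracked by the reference count, and i915_active_ref()
       * must be called with the timeline mutex held (see the lockdep
       * assertion in that function).
       */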
  17 /*
  18  * Active refs memory management
  19  *
  20  * To be more economical with memory, we reap all the i915_active trees as
  21  * they idle (when we know the active requests are inactive) and allocate the
  22  * nodes from a local slab cache to hopefully reduce the fragmentation.
  23  */
  24 static struct i915_global_active {
  25         struct i915_global base;
  26         struct kmem_cache *slab_cache;
  27 } global;
  28 
  29 struct active_node {
  30         struct i915_active_request base;
  31         struct i915_active *ref;
  32         struct rb_node node;
  33         u64 timeline;
  34 };
  35 
  36 static inline struct active_node *
  37 node_from_active(struct i915_active_request *active)
  38 {
  39         return container_of(active, struct active_node, base);
  40 }
  41 
  42 #define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)
  43 
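      /*
       * Barrier "proto-nodes" reuse the fields of i915_active_request for
       * their own bookkeeping while they are not yet connected to a request:
       * the request pointer holds ERR_PTR(-EAGAIN) to mark the node as a
       * barrier (see is_barrier()), base.link doubles as the llist_node
       * threaded onto engine->barrier_tasks, and base.link.prev stashes the
       * owning engine (see i915_active_acquire_preallocate_barrier()).
       */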
  44 static inline bool is_barrier(const struct i915_active_request *active)
  45 {
  46         return IS_ERR(rcu_access_pointer(active->request));
  47 }
  48 
  49 static inline struct llist_node *barrier_to_ll(struct active_node *node)
  50 {
  51         GEM_BUG_ON(!is_barrier(&node->base));
  52         return (struct llist_node *)&node->base.link;
  53 }
  54 
  55 static inline struct intel_engine_cs *
  56 __barrier_to_engine(struct active_node *node)
  57 {
  58         return (struct intel_engine_cs *)READ_ONCE(node->base.link.prev);
  59 }
  60 
  61 static inline struct intel_engine_cs *
  62 barrier_to_engine(struct active_node *node)
  63 {
  64         GEM_BUG_ON(!is_barrier(&node->base));
  65         return __barrier_to_engine(node);
  66 }
  67 
  68 static inline struct active_node *barrier_from_ll(struct llist_node *x)
  69 {
  70         return container_of((struct list_head *)x,
  71                             struct active_node, base.link);
  72 }
  73 
  74 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)
  75 
  76 static void *active_debug_hint(void *addr)
  77 {
  78         struct i915_active *ref = addr;
  79 
  80         return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
  81 }
  82 
  83 static struct debug_obj_descr active_debug_desc = {
  84         .name = "i915_active",
  85         .debug_hint = active_debug_hint,
  86 };
  87 
  88 static void debug_active_init(struct i915_active *ref)
  89 {
  90         debug_object_init(ref, &active_debug_desc);
  91 }
  92 
  93 static void debug_active_activate(struct i915_active *ref)
  94 {
  95         debug_object_activate(ref, &active_debug_desc);
  96 }
  97 
  98 static void debug_active_deactivate(struct i915_active *ref)
  99 {
 100         debug_object_deactivate(ref, &active_debug_desc);
 101 }
 102 
 103 static void debug_active_fini(struct i915_active *ref)
 104 {
 105         debug_object_free(ref, &active_debug_desc);
 106 }
 107 
 108 static void debug_active_assert(struct i915_active *ref)
 109 {
 110         debug_object_assert_init(ref, &active_debug_desc);
 111 }
 112 
 113 #else
 114 
 115 static inline void debug_active_init(struct i915_active *ref) { }
 116 static inline void debug_active_activate(struct i915_active *ref) { }
 117 static inline void debug_active_deactivate(struct i915_active *ref) { }
 118 static inline void debug_active_fini(struct i915_active *ref) { }
 119 static inline void debug_active_assert(struct i915_active *ref) { }
 120 
 121 #endif
 122 
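      /*
       * Drop a reference; if it is the last one, reap the rbtree of nodes
       * back into the slab cache and invoke ref->retire() (after which the
       * i915_active itself may be freed). Called with ref->mutex held; the
       * mutex is released here when @lock is true.
       */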
 123 static void
 124 __active_retire(struct i915_active *ref, bool lock)
 125 {
 126         struct active_node *it, *n;
 127         struct rb_root root;
 128         bool retire = false;
 129 
 130         lockdep_assert_held(&ref->mutex);
 131 
 132         /* return the unused nodes to our slabcache -- flushing the allocator */
 133         if (atomic_dec_and_test(&ref->count)) {
 134                 debug_active_deactivate(ref);
 135                 root = ref->tree;
 136                 ref->tree = RB_ROOT;
 137                 ref->cache = NULL;
 138                 retire = true;
 139         }
 140 
 141         if (likely(lock))
 142                 mutex_unlock(&ref->mutex);
 143         if (!retire)
 144                 return;
 145 
 146         rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
 147                 GEM_BUG_ON(i915_active_request_isset(&it->base));
 148                 kmem_cache_free(global.slab_cache, it);
 149         }
 150 
 151         /* After the final retire, the entire struct may be freed */
 152         if (ref->retire)
 153                 ref->retire(ref);
 154 }
 155 
 156 static void
 157 active_retire(struct i915_active *ref, bool lock)
 158 {
 159         GEM_BUG_ON(!atomic_read(&ref->count));
 160         if (atomic_add_unless(&ref->count, -1, 1))
 161                 return;
 162 
 163         /* One active may be flushed from inside the acquire of another */
 164         if (likely(lock))
 165                 mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
 166         __active_retire(ref, lock);
 167 }
 168 
 169 static void
 170 node_retire(struct i915_active_request *base, struct i915_request *rq)
 171 {
 172         active_retire(node_from_active(base)->ref, true);
 173 }
 174 
 175 static void
 176 node_retire_nolock(struct i915_active_request *base, struct i915_request *rq)
 177 {
 178         active_retire(node_from_active(base)->ref, false);
 179 }
 180 
 181 static struct i915_active_request *
 182 active_instance(struct i915_active *ref, struct intel_timeline *tl)
 183 {
 184         struct active_node *node, *prealloc;
 185         struct rb_node **p, *parent;
 186         u64 idx = tl->fence_context;
 187 
 188         /*
  189          * We track the most recently used timeline to skip an rbtree search
  190          * for the common case; under typical loads we never need the rbtree
  191          * at all. We can reuse the cached slot if it is empty, that is,
 192          * after the previous activity has been retired, or if it matches the
 193          * current timeline.
 194          */
 195         node = READ_ONCE(ref->cache);
 196         if (node && node->timeline == idx)
 197                 return &node->base;
 198 
 199         /* Preallocate a replacement, just in case */
 200         prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
 201         if (!prealloc)
 202                 return NULL;
 203 
 204         mutex_lock(&ref->mutex);
 205         GEM_BUG_ON(i915_active_is_idle(ref));
 206 
 207         parent = NULL;
 208         p = &ref->tree.rb_node;
 209         while (*p) {
 210                 parent = *p;
 211 
 212                 node = rb_entry(parent, struct active_node, node);
 213                 if (node->timeline == idx) {
 214                         kmem_cache_free(global.slab_cache, prealloc);
 215                         goto out;
 216                 }
 217 
 218                 if (node->timeline < idx)
 219                         p = &parent->rb_right;
 220                 else
 221                         p = &parent->rb_left;
 222         }
 223 
 224         node = prealloc;
 225         i915_active_request_init(&node->base, &tl->mutex, NULL, node_retire);
 226         node->ref = ref;
 227         node->timeline = idx;
 228 
 229         rb_link_node(&node->node, parent, p);
 230         rb_insert_color(&node->node, &ref->tree);
 231 
 232 out:
 233         ref->cache = node;
 234         mutex_unlock(&ref->mutex);
 235 
 236         BUILD_BUG_ON(offsetof(typeof(*node), base));
 237         return &node->base;
 238 }
 239 
 240 void __i915_active_init(struct drm_i915_private *i915,
 241                         struct i915_active *ref,
 242                         int (*active)(struct i915_active *ref),
 243                         void (*retire)(struct i915_active *ref),
 244                         struct lock_class_key *key)
 245 {
 246         debug_active_init(ref);
 247 
 248         ref->i915 = i915;
 249         ref->flags = 0;
 250         ref->active = active;
 251         ref->retire = retire;
 252         ref->tree = RB_ROOT;
 253         ref->cache = NULL;
 254         init_llist_head(&ref->preallocated_barriers);
 255         atomic_set(&ref->count, 0);
 256         __mutex_init(&ref->mutex, "i915_active", key);
 257 }
 258 
 259 static bool ____active_del_barrier(struct i915_active *ref,
 260                                    struct active_node *node,
 261                                    struct intel_engine_cs *engine)
 263 {
 264         struct llist_node *head = NULL, *tail = NULL;
 265         struct llist_node *pos, *next;
 266 
 267         GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
 268 
 269         /*
 270          * Rebuild the llist excluding our node. We may perform this
 271          * outside of the kernel_context timeline mutex and so someone
 272          * else may be manipulating the engine->barrier_tasks, in
 273          * which case either we or they will be upset :)
 274          *
 275          * A second __active_del_barrier() will report failure to claim
 276          * the active_node and the caller will just shrug and know not to
 277          * claim ownership of its node.
 278          *
 279          * A concurrent i915_request_add_active_barriers() will miss adding
 280          * any of the tasks, but we will try again on the next -- and since
 281          * we are actively using the barrier, we know that there will be
 282          * at least another opportunity when we idle.
 283          */
 284         llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
 285                 if (node == barrier_from_ll(pos)) {
 286                         node = NULL;
 287                         continue;
 288                 }
 289 
 290                 pos->next = head;
 291                 head = pos;
 292                 if (!tail)
 293                         tail = pos;
 294         }
 295         if (head)
 296                 llist_add_batch(head, tail, &engine->barrier_tasks);
 297 
 298         return !node;
 299 }
 300 
 301 static bool
 302 __active_del_barrier(struct i915_active *ref, struct active_node *node)
 303 {
 304         return ____active_del_barrier(ref, node, barrier_to_engine(node));
 305 }
 306 
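      /*
       * Track @rq on the timeline @tl within @ref: the reference stays
       * active at least until @rq is retired. The caller must hold
       * tl->mutex; a temporary i915_active_acquire() guards against the
       * tree being reaped while we allocate the per-timeline node.
       */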
 307 int i915_active_ref(struct i915_active *ref,
 308                     struct intel_timeline *tl,
 309                     struct i915_request *rq)
 310 {
 311         struct i915_active_request *active;
 312         int err;
 313 
 314         lockdep_assert_held(&tl->mutex);
 315 
 316         /* Prevent reaping in case we malloc/wait while building the tree */
 317         err = i915_active_acquire(ref);
 318         if (err)
 319                 return err;
 320 
 321         active = active_instance(ref, tl);
 322         if (!active) {
 323                 err = -ENOMEM;
 324                 goto out;
 325         }
 326 
 327         if (is_barrier(active)) { /* proto-node used by our idle barrier */
 328                 /*
 329                  * This request is on the kernel_context timeline, and so
  330                  * we can use it to substitute for the pending idle-barrier
 331                  * request that we want to emit on the kernel_context.
 332                  */
 333                 __active_del_barrier(ref, node_from_active(active));
 334                 RCU_INIT_POINTER(active->request, NULL);
 335                 INIT_LIST_HEAD(&active->link);
 336         } else {
 337                 if (!i915_active_request_isset(active))
 338                         atomic_inc(&ref->count);
 339         }
 340         GEM_BUG_ON(!atomic_read(&ref->count));
 341         __i915_active_request_set(active, rq);
 342 
 343 out:
 344         i915_active_release(ref);
 345         return err;
 346 }
 347 
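      /*
       * Take a reference for the duration of an operation. The first
       * acquire of an idle i915_active runs the optional ref->active()
       * callback under ref->mutex before marking it active; later acquires
       * merely bump the count. Returns 0 or a negative error code.
       */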
 348 int i915_active_acquire(struct i915_active *ref)
 349 {
 350         int err;
 351 
 352         debug_active_assert(ref);
 353         if (atomic_add_unless(&ref->count, 1, 0))
 354                 return 0;
 355 
 356         err = mutex_lock_interruptible(&ref->mutex);
 357         if (err)
 358                 return err;
 359 
 360         if (!atomic_read(&ref->count) && ref->active)
 361                 err = ref->active(ref);
 362         if (!err) {
 363                 debug_active_activate(ref);
 364                 atomic_inc(&ref->count);
 365         }
 366 
 367         mutex_unlock(&ref->mutex);
 368 
 369         return err;
 370 }
 371 
 372 void i915_active_release(struct i915_active *ref)
 373 {
 374         debug_active_assert(ref);
 375         active_retire(ref, true);
 376 }
 377 
 378 static void __active_ungrab(struct i915_active *ref)
 379 {
 380         clear_and_wake_up_bit(I915_ACTIVE_GRAB_BIT, &ref->flags);
 381 }
 382 
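      /*
       * trygrab/ungrab pin an already-active i915_active without going
       * through acquire: the GRAB bit serialises grabbers and the extra
       * count holds off retirement until i915_active_ungrab(), which also
       * wakes anyone sleeping on the bit in i915_active_wait().
       */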
 383 bool i915_active_trygrab(struct i915_active *ref)
 384 {
 385         debug_active_assert(ref);
 386 
 387         if (test_and_set_bit(I915_ACTIVE_GRAB_BIT, &ref->flags))
 388                 return false;
 389 
 390         if (!atomic_add_unless(&ref->count, 1, 0)) {
 391                 __active_ungrab(ref);
 392                 return false;
 393         }
 394 
 395         return true;
 396 }
 397 
 398 void i915_active_ungrab(struct i915_active *ref)
 399 {
 400         GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags));
 401 
 402         active_retire(ref, true);
 403         __active_ungrab(ref);
 404 }
 405 
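      /*
       * Flush and retire all tracked requests, then wait for the
       * i915_active to become idle. Returns -EBUSY if unconnected idle
       * barriers remain in the tree, or a negative error code if the wait
       * is interrupted.
       */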
 406 int i915_active_wait(struct i915_active *ref)
 407 {
 408         struct active_node *it, *n;
 409         int err;
 410 
 411         might_sleep();
 412         might_lock(&ref->mutex);
 413 
 414         if (i915_active_is_idle(ref))
 415                 return 0;
 416 
 417         err = mutex_lock_interruptible(&ref->mutex);
 418         if (err)
 419                 return err;
 420 
 421         if (!atomic_add_unless(&ref->count, 1, 0)) {
 422                 mutex_unlock(&ref->mutex);
 423                 return 0;
 424         }
 425 
 426         rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
 427                 if (is_barrier(&it->base)) { /* unconnected idle-barrier */
 428                         err = -EBUSY;
 429                         break;
 430                 }
 431 
 432                 err = i915_active_request_retire(&it->base, BKL(ref),
 433                                                  node_retire_nolock);
 434                 if (err)
 435                         break;
 436         }
 437 
 438         __active_retire(ref, true);
 439         if (err)
 440                 return err;
 441 
 442         if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE))
 443                 return -EINTR;
 444 
 445         if (!i915_active_is_idle(ref))
 446                 return -EBUSY;
 447 
 448         return 0;
 449 }
 450 
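      /*
       * Ordering helpers: make @rq wait upon the request tracked by
       * @active, or upon every request tracked within @ref, by coupling
       * to their fences with i915_request_await_dma_fence() before @rq
       * is submitted.
       */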
 451 int i915_request_await_active_request(struct i915_request *rq,
 452                                       struct i915_active_request *active)
 453 {
 454         struct i915_request *barrier =
 455                 i915_active_request_raw(active, &rq->i915->drm.struct_mutex);
 456 
 457         return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
 458 }
 459 
 460 int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
 461 {
 462         struct active_node *it, *n;
 463         int err;
 464 
 465         if (RB_EMPTY_ROOT(&ref->tree))
 466                 return 0;
 467 
 468         /* await allocates and so we need to avoid hitting the shrinker */
 469         err = i915_active_acquire(ref);
 470         if (err)
 471                 return err;
 472 
 473         mutex_lock(&ref->mutex);
 474         rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
 475                 err = i915_request_await_active_request(rq, &it->base);
 476                 if (err)
 477                         break;
 478         }
 479         mutex_unlock(&ref->mutex);
 480 
 481         i915_active_release(ref);
 482         return err;
 483 }
 484 
 485 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 486 void i915_active_fini(struct i915_active *ref)
 487 {
 488         debug_active_fini(ref);
 489         GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
 490         GEM_BUG_ON(atomic_read(&ref->count));
 491         mutex_destroy(&ref->mutex);
 492 }
 493 #endif
 494 
 495 static inline bool is_idle_barrier(struct active_node *node, u64 idx)
 496 {
 497         return node->timeline == idx && !i915_active_request_isset(&node->base);
 498 }
 499 
 500 static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
 501 {
 502         struct rb_node *prev, *p;
 503 
 504         if (RB_EMPTY_ROOT(&ref->tree))
 505                 return NULL;
 506 
 507         mutex_lock(&ref->mutex);
 508         GEM_BUG_ON(i915_active_is_idle(ref));
 509 
 510         /*
 511          * Try to reuse any existing barrier nodes already allocated for this
 512          * i915_active, due to overlapping active phases there is likely a
 513          * node kept alive (as we reuse before parking). We prefer to reuse
 514          * completely idle barriers (less hassle in manipulating the llists),
 515          * but otherwise any will do.
 516          */
 517         if (ref->cache && is_idle_barrier(ref->cache, idx)) {
 518                 p = &ref->cache->node;
 519                 goto match;
 520         }
 521 
 522         prev = NULL;
 523         p = ref->tree.rb_node;
 524         while (p) {
 525                 struct active_node *node =
 526                         rb_entry(p, struct active_node, node);
 527 
 528                 if (is_idle_barrier(node, idx))
 529                         goto match;
 530 
 531                 prev = p;
 532                 if (node->timeline < idx)
 533                         p = p->rb_right;
 534                 else
 535                         p = p->rb_left;
 536         }
 537 
 538         /*
 539          * No quick match, but we did find the leftmost rb_node for the
 540          * kernel_context. Walk the rb_tree in-order to see if there were
 541          * any idle-barriers on this timeline that we missed, or just use
 542          * the first pending barrier.
 543          */
 544         for (p = prev; p; p = rb_next(p)) {
 545                 struct active_node *node =
 546                         rb_entry(p, struct active_node, node);
 547                 struct intel_engine_cs *engine;
 548 
 549                 if (node->timeline > idx)
 550                         break;
 551 
 552                 if (node->timeline < idx)
 553                         continue;
 554 
 555                 if (is_idle_barrier(node, idx))
 556                         goto match;
 557 
 558                 /*
 559                  * The list of pending barriers is protected by the
 560                  * kernel_context timeline, which notably we do not hold
 561                  * here. i915_request_add_active_barriers() may consume
 562                  * the barrier before we claim it, so we have to check
 563                  * for success.
 564                  */
 565                 engine = __barrier_to_engine(node);
 566                 smp_rmb(); /* serialise with add_active_barriers */
 567                 if (is_barrier(&node->base) &&
 568                     ____active_del_barrier(ref, node, engine))
 569                         goto match;
 570         }
 571 
 572         mutex_unlock(&ref->mutex);
 573 
 574         return NULL;
 575 
 576 match:
 577         rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
 578         if (p == &ref->cache->node)
 579                 ref->cache = NULL;
 580         mutex_unlock(&ref->mutex);
 581 
 582         return rb_entry(p, struct active_node, node);
 583 }
 584 
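      /*
       * Idle-barrier protocol (sketch; callers' locking and cleanup elided):
       *
       *      err = i915_active_acquire_preallocate_barrier(ref, engine);
       *      if (err)
       *              return err;
       *      i915_active_acquire_barrier(ref);
       *      ...
       *      i915_request_add_active_barriers(rq);
       *
       * Preallocation reserves a proto-node (and an engine-pm reference) per
       * physical engine while failure is still allowed; acquire_barrier()
       * then commits the nodes into the rbtree and engine->barrier_tasks;
       * finally add_active_barriers() attaches them to a request on the
       * engine's kernel_context timeline, whose retirement drops the
       * reference taken here.
       */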
 585 int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
 586                                             struct intel_engine_cs *engine)
 587 {
 588         struct drm_i915_private *i915 = engine->i915;
 589         intel_engine_mask_t tmp, mask = engine->mask;
 590         struct llist_node *pos, *next;
 591         int err;
 592 
 593         GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
 594 
 595         /*
 596          * Preallocate a node for each physical engine supporting the target
 597          * engine (remember virtual engines have more than one sibling).
 598          * We can then use the preallocated nodes in
 599          * i915_active_acquire_barrier()
 600          */
 601         for_each_engine_masked(engine, i915, mask, tmp) {
 602                 u64 idx = engine->kernel_context->timeline->fence_context;
 603                 struct active_node *node;
 604 
 605                 node = reuse_idle_barrier(ref, idx);
 606                 if (!node) {
 607                         node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
 608                         if (!node) {
  609                                 err = -ENOMEM;
 610                                 goto unwind;
 611                         }
 612 
 613 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 614                         node->base.lock =
 615                                 &engine->kernel_context->timeline->mutex;
 616 #endif
 617                         RCU_INIT_POINTER(node->base.request, NULL);
 618                         node->base.retire = node_retire;
 619                         node->timeline = idx;
 620                         node->ref = ref;
 621                 }
 622 
 623                 if (!i915_active_request_isset(&node->base)) {
 624                         /*
 625                          * Mark this as being *our* unconnected proto-node.
 626                          *
 627                          * Since this node is not in any list, and we have
 628                          * decoupled it from the rbtree, we can reuse the
 629                          * request to indicate this is an idle-barrier node
 630                          * and then we can use the rb_node and list pointers
 631                          * for our tracking of the pending barrier.
 632                          */
 633                         RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
 634                         node->base.link.prev = (void *)engine;
 635                         atomic_inc(&ref->count);
 636                 }
 637 
 638                 GEM_BUG_ON(barrier_to_engine(node) != engine);
 639                 llist_add(barrier_to_ll(node), &ref->preallocated_barriers);
 640                 intel_engine_pm_get(engine);
 641         }
 642 
 643         return 0;
 644 
 645 unwind:
 646         llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
 647                 struct active_node *node = barrier_from_ll(pos);
 648 
 649                 atomic_dec(&ref->count);
 650                 intel_engine_pm_put(barrier_to_engine(node));
 651 
 652                 kmem_cache_free(global.slab_cache, node);
 653         }
 654         return err;
 655 }
 656 
 657 void i915_active_acquire_barrier(struct i915_active *ref)
 658 {
 659         struct llist_node *pos, *next;
 660 
 661         GEM_BUG_ON(i915_active_is_idle(ref));
 662 
 663         /*
 664          * Transfer the list of preallocated barriers into the
 665          * i915_active rbtree, but only as proto-nodes. They will be
 666          * populated by i915_request_add_active_barriers() to point to the
 667          * request that will eventually release them.
 668          */
 669         mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
 670         llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
 671                 struct active_node *node = barrier_from_ll(pos);
 672                 struct intel_engine_cs *engine = barrier_to_engine(node);
 673                 struct rb_node **p, *parent;
 674 
 675                 parent = NULL;
 676                 p = &ref->tree.rb_node;
 677                 while (*p) {
 678                         struct active_node *it;
 679 
 680                         parent = *p;
 681 
 682                         it = rb_entry(parent, struct active_node, node);
 683                         if (it->timeline < node->timeline)
 684                                 p = &parent->rb_right;
 685                         else
 686                                 p = &parent->rb_left;
 687                 }
 688                 rb_link_node(&node->node, parent, p);
 689                 rb_insert_color(&node->node, &ref->tree);
 690 
 691                 llist_add(barrier_to_ll(node), &engine->barrier_tasks);
 692                 intel_engine_pm_put(engine);
 693         }
 694         mutex_unlock(&ref->mutex);
 695 }
 696 
 697 void i915_request_add_active_barriers(struct i915_request *rq)
 698 {
 699         struct intel_engine_cs *engine = rq->engine;
 700         struct llist_node *node, *next;
 701 
 702         GEM_BUG_ON(intel_engine_is_virtual(engine));
 703         GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline);
 704 
 705         /*
 706          * Attach the list of proto-fences to the in-flight request such
 707          * that the parent i915_active will be released when this request
 708          * is retired.
 709          */
 710         llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
 711                 RCU_INIT_POINTER(barrier_from_ll(node)->base.request, rq);
 712                 smp_wmb(); /* serialise with reuse_idle_barrier */
 713                 list_add_tail((struct list_head *)node, &rq->active_list);
 714         }
 715 }
 716 
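      /*
       * Replace the request tracked by @active with @rq, first ordering
       * @rq after the previous occupant so that retirement order is
       * preserved. The caller must hold the lock guarding @active.
       */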
 717 int i915_active_request_set(struct i915_active_request *active,
 718                             struct i915_request *rq)
 719 {
 720         int err;
 721 
 722 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
 723         lockdep_assert_held(active->lock);
 724 #endif
 725 
 726         /* Must maintain ordering wrt previous active requests */
 727         err = i915_request_await_active_request(rq, active);
 728         if (err)
 729                 return err;
 730 
 731         __i915_active_request_set(active, rq);
 732         return 0;
 733 }
 734 
 735 void i915_active_retire_noop(struct i915_active_request *active,
 736                              struct i915_request *request)
 737 {
 738         /* Space left intentionally blank */
 739 }
 740 
 741 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 742 #include "selftests/i915_active.c"
 743 #endif
 744 
 745 static void i915_global_active_shrink(void)
 746 {
 747         kmem_cache_shrink(global.slab_cache);
 748 }
 749 
 750 static void i915_global_active_exit(void)
 751 {
 752         kmem_cache_destroy(global.slab_cache);
 753 }
 754 
 755 static struct i915_global_active global = { {
 756         .shrink = i915_global_active_shrink,
 757         .exit = i915_global_active_exit,
 758 } };
 759 
 760 int __init i915_global_active_init(void)
 761 {
 762         global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
 763         if (!global.slab_cache)
 764                 return -ENOMEM;
 765 
 766         i915_global_register(&global.base);
 767         return 0;
 768 }
