root/drivers/gpu/drm/i915/i915_scheduler.c


DEFINITIONS

This source file includes the following definitions.
  1. node_to_request
  2. node_started
  3. node_signaled
  4. to_priolist
  5. assert_priolists
  6. i915_sched_lookup_priolist
  7. __i915_priolist_free
  8. sched_lock_engine
  9. rq_prio
  10. need_preempt
  11. kick_submission
  12. __i915_schedule
  13. i915_schedule
  14. __bump_priority
  15. i915_schedule_bump_priority
  16. i915_sched_node_init
  17. i915_dependency_alloc
  18. i915_dependency_free
  19. __i915_sched_node_add_dependency
  20. i915_sched_node_add_dependency
  21. i915_sched_node_fini
  22. i915_global_scheduler_shrink
  23. i915_global_scheduler_exit
  24. i915_global_scheduler_init

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/mutex.h>

#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"

static struct i915_global_scheduler {
        struct i915_global base;
        struct kmem_cache *slab_dependencies;
        struct kmem_cache *slab_priorities;
} global;

static DEFINE_SPINLOCK(schedule_lock);

static const struct i915_request *
node_to_request(const struct i915_sched_node *node)
{
        return container_of(node, const struct i915_request, sched);
}

static inline bool node_started(const struct i915_sched_node *node)
{
        return i915_request_started(node_to_request(node));
}

static inline bool node_signaled(const struct i915_sched_node *node)
{
        return i915_request_completed(node_to_request(node));
}

static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
        return rb_entry(rb, struct i915_priolist, node);
}

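/*
 * Debug-only sanity check (CONFIG_DRM_I915_DEBUG_GEM): verify that the
 * execlists queue is a correctly cached rbtree sorted by descending
 * priority, and that each priolist's used bitmask covers every non-empty
 * request list.
 */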
static void assert_priolists(struct intel_engine_execlists * const execlists)
{
        struct rb_node *rb;
        long last_prio, i;

        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return;

        GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
                   rb_first(&execlists->queue.rb_root));

        last_prio = (INT_MAX >> I915_USER_PRIORITY_SHIFT) + 1;
        for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);

                GEM_BUG_ON(p->priority >= last_prio);
                last_prio = p->priority;

                GEM_BUG_ON(!p->used);
                for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
                        if (list_empty(&p->requests[i]))
                                continue;

                        GEM_BUG_ON(!(p->used & BIT(i)));
                }
        }
}

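/*
 * Find (or create) the priolist bucket for @prio on @engine and return the
 * list_head onto which requests of that priority should be queued. The user
 * priority selects the rbtree node, while the internal priority bits select
 * one of the per-priolist request lists. Must be called with
 * engine->active.lock held.
 */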
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
{
        struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_priolist *p;
        struct rb_node **parent, *rb;
        bool first = true;
        int idx, i;

        lockdep_assert_held(&engine->active.lock);
        assert_priolists(execlists);

        /* buckets sorted from highest [in slot 0] to lowest priority */
        idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
        prio >>= I915_USER_PRIORITY_SHIFT;
        if (unlikely(execlists->no_priolist))
                prio = I915_PRIORITY_NORMAL;

find_priolist:
        /* most positive priority is scheduled first, equal priorities fifo */
        rb = NULL;
        parent = &execlists->queue.rb_root.rb_node;
        while (*parent) {
                rb = *parent;
                p = to_priolist(rb);
                if (prio > p->priority) {
                        parent = &rb->rb_left;
                } else if (prio < p->priority) {
                        parent = &rb->rb_right;
                        first = false;
                } else {
                        goto out;
                }
        }

        if (prio == I915_PRIORITY_NORMAL) {
                p = &execlists->default_priolist;
        } else {
                p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
                if (unlikely(!p)) {
                        prio = I915_PRIORITY_NORMAL; /* recurses just once */
                        /*
                         * To maintain ordering with all rendering, after an
                         * allocation failure we have to disable all scheduling.
                         * Requests will then be executed in fifo, and schedule
                         * will ensure that dependencies are emitted in fifo.
                         * There will still be some reordering with existing
                         * requests, so if userspace lied about their
                         * dependencies that reordering may be visible.
                         */
                        execlists->no_priolist = true;
                        goto find_priolist;
                }
        }

        p->priority = prio;
        for (i = 0; i < ARRAY_SIZE(p->requests); i++)
                INIT_LIST_HEAD(&p->requests[i]);
        rb_link_node(&p->node, rb, parent);
        rb_insert_color_cached(&p->node, &execlists->queue, first);
        p->used = 0;

out:
        p->used |= BIT(idx);
        return &p->requests[idx];
}

void __i915_priolist_free(struct i915_priolist *p)
{
        kmem_cache_free(global.slab_priorities, p);
}

struct sched_cache {
        struct list_head *priolist;
};

static struct intel_engine_cs *
sched_lock_engine(const struct i915_sched_node *node,
                  struct intel_engine_cs *locked,
                  struct sched_cache *cache)
{
        const struct i915_request *rq = node_to_request(node);
        struct intel_engine_cs *engine;

        GEM_BUG_ON(!locked);

        /*
         * Virtual engines complicate acquiring the engine timeline lock,
         * as their rq->engine pointer is not stable until under that
         * engine lock. The simple ploy we use is to take the lock then
         * check that the rq still belongs to the newly locked engine.
         */
        while (locked != (engine = READ_ONCE(rq->engine))) {
                spin_unlock(&locked->active.lock);
                memset(cache, 0, sizeof(*cache));
                spin_lock(&engine->active.lock);
                locked = engine;
        }

        GEM_BUG_ON(locked != engine);
        return locked;
}

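/*
 * Effective priority of an inflight request for preemption decisions: the
 * __NO_PREEMPTION bit is ORed in so that a candidate at the same user
 * priority, but without that bit set, cannot trigger preemption of the
 * running request.
 */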
static inline int rq_prio(const struct i915_request *rq)
{
        return rq->sched.attr.priority | __NO_PREEMPTION;
}

static inline bool need_preempt(int prio, int active)
{
        /*
         * Allow preemption of low -> normal -> high, but we do
         * not allow low priority tasks to preempt other low priority
         * tasks under the impression that latency for low priority
         * tasks does not matter (as much as background throughput),
         * so keep it simple (kiss).
         */
        return prio >= max(I915_PRIORITY_NORMAL, active);
}

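/*
 * After raising the priority of @rq, decide whether the execlists tasklet
 * needs to be scheduled to re-evaluate the submission order on @engine.
 */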
static void kick_submission(struct intel_engine_cs *engine,
                            const struct i915_request *rq,
                            int prio)
{
        const struct i915_request *inflight;

        /*
         * We only need to kick the tasklet once for the high priority
         * new context we add into the queue.
         */
        if (prio <= engine->execlists.queue_priority_hint)
                return;

        rcu_read_lock();

        /* Nothing currently active? We're overdue for a submission! */
        inflight = execlists_active(&engine->execlists);
        if (!inflight)
                goto unlock;

        /*
         * If we are already the currently executing context, don't
         * bother evaluating if we should preempt ourselves, or if
         * we expect nothing to change as a result of running the
         * tasklet, i.e. we have not changed the priority queue
         * sufficiently to oust the running context.
         */
        if (inflight->hw_context == rq->hw_context)
                goto unlock;

        engine->execlists.queue_priority_hint = prio;
        if (need_preempt(prio, rq_prio(inflight)))
                tasklet_hi_schedule(&engine->execlists.tasklet);

unlock:
        rcu_read_unlock();
}

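/*
 * Apply the new scheduling attributes to @node: raise its priority to
 * @attr->priority and propagate that priority to every request it depends
 * upon (priority inheritance), requeueing and kicking engines as required.
 * Caller must hold schedule_lock.
 */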
static void __i915_schedule(struct i915_sched_node *node,
                            const struct i915_sched_attr *attr)
{
        struct intel_engine_cs *engine;
        struct i915_dependency *dep, *p;
        struct i915_dependency stack;
        const int prio = attr->priority;
        struct sched_cache cache;
        LIST_HEAD(dfs);

        /* Needed in order to use the temporary link inside i915_dependency */
        lockdep_assert_held(&schedule_lock);
        GEM_BUG_ON(prio == I915_PRIORITY_INVALID);

        if (prio <= READ_ONCE(node->attr.priority))
                return;

        if (node_signaled(node))
                return;

        stack.signaler = node;
        list_add(&stack.dfs_link, &dfs);

        /*
         * Recursively bump all dependent priorities to match the new request.
         *
         * A naive approach would be to use recursion:
         * static void update_priorities(struct i915_sched_node *node, prio) {
         *      list_for_each_entry(dep, &node->signalers_list, signal_link)
         *              update_priorities(dep->signal, prio)
         *      queue_request(node);
         * }
         * but that may have unlimited recursion depth and so runs a very
         * real risk of overrunning the kernel stack. Instead, we build
         * a flat list of all dependencies starting with the current request.
         * As we walk the list of dependencies, we add all of its dependencies
         * to the end of the list (this may include an already visited
         * request) and continue to walk onwards through the new dependencies.
         * The end result is a topological list of requests in reverse order;
         * the last element in the list is the request we must execute first.
         */
        list_for_each_entry(dep, &dfs, dfs_link) {
                struct i915_sched_node *node = dep->signaler;

                /* If we are already flying, we know we have no signalers */
                if (node_started(node))
                        continue;

                /*
                 * Within an engine, there can be no cycle, but we may
                 * refer to the same dependency chain multiple times
                 * (redundant dependencies are not eliminated) and across
                 * engines.
                 */
                list_for_each_entry(p, &node->signalers_list, signal_link) {
                        GEM_BUG_ON(p == dep); /* no cycles! */

                        if (node_signaled(p->signaler))
                                continue;

                        if (prio > READ_ONCE(p->signaler->attr.priority))
                                list_move_tail(&p->dfs_link, &dfs);
                }
        }

        /*
         * If we didn't need to bump any existing priorities, and we haven't
         * yet submitted this request (i.e. there is no potential race with
         * execlists_submit_request()), we can set our own priority and skip
         * acquiring the engine locks.
         */
        if (node->attr.priority == I915_PRIORITY_INVALID) {
                GEM_BUG_ON(!list_empty(&node->link));
                node->attr = *attr;

                if (stack.dfs_link.next == stack.dfs_link.prev)
                        return;

                __list_del_entry(&stack.dfs_link);
        }

        memset(&cache, 0, sizeof(cache));
        engine = node_to_request(node)->engine;
        spin_lock(&engine->active.lock);

        /* Fifo and depth-first replacement ensure our deps execute before us */
        engine = sched_lock_engine(node, engine, &cache);
        list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
                INIT_LIST_HEAD(&dep->dfs_link);

                node = dep->signaler;
                engine = sched_lock_engine(node, engine, &cache);
                lockdep_assert_held(&engine->active.lock);

                /* Recheck after acquiring the engine->active.lock */
                if (prio <= node->attr.priority || node_signaled(node))
                        continue;

                GEM_BUG_ON(node_to_request(node)->engine != engine);

                node->attr.priority = prio;

                if (list_empty(&node->link)) {
                        /*
                         * If the request is not in the priolist queue because
                         * it is not yet runnable, then it doesn't contribute
                         * to our preemption decisions. On the other hand,
                         * if the request is on the HW, it too is not in the
                         * queue; but in that case we may still need to reorder
                         * the inflight requests.
                         */
                        continue;
                }

                if (!intel_engine_is_virtual(engine) &&
                    !i915_request_is_active(node_to_request(node))) {
                        if (!cache.priolist)
                                cache.priolist =
                                        i915_sched_lookup_priolist(engine,
                                                                   prio);
                        list_move_tail(&node->link, cache.priolist);
                }

                /* Defer (tasklet) submission until after all of our updates. */
                kick_submission(engine, node_to_request(node), prio);
        }

        spin_unlock(&engine->active.lock);
}

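/*
 * Request a priority change for @rq according to @attr. This is the public
 * entry point; it takes schedule_lock (disabling interrupts) around the
 * dependency walk in __i915_schedule().
 */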
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
        spin_lock_irq(&schedule_lock);
        __i915_schedule(&rq->sched, attr);
        spin_unlock_irq(&schedule_lock);
}

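/*
 * OR the internal priority bits in @bump into @node's current attributes
 * and reschedule, so the boost propagates to its signalers as well.
 */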
static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
        struct i915_sched_attr attr = node->attr;

        attr.priority |= bump;
        __i915_schedule(node, &attr);
}

void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
        unsigned long flags;

        GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
        if (READ_ONCE(rq->sched.attr.priority) & bump)
                return;

        spin_lock_irqsave(&schedule_lock, flags);
        __bump_priority(&rq->sched, bump);
        spin_unlock_irqrestore(&schedule_lock, flags);
}

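/*
 * Initialise the scheduling node embedded in each request: empty signaler
 * and waiter lists, no priolist link and an invalid priority until the
 * request is first scheduled.
 */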
void i915_sched_node_init(struct i915_sched_node *node)
{
        INIT_LIST_HEAD(&node->signalers_list);
        INIT_LIST_HEAD(&node->waiters_list);
        INIT_LIST_HEAD(&node->link);
        node->attr.priority = I915_PRIORITY_INVALID;
        node->semaphores = 0;
        node->flags = 0;
}

static struct i915_dependency *
i915_dependency_alloc(void)
{
        return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}

static void
i915_dependency_free(struct i915_dependency *dep)
{
        kmem_cache_free(global.slab_dependencies, dep);
}

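/*
 * Link @node as a waiter upon @signal using the caller-provided @dep.
 * Returns true if the dependency was recorded, or false if @signal has
 * already been signaled and no tracking is required (the caller then owns
 * the unused @dep).
 */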
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
                                      struct i915_sched_node *signal,
                                      struct i915_dependency *dep,
                                      unsigned long flags)
{
        bool ret = false;

        spin_lock_irq(&schedule_lock);

        if (!node_signaled(signal)) {
                INIT_LIST_HEAD(&dep->dfs_link);
                dep->signaler = signal;
                dep->waiter = node;
                dep->flags = flags;

                /* Keep track of whether anyone on this chain has a semaphore */
                if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
                    !node_started(signal))
                        node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;

                /* All set, now publish. Beware the lockless walkers. */
                list_add(&dep->signal_link, &node->signalers_list);
                list_add_rcu(&dep->wait_link, &signal->waiters_list);

                /*
                 * As we do not allow WAIT to preempt inflight requests,
                 * once we have executed a request, along with triggering
                 * any execution callbacks, we must preserve its ordering
                 * within the non-preemptible FIFO.
                 */
                BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK);
                if (flags & I915_DEPENDENCY_EXTERNAL)
                        __bump_priority(signal, __NO_PREEMPTION);

                ret = true;
        }

        spin_unlock_irq(&schedule_lock);

        return ret;
}

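/*
 * Allocate a dependency slot and make @node wait upon @signal. If @signal
 * has already completed, the allocation is released and no dependency is
 * recorded; 0 is returned in either case, -ENOMEM only on allocation
 * failure.
 */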
int i915_sched_node_add_dependency(struct i915_sched_node *node,
                                   struct i915_sched_node *signal)
{
        struct i915_dependency *dep;

        dep = i915_dependency_alloc();
        if (!dep)
                return -ENOMEM;

        if (!__i915_sched_node_add_dependency(node, signal, dep,
                                              I915_DEPENDENCY_EXTERNAL |
                                              I915_DEPENDENCY_ALLOC))
                i915_dependency_free(dep);

        return 0;
}

void i915_sched_node_fini(struct i915_sched_node *node)
{
        struct i915_dependency *dep, *tmp;

        spin_lock_irq(&schedule_lock);

        /*
         * Everyone we depended upon (the fences we wait to be signaled)
         * should retire before us and remove themselves from our list.
         * However, retirement is run independently on each timeline and
         * so we may be called out-of-order.
         */
        list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
                GEM_BUG_ON(!node_signaled(dep->signaler));
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->wait_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }

        /* Remove ourselves from everyone who depends upon us */
        list_for_each_entry_safe(dep, tmp, &node->waiters_list, wait_link) {
                GEM_BUG_ON(dep->signaler != node);
                GEM_BUG_ON(!list_empty(&dep->dfs_link));

                list_del(&dep->signal_link);
                if (dep->flags & I915_DEPENDENCY_ALLOC)
                        i915_dependency_free(dep);
        }

        spin_unlock_irq(&schedule_lock);
}

static void i915_global_scheduler_shrink(void)
{
        kmem_cache_shrink(global.slab_dependencies);
        kmem_cache_shrink(global.slab_priorities);
}

static void i915_global_scheduler_exit(void)
{
        kmem_cache_destroy(global.slab_dependencies);
        kmem_cache_destroy(global.slab_priorities);
}

static struct i915_global_scheduler global = { {
        .shrink = i915_global_scheduler_shrink,
        .exit = i915_global_scheduler_exit,
} };

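/*
 * Create the slab caches for dependencies and priolists and register the
 * scheduler's shrink/exit hooks with the i915 globals machinery.
 */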
int __init i915_global_scheduler_init(void)
{
        global.slab_dependencies = KMEM_CACHE(i915_dependency,
                                              SLAB_HWCACHE_ALIGN);
        if (!global.slab_dependencies)
                return -ENOMEM;

        global.slab_priorities = KMEM_CACHE(i915_priolist,
                                            SLAB_HWCACHE_ALIGN);
        if (!global.slab_priorities)
                goto err_priorities;

        i915_global_register(&global.base);
        return 0;

err_priorities:
        /* slab_priorities was never created; release the dependencies cache */
        kmem_cache_destroy(global.slab_dependencies);
        return -ENOMEM;
}
