root/drivers/gpu/drm/scheduler/sched_main.c

DEFINITIONS

This source file includes the following definitions:
  1. drm_sched_rq_init
  2. drm_sched_rq_add_entity
  3. drm_sched_rq_remove_entity
  4. drm_sched_rq_select_entity
  5. drm_sched_dependency_optimized
  6. drm_sched_start_timeout
  7. drm_sched_fault
  8. drm_sched_suspend_timeout
  9. drm_sched_resume_timeout
  10. drm_sched_job_begin
  11. drm_sched_job_timedout
  12. drm_sched_increase_karma
  13. drm_sched_stop
  14. drm_sched_start
  15. drm_sched_resubmit_jobs
  16. drm_sched_job_init
  17. drm_sched_job_cleanup
  18. drm_sched_ready
  19. drm_sched_wakeup
  20. drm_sched_select_entity
  21. drm_sched_process_job
  22. drm_sched_get_cleanup_job
  23. drm_sched_blocked
  24. drm_sched_main
  25. drm_sched_init
  26. drm_sched_fini

   1 /*
   2  * Copyright 2015 Advanced Micro Devices, Inc.
   3  *
   4  * Permission is hereby granted, free of charge, to any person obtaining a
   5  * copy of this software and associated documentation files (the "Software"),
   6  * to deal in the Software without restriction, including without limitation
   7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8  * and/or sell copies of the Software, and to permit persons to whom the
   9  * Software is furnished to do so, subject to the following conditions:
  10  *
  11  * The above copyright notice and this permission notice shall be included in
  12  * all copies or substantial portions of the Software.
  13  *
  14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20  * OTHER DEALINGS IN THE SOFTWARE.
  21  *
  22  */
  23 
  24 /**
  25  * DOC: Overview
  26  *
  27  * The GPU scheduler provides entities which allow userspace to push jobs
  28  * into software queues which are then scheduled on a hardware run queue.
  29  * The software queues have a priority among them. The scheduler selects entities
  30  * from the run queue in a round-robin fashion. The scheduler provides dependency
  31  * handling features among jobs. The driver is supposed to provide callback
  32  * functions for backend operations to the scheduler, such as submitting a job to
  33  * the hardware run queue, returning the dependencies of a job, etc.
  34  *
  35  * The organisation of the scheduler is the following:
  36  *
  37  * 1. Each hw run queue has one scheduler
  38  * 2. Each scheduler has multiple run queues with different priorities
  39  *    (e.g., HIGH_HW, HIGH_SW, KERNEL, NORMAL)
  40  * 3. Each scheduler run queue has a queue of entities to schedule
  41  * 4. Entities themselves maintain a queue of jobs that will be scheduled on
  42  *    the hardware.
  43  *
  44  * The jobs in an entity are always scheduled in the order in which they were pushed.
  45  */
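     /*
      * Illustrative sketch (not part of this file): the backend callbacks a driver
      * is expected to provide for each hardware run queue. The my_* names are
      * hypothetical driver functions.
      *
      *     static const struct drm_sched_backend_ops my_sched_ops = {
      *             .dependency   = my_job_dependency,   // optional: next unsignaled dependency fence
      *             .run_job      = my_run_job,          // push the job to the hardware ring
      *             .timedout_job = my_timedout_job,     // handle a job that hit the timeout
      *             .free_job     = my_free_job,         // release the job once it is no longer needed
      *     };
      */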
  46 
  47 #include <linux/kthread.h>
  48 #include <linux/wait.h>
  49 #include <linux/sched.h>
  50 #include <uapi/linux/sched/types.h>
  51 
  52 #include <drm/drm_print.h>
  53 #include <drm/gpu_scheduler.h>
  54 #include <drm/spsc_queue.h>
  55 
  56 #define CREATE_TRACE_POINTS
  57 #include "gpu_scheduler_trace.h"
  58 
  59 #define to_drm_sched_job(sched_job)             \
  60                 container_of((sched_job), struct drm_sched_job, queue_node)
  61 
  62 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb);
  63 
  64 /**
  65  * drm_sched_rq_init - initialize a given run queue struct
  66  * @sched: scheduler instance the run queue belongs to
  67  * @rq: scheduler run queue
  68  *
  69  * Initializes a scheduler runqueue.
  70  */
  71 static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
  72                               struct drm_sched_rq *rq)
  73 {
  74         spin_lock_init(&rq->lock);
  75         INIT_LIST_HEAD(&rq->entities);
  76         rq->current_entity = NULL;
  77         rq->sched = sched;
  78 }
  79 
  80 /**
  81  * drm_sched_rq_add_entity - add an entity
  82  *
  83  * @rq: scheduler run queue
  84  * @entity: scheduler entity
  85  *
  86  * Adds a scheduler entity to the run queue.
  87  */
  88 void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
  89                              struct drm_sched_entity *entity)
  90 {
  91         if (!list_empty(&entity->list))
  92                 return;
  93         spin_lock(&rq->lock);
  94         list_add_tail(&entity->list, &rq->entities);
  95         spin_unlock(&rq->lock);
  96 }
  97 
  98 /**
  99  * drm_sched_rq_remove_entity - remove an entity
 100  *
 101  * @rq: scheduler run queue
 102  * @entity: scheduler entity
 103  *
 104  * Removes a scheduler entity from the run queue.
 105  */
 106 void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
 107                                 struct drm_sched_entity *entity)
 108 {
 109         if (list_empty(&entity->list))
 110                 return;
 111         spin_lock(&rq->lock);
 112         list_del_init(&entity->list);
 113         if (rq->current_entity == entity)
 114                 rq->current_entity = NULL;
 115         spin_unlock(&rq->lock);
 116 }
 117 
 118 /**
 119  * drm_sched_rq_select_entity - Select an entity which could provide a job to run
 120  *
 121  * @rq: scheduler run queue to check.
 122  *
 123  * Try to find a ready entity, returns NULL if none found.
 124  */
 125 static struct drm_sched_entity *
 126 drm_sched_rq_select_entity(struct drm_sched_rq *rq)
 127 {
 128         struct drm_sched_entity *entity;
 129 
 130         spin_lock(&rq->lock);
 131 
 132         entity = rq->current_entity;
 133         if (entity) {
 134                 list_for_each_entry_continue(entity, &rq->entities, list) {
 135                         if (drm_sched_entity_is_ready(entity)) {
 136                                 rq->current_entity = entity;
 137                                 spin_unlock(&rq->lock);
 138                                 return entity;
 139                         }
 140                 }
 141         }
 142 
 143         list_for_each_entry(entity, &rq->entities, list) {
 144 
 145                 if (drm_sched_entity_is_ready(entity)) {
 146                         rq->current_entity = entity;
 147                         spin_unlock(&rq->lock);
 148                         return entity;
 149                 }
 150 
 151                 if (entity == rq->current_entity)
 152                         break;
 153         }
 154 
 155         spin_unlock(&rq->lock);
 156 
 157         return NULL;
 158 }
 159 
 160 /**
 161  * drm_sched_dependency_optimized - test if the dependency can be optimized
 162  *
 163  * @fence: the dependency fence
 164  * @entity: the entity which depends on the above fence
 165  *
 166  * Returns true if the dependency can be optimized and false otherwise
 167  */
 168 bool drm_sched_dependency_optimized(struct dma_fence *fence,
 169                                     struct drm_sched_entity *entity)
 170 {
 171         struct drm_gpu_scheduler *sched = entity->rq->sched;
 172         struct drm_sched_fence *s_fence;
 173 
 174         if (!fence || dma_fence_is_signaled(fence))
 175                 return false;
 176         if (fence->context == entity->fence_context)
 177                 return true;
 178         s_fence = to_drm_sched_fence(fence);
 179         if (s_fence && s_fence->sched == sched)
 180                 return true;
 181 
 182         return false;
 183 }
 184 EXPORT_SYMBOL(drm_sched_dependency_optimized);
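     /*
      * Illustrative sketch (not part of this file): a driver can use
      * drm_sched_dependency_optimized() to skip an explicit synchronization step
      * when the scheduler already orders the dependency. my_need_pipeline_sync()
      * is a hypothetical helper.
      *
      *     static bool my_need_pipeline_sync(struct dma_fence *dep,
      *                                       struct drm_sched_entity *entity)
      *     {
      *             return !drm_sched_dependency_optimized(dep, entity);
      *     }
      */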
 185 
 186 /**
 187  * drm_sched_start_timeout - start timeout for reset worker
 188  *
 189  * @sched: scheduler instance to start the worker for
 190  *
 191  * Start the timeout for the given scheduler.
 192  */
 193 static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched)
 194 {
 195         if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 196             !list_empty(&sched->ring_mirror_list))
 197                 schedule_delayed_work(&sched->work_tdr, sched->timeout);
 198 }
 199 
 200 /**
 201  * drm_sched_fault - immediately start timeout handler
 202  *
 203  * @sched: scheduler where the timeout handling should be started.
 204  *
 205  * Start timeout handling immediately when the driver detects a hardware fault.
 206  */
 207 void drm_sched_fault(struct drm_gpu_scheduler *sched)
 208 {
 209         mod_delayed_work(system_wq, &sched->work_tdr, 0);
 210 }
 211 EXPORT_SYMBOL(drm_sched_fault);
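     /*
      * Illustrative sketch (not part of this file): a driver would typically call
      * drm_sched_fault() from its fault/hang interrupt handler. my_fault_irq() and
      * struct my_ring are hypothetical.
      *
      *     static irqreturn_t my_fault_irq(int irq, void *arg)
      *     {
      *             struct my_ring *ring = arg;
      *
      *             // Don't wait for the regular timeout, start TDR handling now.
      *             drm_sched_fault(&ring->sched);
      *             return IRQ_HANDLED;
      *     }
      */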
 212 
 213 /**
 214  * drm_sched_suspend_timeout - Suspend scheduler job timeout
 215  *
 216  * @sched: scheduler instance for which to suspend the timeout
 217  *
 218  * Suspend the delayed work timeout for the scheduler. This is done by
 219  * modifying the delayed work timeout to an arbitrarily large value,
 220  * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
 221  * called from an IRQ context.
 222  *
 223  * Returns the timeout remaining
 224  *
 225  */
 226 unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched)
 227 {
 228         unsigned long sched_timeout, now = jiffies;
 229 
 230         sched_timeout = sched->work_tdr.timer.expires;
 231 
 232         /*
 233          * Modify the timeout to an arbitrarily large value. This also prevents
 234          * the timeout from being restarted when new submissions arrive.
 235          */
 236         if (mod_delayed_work(system_wq, &sched->work_tdr, MAX_SCHEDULE_TIMEOUT)
 237                         && time_after(sched_timeout, now))
 238                 return sched_timeout - now;
 239         else
 240                 return sched->timeout;
 241 }
 242 EXPORT_SYMBOL(drm_sched_suspend_timeout);
 243 
 244 /**
 245  * drm_sched_resume_timeout - Resume scheduler job timeout
 246  *
 247  * @sched: scheduler instance for which to resume the timeout
 248  * @remaining: remaining timeout
 249  *
 250  * Resume the delayed work timeout for the scheduler. Note that
 251  * this function can be called from an IRQ context.
 252  */
 253 void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
 254                 unsigned long remaining)
 255 {
 256         unsigned long flags;
 257 
 258         spin_lock_irqsave(&sched->job_list_lock, flags);
 259 
 260         if (list_empty(&sched->ring_mirror_list))
 261                 cancel_delayed_work(&sched->work_tdr);
 262         else
 263                 mod_delayed_work(system_wq, &sched->work_tdr, remaining);
 264 
 265         spin_unlock_irqrestore(&sched->job_list_lock, flags);
 266 }
 267 EXPORT_SYMBOL(drm_sched_resume_timeout);
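     /*
      * Illustrative sketch (not part of this file): drm_sched_suspend_timeout() and
      * drm_sched_resume_timeout() are meant to be used as a pair around a window in
      * which the hardware is known to make no progress, e.g. a preemption test. The
      * my_* helpers and struct my_ring are hypothetical.
      *
      *     unsigned long remaining;
      *
      *     remaining = drm_sched_suspend_timeout(&ring->sched);
      *     my_preempt_ring(ring);
      *     // ... work while the ring is preempted ...
      *     my_restore_ring(ring);
      *     drm_sched_resume_timeout(&ring->sched, remaining);
      */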
 268 
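     /**
      * drm_sched_job_begin - start tracking a job on the hardware
      *
      * @s_job: job that is about to be pushed to the hardware
      *
      * Adds the job to the ring mirror list and (re)arms the timeout handler.
      */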
 269 static void drm_sched_job_begin(struct drm_sched_job *s_job)
 270 {
 271         struct drm_gpu_scheduler *sched = s_job->sched;
 272         unsigned long flags;
 273 
 274         spin_lock_irqsave(&sched->job_list_lock, flags);
 275         list_add_tail(&s_job->node, &sched->ring_mirror_list);
 276         drm_sched_start_timeout(sched);
 277         spin_unlock_irqrestore(&sched->job_list_lock, flags);
 278 }
 279 
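     /**
      * drm_sched_job_timedout - timeout handler work item
      *
      * @work: the timeout work, embedded in the scheduler as work_tdr
      *
      * Calls the driver's timedout_job() callback for the oldest job on the ring
      * mirror list, frees it if drm_sched_stop() flagged it as guilty and already
      * completed, and then rearms the timeout.
      */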
 280 static void drm_sched_job_timedout(struct work_struct *work)
 281 {
 282         struct drm_gpu_scheduler *sched;
 283         struct drm_sched_job *job;
 284         unsigned long flags;
 285 
 286         sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
 287         job = list_first_entry_or_null(&sched->ring_mirror_list,
 288                                        struct drm_sched_job, node);
 289 
 290         if (job) {
 291                 job->sched->ops->timedout_job(job);
 292 
 293                 /*
 294                  * The guilty job already completed and hence needs to be
 295                  * manually removed; see the drm_sched_stop() documentation.
 296                  */
 297                 if (sched->free_guilty) {
 298                         job->sched->ops->free_job(job);
 299                         sched->free_guilty = false;
 300                 }
 301         }
 302 
 303         spin_lock_irqsave(&sched->job_list_lock, flags);
 304         drm_sched_start_timeout(sched);
 305         spin_unlock_irqrestore(&sched->job_list_lock, flags);
 306 }
 307 
 308 /**
 309  * drm_sched_increase_karma - Update sched_entity guilty flag
 310  *
 311  * @bad: The job guilty of the timeout
 312  *
 313  * Increment the karma counter on every hang caused by the 'bad' job. If this
 314  * exceeds the hang limit of the scheduler, the respective sched entity is
 315  * marked guilty and no further jobs from it will be scheduled.
 316  */
 317 void drm_sched_increase_karma(struct drm_sched_job *bad)
 318 {
 319         int i;
 320         struct drm_sched_entity *tmp;
 321         struct drm_sched_entity *entity;
 322         struct drm_gpu_scheduler *sched = bad->sched;
 323 
 324         /* Don't increase @bad's karma if it's from the KERNEL RQ: a GPU hang can
 325          * corrupt kernel jobs (like VM updating jobs), but kernel jobs are
 326          * always considered good.
 327          */
 328         if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
 329                 atomic_inc(&bad->karma);
 330                 for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_KERNEL;
 331                      i++) {
 332                         struct drm_sched_rq *rq = &sched->sched_rq[i];
 333 
 334                         spin_lock(&rq->lock);
 335                         list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
 336                                 if (bad->s_fence->scheduled.context ==
 337                                     entity->fence_context) {
 338                                         if (atomic_read(&bad->karma) >
 339                                             bad->sched->hang_limit)
 340                                                 if (entity->guilty)
 341                                                         atomic_set(entity->guilty, 1);
 342                                         break;
 343                                 }
 344                         }
 345                         spin_unlock(&rq->lock);
 346                         if (&entity->list != &rq->entities)
 347                                 break;
 348                 }
 349         }
 350 }
 351 EXPORT_SYMBOL(drm_sched_increase_karma);
 352 
 353 /**
 354  * drm_sched_stop - stop the scheduler
 355  *
 356  * @sched: scheduler instance
 357  * @bad: job which caused the time out
 358  *
 359  * Stop the scheduler; this also removes and frees all completed jobs.
 360  * Note: the bad job will not be freed, as it might be used later, so it is
 361  * the caller's responsibility to release it manually if it is no longer
 362  * part of the mirror list.
 363  *
 364  */
 365 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
 366 {
 367         struct drm_sched_job *s_job, *tmp;
 368         unsigned long flags;
 369 
 370         kthread_park(sched->thread);
 371 
 372         /*
 373          * Iterate the job list from the newest to the oldest job and either
 374          * deactivate their HW callbacks or remove them from the mirror list
 375          * if they have already signaled.
 376          * This iteration is thread safe because the sched thread is parked.
 377          */
 378         list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) {
 379                 if (s_job->s_fence->parent &&
 380                     dma_fence_remove_callback(s_job->s_fence->parent,
 381                                               &s_job->cb)) {
 382                         atomic_dec(&sched->hw_rq_count);
 383                 } else {
 384                         /*
 385                          * Remove the job from the ring_mirror_list. Locking here
 386                          * protects against a concurrent drm_sched_resume_timeout().
 387                          */
 388                         spin_lock_irqsave(&sched->job_list_lock, flags);
 389                         list_del_init(&s_job->node);
 390                         spin_unlock_irqrestore(&sched->job_list_lock, flags);
 391 
 392                         /*
 393                          * Wait for job's HW fence callback to finish using s_job
 394                          * before releasing it.
 395                          *
 396                          * The job is still alive, so the fence refcount is at least 1.
 397                          */
 398                         dma_fence_wait(&s_job->s_fence->finished, false);
 399 
 400                         /*
 401                          * We must keep the bad job alive for later use during
 402                          * recovery by some of the drivers, but leave a hint
 403                          * that the guilty job must be released.
 404                          */
 405                         if (bad != s_job)
 406                                 sched->ops->free_job(s_job);
 407                         else
 408                                 sched->free_guilty = true;
 409                 }
 410         }
 411 
 412         /*
 413          * Stop the pending timer in flight, as it is rearmed in drm_sched_start().
 414          * This prevents pending timeout work in progress from firing right away
 415          * after this TDR finishes and before the newly restarted jobs have had a
 416          * chance to complete.
 417          */
 418         cancel_delayed_work(&sched->work_tdr);
 419 }
 420 
 421 EXPORT_SYMBOL(drm_sched_stop);
 422 
 423 /**
 424  * drm_sched_start - recover jobs after a reset
 425  *
 426  * @sched: scheduler instance
 427  * @full_recovery: proceed with complete sched restart
 428  *
 429  */
 430 void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 431 {
 432         struct drm_sched_job *s_job, *tmp;
 433         unsigned long flags;
 434         int r;
 435 
 436         /*
 437          * Locking the list is not required here as the sched thread is parked
 438          * so no new jobs are being inserted or removed. Also, concurrent
 439          * GPU recoveries can't run in parallel.
 440          */
 441         list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 442                 struct dma_fence *fence = s_job->s_fence->parent;
 443 
 444                 atomic_inc(&sched->hw_rq_count);
 445 
 446                 if (!full_recovery)
 447                         continue;
 448 
 449                 if (fence) {
 450                         r = dma_fence_add_callback(fence, &s_job->cb,
 451                                                    drm_sched_process_job);
 452                         if (r == -ENOENT)
 453                                 drm_sched_process_job(fence, &s_job->cb);
 454                         else if (r)
 455                                 DRM_ERROR("fence add callback failed (%d)\n",
 456                                           r);
 457                 } else
 458                         drm_sched_process_job(NULL, &s_job->cb);
 459         }
 460 
 461         if (full_recovery) {
 462                 spin_lock_irqsave(&sched->job_list_lock, flags);
 463                 drm_sched_start_timeout(sched);
 464                 spin_unlock_irqrestore(&sched->job_list_lock, flags);
 465         }
 466 
 467         kthread_unpark(sched->thread);
 468 }
 469 EXPORT_SYMBOL(drm_sched_start);
 470 
 471 /**
 472  * drm_sched_resubmit_jobs - helper to relaunch jobs from the mirror ring list
 473  *
 474  * @sched: scheduler instance
 475  *
 476  */
 477 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
 478 {
 479         struct drm_sched_job *s_job, *tmp;
 480         uint64_t guilty_context;
 481         bool found_guilty = false;
 482         struct dma_fence *fence;
 483 
 484         list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
 485                 struct drm_sched_fence *s_fence = s_job->s_fence;
 486 
 487                 if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) {
 488                         found_guilty = true;
 489                         guilty_context = s_job->s_fence->scheduled.context;
 490                 }
 491 
 492                 if (found_guilty && s_job->s_fence->scheduled.context == guilty_context)
 493                         dma_fence_set_error(&s_fence->finished, -ECANCELED);
 494 
 495                 dma_fence_put(s_job->s_fence->parent);
 496                 fence = sched->ops->run_job(s_job);
 497 
 498                 if (IS_ERR_OR_NULL(fence)) {
 499                         if (IS_ERR(fence))
 500                                 dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
 501                         s_job->s_fence->parent = NULL;
 502                 } else
 503                         s_job->s_fence->parent = fence;
 504 
 505 
 506         }
 507 }
 508 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
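     /*
      * Illustrative sketch (not part of this file): drm_sched_stop(),
      * drm_sched_increase_karma(), drm_sched_resubmit_jobs() and drm_sched_start()
      * are typically chained in a driver's timedout_job()/reset path, roughly like
      * this. my_hw_reset() and the single-scheduler layout are hypothetical; real
      * drivers usually loop over all schedulers affected by the reset.
      *
      *     static void my_timedout_job(struct drm_sched_job *bad)
      *     {
      *             struct drm_gpu_scheduler *sched = bad->sched;
      *
      *             // Park the scheduler thread and prune completed jobs.
      *             drm_sched_stop(sched, bad);
      *
      *             // Blame the offending job's entity.
      *             drm_sched_increase_karma(bad);
      *
      *             // Reset the hardware, then feed the remaining jobs back in.
      *             my_hw_reset(sched);
      *             drm_sched_resubmit_jobs(sched);
      *
      *             // Re-add fence callbacks, rearm the timeout and unpark the thread.
      *             drm_sched_start(sched, true);
      *     }
      */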
 509 
 510 /**
 511  * drm_sched_job_init - init a scheduler job
 512  *
 513  * @job: scheduler job to init
 514  * @entity: scheduler entity to use
 515  * @owner: job owner for debugging
 516  *
 517  * Refer to drm_sched_entity_push_job() documentation
 518  * for locking considerations.
 519  *
 520  * Returns 0 for success, negative error code otherwise.
 521  */
 522 int drm_sched_job_init(struct drm_sched_job *job,
 523                        struct drm_sched_entity *entity,
 524                        void *owner)
 525 {
 526         struct drm_gpu_scheduler *sched;
 527 
 528         drm_sched_entity_select_rq(entity);
 529         if (!entity->rq)
 530                 return -ENOENT;
 531 
 532         sched = entity->rq->sched;
 533 
 534         job->sched = sched;
 535         job->entity = entity;
 536         job->s_priority = entity->rq - sched->sched_rq;
 537         job->s_fence = drm_sched_fence_create(entity, owner);
 538         if (!job->s_fence)
 539                 return -ENOMEM;
 540         job->id = atomic64_inc_return(&sched->job_id_count);
 541 
 542         INIT_LIST_HEAD(&job->node);
 543 
 544         return 0;
 545 }
 546 EXPORT_SYMBOL(drm_sched_job_init);
 547 
 548 /**
 549  * drm_sched_job_cleanup - clean up scheduler job resources
 550  *
 551  * @job: scheduler job to clean up
 552  */
 553 void drm_sched_job_cleanup(struct drm_sched_job *job)
 554 {
 555         dma_fence_put(&job->s_fence->finished);
 556         job->s_fence = NULL;
 557 }
 558 EXPORT_SYMBOL(drm_sched_job_cleanup);
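     /*
      * Illustrative sketch (not part of this file): the usual submission flow around
      * drm_sched_job_init(). my_job, my_entity, my_owner and my_prepare_job() are
      * hypothetical; drm_sched_job_cleanup() is only called directly when a job is
      * abandoned before being pushed (after the push, the free_job() callback
      * normally takes care of it).
      *
      *     r = drm_sched_job_init(&my_job->base, my_entity, my_owner);
      *     if (r)
      *             return r;
      *
      *     r = my_prepare_job(my_job);     // hypothetical: dependencies, buffers, ...
      *     if (r) {
      *             drm_sched_job_cleanup(&my_job->base);
      *             return r;
      *     }
      *
      *     drm_sched_entity_push_job(&my_job->base, my_entity);
      */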
 559 
 560 /**
 561  * drm_sched_ready - is the scheduler ready
 562  *
 563  * @sched: scheduler instance
 564  *
 565  * Return true if we can push more jobs to the hw, otherwise false.
 566  */
 567 static bool drm_sched_ready(struct drm_gpu_scheduler *sched)
 568 {
 569         return atomic_read(&sched->hw_rq_count) <
 570                 sched->hw_submission_limit;
 571 }
 572 
 573 /**
 574  * drm_sched_wakeup - Wake up the scheduler when it is ready
 575  *
 576  * @sched: scheduler instance
 577  *
 578  */
 579 void drm_sched_wakeup(struct drm_gpu_scheduler *sched)
 580 {
 581         if (drm_sched_ready(sched))
 582                 wake_up_interruptible(&sched->wake_up_worker);
 583 }
 584 
 585 /**
 586  * drm_sched_select_entity - Select next entity to process
 587  *
 588  * @sched: scheduler instance
 589  *
 590  * Returns the entity to process or NULL if none are found.
 591  */
 592 static struct drm_sched_entity *
 593 drm_sched_select_entity(struct drm_gpu_scheduler *sched)
 594 {
 595         struct drm_sched_entity *entity;
 596         int i;
 597 
 598         if (!drm_sched_ready(sched))
 599                 return NULL;
 600 
 601         /* Kernel run queue has higher priority than normal run queue */
 602         for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
 603                 entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
 604                 if (entity)
 605                         break;
 606         }
 607 
 608         return entity;
 609 }
 610 
 611 /**
 612  * drm_sched_process_job - process a job
 613  *
 614  * @f: fence
 615  * @cb: fence callback embedded in the job
 616  *
 617  * Called after the job has finished execution on the hardware.
 618  */
 619 static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 620 {
 621         struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb);
 622         struct drm_sched_fence *s_fence = s_job->s_fence;
 623         struct drm_gpu_scheduler *sched = s_fence->sched;
 624 
 625         atomic_dec(&sched->hw_rq_count);
 626         atomic_dec(&sched->num_jobs);
 627 
 628         trace_drm_sched_process_job(s_fence);
 629 
 630         dma_fence_get(&s_fence->finished);
 631         drm_sched_fence_finished(s_fence);
 632         dma_fence_put(&s_fence->finished);
 633         wake_up_interruptible(&sched->wake_up_worker);
 634 }
 635 
 636 /**
 637  * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
 638  *
 639  * @sched: scheduler instance
 640  *
 641  * Returns the next finished job from the mirror list (if there is one)
 642  * ready to be destroyed.
 643  */
 644 static struct drm_sched_job *
 645 drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 646 {
 647         struct drm_sched_job *job;
 648         unsigned long flags;
 649 
 650         /* Don't destroy jobs while the timeout worker is running */
 651         if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
 652             !cancel_delayed_work(&sched->work_tdr))
 653                 return NULL;
 654 
 655         spin_lock_irqsave(&sched->job_list_lock, flags);
 656 
 657         job = list_first_entry_or_null(&sched->ring_mirror_list,
 658                                        struct drm_sched_job, node);
 659 
 660         if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
 661                 /* remove job from ring_mirror_list */
 662                 list_del_init(&job->node);
 663         } else {
 664                 job = NULL;
 665                 /* queue timeout for next job */
 666                 drm_sched_start_timeout(sched);
 667         }
 668 
 669         spin_unlock_irqrestore(&sched->job_list_lock, flags);
 670 
 671         return job;
 672 }
 673 
 674 /**
 675  * drm_sched_blocked - check if the scheduler is blocked
 676  *
 677  * @sched: scheduler instance
 678  *
 679  * Returns true if blocked, otherwise false.
 680  */
 681 static bool drm_sched_blocked(struct drm_gpu_scheduler *sched)
 682 {
 683         if (kthread_should_park()) {
 684                 kthread_parkme();
 685                 return true;
 686         }
 687 
 688         return false;
 689 }
 690 
 691 /**
 692  * drm_sched_main - main scheduler thread
 693  *
 694  * @param: scheduler instance
 695  *
 696  * Returns 0.
 697  */
 698 static int drm_sched_main(void *param)
 699 {
 700         struct sched_param sparam = {.sched_priority = 1};
 701         struct drm_gpu_scheduler *sched = (struct drm_gpu_scheduler *)param;
 702         int r;
 703 
 704         sched_setscheduler(current, SCHED_FIFO, &sparam);
 705 
 706         while (!kthread_should_stop()) {
 707                 struct drm_sched_entity *entity = NULL;
 708                 struct drm_sched_fence *s_fence;
 709                 struct drm_sched_job *sched_job;
 710                 struct dma_fence *fence;
 711                 struct drm_sched_job *cleanup_job = NULL;
 712 
 713                 wait_event_interruptible(sched->wake_up_worker,
 714                                          (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
 715                                          (!drm_sched_blocked(sched) &&
 716                                           (entity = drm_sched_select_entity(sched))) ||
 717                                          kthread_should_stop());
 718 
 719                 if (cleanup_job) {
 720                         sched->ops->free_job(cleanup_job);
 721                         /* queue timeout for next job */
 722                         drm_sched_start_timeout(sched);
 723                 }
 724 
 725                 if (!entity)
 726                         continue;
 727 
 728                 sched_job = drm_sched_entity_pop_job(entity);
 729                 if (!sched_job)
 730                         continue;
 731 
 732                 s_fence = sched_job->s_fence;
 733 
 734                 atomic_inc(&sched->hw_rq_count);
 735                 drm_sched_job_begin(sched_job);
 736 
 737                 fence = sched->ops->run_job(sched_job);
 738                 drm_sched_fence_scheduled(s_fence);
 739 
 740                 if (!IS_ERR_OR_NULL(fence)) {
 741                         s_fence->parent = dma_fence_get(fence);
 742                         r = dma_fence_add_callback(fence, &sched_job->cb,
 743                                                    drm_sched_process_job);
 744                         if (r == -ENOENT)
 745                                 drm_sched_process_job(fence, &sched_job->cb);
 746                         else if (r)
 747                                 DRM_ERROR("fence add callback failed (%d)\n",
 748                                           r);
 749                         dma_fence_put(fence);
 750                 } else {
 751                         if (IS_ERR(fence))
 752                                 dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
 753                         drm_sched_process_job(NULL, &sched_job->cb);
 754                 }
 755 
 756                 wake_up(&sched->job_scheduled);
 757         }
 758         return 0;
 759 }
 760 
 761 /**
 762  * drm_sched_init - Init a gpu scheduler instance
 763  *
 764  * @sched: scheduler instance
 765  * @ops: backend operations for this scheduler
 766  * @hw_submission: number of hw submissions that can be in flight
 767  * @hang_limit: number of times to allow a job to hang before dropping it
 768  * @timeout: timeout value in jiffies for the scheduler
 769  * @name: name used for debugging
 770  *
 771  * Return 0 on success, otherwise error code.
 772  */
 773 int drm_sched_init(struct drm_gpu_scheduler *sched,
 774                    const struct drm_sched_backend_ops *ops,
 775                    unsigned hw_submission,
 776                    unsigned hang_limit,
 777                    long timeout,
 778                    const char *name)
 779 {
 780         int i, ret;
 781         sched->ops = ops;
 782         sched->hw_submission_limit = hw_submission;
 783         sched->name = name;
 784         sched->timeout = timeout;
 785         sched->hang_limit = hang_limit;
 786         for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_MAX; i++)
 787                 drm_sched_rq_init(sched, &sched->sched_rq[i]);
 788 
 789         init_waitqueue_head(&sched->wake_up_worker);
 790         init_waitqueue_head(&sched->job_scheduled);
 791         INIT_LIST_HEAD(&sched->ring_mirror_list);
 792         spin_lock_init(&sched->job_list_lock);
 793         atomic_set(&sched->hw_rq_count, 0);
 794         INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
 795         atomic_set(&sched->num_jobs, 0);
 796         atomic64_set(&sched->job_id_count, 0);
 797 
 798         /* Each scheduler will run on a separate kernel thread */
 799         sched->thread = kthread_run(drm_sched_main, sched, sched->name);
 800         if (IS_ERR(sched->thread)) {
 801                 ret = PTR_ERR(sched->thread);
 802                 sched->thread = NULL;
 803                 DRM_ERROR("Failed to create scheduler for %s.\n", name);
 804                 return ret;
 805         }
 806 
 807         sched->ready = true;
 808         return 0;
 809 }
 810 EXPORT_SYMBOL(drm_sched_init);
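     /*
      * Illustrative sketch (not part of this file): drivers typically pair
      * drm_sched_init() and drm_sched_fini() per hardware ring. The ops structure,
      * struct my_ring and the numbers below are hypothetical.
      *
      *     r = drm_sched_init(&ring->sched, &my_sched_ops,
      *                        32,                           // hw_submission: max jobs in flight
      *                        3,                            // hang_limit
      *                        msecs_to_jiffies(10000),      // 10 second timeout
      *                        ring->name);
      *     if (r)
      *             return r;
      *     ...
      *     drm_sched_fini(&ring->sched);
      */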
 811 
 812 /**
 813  * drm_sched_fini - Destroy a gpu scheduler
 814  *
 815  * @sched: scheduler instance
 816  *
 817  * Tears down and cleans up the scheduler.
 818  */
 819 void drm_sched_fini(struct drm_gpu_scheduler *sched)
 820 {
 821         if (sched->thread)
 822                 kthread_stop(sched->thread);
 823 
 824         sched->ready = false;
 825 }
 826 EXPORT_SYMBOL(drm_sched_fini);
