root/drivers/gpu/drm/i915/gem/i915_gem_pm.c


DEFINITIONS

This source file includes the following definitions.
  1. call_idle_barriers
  2. i915_gem_park
  3. idle_work_handler
  4. retire_work_handler
  5. pm_notifier
  6. switch_to_kernel_context_sync
  7. i915_gem_load_power_context
  8. i915_gem_suspend
  9. first_mm_object
  10. i915_gem_suspend_late
  11. i915_gem_resume
  12. i915_gem_init__pm

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

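/*
 * Flush the engine's idle barrier tasks: complete each deferred
 * i915_active_request by clearing its link and request pointer before
 * invoking its retire callback, as if the barrier had been retired.
 */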
static void call_idle_barriers(struct intel_engine_cs *engine)
{
        struct llist_node *node, *next;

        llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
                struct i915_active_request *active =
                        container_of((struct list_head *)node,
                                     typeof(*active), link);

                INIT_LIST_HEAD(&active->link);
                RCU_INIT_POINTER(active->request, NULL);

                active->retire(active, NULL);
        }
}

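/*
 * Called under struct_mutex once the GT has idled: run any pending idle
 * barriers on each engine (cleanup after wedging), then park the VMAs
 * and the driver globals until the next unpark.
 */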
static void i915_gem_park(struct drm_i915_private *i915)
{
        struct intel_engine_cs *engine;
        enum intel_engine_id id;

        lockdep_assert_held(&i915->drm.struct_mutex);

        for_each_engine(engine, i915, id)
                call_idle_barriers(engine); /* cleanup after wedging */

        i915_vma_parked(i915);

        i915_globals_park();
}

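/*
 * Worker queued from the GT park notification. With the retire worker
 * cancelled and struct_mutex held, re-check under the wakeref lock that
 * the GT is still idle; if so, park GEM, otherwise re-arm the periodic
 * retire worker.
 */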
static void idle_work_handler(struct work_struct *work)
{
        struct drm_i915_private *i915 =
                container_of(work, typeof(*i915), gem.idle_work);
        bool park;

        cancel_delayed_work_sync(&i915->gem.retire_work);
        mutex_lock(&i915->drm.struct_mutex);

        intel_wakeref_lock(&i915->gt.wakeref);
        park = (!intel_wakeref_is_active(&i915->gt.wakeref) &&
                !work_pending(work));
        intel_wakeref_unlock(&i915->gt.wakeref);
        if (park)
                i915_gem_park(i915);
        else
                queue_delayed_work(i915->wq,
                                   &i915->gem.retire_work,
                                   round_jiffies_up_relative(HZ));

        mutex_unlock(&i915->drm.struct_mutex);
}

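/*
 * Periodic worker: retire completed requests if struct_mutex is
 * uncontended, then re-queue itself roughly once a second.
 */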
static void retire_work_handler(struct work_struct *work)
{
        struct drm_i915_private *i915 =
                container_of(work, typeof(*i915), gem.retire_work.work);

        /* Come back later if the device is busy... */
        if (mutex_trylock(&i915->drm.struct_mutex)) {
                i915_retire_requests(i915);
                mutex_unlock(&i915->drm.struct_mutex);
        }

        queue_delayed_work(i915->wq,
                           &i915->gem.retire_work,
                           round_jiffies_up_relative(HZ));
}

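/*
 * Listen for GT park/unpark notifications: on unpark, resume the globals
 * and kick the periodic retire worker; on park, queue the idle worker to
 * decide whether GEM can be parked as well.
 */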
static int pm_notifier(struct notifier_block *nb,
                       unsigned long action,
                       void *data)
{
        struct drm_i915_private *i915 =
                container_of(nb, typeof(*i915), gem.pm_notifier);

        switch (action) {
        case INTEL_GT_UNPARK:
                i915_globals_unpark();
                queue_delayed_work(i915->wq,
                                   &i915->gem.retire_work,
                                   round_jiffies_up_relative(HZ));
                break;

        case INTEL_GT_PARK:
                queue_work(i915->wq, &i915->gem.idle_work);
                break;
        }

        return NOTIFY_OK;
}

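/*
 * Wait for the GPU to idle, retiring requests as they complete. On
 * timeout, declare the GT wedged to forcibly cancel outstanding work.
 * Returns true only if the GT idled (and its power management settled)
 * without being, or becoming, wedged.
 */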
static bool switch_to_kernel_context_sync(struct intel_gt *gt)
{
        bool result = !intel_gt_is_wedged(gt);

        do {
                if (i915_gem_wait_for_idle(gt->i915,
                                           I915_WAIT_LOCKED |
                                           I915_WAIT_FOR_IDLE_BOOST,
                                           I915_GEM_IDLE_TIMEOUT) == -ETIME) {
                        /* XXX hide warning from gem_eio */
                        if (i915_modparams.reset) {
                                dev_err(gt->i915->drm.dev,
                                        "Failed to idle engines, declaring wedged!\n");
                                GEM_TRACE_DUMP();
                        }

                        /*
                         * Forcibly cancel outstanding work and leave
                         * the gpu quiet.
                         */
                        intel_gt_set_wedged(gt);
                        result = false;
                }
        } while (i915_retire_requests(gt->i915) && result);

        if (intel_gt_pm_wait_for_idle(gt))
                result = false;

        return result;
}

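/*
 * Reload a default (kernel) context and wait for the GT to idle; see the
 * "Always reload a context for powersaving" comment in i915_gem_resume().
 */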
bool i915_gem_load_power_context(struct drm_i915_private *i915)
{
        return switch_to_kernel_context_sync(&i915->gt);
}

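/*
 * First phase of suspend: switch away from the user contexts so that
 * their images are flushed to memory, then quiesce hangcheck, drain the
 * freed-object list and suspend the GT microcontrollers (GuC/HuC).
 */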
void i915_gem_suspend(struct drm_i915_private *i915)
{
        GEM_TRACE("\n");

        intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
        flush_workqueue(i915->wq);

        mutex_lock(&i915->drm.struct_mutex);

        /*
         * We have to flush all the executing contexts to main memory so
         * that they can be saved in the hibernation image. To ensure the
         * last context image is coherent, we have to switch away from it.
         * That leaves the i915->kernel_context still active when we
         * actually suspend, and its image in memory may not match the GPU
         * state. Fortunately, the kernel_context is disposable and we do
         * not rely on its state.
         */
        switch_to_kernel_context_sync(&i915->gt);

        mutex_unlock(&i915->drm.struct_mutex);

        cancel_delayed_work_sync(&i915->gt.hangcheck.work);

        i915_gem_drain_freed_objects(i915);

        intel_uc_suspend(&i915->gt.uc);
}

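/* Peek at the first object on an mm list, or NULL if the list is empty. */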
static struct drm_i915_gem_object *first_mm_object(struct list_head *list)
{
        return list_first_entry_or_null(list,
                                        struct drm_i915_gem_object,
                                        mm.link);
}

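/*
 * Final phase of suspend: flush each object on the shrink and purge lists
 * back to the GTT domain so that no stray writes remain pending, then
 * sanitize the GPU (see the comment below on why a reset is used).
 */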
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
        struct drm_i915_gem_object *obj;
        struct list_head *phases[] = {
                &i915->mm.shrink_list,
                &i915->mm.purge_list,
                NULL
        }, **phase;
        unsigned long flags;

        /*
         * Neither the BIOS, ourselves nor any other kernel
         * expects the system to be in execlists mode on startup,
         * so we need to reset the GPU back to legacy mode. And the only
         * known way to disable logical contexts is through a GPU reset.
         *
         * So in order to leave the system in a known default configuration,
         * always reset the GPU upon unload and suspend. Afterwards we then
         * clean up the GEM state tracking, flushing off the requests and
         * leaving the system in a known idle state.
         *
         * Note that it is of the utmost importance that the GPU is idle and
         * all stray writes are flushed *before* we dismantle the backing
         * storage for the pinned objects.
         *
         * However, since we are uncertain that resetting the GPU on older
         * machines is a good idea, we don't - just in case it leaves the
         * machine in an unusable condition.
         */

        spin_lock_irqsave(&i915->mm.obj_lock, flags);
        for (phase = phases; *phase; phase++) {
                LIST_HEAD(keep);

                while ((obj = first_mm_object(*phase))) {
                        list_move_tail(&obj->mm.link, &keep);

                        /* Beware the background _i915_gem_free_objects */
                        if (!kref_get_unless_zero(&obj->base.refcount))
                                continue;

                        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

                        i915_gem_object_lock(obj);
                        WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
                        i915_gem_object_unlock(obj);
                        i915_gem_object_put(obj);

                        spin_lock_irqsave(&i915->mm.obj_lock, flags);
                }

                list_splice_tail(&keep, *phase);
        }
        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

        i915_gem_sanitize(i915);
}

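/*
 * Undo i915_gem_suspend: re-initialise the hardware, resume the GT and
 * its microcontrollers, and reload the kernel context for powersaving.
 * Any failure wedges the GPU.
 */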
void i915_gem_resume(struct drm_i915_private *i915)
{
        GEM_TRACE("\n");

        mutex_lock(&i915->drm.struct_mutex);
        intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

        if (i915_gem_init_hw(i915))
                goto err_wedged;

        /*
         * As we didn't flush the kernel context before suspend, we cannot
         * guarantee that the context image is complete. So let's just reset
         * it and start again.
         */
        if (intel_gt_resume(&i915->gt))
                goto err_wedged;

        intel_uc_resume(&i915->gt.uc);

        /* Always reload a context for powersaving. */
        if (!i915_gem_load_power_context(i915))
                goto err_wedged;

out_unlock:
        intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
        mutex_unlock(&i915->drm.struct_mutex);
        return;

err_wedged:
        if (!intel_gt_is_wedged(&i915->gt)) {
                dev_err(i915->drm.dev,
                        "Failed to re-initialize GPU, declaring it wedged!\n");
                intel_gt_set_wedged(&i915->gt);
        }
        goto out_unlock;
}

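/*
 * Hook GEM into GT power management: initialise the idle and retire
 * workers and register for the GT park/unpark notifications.
 */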
void i915_gem_init__pm(struct drm_i915_private *i915)
{
        INIT_WORK(&i915->gem.idle_work, idle_work_handler);
        INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler);

        i915->gem.pm_notifier.notifier_call = pm_notifier;
        blocking_notifier_chain_register(&i915->gt.pm_notifications,
                                         &i915->gem.pm_notifier);
}
