root/drivers/gpu/drm/i915/intel_runtime_pm.c

DEFINITIONS

This source file includes the following definitions:
  1. __save_depot_stack
  2. __print_depot_stack
  3. init_intel_runtime_pm_wakeref
  4. track_intel_runtime_pm_wakeref
  5. untrack_intel_runtime_pm_wakeref
  6. cmphandle
  7. __print_intel_runtime_pm_wakeref
  8. __untrack_all_wakerefs
  9. dump_and_free_wakeref_tracking
  10. __intel_wakeref_dec_and_check_tracking
  11. untrack_all_intel_runtime_pm_wakerefs
  12. print_intel_runtime_pm_wakeref
  13. init_intel_runtime_pm_wakeref
  14. track_intel_runtime_pm_wakeref
  15. untrack_intel_runtime_pm_wakeref
  16. __intel_wakeref_dec_and_check_tracking
  17. untrack_all_intel_runtime_pm_wakerefs
  18. intel_runtime_pm_acquire
  19. intel_runtime_pm_release
  20. __intel_runtime_pm_get
  21. intel_runtime_pm_get_raw
  22. intel_runtime_pm_get
  23. intel_runtime_pm_get_if_in_use
  24. intel_runtime_pm_get_noresume
  25. __intel_runtime_pm_put
  26. intel_runtime_pm_put_raw
  27. intel_runtime_pm_put_unchecked
  28. intel_runtime_pm_put
  29. intel_runtime_pm_enable
  30. intel_runtime_pm_disable
  31. intel_runtime_pm_driver_release
  32. intel_runtime_pm_init_early

/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_trace.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

#include <linux/sort.h>

#define STACKDEPTH 8

static noinline depot_stack_handle_t __save_depot_stack(void)
{
        unsigned long entries[STACKDEPTH];
        unsigned int n;

        /* skipnr=1 omits __save_depot_stack() itself from the trace */
        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
        return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void __print_depot_stack(depot_stack_handle_t stack,
                                char *buf, int sz, int indent)
{
        unsigned long *entries;
        unsigned int nr_entries;

        nr_entries = stack_depot_fetch(stack, &entries);
        stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
        spin_lock_init(&rpm->debug.lock);
}

/*
 * Record the caller's stack as the owner of a new wakeref; the returned
 * handle doubles as the wakeref cookie. -1 marks a reference that could
 * not be tracked (runtime pm unavailable or allocation failure).
 */
static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
        depot_stack_handle_t stack, *stacks;
        unsigned long flags;

        if (!rpm->available)
                return -1;

        stack = __save_depot_stack();
        if (!stack)
                return -1;

        spin_lock_irqsave(&rpm->debug.lock, flags);

        if (!rpm->debug.count)
                rpm->debug.last_acquire = stack;

        stacks = krealloc(rpm->debug.owners,
                          (rpm->debug.count + 1) * sizeof(*stacks),
                          GFP_NOWAIT | __GFP_NOWARN);
        if (stacks) {
                stacks[rpm->debug.count++] = stack;
                rpm->debug.owners = stacks;
        } else {
                stack = -1;
        }

        spin_unlock_irqrestore(&rpm->debug.lock, flags);

        return stack;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
                                             depot_stack_handle_t stack)
{
        unsigned long flags, n;
        bool found = false;

        if (unlikely(stack == -1))
                return;

        spin_lock_irqsave(&rpm->debug.lock, flags);
        for (n = rpm->debug.count; n--; ) {
                if (rpm->debug.owners[n] == stack) {
                        /* Drop one instance of the handle from the array */
                        memmove(rpm->debug.owners + n,
                                rpm->debug.owners + n + 1,
                                (--rpm->debug.count - n) * sizeof(stack));
                        found = true;
                        break;
                }
        }
        spin_unlock_irqrestore(&rpm->debug.lock, flags);

        if (WARN(!found,
                 "Unmatched wakeref (tracking %lu), count %u\n",
                 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
                char *buf;

                buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
                if (!buf)
                        return;

                __print_depot_stack(stack, buf, PAGE_SIZE, 2);
                DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

                stack = READ_ONCE(rpm->debug.last_release);
                if (stack) {
                        __print_depot_stack(stack, buf, PAGE_SIZE, 2);
                        DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
                }

                kfree(buf);
        }
}

static int cmphandle(const void *_a, const void *_b)
{
        const depot_stack_handle_t * const a = _a, * const b = _b;

        if (*a < *b)
                return -1;
        else if (*a > *b)
                return 1;
        else
                return 0;
}

static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
                                 const struct intel_runtime_pm_debug *dbg)
{
        unsigned long i;
        char *buf;

        buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
        if (!buf)
                return;

        if (dbg->last_acquire) {
                __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
                drm_printf(p, "Wakeref last acquired:\n%s", buf);
        }

        if (dbg->last_release) {
                __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
                drm_printf(p, "Wakeref last released:\n%s", buf);
        }

        drm_printf(p, "Wakeref count: %lu\n", dbg->count);

        /* Sort the handles so identical stacks can be coalesced below */
        sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

        for (i = 0; i < dbg->count; i++) {
                depot_stack_handle_t stack = dbg->owners[i];
                unsigned long rep;

                rep = 1;
                while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
                        rep++, i++;
                __print_depot_stack(stack, buf, PAGE_SIZE, 2);
                drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
        }

        kfree(buf);
}

static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
                       struct intel_runtime_pm_debug *saved)
{
        *saved = *debug;

        debug->owners = NULL;
        debug->count = 0;
        debug->last_release = __save_depot_stack();
}

static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
        if (debug->count) {
                struct drm_printer p = drm_debug_printer("i915");

                __print_intel_runtime_pm_wakeref(&p, debug);
        }

        kfree(debug->owners);
}

static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
        struct intel_runtime_pm_debug dbg = {};
        unsigned long flags;

        /*
         * Only when the final wakeref is dropped (the counter reaches zero,
         * taking the debug lock) do we snapshot any leftover tracking data
         * and dump it.
         */
        if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
                                         &rpm->debug.lock,
                                         flags))
                return;

        __untrack_all_wakerefs(&rpm->debug, &dbg);
        spin_unlock_irqrestore(&rpm->debug.lock, flags);

        dump_and_free_wakeref_tracking(&dbg);
}

static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
        struct intel_runtime_pm_debug dbg = {};
        unsigned long flags;

        spin_lock_irqsave(&rpm->debug.lock, flags);
        __untrack_all_wakerefs(&rpm->debug, &dbg);
        spin_unlock_irqrestore(&rpm->debug.lock, flags);

        dump_and_free_wakeref_tracking(&dbg);
}

void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
                                    struct drm_printer *p)
{
        struct intel_runtime_pm_debug dbg = {};

        /*
         * Take a consistent snapshot under the lock, but grow the owners
         * copy outside of it: retry with a larger buffer if the count has
         * outpaced our allocation in the meantime.
         */
        do {
                unsigned long alloc = dbg.count;
                depot_stack_handle_t *s;

                spin_lock_irq(&rpm->debug.lock);
                dbg.count = rpm->debug.count;
                if (dbg.count <= alloc) {
                        memcpy(dbg.owners,
                               rpm->debug.owners,
                               dbg.count * sizeof(*s));
                }
                dbg.last_acquire = rpm->debug.last_acquire;
                dbg.last_release = rpm->debug.last_release;
                spin_unlock_irq(&rpm->debug.lock);
                if (dbg.count <= alloc)
                        break;

                s = krealloc(dbg.owners,
                             dbg.count * sizeof(*s),
                             GFP_NOWAIT | __GFP_NOWARN);
                if (!s)
                        goto out;

                dbg.owners = s;
        } while (1);

        __print_intel_runtime_pm_wakeref(p, &dbg);

out:
        kfree(dbg.owners);
}

#else

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
        return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
                                             intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
        atomic_dec(&rpm->wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}

#endif

static void
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
        if (wakelock) {
                /*
                 * Wakelock references add INTEL_RPM_WAKELOCK_BIAS on top of
                 * the reference itself, so raw wakerefs and wakelocks can
                 * share wakeref_count (see intel_rpm_raw_wakeref_count()
                 * and intel_rpm_wakelock_count()).
                 */
                atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
                assert_rpm_wakelock_held(rpm);
        } else {
                atomic_inc(&rpm->wakeref_count);
                assert_rpm_raw_wakeref_held(rpm);
        }
}

static void
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
        if (wakelock) {
                assert_rpm_wakelock_held(rpm);
                atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
        } else {
                assert_rpm_raw_wakeref_held(rpm);
        }

        __intel_wakeref_dec_and_check_tracking(rpm);
}

static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
                                              bool wakelock)
{
        int ret;

        ret = pm_runtime_get_sync(rpm->kdev);
        WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);

        intel_runtime_pm_acquire(rpm, wakelock);

        return track_intel_runtime_pm_wakeref(rpm);
}

/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that it is powered
 * up. Raw references are not considered during wakelock assert checks.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
        return __intel_runtime_pm_get(rpm, false);
}
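
/*
 * Example (an illustrative sketch, not part of this file; the helper name
 * and body are hypothetical): a raw reference is bracketed by
 * intel_runtime_pm_get_raw() and intel_runtime_pm_put_raw(), with the
 * cookie threaded through between the two.
 *
 *	static void example_raw_access(struct intel_runtime_pm *rpm)
 *	{
 *		intel_wakeref_t wakeref;
 *
 *		wakeref = intel_runtime_pm_get_raw(rpm);
 *		... touch hardware without claiming a full wakelock ...
 *		intel_runtime_pm_put_raw(rpm, wakeref);
 *	}
 */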

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
        return __intel_runtime_pm_get(rpm, true);
}
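
/*
 * Example (an illustrative sketch, not part of this file; the helper name
 * and body are hypothetical): the usual pattern brackets hardware access
 * between get() and put(), handing the cookie back so the debug build can
 * match up acquire/release pairs.
 *
 *	static void example_hw_access(struct intel_runtime_pm *rpm)
 *	{
 *		intel_wakeref_t wakeref;
 *
 *		wakeref = intel_runtime_pm_get(rpm);
 *		... access registers, the GTT, etc. ...
 *		intel_runtime_pm_put(rpm, wakeref);
 *	}
 */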

/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up. It is illegal to try
 * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
        if (IS_ENABLED(CONFIG_PM)) {
                /*
                 * In case runtime PM is disabled by the RPM core and we get
                 * an -EINVAL return value, we are not supposed to call this
                 * function, since the power state is undefined. This currently
                 * applies to the late/early system suspend/resume handlers.
                 */
                if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
                        return 0;
        }

        intel_runtime_pm_acquire(rpm, true);

        return track_intel_runtime_pm_wakeref(rpm);
}
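
/*
 * Example (an illustrative sketch, not part of this file; the helper name
 * and body are hypothetical): since the cookie evaluates as false when the
 * device was not in use, callers bail out rather than force a resume.
 *
 *	static void example_opportunistic_access(struct intel_runtime_pm *rpm)
 *	{
 *		intel_wakeref_t wakeref;
 *
 *		wakeref = intel_runtime_pm_get_if_in_use(rpm);
 *		if (!wakeref)
 *			return;
 *
 *		... access hardware, knowing it is already awake ...
 *		intel_runtime_pm_put(rpm, wakeref);
 *	}
 */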

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on.  Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
        assert_rpm_wakelock_held(rpm);
        pm_runtime_get_noresume(rpm->kdev);

        intel_runtime_pm_acquire(rpm, true);

        return track_intel_runtime_pm_wakeref(rpm);
}
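
/*
 * Example (an illustrative sketch, not part of this file; names and the
 * worker wiring are hypothetical): during system suspend the device is
 * known to be awake, so a reference can be taken without resuming and
 * handed to deferred work, which releases it via intel_runtime_pm_put().
 *
 *	static void example_suspend_handler(struct intel_runtime_pm *rpm,
 *					    struct work_struct *work)
 *	{
 *		intel_wakeref_t wakeref;
 *
 *		wakeref = intel_runtime_pm_get_noresume(rpm);
 *		... stash wakeref for the worker, then schedule_work(work);
 *		    the worker calls intel_runtime_pm_put(rpm, wakeref) ...
 *	}
 */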

static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
                                   intel_wakeref_t wref,
                                   bool wakelock)
{
        struct device *kdev = rpm->kdev;

        untrack_intel_runtime_pm_wakeref(rpm, wref);

        intel_runtime_pm_release(rpm, wakelock);

        pm_runtime_mark_last_busy(kdev);
        pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
        __intel_runtime_pm_put(rpm, wref, false);
}

/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
        /* The -1 cookie skips the unmatched-wakeref check in untrack */
        __intel_runtime_pm_put(rpm, -1, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
        __intel_runtime_pm_put(rpm, wref, true);
}
#endif

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
        struct device *kdev = rpm->kdev;

        /*
         * Disable the system suspend direct complete optimization, which can
         * leave the device suspended skipping the driver's suspend handlers
         * if the device was already runtime suspended. This is needed due to
         * the difference in our runtime and system suspend sequence and
         * because the HDA driver may require us to enable the audio power
         * domain during system suspend.
         */
        dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);

        pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
        pm_runtime_mark_last_busy(kdev);

        /*
         * Take a permanent reference to disable the RPM functionality and drop
         * it only when unloading the driver. Use the low level get/put helpers,
         * so the driver's own RPM reference tracking asserts also work on
         * platforms without RPM support.
         */
        if (!rpm->available) {
                int ret;

                pm_runtime_dont_use_autosuspend(kdev);
                ret = pm_runtime_get_sync(kdev);
                WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
        } else {
                pm_runtime_use_autosuspend(kdev);
        }

        /*
         * The core calls the driver load handler with an RPM reference held.
         * We drop that here and will reacquire it during unloading in
         * intel_runtime_pm_disable().
         */
        pm_runtime_put_autosuspend(kdev);
}
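
/*
 * For orientation, a sketch of the expected call order, assembled from the
 * functions in this file (the surrounding driver code is elided):
 *
 *	intel_runtime_pm_init_early(rpm);	early in driver init
 *	intel_runtime_pm_enable(rpm);		at the end of driver load
 *	...
 *	intel_runtime_pm_disable(rpm);		at the start of unload
 *	intel_runtime_pm_driver_release(rpm);	once all wakerefs are dropped
 */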

void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
        struct device *kdev = rpm->kdev;

        /* Transfer rpm ownership back to core */
        WARN(pm_runtime_get_sync(kdev) < 0,
             "Failed to pass rpm ownership back to core\n");

        pm_runtime_dont_use_autosuspend(kdev);

        if (!rpm->available)
                pm_runtime_put(kdev);
}

void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
        int count = atomic_read(&rpm->wakeref_count);

        WARN(count,
             "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
             intel_rpm_raw_wakeref_count(count),
             intel_rpm_wakelock_count(count));

        untrack_all_intel_runtime_pm_wakerefs(rpm);
}

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
        struct drm_i915_private *i915 =
                        container_of(rpm, struct drm_i915_private, runtime_pm);
        struct pci_dev *pdev = i915->drm.pdev;
        struct device *kdev = &pdev->dev;

        rpm->kdev = kdev;
        rpm->available = HAS_RUNTIME_PM(i915);

        init_intel_runtime_pm_wakeref(rpm);
}
