root/drivers/gpu/drm/i915/intel_wakeref.c


DEFINITIONS

This source file includes the following definitions:
  1. rpm_get
  2. rpm_put
  3. __intel_wakeref_get_first
  4. ____intel_wakeref_put_last
  5. __intel_wakeref_put_last
  6. __intel_wakeref_put_work
  7. __intel_wakeref_init
  8. intel_wakeref_wait_for_idle
  9. wakeref_auto_timeout
  10. intel_wakeref_auto_init
  11. intel_wakeref_auto
  12. intel_wakeref_auto_fini

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/wait_bit.h>

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"

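/*
 * rpm_get() and rpm_put() bracket the device runtime-pm reference that is
 * acquired on the first wakeref get and released again on the last put.
 */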
static void rpm_get(struct intel_wakeref *wf)
{
	wf->wakeref = intel_runtime_pm_get(wf->rpm);
}

static void rpm_put(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);

	intel_runtime_pm_put(wf->rpm, wakeref);
	INTEL_WAKEREF_BUG_ON(!wakeref);
}

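/*
 * __intel_wakeref_get_first() is the slow path taken when the wakeref count
 * may be zero. Illustrative sketch (an assumption, not part of this file;
 * example_wakeref_get is a hypothetical name): the inline fast path in
 * intel_wakeref.h is expected to look roughly like
 *
 *	static inline int example_wakeref_get(struct intel_wakeref *wf)
 *	{
 *		if (unlikely(!atomic_inc_not_zero(&wf->count)))
 *			return __intel_wakeref_get_first(wf);
 *		return 0;
 *	}
 *
 * so that only the 0 -> 1 transition pays for the mutex, the runtime-pm get
 * and the ops->get() callback below.
 */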
int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);
	if (!atomic_read(&wf->count)) {
		int err;

		rpm_get(wf);

		err = wf->ops->get(wf);
		if (unlikely(err)) {
			rpm_put(wf);
			mutex_unlock(&wf->mutex);
			return err;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}
	atomic_inc(&wf->count);
	mutex_unlock(&wf->mutex);

	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	return 0;
}

static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
	if (!atomic_dec_and_test(&wf->count))
		goto unlock;

	/* ops->put() must reschedule its own release on error/deferral */
	if (likely(!wf->ops->put(wf))) {
		rpm_put(wf);
		wake_up_var(&wf->wakeref);
	}

unlock:
	mutex_unlock(&wf->mutex);
}

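/*
 * __intel_wakeref_put_last() is the slow path for dropping the final
 * reference. Illustrative sketch (an assumption, not part of this file;
 * example_wakeref_put is a hypothetical name): the matching inline fast path
 * in intel_wakeref.h is expected to only reach it once the count would fall
 * to zero, e.g.
 *
 *	static inline void example_wakeref_put(struct intel_wakeref *wf)
 *	{
 *		if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
 *			__intel_wakeref_put_last(wf);
 *	}
 */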
void __intel_wakeref_put_last(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));

	/* Assume we are not in process context and so cannot sleep. */
	if (wf->ops->flags & INTEL_WAKEREF_PUT_ASYNC ||
	    !mutex_trylock(&wf->mutex)) {
		schedule_work(&wf->work);
		return;
	}

	____intel_wakeref_put_last(wf);
}

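/*
 * Worker callback for a deferred put: re-check whether we still hold the
 * final reference and, if so, complete the release under the mutex.
 */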
static void __intel_wakeref_put_work(struct work_struct *wrk)
{
	struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);

	if (atomic_add_unless(&wf->count, -1, 1))
		return;

	mutex_lock(&wf->mutex);
	____intel_wakeref_put_last(wf);
}

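/*
 * Illustrative sketch of initialisation (assumed usage, not part of this
 * file; the example_* names are hypothetical): a user embeds the wakeref in
 * its own state and supplies get/put callbacks plus a lockdep class for the
 * mutex, e.g.
 *
 *	static const struct intel_wakeref_ops example_wf_ops = {
 *		.get = example_get_callback,
 *		.put = example_put_callback,
 *	};
 *
 *	static struct lock_class_key example_key;
 *
 *	__intel_wakeref_init(&example->wakeref, example->rpm,
 *			     &example_wf_ops, &example_key);
 */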
void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct lock_class_key *key)
{
	wf->rpm = rpm;
	wf->ops = ops;

	__mutex_init(&wf->mutex, "wakeref", key);
	atomic_set(&wf->count, 0);
	wf->wakeref = 0;

	INIT_WORK(&wf->work, __intel_wakeref_put_work);
}

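/*
 * Wait until the wakeref is no longer active. Returns 0 once idle, or a
 * negative error code if the wait is interrupted by a fatal signal.
 */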
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
	return wait_var_event_killable(&wf->wakeref,
				       !intel_wakeref_is_active(wf));
}

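/*
 * Timer callback for the auto wakeref: drop the last reference and release
 * the runtime-pm wakeref that was taken on the user's behalf.
 */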
static void wakeref_auto_timeout(struct timer_list *t)
{
	struct intel_wakeref_auto *wf = from_timer(wf, t, timer);
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = fetch_and_zero(&wf->wakeref);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(wf->rpm, wakeref);
}

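/*
 * Initialise an auto wakeref. No runtime-pm reference is taken here; it is
 * acquired lazily by intel_wakeref_auto() and dropped from the timer above.
 */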
void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm)
{
	spin_lock_init(&wf->lock);
	timer_setup(&wf->timer, wakeref_auto_timeout, 0);
	refcount_set(&wf->count, 0);
	wf->wakeref = 0;
	wf->rpm = rpm;
}

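/*
 * Illustrative sketch of use (an assumption, not part of this file; the
 * field name and timeout are hypothetical): a caller that already holds a
 * runtime-pm wakelock can keep the device awake for a grace period after it
 * returns, and later cancel that grace period synchronously:
 *
 *	intel_wakeref_auto(&example->userfault_wakeref,
 *			   msecs_to_jiffies(250));
 *	...
 *	intel_wakeref_auto(&example->userfault_wakeref, 0);
 */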
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
	unsigned long flags;

	if (!timeout) {
		if (del_timer_sync(&wf->timer))
			wakeref_auto_timeout(&wf->timer);
		return;
	}

	/* We may only extend an already active wakeref */
	assert_rpm_wakelock_held(wf->rpm);

	if (!refcount_inc_not_zero(&wf->count)) {
		spin_lock_irqsave(&wf->lock, flags);
		if (!refcount_inc_not_zero(&wf->count)) {
			INTEL_WAKEREF_BUG_ON(wf->wakeref);
			wf->wakeref = intel_runtime_pm_get_if_in_use(wf->rpm);
			refcount_set(&wf->count, 1);
		}
		spin_unlock_irqrestore(&wf->lock, flags);
	}

	/*
	 * If we extend a pending timer, we will only get a single timer
	 * callback and so need to cancel the local inc by running the
	 * elided callback to keep the wf->count balanced.
	 */
	if (mod_timer(&wf->timer, jiffies + timeout))
		wakeref_auto_timeout(&wf->timer);
}

void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0);
	INTEL_WAKEREF_BUG_ON(wf->wakeref);
}
