root/drivers/gpu/drm/i915/intel_wakeref.h

/* [<][>][^][v][top][bottom][index][help] */

INCLUDED FROM


DEFINITIONS

This source file includes the following definitions.
  1. intel_wakeref_get
  2. intel_wakeref_get_if_active
  3. intel_wakeref_put
  4. intel_wakeref_lock
  5. intel_wakeref_unlock
  6. intel_wakeref_is_active
  7. __intel_wakeref_defer_park

   1 /*
   2  * SPDX-License-Identifier: MIT
   3  *
   4  * Copyright © 2019 Intel Corporation
   5  */
   6 
   7 #ifndef INTEL_WAKEREF_H
   8 #define INTEL_WAKEREF_H
   9 
  10 #include <linux/atomic.h>
  11 #include <linux/bits.h>
  12 #include <linux/mutex.h>
  13 #include <linux/refcount.h>
  14 #include <linux/stackdepot.h>
  15 #include <linux/timer.h>
  16 #include <linux/workqueue.h>
  17 
/*
 * INTEL_WAKEREF_BUG_ON() - assert an invariant on wakeref state.
 *
 * With CONFIG_DRM_I915_DEBUG enabled the expression is evaluated at
 * runtime and a failure triggers BUG_ON(). In production builds,
 * BUILD_BUG_ON_INVALID() only checks that the expression compiles and
 * generates no code, so the checks are free.
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
  23 
  24 struct intel_runtime_pm;
  25 struct intel_wakeref;
  26 
  27 typedef depot_stack_handle_t intel_wakeref_t;
  28 
/*
 * Device-specific callbacks invoked on the first acquire and the final
 * release of a wakeref, both called underneath wf->mutex (see
 * intel_wakeref_get()/intel_wakeref_put()). Either callback may fail,
 * in which case the transition is unwound/retained respectively.
 */
struct intel_wakeref_ops {
	/* Called on first acquisition; may fail, unwinding the get. */
	int (*get)(struct intel_wakeref *wf);
	/* Called on final release; may fail, retaining the wakeref. */
	int (*put)(struct intel_wakeref *wf);

	unsigned long flags;
/* Run the final put from a worker instead of the caller's context. */
#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
};
  36 
struct intel_wakeref {
	/* Number of active holders; 0 means the wakeref is parked. */
	atomic_t count;
	/* Serialises the 0<->1 transitions and the ops callbacks. */
	struct mutex mutex;

	/* Runtime-pm handle; nonzero while held (see intel_wakeref_is_active()). */
	intel_wakeref_t wakeref;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	/* Worker used for asynchronous puts (INTEL_WAKEREF_PUT_ASYNC). */
	struct work_struct work;
};
  48 
void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct lock_class_key *key);
/*
 * intel_wakeref_init() - initialise @wf with @rpm and @ops.
 *
 * Each invocation site gets its own static lock_class_key so that
 * lockdep treats every wakeref's mutex as a distinct lock class,
 * avoiding false-positive deadlock reports between unrelated wakerefs.
 */
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct lock_class_key __key;				\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)
  58 
  59 int __intel_wakeref_get_first(struct intel_wakeref *wf);
  60 void __intel_wakeref_put_last(struct intel_wakeref *wf);
  61 
  62 /**
  63  * intel_wakeref_get: Acquire the wakeref
  64  * @i915: the drm_i915_private device
  65  * @wf: the wakeref
  66  * @fn: callback for acquired the wakeref, called only on first acquire.
  67  *
  68  * Acquire a hold on the wakeref. The first user to do so, will acquire
  69  * the runtime pm wakeref and then call the @fn underneath the wakeref
  70  * mutex.
  71  *
  72  * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
  73  * will be released and the acquisition unwound, and an error reported.
  74  *
  75  * Returns: 0 if the wakeref was acquired successfully, or a negative error
  76  * code otherwise.
  77  */
  78 static inline int
  79 intel_wakeref_get(struct intel_wakeref *wf)
  80 {
  81         if (unlikely(!atomic_inc_not_zero(&wf->count)))
  82                 return __intel_wakeref_get_first(wf);
  83 
  84         return 0;
  85 }
  86 
  87 /**
  88  * intel_wakeref_get_if_in_use: Acquire the wakeref
  89  * @wf: the wakeref
  90  *
  91  * Acquire a hold on the wakeref, but only if the wakeref is already
  92  * active.
  93  *
  94  * Returns: true if the wakeref was acquired, false otherwise.
  95  */
  96 static inline bool
  97 intel_wakeref_get_if_active(struct intel_wakeref *wf)
  98 {
  99         return atomic_inc_not_zero(&wf->count);
 100 }
 101 
 102 /**
 103  * intel_wakeref_put: Release the wakeref
 104  * @i915: the drm_i915_private device
 105  * @wf: the wakeref
 106  * @fn: callback for releasing the wakeref, called only on final release.
 107  *
 108  * Release our hold on the wakeref. When there are no more users,
 109  * the runtime pm wakeref will be released after the @fn callback is called
 110  * underneath the wakeref mutex.
 111  *
 112  * Note that @fn is allowed to fail, in which case the runtime-pm wakeref
 113  * is retained and an error reported.
 114  *
 115  * Returns: 0 if the wakeref was released successfully, or a negative error
 116  * code otherwise.
 117  */
 118 static inline void
 119 intel_wakeref_put(struct intel_wakeref *wf)
 120 {
 121         INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
 122         if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
 123                 __intel_wakeref_put_last(wf);
 124 }
 125 
 126 /**
 127  * intel_wakeref_lock: Lock the wakeref (mutex)
 128  * @wf: the wakeref
 129  *
 130  * Locks the wakeref to prevent it being acquired or released. New users
 131  * can still adjust the counter, but the wakeref itself (and callback)
 132  * cannot be acquired or released.
 133  */
 134 static inline void
 135 intel_wakeref_lock(struct intel_wakeref *wf)
 136         __acquires(wf->mutex)
 137 {
 138         mutex_lock(&wf->mutex);
 139 }
 140 
 141 /**
 142  * intel_wakeref_unlock: Unlock the wakeref
 143  * @wf: the wakeref
 144  *
 145  * Releases a previously acquired intel_wakeref_lock().
 146  */
 147 static inline void
 148 intel_wakeref_unlock(struct intel_wakeref *wf)
 149         __releases(wf->mutex)
 150 {
 151         mutex_unlock(&wf->mutex);
 152 }
 153 
 154 /**
 155  * intel_wakeref_is_active: Query whether the wakeref is currently held
 156  * @wf: the wakeref
 157  *
 158  * Returns: true if the wakeref is currently held.
 159  */
 160 static inline bool
 161 intel_wakeref_is_active(const struct intel_wakeref *wf)
 162 {
 163         return READ_ONCE(wf->wakeref);
 164 }
 165 
 166 /**
 167  * __intel_wakeref_defer_park: Defer the current park callback
 168  * @wf: the wakeref
 169  */
 170 static inline void
 171 __intel_wakeref_defer_park(struct intel_wakeref *wf)
 172 {
 173         INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
 174         atomic_set_release(&wf->count, 1);
 175 }
 176 
 177 /**
 178  * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 179  * @wf: the wakeref
 180  *
 181  * Wait for the earlier asynchronous release of the wakeref. Note
 182  * this will wait for any third party as well, so make sure you only wait
 183  * when you have control over the wakeref and trust no one else is acquiring
 184  * it.
 185  *
 186  * Return: 0 on success, error code if killed.
 187  */
 188 int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
 189 
/*
 * Timer-based wakeref used to postpone runtime-pm autosuspend for a
 * caller-supplied interval (see intel_wakeref_auto()).
 */
struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	/* Rearmed by intel_wakeref_auto(); presumably drops the wakeref on expiry — verify in the .c file. */
	struct timer_list timer;
	/* Runtime-pm handle held while the delay is pending. */
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};
 197 
 198 /**
 199  * intel_wakeref_auto: Delay the runtime-pm autosuspend
 200  * @wf: the wakeref
 201  * @timeout: relative timeout in jiffies
 202  *
 203  * The runtime-pm core uses a suspend delay after the last wakeref
 204  * is released before triggering runtime suspend of the device. That
 205  * delay is configurable via sysfs with little regard to the device
 206  * characteristics. Instead, we want to tune the autosuspend based on our
 207  * HW knowledge. intel_wakeref_auto() delays the sleep by the supplied
 208  * timeout.
 209  *
 210  * Pass @timeout = 0 to cancel a previous autosuspend by executing the
 211  * suspend immediately.
 212  */
 213 void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);
 214 
 215 void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
 216                              struct intel_runtime_pm *rpm);
 217 void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);
 218 
 219 #endif /* INTEL_WAKEREF_H */

/* [<][>][^][v][top][bottom][index][help] */