This source file includes the following definitions:
- intel_wakeref_get
- intel_wakeref_get_if_active
- intel_wakeref_put
- intel_wakeref_lock
- intel_wakeref_unlock
- intel_wakeref_is_active
- __intel_wakeref_defer_park
#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
#define INTEL_WAKEREF_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

struct intel_runtime_pm;
struct intel_wakeref;

typedef depot_stack_handle_t intel_wakeref_t;

struct intel_wakeref_ops {
	int (*get)(struct intel_wakeref *wf);	/* called on first acquire */
	int (*put)(struct intel_wakeref *wf);	/* called on final release */

	unsigned long flags;
#define INTEL_WAKEREF_PUT_ASYNC BIT(0)	/* run ->put() from a worker */
};

struct intel_wakeref {
	atomic_t count;
	struct mutex mutex;

	intel_wakeref_t wakeref;

	struct intel_runtime_pm *rpm;
	const struct intel_wakeref_ops *ops;

	struct work_struct work;
};

void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct intel_runtime_pm *rpm,
			  const struct intel_wakeref_ops *ops,
			  struct lock_class_key *key);
#define intel_wakeref_init(wf, rpm, ops) do {				\
	static struct lock_class_key __key;				\
									\
	__intel_wakeref_init((wf), (rpm), (ops), &__key);		\
} while (0)

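/*
 * Example (an illustrative sketch, not part of this header): a component
 * embeds a struct intel_wakeref and supplies its park/unpark callbacks
 * via intel_wakeref_init(). The engine_* names below are hypothetical.
 *
 *	static int engine_unpark(struct intel_wakeref *wf) { ... }
 *	static int engine_park(struct intel_wakeref *wf) { ... }
 *
 *	static const struct intel_wakeref_ops engine_wakeref_ops = {
 *		.get = engine_unpark,
 *		.put = engine_park,
 *	};
 *
 *	intel_wakeref_init(&engine->wakeref, rpm, &engine_wakeref_ops);
 */
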
int __intel_wakeref_get_first(struct intel_wakeref *wf);
void __intel_wakeref_put_last(struct intel_wakeref *wf);

/**
 * intel_wakeref_get: Acquire the wakeref
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref. The first user to do so also acquires
 * the runtime-pm wakeref and invokes the @ops->get callback underneath
 * the wakeref mutex. That callback is allowed to fail, in which case
 * the acquisition is unwound and the error reported to the caller.
 *
 * Returns: 0 if the wakeref was acquired successfully, or a negative
 * error code otherwise.
 */
static inline int
intel_wakeref_get(struct intel_wakeref *wf)
{
	if (unlikely(!atomic_inc_not_zero(&wf->count)))
		return __intel_wakeref_get_first(wf);

	return 0;
}

/**
 * intel_wakeref_get_if_active: Acquire the wakeref, only if already active
 * @wf: the wakeref
 *
 * Acquire a hold on the wakeref, but only if the device is already awake
 * (the reference count is non-zero); this never wakes the device itself.
 *
 * Returns: true if an additional hold was acquired, false otherwise.
 */
static inline bool
intel_wakeref_get_if_active(struct intel_wakeref *wf)
{
	return atomic_inc_not_zero(&wf->count);
}
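
/*
 * Sketch (hypothetical caller): piggyback on existing activity without
 * waking the device.
 *
 *	if (intel_wakeref_get_if_active(&engine->wakeref)) {
 *		... the device is known to be awake here ...
 *		intel_wakeref_put(&engine->wakeref);
 *	}
 */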

/**
 * intel_wakeref_put: Release the wakeref
 * @wf: the wakeref
 *
 * Release our hold on the wakeref. When there are no more users, the
 * @ops->put callback is invoked underneath the wakeref mutex (from a
 * worker if INTEL_WAKEREF_PUT_ASYNC is set) and the runtime-pm wakeref
 * is released.
 */
static inline void
intel_wakeref_put(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
		__intel_wakeref_put_last(wf);
}

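/*
 * Typical usage (an illustrative sketch; engine is hypothetical):
 * bracket a period of device activity with a get/put pair.
 *
 *	err = intel_wakeref_get(&engine->wakeref);
 *	if (err)
 *		return err;
 *
 *	... queue work; the device is held awake ...
 *
 *	intel_wakeref_put(&engine->wakeref);
 */
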
/**
 * intel_wakeref_lock: Lock the wakeref (mutex)
 * @wf: the wakeref
 *
 * Locks the wakeref to prevent it being acquired or released. New users
 * can still adjust the counter, but the wakeref itself (and its callbacks)
 * cannot be acquired or released while the mutex is held.
 */
static inline void
intel_wakeref_lock(struct intel_wakeref *wf)
	__acquires(wf->mutex)
{
	mutex_lock(&wf->mutex);
}

/**
 * intel_wakeref_unlock: Unlock the wakeref
 * @wf: the wakeref
 *
 * Releases a previously acquired intel_wakeref_lock().
 */
static inline void
intel_wakeref_unlock(struct intel_wakeref *wf)
	__releases(wf->mutex)
{
	mutex_unlock(&wf->mutex);
}

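/*
 * Sketch (hypothetical caller): holding the mutex freezes the
 * active/idle transition, so the wakeref can be inspected race-free.
 *
 *	intel_wakeref_lock(&engine->wakeref);
 *	if (intel_wakeref_is_active(&engine->wakeref))
 *		... the wakeref cannot be released while locked ...
 *	intel_wakeref_unlock(&engine->wakeref);
 */
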
/**
 * intel_wakeref_is_active: Query whether the wakeref is currently held
 * @wf: the wakeref
 *
 * Returns: true if the wakeref is currently held.
 */
static inline bool
intel_wakeref_is_active(const struct intel_wakeref *wf)
{
	return READ_ONCE(wf->wakeref);
}

/**
 * __intel_wakeref_defer_park: Defer the pending park to the next put
 * @wf: the wakeref
 *
 * Reinstate a single hold while parking so that the final release (and
 * the @ops->put callback) is replayed by a later intel_wakeref_put().
 */
static inline void
__intel_wakeref_defer_park(struct intel_wakeref *wf)
{
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
	atomic_set_release(&wf->count, 1);
}

/**
 * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
 * @wf: the wakeref
 *
 * Wait for any earlier asynchronous release of the wakeref to complete.
 * Note that this will wait on third-party holders as well, so only use
 * it when you control the wakeref and trust that no one else is
 * acquiring it concurrently.
 *
 * Returns: 0 on success, or an error code if the wait was interrupted.
 */
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);

struct intel_wakeref_auto {
	struct intel_runtime_pm *rpm;
	struct timer_list timer;
	intel_wakeref_t wakeref;
	spinlock_t lock;
	refcount_t count;
};

/**
 * intel_wakeref_auto: Delay the runtime-pm autosuspend
 * @wf: the wakeref
 * @timeout: relative timeout in jiffies
 *
 * Take a temporary runtime-pm wakeref on behalf of the caller and
 * automatically release it once @timeout has elapsed, rearming the
 * timer on each call. A @timeout of 0 cancels any pending hold.
 */
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout);
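
/*
 * Sketch (illustrative; names outside this header are assumptions):
 * keep the device awake for a grace period after a userspace access.
 *
 *	intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
 *			   msecs_to_jiffies(250));
 */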

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct intel_runtime_pm *rpm);
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf);

#endif /* INTEL_WAKEREF_H */