This source file includes following definitions.
- dev_pm_attach_wake_irq
- dev_pm_set_wake_irq
- dev_pm_clear_wake_irq
- handle_threaded_wake_irq
- dev_pm_set_dedicated_wake_irq
- dev_pm_enable_wake_irq
- dev_pm_disable_wake_irq
- dev_pm_enable_wake_irq_check
- dev_pm_disable_wake_irq_check
- dev_pm_arm_wake_irq
- dev_pm_disarm_wake_irq
1
2
3 #include <linux/device.h>
4 #include <linux/interrupt.h>
5 #include <linux/irq.h>
6 #include <linux/slab.h>
7 #include <linux/pm_runtime.h>
8 #include <linux/pm_wakeirq.h>
9
10 #include "power.h"
11
12
13
14
15
16
17
18
19
20
21 static int dev_pm_attach_wake_irq(struct device *dev, int irq,
22 struct wake_irq *wirq)
23 {
24 unsigned long flags;
25
26 if (!dev || !wirq)
27 return -EINVAL;
28
29 spin_lock_irqsave(&dev->power.lock, flags);
30 if (dev_WARN_ONCE(dev, dev->power.wakeirq,
31 "wake irq already initialized\n")) {
32 spin_unlock_irqrestore(&dev->power.lock, flags);
33 return -EEXIST;
34 }
35
36 dev->power.wakeirq = wirq;
37 device_wakeup_attach_irq(dev, wirq);
38
39 spin_unlock_irqrestore(&dev->power.lock, flags);
40 return 0;
41 }
42
43
44
45
46
47
48
49
50
51
52
53 int dev_pm_set_wake_irq(struct device *dev, int irq)
54 {
55 struct wake_irq *wirq;
56 int err;
57
58 if (irq < 0)
59 return -EINVAL;
60
61 wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
62 if (!wirq)
63 return -ENOMEM;
64
65 wirq->dev = dev;
66 wirq->irq = irq;
67
68 err = dev_pm_attach_wake_irq(dev, irq, wirq);
69 if (err)
70 kfree(wirq);
71
72 return err;
73 }
74 EXPORT_SYMBOL_GPL(dev_pm_set_wake_irq);
75
76
77
78
79
80
81
82
83
84
85
86
/**
 * dev_pm_clear_wake_irq - Detach a device wake IRQ
 * @dev: Device entry
 *
 * Detaches and frees the wake IRQ previously attached with
 * dev_pm_set_wake_irq() or dev_pm_set_dedicated_wake_irq().
 * A no-op if no wake IRQ is attached.
 */
void dev_pm_clear_wake_irq(struct device *dev)
{
	struct wake_irq *wirq = dev->power.wakeirq;
	unsigned long flags;

	if (!wirq)
		return;

	/* Unpublish the wake IRQ under the power lock before tearing it down */
	spin_lock_irqsave(&dev->power.lock, flags);
	device_wakeup_detach_irq(dev);
	dev->power.wakeirq = NULL;
	spin_unlock_irqrestore(&dev->power.lock, flags);

	/* Dedicated wake IRQs were requested by us, so free them here too */
	if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED) {
		free_irq(wirq->irq, wirq);
		wirq->status &= ~WAKE_IRQ_DEDICATED_MASK;
	}
	/* wirq->name is NULL for non-dedicated wake IRQs; kfree(NULL) is fine */
	kfree(wirq->name);
	kfree(wirq);
}
EXPORT_SYMBOL_GPL(dev_pm_clear_wake_irq);
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
/**
 * handle_threaded_wake_irq - Handler for dedicated wake-up interrupts
 * @irq: Device specific dedicated wake-up interrupt
 * @_wirq: Wake IRQ data
 *
 * Threaded handler for a dedicated wake-up interrupt: either records a
 * wakeup event (which can abort an in-progress system suspend) or
 * runtime-resumes the device so it can service the wake condition.
 */
static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
{
	struct wake_irq *wirq = _wirq;
	int res;

	/* If the IRQ is armed for wakeup, report the event and bail out */
	if (irqd_is_wakeup_set(irq_get_irq_data(irq))) {
		pm_wakeup_event(wirq->dev, 0);

		return IRQ_HANDLED;
	}

	/* Synchronous resume: the device must be up before we return */
	res = pm_runtime_resume(wirq->dev);
	if (res < 0)
		dev_warn(wirq->dev,
			 "wake IRQ with no resume: %i\n", res);

	return IRQ_HANDLED;
}
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
/**
 * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
 * @dev: Device entry
 * @irq: Device wake-up interrupt
 *
 * Sets up a threaded interrupt handler for a device that has a
 * dedicated wake-up interrupt in addition to the device IO interrupt.
 * The interrupt starts disabled (IRQ_NOAUTOEN) and is managed for the
 * device via dev_pm_enable_wake_irq() / dev_pm_disable_wake_irq() or
 * the *_check() variants.
 *
 * Returns 0 on success or a negative error code; all resources are
 * released on failure via the goto-cleanup chain below.
 */
int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
{
	struct wake_irq *wirq;
	int err;

	if (irq < 0)
		return -EINVAL;

	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
	if (!wirq)
		return -ENOMEM;

	/* Owned name so /proc/interrupts identifies the consumer device */
	wirq->name = kasprintf(GFP_KERNEL, "%s:wakeup", dev_name(dev));
	if (!wirq->name) {
		err = -ENOMEM;
		goto err_free;
	}

	wirq->dev = dev;
	wirq->irq = irq;
	/* Keep the IRQ off until it is explicitly enabled by PM code */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

	/* Disable at the chip level immediately, not lazily on next IRQ */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);

	/*
	 * Threaded handler (no primary handler): the device may need to
	 * power up before the wake condition can be serviced.
	 */
	err = request_threaded_irq(irq, NULL, handle_threaded_wake_irq,
				   IRQF_ONESHOT, wirq->name, wirq);
	if (err)
		goto err_free_name;

	err = dev_pm_attach_wake_irq(dev, irq, wirq);
	if (err)
		goto err_free_irq;

	wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;

	return err;

err_free_irq:
	free_irq(irq, wirq);
err_free_name:
	kfree(wirq->name);
err_free:
	kfree(wirq);

	return err;
}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
217
218
219
220
221
222
223
224
225
226
227
228
229
230 void dev_pm_enable_wake_irq(struct device *dev)
231 {
232 struct wake_irq *wirq = dev->power.wakeirq;
233
234 if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
235 enable_irq(wirq->irq);
236 }
237 EXPORT_SYMBOL_GPL(dev_pm_enable_wake_irq);
238
239
240
241
242
243
244
245
246
247 void dev_pm_disable_wake_irq(struct device *dev)
248 {
249 struct wake_irq *wirq = dev->power.wakeirq;
250
251 if (wirq && (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED))
252 disable_irq_nosync(wirq->irq);
253 }
254 EXPORT_SYMBOL_GPL(dev_pm_disable_wake_irq);
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270 void dev_pm_enable_wake_irq_check(struct device *dev,
271 bool can_change_status)
272 {
273 struct wake_irq *wirq = dev->power.wakeirq;
274
275 if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
276 return;
277
278 if (likely(wirq->status & WAKE_IRQ_DEDICATED_MANAGED)) {
279 goto enable;
280 } else if (can_change_status) {
281 wirq->status |= WAKE_IRQ_DEDICATED_MANAGED;
282 goto enable;
283 }
284
285 return;
286
287 enable:
288 enable_irq(wirq->irq);
289 }
290
291
292
293
294
295
296
297
298 void dev_pm_disable_wake_irq_check(struct device *dev)
299 {
300 struct wake_irq *wirq = dev->power.wakeirq;
301
302 if (!wirq || !((wirq->status & WAKE_IRQ_DEDICATED_MASK)))
303 return;
304
305 if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
306 disable_irq_nosync(wirq->irq);
307 }
308
309
310
311
312
313
314
315
316 void dev_pm_arm_wake_irq(struct wake_irq *wirq)
317 {
318 if (!wirq)
319 return;
320
321 if (device_may_wakeup(wirq->dev)) {
322 if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
323 !pm_runtime_status_suspended(wirq->dev))
324 enable_irq(wirq->irq);
325
326 enable_irq_wake(wirq->irq);
327 }
328 }
329
330
331
332
333
334
335
336
337 void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
338 {
339 if (!wirq)
340 return;
341
342 if (device_may_wakeup(wirq->dev)) {
343 disable_irq_wake(wirq->irq);
344
345 if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
346 !pm_runtime_status_suspended(wirq->dev))
347 disable_irq_nosync(wirq->irq);
348 }
349 }