This source file includes the following definitions:
- pm_verb
- device_pm_sleep_init
- device_pm_lock
- device_pm_unlock
- device_pm_add
- device_pm_remove
- device_pm_move_before
- device_pm_move_after
- device_pm_move_last
- initcall_debug_start
- initcall_debug_report
- dpm_wait
- dpm_wait_fn
- dpm_wait_for_children
- dpm_wait_for_suppliers
- dpm_wait_for_superior
- dpm_wait_for_consumers
- dpm_wait_for_subordinate
- pm_op
- pm_late_early_op
- pm_noirq_op
- pm_dev_dbg
- pm_dev_err
- dpm_show_time
- dpm_run_callback
- dpm_watchdog_handler
- dpm_watchdog_set
- dpm_watchdog_clear
- suspend_event
- dev_pm_may_skip_resume
- dpm_subsys_resume_noirq_cb
- device_resume_noirq
- is_async
- dpm_async_fn
- async_resume_noirq
- dpm_noirq_resume_devices
- dpm_resume_noirq
- dpm_subsys_resume_early_cb
- device_resume_early
- async_resume_early
- dpm_resume_early
- dpm_resume_start
- device_resume
- async_resume
- dpm_resume
- device_complete
- dpm_complete
- dpm_resume_end
- resume_event
- dpm_superior_set_must_resume
- dpm_subsys_suspend_noirq_cb
- device_must_resume
- __device_suspend_noirq
- async_suspend_noirq
- device_suspend_noirq
- dpm_noirq_suspend_devices
- dpm_suspend_noirq
- dpm_propagate_wakeup_to_parent
- dpm_subsys_suspend_late_cb
- __device_suspend_late
- async_suspend_late
- device_suspend_late
- dpm_suspend_late
- dpm_suspend_end
- legacy_suspend
- dpm_clear_superiors_direct_complete
- __device_suspend
- async_suspend
- device_suspend
- dpm_suspend
- device_prepare
- dpm_prepare
- dpm_suspend_start
- __suspend_report_result
- device_pm_wait_for_dev
- dpm_for_each_dev
- pm_ops_is_empty
- device_pm_check_callbacks
- dev_pm_smart_suspend_and_suspended
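
The pm_op(), pm_late_early_op() and pm_noirq_op() helpers below map each PM
event to a callback supplied by a power domain, device type, class, bus or
driver through struct dev_pm_ops. As a point of reference, here is a minimal
sketch (not part of this file; the foo_* names are assumptions for
illustration) of a hypothetical driver populating that structure:

    #include <linux/platform_device.h>
    #include <linux/pm.h>

    /* Quiesce the hardware; runs with interrupts still enabled. */
    static int foo_suspend(struct device *dev) { return 0; }

    /* Restore the hardware state on the way back to the working state. */
    static int foo_resume(struct device *dev) { return 0; }

    /*
     * Run after suspend_device_irqs() / before resume_device_irqs(), i.e.
     * while driver interrupt handlers are not invoked.
     */
    static int foo_suspend_noirq(struct device *dev) { return 0; }
    static int foo_resume_noirq(struct device *dev) { return 0; }

    static const struct dev_pm_ops foo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
            SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(foo_suspend_noirq, foo_resume_noirq)
    };

    static struct platform_driver foo_driver = {
            .driver = {
                    .name = "foo",
                    .pm = &foo_pm_ops,
            },
    };

With such a driver bound, the suspend and resume paths below find foo_pm_ops
through dev->driver->pm whenever no power domain, type, class or bus callback
takes precedence.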
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * drivers/base/power/main.c - Where the driver meets power management.
4  *
5  * Copyright (c) 2003 Patrick Mochel
6  * Copyright (c) 2003 Open Source Development Lab
7  *
8  * The driver model core calls device_pm_add() when a device is registered.
9  * This will initialize the embedded device_pm_info object in the device
10  * and add it to the list of power-controlled devices. sysfs entries for
11  * controlling device power management will also be added.
12  *
13  * A separate list is used for keeping track of power info, because the power
14  * domain dependencies may differ from the ancestral dependencies that the
15  * subsystem list maintains.
16  */
17 
18 #define pr_fmt(fmt) "PM: " fmt
19
20 #include <linux/device.h>
21 #include <linux/export.h>
22 #include <linux/mutex.h>
23 #include <linux/pm.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/pm-trace.h>
26 #include <linux/pm_wakeirq.h>
27 #include <linux/interrupt.h>
28 #include <linux/sched.h>
29 #include <linux/sched/debug.h>
30 #include <linux/async.h>
31 #include <linux/suspend.h>
32 #include <trace/events/power.h>
33 #include <linux/cpufreq.h>
34 #include <linux/cpuidle.h>
35 #include <linux/devfreq.h>
36 #include <linux/timer.h>
37
38 #include "../base.h"
39 #include "power.h"
40
41 typedef int (*pm_callback_t)(struct device *);
42
43 /*
44  * The entries in the dpm_list list are in a depth first order, simply
45  * because children are guaranteed to be discovered after parents, and
46  * are inserted at the back of the list on discovery.
47  *
48  * Since device_pm_add() may be called with a device lock held,
49  * we must never try to acquire a device lock while holding
50  * dpm_list_mutex.
51  */
52 
53 LIST_HEAD(dpm_list);
54 static LIST_HEAD(dpm_prepared_list);
55 static LIST_HEAD(dpm_suspended_list);
56 static LIST_HEAD(dpm_late_early_list);
57 static LIST_HEAD(dpm_noirq_list);
58
59 struct suspend_stats suspend_stats;
60 static DEFINE_MUTEX(dpm_list_mtx);
61 static pm_message_t pm_transition;
62
63 static int async_error;
64
65 static const char *pm_verb(int event)
66 {
67 switch (event) {
68 case PM_EVENT_SUSPEND:
69 return "suspend";
70 case PM_EVENT_RESUME:
71 return "resume";
72 case PM_EVENT_FREEZE:
73 return "freeze";
74 case PM_EVENT_QUIESCE:
75 return "quiesce";
76 case PM_EVENT_HIBERNATE:
77 return "hibernate";
78 case PM_EVENT_THAW:
79 return "thaw";
80 case PM_EVENT_RESTORE:
81 return "restore";
82 case PM_EVENT_RECOVER:
83 return "recover";
84 default:
85 return "(unknown PM event)";
86 }
87 }
88
89 /**
90  * device_pm_sleep_init - Initialize system suspend-related device fields.
91  * @dev: Device object being initialized.
92  */
93 void device_pm_sleep_init(struct device *dev)
94 {
95 dev->power.is_prepared = false;
96 dev->power.is_suspended = false;
97 dev->power.is_noirq_suspended = false;
98 dev->power.is_late_suspended = false;
99 init_completion(&dev->power.completion);
100 complete_all(&dev->power.completion);
101 dev->power.wakeup = NULL;
102 INIT_LIST_HEAD(&dev->power.entry);
103 }
104
105 /**
106  * device_pm_lock - Lock the list of active devices used by the PM core.
107  */
108 void device_pm_lock(void)
109 {
110 mutex_lock(&dpm_list_mtx);
111 }
112
113 /**
114  * device_pm_unlock - Unlock the list of active devices used by the PM core.
115  */
116 void device_pm_unlock(void)
117 {
118 mutex_unlock(&dpm_list_mtx);
119 }
120
121 /**
122  * device_pm_add - Add a device to the PM core's list of active devices.
123  * @dev: Device to add to the list.
124  */
125 void device_pm_add(struct device *dev)
126 {
127 /* Skip PM setup/initialization. */
128 if (device_pm_not_required(dev))
129 return;
130
131 pr_debug("Adding info for %s:%s\n",
132 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
133 device_pm_check_callbacks(dev);
134 mutex_lock(&dpm_list_mtx);
135 if (dev->parent && dev->parent->power.is_prepared)
136 dev_warn(dev, "parent %s should not be sleeping\n",
137 dev_name(dev->parent));
138 list_add_tail(&dev->power.entry, &dpm_list);
139 dev->power.in_dpm_list = true;
140 mutex_unlock(&dpm_list_mtx);
141 }
142
143 /**
144  * device_pm_remove - Remove a device from the PM core's list of active devices.
145  * @dev: Device to be removed from the list.
146  */
147 void device_pm_remove(struct device *dev)
148 {
149 if (device_pm_not_required(dev))
150 return;
151
152 pr_debug("Removing info for %s:%s\n",
153 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
154 complete_all(&dev->power.completion);
155 mutex_lock(&dpm_list_mtx);
156 list_del_init(&dev->power.entry);
157 dev->power.in_dpm_list = false;
158 mutex_unlock(&dpm_list_mtx);
159 device_wakeup_disable(dev);
160 pm_runtime_remove(dev);
161 device_pm_check_callbacks(dev);
162 }
163
164 /**
165  * device_pm_move_before - Move device in the PM core's list of active devices.
166  * @deva: Device to move in dpm_list.
167  * @devb: Device @deva should come before.
168  */
169 void device_pm_move_before(struct device *deva, struct device *devb)
170 {
171 pr_debug("Moving %s:%s before %s:%s\n",
172 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
173 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
174
175 list_move_tail(&deva->power.entry, &devb->power.entry);
176 }
177
178 /**
179  * device_pm_move_after - Move device in the PM core's list of active devices.
180  * @deva: Device to move in dpm_list.
181  * @devb: Device @deva should come after.
182  */
183 void device_pm_move_after(struct device *deva, struct device *devb)
184 {
185 pr_debug("Moving %s:%s after %s:%s\n",
186 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
187 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
188
189 list_move(&deva->power.entry, &devb->power.entry);
190 }
191
192 /**
193  * device_pm_move_last - Move device to end of the PM core's list of active devices.
194  * @dev: Device to move in dpm_list.
195  */
196 void device_pm_move_last(struct device *dev)
197 {
198 pr_debug("Moving %s:%s to end of list\n",
199 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
200 list_move_tail(&dev->power.entry, &dpm_list);
201 }
202
203 static ktime_t initcall_debug_start(struct device *dev, void *cb)
204 {
205 if (!pm_print_times_enabled)
206 return 0;
207
208 dev_info(dev, "calling %pS @ %i, parent: %s\n", cb,
209 task_pid_nr(current),
210 dev->parent ? dev_name(dev->parent) : "none");
211 return ktime_get();
212 }
213
214 static void initcall_debug_report(struct device *dev, ktime_t calltime,
215 void *cb, int error)
216 {
217 ktime_t rettime;
218 s64 nsecs;
219
220 if (!pm_print_times_enabled)
221 return;
222
223 rettime = ktime_get();
224 nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
225
226 dev_info(dev, "%pS returned %d after %Ld usecs\n", cb, error,
227 (unsigned long long)nsecs >> 10); /* ns to us, approximated by >> 10 */
228 }
229
230 /**
231  * dpm_wait - Wait for a PM operation to complete.
232  * @dev: Device to wait for.
233  * @async: If unset, wait only if the device's power.async_suspend flag is set.
234  */
235 static void dpm_wait(struct device *dev, bool async)
236 {
237 if (!dev)
238 return;
239
240 if (async || (pm_async_enabled && dev->power.async_suspend))
241 wait_for_completion(&dev->power.completion);
242 }
243
244 static int dpm_wait_fn(struct device *dev, void *async_ptr)
245 {
246 dpm_wait(dev, *((bool *)async_ptr));
247 return 0;
248 }
249
250 static void dpm_wait_for_children(struct device *dev, bool async)
251 {
252 device_for_each_child(dev, &async, dpm_wait_fn);
253 }
254
255 static void dpm_wait_for_suppliers(struct device *dev, bool async)
256 {
257 struct device_link *link;
258 int idx;
259
260 idx = device_links_read_lock();
261
262 /*
263  * If the supplier goes away right after we've checked the link to it,
264  * we'll wait for its completion to change the state, but that's fine,
265  * because the only things that will block as a result are the SRCU
266  * callbacks freeing the link objects for the links in the list we're
267  * walking.
268  */
269 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
270 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
271 dpm_wait(link->supplier, async);
272
273 device_links_read_unlock(idx);
274 }
275
276 static bool dpm_wait_for_superior(struct device *dev, bool async)
277 {
278 struct device *parent;
279
280 /*
281  * If the device is resumed asynchronously and the parent's callback
282  * deletes both the device and the parent itself, the parent object may
283  * be freed while this function is running, so avoid that by reference
284  * counting the parent once more unless the device has been deleted
285  * already (in which case return right away).
286  */
287 mutex_lock(&dpm_list_mtx);
288
289 if (!device_pm_initialized(dev)) {
290 mutex_unlock(&dpm_list_mtx);
291 return false;
292 }
293
294 parent = get_device(dev->parent);
295
296 mutex_unlock(&dpm_list_mtx);
297
298 dpm_wait(parent, async);
299 put_device(parent);
300
301 dpm_wait_for_suppliers(dev, async);
302
303 /*
304  * If the parent's callback has deleted the device, attempting to resume
305  * it would be invalid, so avoid doing that then.
306  */
307 return device_pm_initialized(dev);
308 }
309
310 static void dpm_wait_for_consumers(struct device *dev, bool async)
311 {
312 struct device_link *link;
313 int idx;
314
315 idx = device_links_read_lock();
316
317 /*
318  * The status of a device link can only be changed from "dormant" by a
319  * probe, but that cannot happen during system suspend/resume.
320  *
321  * In theory it can change to "dormant" at that time, but then it is
322  * reasonable to wait for the target device anyway (e.g. if it goes
323  * away, it's better to wait for its removal to complete than to never
324  * complete the wait in case it returns).
325  */
326 list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
327 if (READ_ONCE(link->status) != DL_STATE_DORMANT)
328 dpm_wait(link->consumer, async);
329
330 device_links_read_unlock(idx);
331 }
332
333 static void dpm_wait_for_subordinate(struct device *dev, bool async)
334 {
335 dpm_wait_for_children(dev, async);
336 dpm_wait_for_consumers(dev, async);
337 }
338
339 /**
340  * pm_op - Return the PM operation appropriate for given PM event.
341  * @ops: PM operations to choose from.
342  * @state: PM transition of the system being carried out.
343  */
344 static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
345 {
346 switch (state.event) {
347 #ifdef CONFIG_SUSPEND
348 case PM_EVENT_SUSPEND:
349 return ops->suspend;
350 case PM_EVENT_RESUME:
351 return ops->resume;
352 #endif
353 #ifdef CONFIG_HIBERNATE_CALLBACKS
354 case PM_EVENT_FREEZE:
355 case PM_EVENT_QUIESCE:
356 return ops->freeze;
357 case PM_EVENT_HIBERNATE:
358 return ops->poweroff;
359 case PM_EVENT_THAW:
360 case PM_EVENT_RECOVER:
361 return ops->thaw;
362
363 case PM_EVENT_RESTORE:
364 return ops->restore;
365 #endif
366 }
367
368 return NULL;
369 }
370
371 /**
372  * pm_late_early_op - Return the PM operation appropriate for given PM event.
373  * @ops: PM operations to choose from.
374  * @state: PM transition of the system being carried out.
375  *
376  * Runtime PM is disabled for @dev while this function is being executed.
377  */
378 static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
379 pm_message_t state)
380 {
381 switch (state.event) {
382 #ifdef CONFIG_SUSPEND
383 case PM_EVENT_SUSPEND:
384 return ops->suspend_late;
385 case PM_EVENT_RESUME:
386 return ops->resume_early;
387 #endif
388 #ifdef CONFIG_HIBERNATE_CALLBACKS
389 case PM_EVENT_FREEZE:
390 case PM_EVENT_QUIESCE:
391 return ops->freeze_late;
392 case PM_EVENT_HIBERNATE:
393 return ops->poweroff_late;
394 case PM_EVENT_THAW:
395 case PM_EVENT_RECOVER:
396 return ops->thaw_early;
397 case PM_EVENT_RESTORE:
398 return ops->restore_early;
399 #endif
400 }
401
402 return NULL;
403 }
404
405 /**
406  * pm_noirq_op - Return the PM operation appropriate for given PM event.
407  * @ops: PM operations to choose from.
408  * @state: PM transition of the system being carried out.
409  *
410  * The driver of @dev will not receive interrupts while this function is being
411  * executed.
412  */
413 static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
414 {
415 switch (state.event) {
416 #ifdef CONFIG_SUSPEND
417 case PM_EVENT_SUSPEND:
418 return ops->suspend_noirq;
419 case PM_EVENT_RESUME:
420 return ops->resume_noirq;
421 #endif
422 #ifdef CONFIG_HIBERNATE_CALLBACKS
423 case PM_EVENT_FREEZE:
424 case PM_EVENT_QUIESCE:
425 return ops->freeze_noirq;
426 case PM_EVENT_HIBERNATE:
427 return ops->poweroff_noirq;
428 case PM_EVENT_THAW:
429 case PM_EVENT_RECOVER:
430 return ops->thaw_noirq;
431 case PM_EVENT_RESTORE:
432 return ops->restore_noirq;
433 #endif
434 }
435
436 return NULL;
437 }
438
439 static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
440 {
441 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
442 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
443 ", may wakeup" : "");
444 }
445
446 static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
447 int error)
448 {
449 pr_err("Device %s failed to %s%s: error %d\n",
450 dev_name(dev), pm_verb(state.event), info, error);
451 }
452
453 static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
454 const char *info)
455 {
456 ktime_t calltime;
457 u64 usecs64;
458 int usecs;
459
460 calltime = ktime_get();
461 usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
462 do_div(usecs64, NSEC_PER_USEC);
463 usecs = usecs64;
464 if (usecs == 0)
465 usecs = 1;
466
467 pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
468 info ?: "", info ? " " : "", pm_verb(state.event),
469 error ? "aborted" : "complete",
470 usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
471 }
472
473 static int dpm_run_callback(pm_callback_t cb, struct device *dev,
474 pm_message_t state, const char *info)
475 {
476 ktime_t calltime;
477 int error;
478
479 if (!cb)
480 return 0;
481
482 calltime = initcall_debug_start(dev, cb);
483
484 pm_dev_dbg(dev, state, info);
485 trace_device_pm_callback_start(dev, info, state.event);
486 error = cb(dev);
487 trace_device_pm_callback_end(dev, error);
488 suspend_report_result(cb, error);
489
490 initcall_debug_report(dev, calltime, cb, error);
491
492 return error;
493 }
494
495 #ifdef CONFIG_DPM_WATCHDOG
496 struct dpm_watchdog {
497 struct device *dev;
498 struct task_struct *tsk;
499 struct timer_list timer;
500 };
501
502 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
503 struct dpm_watchdog wd
504
505 /**
506  * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
507  * @t: The timer that PM watchdog depends on.
508  *
509  * Called when a driver has timed out suspending or resuming.
510  * There's not much we can do here to recover so panic() to
511  * capture a crash-dump in pstore.
512  */
513 static void dpm_watchdog_handler(struct timer_list *t)
514 {
515 struct dpm_watchdog *wd = from_timer(wd, t, timer);
516
517 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
518 show_stack(wd->tsk, NULL);
519 panic("%s %s: unrecoverable failure\n",
520 dev_driver_string(wd->dev), dev_name(wd->dev));
521 }
522
523 /**
524  * dpm_watchdog_set - Enable the PM watchdog for given device.
525  * @wd: Watchdog. Must be allocated on the stack.
526  * @dev: Device to handle.
527  */
528 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
529 {
530 struct timer_list *timer = &wd->timer;
531
532 wd->dev = dev;
533 wd->tsk = current;
534
535 timer_setup_on_stack(timer, dpm_watchdog_handler, 0);
536
537 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
538 add_timer(timer);
539 }
540
541 /**
542  * dpm_watchdog_clear - Disable the suspend/resume watchdog.
543  * @wd: Watchdog to disable.
544  */
545 static void dpm_watchdog_clear(struct dpm_watchdog *wd)
546 {
547 struct timer_list *timer = &wd->timer;
548
549 del_timer_sync(timer);
550 destroy_timer_on_stack(timer);
551 }
552 #else
553 #define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
554 #define dpm_watchdog_set(x, y)
555 #define dpm_watchdog_clear(x)
556 #endif
557
558 /*------------------------- Resume routines -------------------------*/
559 
560 /**
561  * suspend_event - Return a "suspend" message for given "resume" one.
562  * @resume_msg: PM message representing a system-wide resume transition.
563  */
564 static pm_message_t suspend_event(pm_message_t resume_msg)
565 {
566 switch (resume_msg.event) {
567 case PM_EVENT_RESUME:
568 return PMSG_SUSPEND;
569 case PM_EVENT_THAW:
570 case PM_EVENT_RESTORE:
571 return PMSG_FREEZE;
572 case PM_EVENT_RECOVER:
573 return PMSG_HIBERNATE;
574 }
575 return PMSG_ON;
576 }
577
578 /**
579  * dev_pm_may_skip_resume - System-wide device resume optimization check.
580  * @dev: Target device.
581  *
582  * Checks whether or not the device may be left in suspend after a system-wide
583  * transition to the working state.
584  */
585 bool dev_pm_may_skip_resume(struct device *dev)
586 {
587 return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE;
588 }
589
590 static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev,
591 pm_message_t state,
592 const char **info_p)
593 {
594 pm_callback_t callback;
595 const char *info;
596
597 if (dev->pm_domain) {
598 info = "noirq power domain ";
599 callback = pm_noirq_op(&dev->pm_domain->ops, state);
600 } else if (dev->type && dev->type->pm) {
601 info = "noirq type ";
602 callback = pm_noirq_op(dev->type->pm, state);
603 } else if (dev->class && dev->class->pm) {
604 info = "noirq class ";
605 callback = pm_noirq_op(dev->class->pm, state);
606 } else if (dev->bus && dev->bus->pm) {
607 info = "noirq bus ";
608 callback = pm_noirq_op(dev->bus->pm, state);
609 } else {
610 return NULL;
611 }
612
613 if (info_p)
614 *info_p = info;
615
616 return callback;
617 }
618
619 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
620 pm_message_t state,
621 const char **info_p);
622
623 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
624 pm_message_t state,
625 const char **info_p);
626
627 /**
628  * device_resume_noirq - Execute a "noirq resume" callback for given device.
629  * @dev: Device to handle.
630  * @state: PM transition of the system being carried out.
631  * @async: If true, the device is being resumed asynchronously.
632  *
633  * The driver of @dev will not receive interrupts while this function is being
634  * executed.
635  */
636 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
637 {
638 pm_callback_t callback;
639 const char *info;
640 bool skip_resume;
641 int error = 0;
642
643 TRACE_DEVICE(dev);
644 TRACE_RESUME(0);
645
646 if (dev->power.syscore || dev->power.direct_complete)
647 goto Out;
648
649 if (!dev->power.is_noirq_suspended)
650 goto Out;
651
652 if (!dpm_wait_for_superior(dev, async))
653 goto Out;
654
655 skip_resume = dev_pm_may_skip_resume(dev);
656
657 callback = dpm_subsys_resume_noirq_cb(dev, state, &info);
658 if (callback)
659 goto Run;
660
661 if (skip_resume)
662 goto Skip;
663
664 if (dev_pm_smart_suspend_and_suspended(dev)) {
665 pm_message_t suspend_msg = suspend_event(state);
666
667 /*
668  * If the "freeze" callbacks have been skipped during a transition
669  * related to hibernation, the subsequent "thaw" callbacks must be
670  * skipped too, or bad things may happen. Otherwise, resume callbacks
671  * are going to be run for the device, so its runtime PM status must
672  * be changed to reflect the new state after the transition under
673  * way.
674  */
675 if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) &&
676 !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) {
677 if (state.event == PM_EVENT_THAW) {
678 skip_resume = true;
679 goto Skip;
680 } else {
681 pm_runtime_set_active(dev);
682 }
683 }
684 }
685
686 if (dev->driver && dev->driver->pm) {
687 info = "noirq driver ";
688 callback = pm_noirq_op(dev->driver->pm, state);
689 }
690
691 Run:
692 error = dpm_run_callback(callback, dev, state, info);
693
694 Skip:
695 dev->power.is_noirq_suspended = false;
696
697 if (skip_resume) {
698 /* Make the next phases of resume skip the device. */
699 dev->power.is_late_suspended = false;
700 dev->power.is_suspended = false;
701
702 /*
703  * The device is going to be left in suspend, but it might not have
704  * been in runtime suspend before the system suspended, so its runtime
705  * PM status needs to be updated to avoid confusing the runtime PM
706  * framework when runtime PM is enabled for the device again.
707  */
708 pm_runtime_set_suspended(dev);
709 }
710
711 Out:
712 complete_all(&dev->power.completion);
713 TRACE_RESUME(error);
714 return error;
715 }
716
717 static bool is_async(struct device *dev)
718 {
719 return dev->power.async_suspend && pm_async_enabled
720 && !pm_trace_is_enabled();
721 }
722
723 static bool dpm_async_fn(struct device *dev, async_func_t func)
724 {
725 reinit_completion(&dev->power.completion);
726
727 if (is_async(dev)) {
728 get_device(dev);
729 async_schedule_dev(func, dev);
730 return true;
731 }
732
733 return false;
734 }
735
736 static void async_resume_noirq(void *data, async_cookie_t cookie)
737 {
738 struct device *dev = (struct device *)data;
739 int error;
740
741 error = device_resume_noirq(dev, pm_transition, true);
742 if (error)
743 pm_dev_err(dev, pm_transition, " async", error);
744
745 put_device(dev);
746 }
747
748 static void dpm_noirq_resume_devices(pm_message_t state)
749 {
750 struct device *dev;
751 ktime_t starttime = ktime_get();
752
753 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
754 mutex_lock(&dpm_list_mtx);
755 pm_transition = state;
756
757 /*
758  * Advance the async threads upfront,
759  * in case the starting of async threads is
760  * delayed by non-async resuming devices.
761  */
762 list_for_each_entry(dev, &dpm_noirq_list, power.entry)
763 dpm_async_fn(dev, async_resume_noirq);
764
765 while (!list_empty(&dpm_noirq_list)) {
766 dev = to_device(dpm_noirq_list.next);
767 get_device(dev);
768 list_move_tail(&dev->power.entry, &dpm_late_early_list);
769 mutex_unlock(&dpm_list_mtx);
770
771 if (!is_async(dev)) {
772 int error;
773
774 error = device_resume_noirq(dev, state, false);
775 if (error) {
776 suspend_stats.failed_resume_noirq++;
777 dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
778 dpm_save_failed_dev(dev_name(dev));
779 pm_dev_err(dev, state, " noirq", error);
780 }
781 }
782
783 mutex_lock(&dpm_list_mtx);
784 put_device(dev);
785 }
786 mutex_unlock(&dpm_list_mtx);
787 async_synchronize_full();
788 dpm_show_time(starttime, state, 0, "noirq");
789 trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
790 }
791
792 /**
793  * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
794  * @state: PM transition of the system being carried out.
795  *
796  * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
797  * allow device drivers' interrupt handlers to be called.
798  */
799 void dpm_resume_noirq(pm_message_t state)
800 {
801 dpm_noirq_resume_devices(state);
802
803 resume_device_irqs();
804 device_wakeup_disarm_wake_irqs();
805
806 cpuidle_resume();
807 }
808
809 static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev,
810 pm_message_t state,
811 const char **info_p)
812 {
813 pm_callback_t callback;
814 const char *info;
815
816 if (dev->pm_domain) {
817 info = "early power domain ";
818 callback = pm_late_early_op(&dev->pm_domain->ops, state);
819 } else if (dev->type && dev->type->pm) {
820 info = "early type ";
821 callback = pm_late_early_op(dev->type->pm, state);
822 } else if (dev->class && dev->class->pm) {
823 info = "early class ";
824 callback = pm_late_early_op(dev->class->pm, state);
825 } else if (dev->bus && dev->bus->pm) {
826 info = "early bus ";
827 callback = pm_late_early_op(dev->bus->pm, state);
828 } else {
829 return NULL;
830 }
831
832 if (info_p)
833 *info_p = info;
834
835 return callback;
836 }
837
838 /**
839  * device_resume_early - Execute an "early resume" callback for given device.
840  * @dev: Device to handle.
841  * @state: PM transition of the system being carried out.
842  * @async: If true, the device is being resumed asynchronously.
843  *
844  * Runtime PM is disabled for @dev while this function is being executed.
845  */
846 static int device_resume_early(struct device *dev, pm_message_t state, bool async)
847 {
848 pm_callback_t callback;
849 const char *info;
850 int error = 0;
851
852 TRACE_DEVICE(dev);
853 TRACE_RESUME(0);
854
855 if (dev->power.syscore || dev->power.direct_complete)
856 goto Out;
857
858 if (!dev->power.is_late_suspended)
859 goto Out;
860
861 if (!dpm_wait_for_superior(dev, async))
862 goto Out;
863
864 callback = dpm_subsys_resume_early_cb(dev, state, &info);
865
866 if (!callback && dev->driver && dev->driver->pm) {
867 info = "early driver ";
868 callback = pm_late_early_op(dev->driver->pm, state);
869 }
870
871 error = dpm_run_callback(callback, dev, state, info);
872 dev->power.is_late_suspended = false;
873
874 Out:
875 TRACE_RESUME(error);
876
877 pm_runtime_enable(dev);
878 complete_all(&dev->power.completion);
879 return error;
880 }
881
882 static void async_resume_early(void *data, async_cookie_t cookie)
883 {
884 struct device *dev = (struct device *)data;
885 int error;
886
887 error = device_resume_early(dev, pm_transition, true);
888 if (error)
889 pm_dev_err(dev, pm_transition, " async", error);
890
891 put_device(dev);
892 }
893
894 /**
895  * dpm_resume_early - Execute "early resume" callbacks for all devices.
896  * @state: PM transition of the system being carried out.
897  */
898 void dpm_resume_early(pm_message_t state)
899 {
900 struct device *dev;
901 ktime_t starttime = ktime_get();
902
903 trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
904 mutex_lock(&dpm_list_mtx);
905 pm_transition = state;
906
907 /*
908  * Advance the async threads upfront,
909  * in case the starting of async threads is
910  * delayed by non-async resuming devices.
911  */
912 list_for_each_entry(dev, &dpm_late_early_list, power.entry)
913 dpm_async_fn(dev, async_resume_early);
914
915 while (!list_empty(&dpm_late_early_list)) {
916 dev = to_device(dpm_late_early_list.next);
917 get_device(dev);
918 list_move_tail(&dev->power.entry, &dpm_suspended_list);
919 mutex_unlock(&dpm_list_mtx);
920
921 if (!is_async(dev)) {
922 int error;
923
924 error = device_resume_early(dev, state, false);
925 if (error) {
926 suspend_stats.failed_resume_early++;
927 dpm_save_failed_step(SUSPEND_RESUME_EARLY);
928 dpm_save_failed_dev(dev_name(dev));
929 pm_dev_err(dev, state, " early", error);
930 }
931 }
932 mutex_lock(&dpm_list_mtx);
933 put_device(dev);
934 }
935 mutex_unlock(&dpm_list_mtx);
936 async_synchronize_full();
937 dpm_show_time(starttime, state, 0, "early");
938 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
939 }
940
941 /**
942  * dpm_resume_start - Execute "noirq" and "early" device callbacks.
943  * @state: PM transition of the system being carried out.
944  */
945 void dpm_resume_start(pm_message_t state)
946 {
947 dpm_resume_noirq(state);
948 dpm_resume_early(state);
949 }
950 EXPORT_SYMBOL_GPL(dpm_resume_start);
951
952 /**
953  * device_resume - Execute "resume" callbacks for given device.
954  * @dev: Device to handle.
955  * @state: PM transition of the system being carried out.
956  * @async: If true, the device is being resumed asynchronously.
957  */
958 static int device_resume(struct device *dev, pm_message_t state, bool async)
959 {
960 pm_callback_t callback = NULL;
961 const char *info = NULL;
962 int error = 0;
963 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
964
965 TRACE_DEVICE(dev);
966 TRACE_RESUME(0);
967
968 if (dev->power.syscore)
969 goto Complete;
970
971 if (dev->power.direct_complete) {
972 /* Match the pm_runtime_disable() in __device_suspend(). */
973 pm_runtime_enable(dev);
974 goto Complete;
975 }
976
977 if (!dpm_wait_for_superior(dev, async))
978 goto Complete;
979
980 dpm_watchdog_set(&wd, dev);
981 device_lock(dev);
982
983 /*
984  * This is a fib.  But we'll allow new children to be added below a
985  * resumed device, even if the device hasn't been completed yet.
986  */
987 dev->power.is_prepared = false;
988
989 if (!dev->power.is_suspended)
990 goto Unlock;
991
992 if (dev->pm_domain) {
993 info = "power domain ";
994 callback = pm_op(&dev->pm_domain->ops, state);
995 goto Driver;
996 }
997
998 if (dev->type && dev->type->pm) {
999 info = "type ";
1000 callback = pm_op(dev->type->pm, state);
1001 goto Driver;
1002 }
1003
1004 if (dev->class && dev->class->pm) {
1005 info = "class ";
1006 callback = pm_op(dev->class->pm, state);
1007 goto Driver;
1008 }
1009
1010 if (dev->bus) {
1011 if (dev->bus->pm) {
1012 info = "bus ";
1013 callback = pm_op(dev->bus->pm, state);
1014 } else if (dev->bus->resume) {
1015 info = "legacy bus ";
1016 callback = dev->bus->resume;
1017 goto End;
1018 }
1019 }
1020
1021 Driver:
1022 if (!callback && dev->driver && dev->driver->pm) {
1023 info = "driver ";
1024 callback = pm_op(dev->driver->pm, state);
1025 }
1026
1027 End:
1028 error = dpm_run_callback(callback, dev, state, info);
1029 dev->power.is_suspended = false;
1030
1031 Unlock:
1032 device_unlock(dev);
1033 dpm_watchdog_clear(&wd);
1034
1035 Complete:
1036 complete_all(&dev->power.completion);
1037
1038 TRACE_RESUME(error);
1039
1040 return error;
1041 }
1042
1043 static void async_resume(void *data, async_cookie_t cookie)
1044 {
1045 struct device *dev = (struct device *)data;
1046 int error;
1047
1048 error = device_resume(dev, pm_transition, true);
1049 if (error)
1050 pm_dev_err(dev, pm_transition, " async", error);
1051 put_device(dev);
1052 }
1053
1054 /**
1055  * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
1056  * @state: PM transition of the system being carried out.
1057  *
1058  * Execute the appropriate "resume" callback for all devices whose status
1059  * indicates that they are suspended.
1060  */
1061 void dpm_resume(pm_message_t state)
1062 {
1063 struct device *dev;
1064 ktime_t starttime = ktime_get();
1065
1066 trace_suspend_resume(TPS("dpm_resume"), state.event, true);
1067 might_sleep();
1068
1069 mutex_lock(&dpm_list_mtx);
1070 pm_transition = state;
1071 async_error = 0;
1072
1073 list_for_each_entry(dev, &dpm_suspended_list, power.entry)
1074 dpm_async_fn(dev, async_resume);
1075
1076 while (!list_empty(&dpm_suspended_list)) {
1077 dev = to_device(dpm_suspended_list.next);
1078 get_device(dev);
1079 if (!is_async(dev)) {
1080 int error;
1081
1082 mutex_unlock(&dpm_list_mtx);
1083
1084 error = device_resume(dev, state, false);
1085 if (error) {
1086 suspend_stats.failed_resume++;
1087 dpm_save_failed_step(SUSPEND_RESUME);
1088 dpm_save_failed_dev(dev_name(dev));
1089 pm_dev_err(dev, state, "", error);
1090 }
1091
1092 mutex_lock(&dpm_list_mtx);
1093 }
1094 if (!list_empty(&dev->power.entry))
1095 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1096 put_device(dev);
1097 }
1098 mutex_unlock(&dpm_list_mtx);
1099 async_synchronize_full();
1100 dpm_show_time(starttime, state, 0, NULL);
1101
1102 cpufreq_resume();
1103 devfreq_resume();
1104 trace_suspend_resume(TPS("dpm_resume"), state.event, false);
1105 }
1106
1107 /**
1108  * device_complete - Complete a PM transition for given device.
1109  * @dev: Device to handle.
1110  * @state: PM transition of the system being carried out.
1111  */
1112 static void device_complete(struct device *dev, pm_message_t state)
1113 {
1114 void (*callback)(struct device *) = NULL;
1115 const char *info = NULL;
1116
1117 if (dev->power.syscore)
1118 return;
1119
1120 device_lock(dev);
1121
1122 if (dev->pm_domain) {
1123 info = "completing power domain ";
1124 callback = dev->pm_domain->ops.complete;
1125 } else if (dev->type && dev->type->pm) {
1126 info = "completing type ";
1127 callback = dev->type->pm->complete;
1128 } else if (dev->class && dev->class->pm) {
1129 info = "completing class ";
1130 callback = dev->class->pm->complete;
1131 } else if (dev->bus && dev->bus->pm) {
1132 info = "completing bus ";
1133 callback = dev->bus->pm->complete;
1134 }
1135
1136 if (!callback && dev->driver && dev->driver->pm) {
1137 info = "completing driver ";
1138 callback = dev->driver->pm->complete;
1139 }
1140
1141 if (callback) {
1142 pm_dev_dbg(dev, state, info);
1143 callback(dev);
1144 }
1145
1146 device_unlock(dev);
1147
1148 pm_runtime_put(dev);
1149 }
1150
1151 /**
1152  * dpm_complete - Complete a PM transition for all non-sysdev devices.
1153  * @state: PM transition of the system being carried out.
1154  *
1155  * Execute the ->complete() callbacks for all devices whose PM status is not
1156  * DPM_ON (this allows new devices to be registered).
1157  */
1158 void dpm_complete(pm_message_t state)
1159 {
1160 struct list_head list;
1161
1162 trace_suspend_resume(TPS("dpm_complete"), state.event, true);
1163 might_sleep();
1164
1165 INIT_LIST_HEAD(&list);
1166 mutex_lock(&dpm_list_mtx);
1167 while (!list_empty(&dpm_prepared_list)) {
1168 struct device *dev = to_device(dpm_prepared_list.prev);
1169
1170 get_device(dev);
1171 dev->power.is_prepared = false;
1172 list_move(&dev->power.entry, &list);
1173 mutex_unlock(&dpm_list_mtx);
1174
1175 trace_device_pm_callback_start(dev, "", state.event);
1176 device_complete(dev, state);
1177 trace_device_pm_callback_end(dev, 0);
1178
1179 mutex_lock(&dpm_list_mtx);
1180 put_device(dev);
1181 }
1182 list_splice(&list, &dpm_list);
1183 mutex_unlock(&dpm_list_mtx);
1184
1185 /* Allow device probing and trigger re-probing of deferred devices */
1186 device_unblock_probing();
1187 trace_suspend_resume(TPS("dpm_complete"), state.event, false);
1188 }
1189
1190 /**
1191  * dpm_resume_end - Execute "resume" callbacks and complete system transition.
1192  * @state: PM transition of the system being carried out.
1193  *
1194  * Execute "resume" callbacks for all devices and complete the PM transition of
1195  * the system.
1196  */
1197 void dpm_resume_end(pm_message_t state)
1198 {
1199 dpm_resume(state);
1200 dpm_complete(state);
1201 }
1202 EXPORT_SYMBOL_GPL(dpm_resume_end);
1203
1204 
1205 /*------------------------- Suspend routines -------------------------*/
1206 
1207 /**
1208  * resume_event - Return a "resume" message for given "suspend" sleep state.
1209  * @sleep_state: PM message representing a sleep state.
1210  *
1211  * Return a PM message representing the resume event corresponding to the
1212  * given sleep state.
1213  */
1214 static pm_message_t resume_event(pm_message_t sleep_state)
1215 {
1216 switch (sleep_state.event) {
1217 case PM_EVENT_SUSPEND:
1218 return PMSG_RESUME;
1219 case PM_EVENT_FREEZE:
1220 case PM_EVENT_QUIESCE:
1221 return PMSG_RECOVER;
1222 case PM_EVENT_HIBERNATE:
1223 return PMSG_RESTORE;
1224 }
1225 return PMSG_ON;
1226 }
1227
1228 static void dpm_superior_set_must_resume(struct device *dev)
1229 {
1230 struct device_link *link;
1231 int idx;
1232
1233 if (dev->parent)
1234 dev->parent->power.must_resume = true;
1235
1236 idx = device_links_read_lock();
1237
1238 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
1239 link->supplier->power.must_resume = true;
1240
1241 device_links_read_unlock(idx);
1242 }
1243
1244 static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev,
1245 pm_message_t state,
1246 const char **info_p)
1247 {
1248 pm_callback_t callback;
1249 const char *info;
1250
1251 if (dev->pm_domain) {
1252 info = "noirq power domain ";
1253 callback = pm_noirq_op(&dev->pm_domain->ops, state);
1254 } else if (dev->type && dev->type->pm) {
1255 info = "noirq type ";
1256 callback = pm_noirq_op(dev->type->pm, state);
1257 } else if (dev->class && dev->class->pm) {
1258 info = "noirq class ";
1259 callback = pm_noirq_op(dev->class->pm, state);
1260 } else if (dev->bus && dev->bus->pm) {
1261 info = "noirq bus ";
1262 callback = pm_noirq_op(dev->bus->pm, state);
1263 } else {
1264 return NULL;
1265 }
1266
1267 if (info_p)
1268 *info_p = info;
1269
1270 return callback;
1271 }
1272
1273 static bool device_must_resume(struct device *dev, pm_message_t state,
1274 bool no_subsys_suspend_noirq)
1275 {
1276 pm_message_t resume_msg = resume_event(state);
1277
1278 /*
1279  * If all of the device driver's "noirq", "late" and "early" callbacks
1280  * are invoked directly by the core, the decision to allow the device
1281  * to stay in suspend can be based on its current runtime PM status
1282  * and its wakeup settings.
1283  */
1284 if (no_subsys_suspend_noirq &&
1285 !dpm_subsys_suspend_late_cb(dev, state, NULL) &&
1286 !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) &&
1287 !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL))
1288 return !pm_runtime_status_suspended(dev) &&
1289 (resume_msg.event != PM_EVENT_RESUME ||
1290 (device_can_wakeup(dev) && !device_may_wakeup(dev)));
1291
1292 /*
1293  * The only safe strategy here is to require that if the device may not
1294  * be left in suspend, resume callbacks must be invoked for it.
1295  */
1296 return !dev->power.may_skip_resume;
1297 }
1298
1299 /**
1300  * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
1301  * @dev: Device to handle.
1302  * @state: PM transition of the system being carried out.
1303  * @async: If true, the device is being suspended asynchronously.
1304  *
1305  * The driver of @dev will not receive interrupts while this function is being
1306  * executed.
1307  */
1308 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
1309 {
1310 pm_callback_t callback;
1311 const char *info;
1312 bool no_subsys_cb = false;
1313 int error = 0;
1314
1315 TRACE_DEVICE(dev);
1316 TRACE_SUSPEND(0);
1317
1318 dpm_wait_for_subordinate(dev, async);
1319
1320 if (async_error)
1321 goto Complete;
1322
1323 if (dev->power.syscore || dev->power.direct_complete)
1324 goto Complete;
1325
1326 callback = dpm_subsys_suspend_noirq_cb(dev, state, &info);
1327 if (callback)
1328 goto Run;
1329
1330 no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL);
1331
1332 if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb)
1333 goto Skip;
1334
1335 if (dev->driver && dev->driver->pm) {
1336 info = "noirq driver ";
1337 callback = pm_noirq_op(dev->driver->pm, state);
1338 }
1339
1340 Run:
1341 error = dpm_run_callback(callback, dev, state, info);
1342 if (error) {
1343 async_error = error;
1344 goto Complete;
1345 }
1346
1347 Skip:
1348 dev->power.is_noirq_suspended = true;
1349
1350 if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) {
1351 dev->power.must_resume = dev->power.must_resume ||
1352 atomic_read(&dev->power.usage_count) > 1 ||
1353 device_must_resume(dev, state, no_subsys_cb);
1354 } else {
1355 dev->power.must_resume = true;
1356 }
1357
1358 if (dev->power.must_resume)
1359 dpm_superior_set_must_resume(dev);
1360
1361 Complete:
1362 complete_all(&dev->power.completion);
1363 TRACE_SUSPEND(error);
1364 return error;
1365 }
1366
1367 static void async_suspend_noirq(void *data, async_cookie_t cookie)
1368 {
1369 struct device *dev = (struct device *)data;
1370 int error;
1371
1372 error = __device_suspend_noirq(dev, pm_transition, true);
1373 if (error) {
1374 dpm_save_failed_dev(dev_name(dev));
1375 pm_dev_err(dev, pm_transition, " async", error);
1376 }
1377
1378 put_device(dev);
1379 }
1380
1381 static int device_suspend_noirq(struct device *dev)
1382 {
1383 if (dpm_async_fn(dev, async_suspend_noirq))
1384 return 0;
1385
1386 return __device_suspend_noirq(dev, pm_transition, false);
1387 }
1388
1389 static int dpm_noirq_suspend_devices(pm_message_t state)
1390 {
1391 ktime_t starttime = ktime_get();
1392 int error = 0;
1393
1394 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
1395 mutex_lock(&dpm_list_mtx);
1396 pm_transition = state;
1397 async_error = 0;
1398
1399 while (!list_empty(&dpm_late_early_list)) {
1400 struct device *dev = to_device(dpm_late_early_list.prev);
1401
1402 get_device(dev);
1403 mutex_unlock(&dpm_list_mtx);
1404
1405 error = device_suspend_noirq(dev);
1406
1407 mutex_lock(&dpm_list_mtx);
1408 if (error) {
1409 pm_dev_err(dev, state, " noirq", error);
1410 dpm_save_failed_dev(dev_name(dev));
1411 put_device(dev);
1412 break;
1413 }
1414 if (!list_empty(&dev->power.entry))
1415 list_move(&dev->power.entry, &dpm_noirq_list);
1416 put_device(dev);
1417
1418 if (async_error)
1419 break;
1420 }
1421 mutex_unlock(&dpm_list_mtx);
1422 async_synchronize_full();
1423 if (!error)
1424 error = async_error;
1425
1426 if (error) {
1427 suspend_stats.failed_suspend_noirq++;
1428 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
1429 }
1430 dpm_show_time(starttime, state, error, "noirq");
1431 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
1432 return error;
1433 }
1434
1435 /**
1436  * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1437  * @state: PM transition of the system being carried out.
1438  *
1439  * Prevent device drivers' interrupt handlers from being called and invoke
1440  * "noirq" suspend callbacks for all non-sysdev devices.
1441  */
1442 int dpm_suspend_noirq(pm_message_t state)
1443 {
1444 int ret;
1445
1446 cpuidle_pause();
1447
1448 device_wakeup_arm_wake_irqs();
1449 suspend_device_irqs();
1450
1451 ret = dpm_noirq_suspend_devices(state);
1452 if (ret)
1453 dpm_resume_noirq(resume_event(state));
1454
1455 return ret;
1456 }
1457
1458 static void dpm_propagate_wakeup_to_parent(struct device *dev)
1459 {
1460 struct device *parent = dev->parent;
1461
1462 if (!parent)
1463 return;
1464
1465 spin_lock_irq(&parent->power.lock);
1466
1467 if (dev->power.wakeup_path && !parent->power.ignore_children)
1468 parent->power.wakeup_path = true;
1469
1470 spin_unlock_irq(&parent->power.lock);
1471 }
1472
1473 static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev,
1474 pm_message_t state,
1475 const char **info_p)
1476 {
1477 pm_callback_t callback;
1478 const char *info;
1479
1480 if (dev->pm_domain) {
1481 info = "late power domain ";
1482 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1483 } else if (dev->type && dev->type->pm) {
1484 info = "late type ";
1485 callback = pm_late_early_op(dev->type->pm, state);
1486 } else if (dev->class && dev->class->pm) {
1487 info = "late class ";
1488 callback = pm_late_early_op(dev->class->pm, state);
1489 } else if (dev->bus && dev->bus->pm) {
1490 info = "late bus ";
1491 callback = pm_late_early_op(dev->bus->pm, state);
1492 } else {
1493 return NULL;
1494 }
1495
1496 if (info_p)
1497 *info_p = info;
1498
1499 return callback;
1500 }
1501
1502 /**
1503  * __device_suspend_late - Execute a "late suspend" callback for given device.
1504  * @dev: Device to handle.
1505  * @state: PM transition of the system being carried out.
1506  * @async: If true, the device is being suspended asynchronously.
1507  *
1508  * Runtime PM is disabled for @dev while this function is being executed.
1509  */
1510 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1511 {
1512 pm_callback_t callback;
1513 const char *info;
1514 int error = 0;
1515
1516 TRACE_DEVICE(dev);
1517 TRACE_SUSPEND(0);
1518
1519 __pm_runtime_disable(dev, false);
1520
1521 dpm_wait_for_subordinate(dev, async);
1522
1523 if (async_error)
1524 goto Complete;
1525
1526 if (pm_wakeup_pending()) {
1527 async_error = -EBUSY;
1528 goto Complete;
1529 }
1530
1531 if (dev->power.syscore || dev->power.direct_complete)
1532 goto Complete;
1533
1534 callback = dpm_subsys_suspend_late_cb(dev, state, &info);
1535 if (callback)
1536 goto Run;
1537
1538 if (dev_pm_smart_suspend_and_suspended(dev) &&
1539 !dpm_subsys_suspend_noirq_cb(dev, state, NULL))
1540 goto Skip;
1541
1542 if (dev->driver && dev->driver->pm) {
1543 info = "late driver ";
1544 callback = pm_late_early_op(dev->driver->pm, state);
1545 }
1546
1547 Run:
1548 error = dpm_run_callback(callback, dev, state, info);
1549 if (error) {
1550 async_error = error;
1551 goto Complete;
1552 }
1553 dpm_propagate_wakeup_to_parent(dev);
1554
1555 Skip:
1556 dev->power.is_late_suspended = true;
1557
1558 Complete:
1559 TRACE_SUSPEND(error);
1560 complete_all(&dev->power.completion);
1561 return error;
1562 }
1563
1564 static void async_suspend_late(void *data, async_cookie_t cookie)
1565 {
1566 struct device *dev = (struct device *)data;
1567 int error;
1568
1569 error = __device_suspend_late(dev, pm_transition, true);
1570 if (error) {
1571 dpm_save_failed_dev(dev_name(dev));
1572 pm_dev_err(dev, pm_transition, " async", error);
1573 }
1574 put_device(dev);
1575 }
1576
1577 static int device_suspend_late(struct device *dev)
1578 {
1579 if (dpm_async_fn(dev, async_suspend_late))
1580 return 0;
1581
1582 return __device_suspend_late(dev, pm_transition, false);
1583 }
1584
1585 /**
1586  * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1587  * @state: PM transition of the system being carried out.
1588  */
1589 int dpm_suspend_late(pm_message_t state)
1590 {
1591 ktime_t starttime = ktime_get();
1592 int error = 0;
1593
1594 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1595 mutex_lock(&dpm_list_mtx);
1596 pm_transition = state;
1597 async_error = 0;
1598
1599 while (!list_empty(&dpm_suspended_list)) {
1600 struct device *dev = to_device(dpm_suspended_list.prev);
1601
1602 get_device(dev);
1603 mutex_unlock(&dpm_list_mtx);
1604
1605 error = device_suspend_late(dev);
1606
1607 mutex_lock(&dpm_list_mtx);
1608 if (!list_empty(&dev->power.entry))
1609 list_move(&dev->power.entry, &dpm_late_early_list);
1610
1611 if (error) {
1612 pm_dev_err(dev, state, " late", error);
1613 dpm_save_failed_dev(dev_name(dev));
1614 put_device(dev);
1615 break;
1616 }
1617 put_device(dev);
1618
1619 if (async_error)
1620 break;
1621 }
1622 mutex_unlock(&dpm_list_mtx);
1623 async_synchronize_full();
1624 if (!error)
1625 error = async_error;
1626 if (error) {
1627 suspend_stats.failed_suspend_late++;
1628 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1629 dpm_resume_early(resume_event(state));
1630 }
1631 dpm_show_time(starttime, state, error, "late");
1632 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1633 return error;
1634 }
1635
1636 /**
1637  * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1638  * @state: PM transition of the system being carried out.
1639  */
1640 int dpm_suspend_end(pm_message_t state)
1641 {
1642 ktime_t starttime = ktime_get();
1643 int error;
1644
1645 error = dpm_suspend_late(state);
1646 if (error)
1647 goto out;
1648
1649 error = dpm_suspend_noirq(state);
1650 if (error)
1651 dpm_resume_early(resume_event(state));
1652
1653 out:
1654 dpm_show_time(starttime, state, error, "end");
1655 return error;
1656 }
1657 EXPORT_SYMBOL_GPL(dpm_suspend_end);
1658
1659 /**
1660  * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1661  * @dev: Device to suspend.
1662  * @state: PM transition of the system being carried out.
1663  * @cb: Suspend callback to execute.
1664  * @info: string description of caller.
1665  */
1666 static int legacy_suspend(struct device *dev, pm_message_t state,
1667 int (*cb)(struct device *dev, pm_message_t state),
1668 const char *info)
1669 {
1670 int error;
1671 ktime_t calltime;
1672
1673 calltime = initcall_debug_start(dev, cb);
1674
1675 trace_device_pm_callback_start(dev, info, state.event);
1676 error = cb(dev, state);
1677 trace_device_pm_callback_end(dev, error);
1678 suspend_report_result(cb, error);
1679
1680 initcall_debug_report(dev, calltime, cb, error);
1681
1682 return error;
1683 }
1684
1685 static void dpm_clear_superiors_direct_complete(struct device *dev)
1686 {
1687 struct device_link *link;
1688 int idx;
1689
1690 if (dev->parent) {
1691 spin_lock_irq(&dev->parent->power.lock);
1692 dev->parent->power.direct_complete = false;
1693 spin_unlock_irq(&dev->parent->power.lock);
1694 }
1695
1696 idx = device_links_read_lock();
1697
1698 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
1699 spin_lock_irq(&link->supplier->power.lock);
1700 link->supplier->power.direct_complete = false;
1701 spin_unlock_irq(&link->supplier->power.lock);
1702 }
1703
1704 device_links_read_unlock(idx);
1705 }
1706
1707 /**
1708  * __device_suspend - Execute "suspend" callbacks for given device.
1709  * @dev: Device to handle.
1710  * @state: PM transition of the system being carried out.
1711  * @async: If true, the device is being suspended asynchronously.
1712  */
1713 static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1714 {
1715 pm_callback_t callback = NULL;
1716 const char *info = NULL;
1717 int error = 0;
1718 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1719
1720 TRACE_DEVICE(dev);
1721 TRACE_SUSPEND(0);
1722
1723 dpm_wait_for_subordinate(dev, async);
1724
1725 if (async_error) {
1726 dev->power.direct_complete = false;
1727 goto Complete;
1728 }
1729
1730 /*
1731  * If a device configured to wake up the system from sleep states
1732  * has been suspended at run time and there's a resume request pending
1733  * for it, this is equivalent to the device signaling wakeup, so the
1734  * system suspend operation should be aborted.
1735  */
1736 if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
1737 pm_wakeup_event(dev, 0);
1738
1739 if (pm_wakeup_pending()) {
1740 dev->power.direct_complete = false;
1741 async_error = -EBUSY;
1742 goto Complete;
1743 }
1744
1745 if (dev->power.syscore)
1746 goto Complete;
1747
1748 /* Avoid direct_complete to let wakeup_path propagate. */
1749 if (device_may_wakeup(dev) || dev->power.wakeup_path)
1750 dev->power.direct_complete = false;
1751
1752 if (dev->power.direct_complete) {
1753 if (pm_runtime_status_suspended(dev)) {
1754 pm_runtime_disable(dev);
1755 if (pm_runtime_status_suspended(dev)) {
1756 pm_dev_dbg(dev, state, "direct-complete ");
1757 goto Complete;
1758 }
1759
1760 pm_runtime_enable(dev);
1761 }
1762 dev->power.direct_complete = false;
1763 }
1764
1765 dev->power.may_skip_resume = false;
1766 dev->power.must_resume = false;
1767
1768 dpm_watchdog_set(&wd, dev);
1769 device_lock(dev);
1770
1771 if (dev->pm_domain) {
1772 info = "power domain ";
1773 callback = pm_op(&dev->pm_domain->ops, state);
1774 goto Run;
1775 }
1776
1777 if (dev->type && dev->type->pm) {
1778 info = "type ";
1779 callback = pm_op(dev->type->pm, state);
1780 goto Run;
1781 }
1782
1783 if (dev->class && dev->class->pm) {
1784 info = "class ";
1785 callback = pm_op(dev->class->pm, state);
1786 goto Run;
1787 }
1788
1789 if (dev->bus) {
1790 if (dev->bus->pm) {
1791 info = "bus ";
1792 callback = pm_op(dev->bus->pm, state);
1793 } else if (dev->bus->suspend) {
1794 pm_dev_dbg(dev, state, "legacy bus ");
1795 error = legacy_suspend(dev, state, dev->bus->suspend,
1796 "legacy bus ");
1797 goto End;
1798 }
1799 }
1800
1801 Run:
1802 if (!callback && dev->driver && dev->driver->pm) {
1803 info = "driver ";
1804 callback = pm_op(dev->driver->pm, state);
1805 }
1806
1807 error = dpm_run_callback(callback, dev, state, info);
1808
1809 End:
1810 if (!error) {
1811 dev->power.is_suspended = true;
1812 if (device_may_wakeup(dev))
1813 dev->power.wakeup_path = true;
1814
1815 dpm_propagate_wakeup_to_parent(dev);
1816 dpm_clear_superiors_direct_complete(dev);
1817 }
1818
1819 device_unlock(dev);
1820 dpm_watchdog_clear(&wd);
1821
1822 Complete:
1823 if (error)
1824 async_error = error;
1825
1826 complete_all(&dev->power.completion);
1827 TRACE_SUSPEND(error);
1828 return error;
1829 }
1830
1831 static void async_suspend(void *data, async_cookie_t cookie)
1832 {
1833 struct device *dev = (struct device *)data;
1834 int error;
1835
1836 error = __device_suspend(dev, pm_transition, true);
1837 if (error) {
1838 dpm_save_failed_dev(dev_name(dev));
1839 pm_dev_err(dev, pm_transition, " async", error);
1840 }
1841
1842 put_device(dev);
1843 }
1844
1845 static int device_suspend(struct device *dev)
1846 {
1847 if (dpm_async_fn(dev, async_suspend))
1848 return 0;
1849
1850 return __device_suspend(dev, pm_transition, false);
1851 }
1852
1853 /**
1854  * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1855  * @state: PM transition of the system being carried out.
1856  */
1857 int dpm_suspend(pm_message_t state)
1858 {
1859 ktime_t starttime = ktime_get();
1860 int error = 0;
1861
1862 trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1863 might_sleep();
1864
1865 devfreq_suspend();
1866 cpufreq_suspend();
1867
1868 mutex_lock(&dpm_list_mtx);
1869 pm_transition = state;
1870 async_error = 0;
1871 while (!list_empty(&dpm_prepared_list)) {
1872 struct device *dev = to_device(dpm_prepared_list.prev);
1873
1874 get_device(dev);
1875 mutex_unlock(&dpm_list_mtx);
1876
1877 error = device_suspend(dev);
1878
1879 mutex_lock(&dpm_list_mtx);
1880 if (error) {
1881 pm_dev_err(dev, state, "", error);
1882 dpm_save_failed_dev(dev_name(dev));
1883 put_device(dev);
1884 break;
1885 }
1886 if (!list_empty(&dev->power.entry))
1887 list_move(&dev->power.entry, &dpm_suspended_list);
1888 put_device(dev);
1889 if (async_error)
1890 break;
1891 }
1892 mutex_unlock(&dpm_list_mtx);
1893 async_synchronize_full();
1894 if (!error)
1895 error = async_error;
1896 if (error) {
1897 suspend_stats.failed_suspend++;
1898 dpm_save_failed_step(SUSPEND_SUSPEND);
1899 }
1900 dpm_show_time(starttime, state, error, NULL);
1901 trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1902 return error;
1903 }
1904
1905 /**
1906  * device_prepare - Prepare a device for system power transition.
1907  * @dev: Device to handle.
1908  * @state: PM transition of the system being carried out.
1909  *
1910  * Execute the ->prepare() callback(s) for given device.  No new children of
1911  * the device may be registered after this function has returned.
1912  */
1913 static int device_prepare(struct device *dev, pm_message_t state)
1914 {
1915 int (*callback)(struct device *) = NULL;
1916 int ret = 0;
1917
1918 if (dev->power.syscore)
1919 return 0;
1920
1921 WARN_ON(!pm_runtime_enabled(dev) &&
1922 dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND |
1923 DPM_FLAG_LEAVE_SUSPENDED));
1924
1925 /*
1926  * If a device's parent goes into runtime suspend at the wrong time,
1927  * it won't be possible to resume the device.  To prevent this we
1928  * block runtime suspend here, during the prepare phase, and allow
1929  * it again during the complete phase.
1930  */
1931 pm_runtime_get_noresume(dev);
1932
1933 device_lock(dev);
1934
1935 dev->power.wakeup_path = false;
1936
1937 if (dev->power.no_pm_callbacks)
1938 goto unlock;
1939
1940 if (dev->pm_domain)
1941 callback = dev->pm_domain->ops.prepare;
1942 else if (dev->type && dev->type->pm)
1943 callback = dev->type->pm->prepare;
1944 else if (dev->class && dev->class->pm)
1945 callback = dev->class->pm->prepare;
1946 else if (dev->bus && dev->bus->pm)
1947 callback = dev->bus->pm->prepare;
1948
1949 if (!callback && dev->driver && dev->driver->pm)
1950 callback = dev->driver->pm->prepare;
1951
1952 if (callback)
1953 ret = callback(dev);
1954
1955 unlock:
1956 device_unlock(dev);
1957
1958 if (ret < 0) {
1959 suspend_report_result(callback, ret);
1960 pm_runtime_put(dev);
1961 return ret;
1962 }
1963
1964 /*
1965  * A positive return value from ->prepare() means "this device appears to be
1966  * runtime-suspended and its state is fine, so if it really is runtime-suspended,
1967  * you can leave it in that state provided that you will do the same thing with
1968  * all of its descendants".  This only applies to suspend transitions, however.
1969  */
1970 spin_lock_irq(&dev->power.lock);
1971 dev->power.direct_complete = state.event == PM_EVENT_SUSPEND &&
1972 ((pm_runtime_suspended(dev) && ret > 0) ||
1973 dev->power.no_pm_callbacks) &&
1974 !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP);
1975 spin_unlock_irq(&dev->power.lock);
1976 return 0;
1977 }
1978
1979 /**
1980  * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1981  * @state: PM transition of the system being carried out.
1982  *
1983  * Execute the ->prepare() callback(s) for all devices.
1984  */
1985 int dpm_prepare(pm_message_t state)
1986 {
1987 int error = 0;
1988
1989 trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1990 might_sleep();
1991
1992 /*
1993  * Give a chance for the known devices to complete their probes, before
1994  * probing of devices is disabled.  This sync point is important at
1995  * least at boot time + hibernation restore.
1996  */
1997 wait_for_device_probe();
1998
1999 /*
2000  * Probing a device during suspend or hibernation is unsafe and would make
2001  * system behavior unpredictable, so prohibit device probing here and defer
2002  * probes instead.  The normal behavior will be restored in dpm_complete().
2003  */
2004 device_block_probing();
2005
2006 mutex_lock(&dpm_list_mtx);
2007 while (!list_empty(&dpm_list)) {
2008 struct device *dev = to_device(dpm_list.next);
2009
2010 get_device(dev);
2011 mutex_unlock(&dpm_list_mtx);
2012
2013 trace_device_pm_callback_start(dev, "", state.event);
2014 error = device_prepare(dev, state);
2015 trace_device_pm_callback_end(dev, error);
2016
2017 mutex_lock(&dpm_list_mtx);
2018 if (error) {
2019 if (error == -EAGAIN) {
2020 put_device(dev);
2021 error = 0;
2022 continue;
2023 }
2024 pr_info("Device %s not prepared for power transition: code %d\n",
2025 dev_name(dev), error);
2026 put_device(dev);
2027 break;
2028 }
2029 dev->power.is_prepared = true;
2030 if (!list_empty(&dev->power.entry))
2031 list_move_tail(&dev->power.entry, &dpm_prepared_list);
2032 put_device(dev);
2033 }
2034 mutex_unlock(&dpm_list_mtx);
2035 trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
2036 return error;
2037 }
2038
2039 /**
2040  * dpm_suspend_start - Prepare devices for PM transition and suspend them.
2041  * @state: PM transition of the system being carried out.
2042  *
2043  * Prepare all non-sysdev devices for system PM transition and execute "suspend"
2044  * callbacks for them.
2045  */
2046 int dpm_suspend_start(pm_message_t state)
2047 {
2048 ktime_t starttime = ktime_get();
2049 int error;
2050
2051 error = dpm_prepare(state);
2052 if (error) {
2053 suspend_stats.failed_prepare++;
2054 dpm_save_failed_step(SUSPEND_PREPARE);
2055 } else
2056 error = dpm_suspend(state);
2057 dpm_show_time(starttime, state, error, "start");
2058 return error;
2059 }
2060 EXPORT_SYMBOL_GPL(dpm_suspend_start);
2061
2062 void __suspend_report_result(const char *function, void *fn, int ret)
2063 {
2064 if (ret)
2065 pr_err("%s(): %pS returns %d\n", function, fn, ret);
2066 }
2067 EXPORT_SYMBOL_GPL(__suspend_report_result);
2068
2069 /**
2070  * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
2071  * @subordinate: Device that needs to wait for @dev.
2072  * @dev: Device to wait for.
2073  */
2074 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
2075 {
2076 dpm_wait(dev, subordinate->power.async_suspend);
2077 return async_error;
2078 }
2079 EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
2080
2081 /**
2082  * dpm_for_each_dev - device iterator.
2083  * @data: data for the callback.
2084  * @fn: function to be called for each device.
2085  *
2086  * Iterate over devices in dpm_list, and call @fn for each device,
2087  * passing it @data.
2088  */
2089 void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
2090 {
2091 struct device *dev;
2092
2093 if (!fn)
2094 return;
2095
2096 device_pm_lock();
2097 list_for_each_entry(dev, &dpm_list, power.entry)
2098 fn(dev, data);
2099 device_pm_unlock();
2100 }
2101 EXPORT_SYMBOL_GPL(dpm_for_each_dev);
2102
2103 static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
2104 {
2105 if (!ops)
2106 return true;
2107
2108 return !ops->prepare &&
2109 !ops->suspend &&
2110 !ops->suspend_late &&
2111 !ops->suspend_noirq &&
2112 !ops->resume_noirq &&
2113 !ops->resume_early &&
2114 !ops->resume &&
2115 !ops->complete;
2116 }
2117
2118 void device_pm_check_callbacks(struct device *dev)
2119 {
2120 spin_lock_irq(&dev->power.lock);
2121 dev->power.no_pm_callbacks =
2122 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
2123 !dev->bus->suspend && !dev->bus->resume)) &&
2124 (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
2125 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
2126 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
2127 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
2128 !dev->driver->suspend && !dev->driver->resume));
2129 spin_unlock_irq(&dev->power.lock);
2130 }
2131
2132 bool dev_pm_smart_suspend_and_suspended(struct device *dev)
2133 {
2134 return dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) &&
2135 pm_runtime_status_suspended(dev);
2136 }
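
dev_pm_smart_suspend_and_suspended() above only returns true for devices whose
drivers opt in to DPM_FLAG_SMART_SUSPEND and that are already runtime
suspended when the system transition starts. A minimal sketch of the opt-in,
assuming a hypothetical foo_probe() (the name is illustrative only):

    #include <linux/platform_device.h>
    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int foo_probe(struct platform_device *pdev)
    {
            /*
             * DPM_FLAG_SMART_SUSPEND lets the PM core skip system-wide
             * suspend callbacks for a device that is already runtime
             * suspended; DPM_FLAG_LEAVE_SUSPENDED additionally allows the
             * resume callbacks to be skipped when dev_pm_may_skip_resume()
             * permits it.
             */
            dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND |
                                                DPM_FLAG_LEAVE_SUSPENDED);
            return 0;
    }

As the WARN_ON() in device_prepare() indicates, these flags are only
meaningful for devices that have runtime PM enabled.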