Lines matching refs:dev in drivers/base/power/main.c (Linux device power management core)

92 void device_pm_sleep_init(struct device *dev) in device_pm_sleep_init() argument
94 dev->power.is_prepared = false; in device_pm_sleep_init()
95 dev->power.is_suspended = false; in device_pm_sleep_init()
96 dev->power.is_noirq_suspended = false; in device_pm_sleep_init()
97 dev->power.is_late_suspended = false; in device_pm_sleep_init()
98 init_completion(&dev->power.completion); in device_pm_sleep_init()
99 complete_all(&dev->power.completion); in device_pm_sleep_init()
100 dev->power.wakeup = NULL; in device_pm_sleep_init()
101 INIT_LIST_HEAD(&dev->power.entry); in device_pm_sleep_init()
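
device_pm_sleep_init() deliberately leaves dev->power.completion in the completed state (init_completion() immediately followed by complete_all()), so a dpm_wait() on a device that has never been through a sleep transition returns at once. A minimal sketch of the idiom, with hypothetical example_ names:

#include <linux/completion.h>

static struct completion example_done;

static void example_init(void)
{
        init_completion(&example_done);
        /* Start out completed: waiters that arrive before the first
         * suspend/resume cycle must not block. */
        complete_all(&example_done);
}

static void example_wait(void)
{
        wait_for_completion(&example_done);     /* returns immediately */
}
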
124 void device_pm_add(struct device *dev) in device_pm_add() argument
127 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_add()
129 if (dev->parent && dev->parent->power.is_prepared) in device_pm_add()
130 dev_warn(dev, "parent %s should not be sleeping\n", in device_pm_add()
131 dev_name(dev->parent)); in device_pm_add()
132 list_add_tail(&dev->power.entry, &dpm_list); in device_pm_add()
140 void device_pm_remove(struct device *dev) in device_pm_remove() argument
143 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_remove()
144 complete_all(&dev->power.completion); in device_pm_remove()
146 list_del_init(&dev->power.entry); in device_pm_remove()
148 device_wakeup_disable(dev); in device_pm_remove()
149 pm_runtime_remove(dev); in device_pm_remove()
184 void device_pm_move_last(struct device *dev) in device_pm_move_last() argument
187 dev->bus ? dev->bus->name : "No Bus", dev_name(dev)); in device_pm_move_last()
188 list_move_tail(&dev->power.entry, &dpm_list); in device_pm_move_last()
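
device_pm_add() appends at the tail of dpm_list, so the list stays in registration order: a parent always precedes its children. Resume walks head-to-tail, suspend tail-to-head (note the to_device(...prev) calls in the dpm_suspend* loops below), and device_pm_move_last() reorders a device as if it had been registered last. A sketch of the two traversal directions, with hypothetical example_ helpers:

static void example_resume_one(struct device *dev);
static void example_suspend_one(struct device *dev);

static void example_walk(void)
{
        struct device *dev;

        list_for_each_entry(dev, &dpm_list, power.entry)
                example_resume_one(dev);        /* parents first */

        list_for_each_entry_reverse(dev, &dpm_list, power.entry)
                example_suspend_one(dev);       /* children first */
}
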
191 static ktime_t initcall_debug_start(struct device *dev) in initcall_debug_start() argument
197 dev_name(dev), task_pid_nr(current), in initcall_debug_start()
198 dev->parent ? dev_name(dev->parent) : "none"); in initcall_debug_start()
205 static void initcall_debug_report(struct device *dev, ktime_t calltime, in initcall_debug_report() argument
215 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), in initcall_debug_report()
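
initcall_debug_start()/initcall_debug_report() bracket each callback with a timestamp when the initcall_debug boot parameter is set, printing the calling task's PID and how long the callback ran. The measurement itself is plain ktime arithmetic; a hypothetical condensed form:

static int example_timed_call(struct device *dev, int (*cb)(struct device *))
{
        ktime_t calltime = ktime_get();
        int error = cb(dev);

        pr_info("call %s+ returned %d after %lld usecs\n", dev_name(dev),
                error, ktime_us_delta(ktime_get(), calltime));
        return error;
}
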
225 static void dpm_wait(struct device *dev, bool async) in dpm_wait() argument
227 if (!dev) in dpm_wait()
230 if (async || (pm_async_enabled && dev->power.async_suspend)) in dpm_wait()
231 wait_for_completion(&dev->power.completion); in dpm_wait()
234 static int dpm_wait_fn(struct device *dev, void *async_ptr) in dpm_wait_fn() argument
236 dpm_wait(dev, *((bool *)async_ptr)); in dpm_wait_fn()
240 static void dpm_wait_for_children(struct device *dev, bool async) in dpm_wait_for_children() argument
242 device_for_each_child(dev, &async, dpm_wait_fn); in dpm_wait_for_children()
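
dpm_wait() only blocks when the target device takes part in async suspend/resume; dpm_wait_for_children() fans that out over every child, so a parent cannot finish suspending before its children have. A device opts in to async PM from its driver's probe path (real API, hypothetical caller):

static int example_probe(struct device *dev)
{
        /* Allow this device to suspend/resume in parallel with others;
         * parent/child ordering is still enforced through
         * dpm_wait()/dpm_wait_for_children(). */
        device_enable_async_suspend(dev);
        return 0;
}
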
345 static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info) in pm_dev_dbg() argument
347 dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event), in pm_dev_dbg()
348 ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ? in pm_dev_dbg()
352 static void pm_dev_err(struct device *dev, pm_message_t state, char *info, in pm_dev_err() argument
356 dev_name(dev), pm_verb(state.event), info, error); in pm_dev_err()
376 static int dpm_run_callback(pm_callback_t cb, struct device *dev, in dpm_run_callback() argument
385 calltime = initcall_debug_start(dev); in dpm_run_callback()
387 pm_dev_dbg(dev, state, info); in dpm_run_callback()
388 trace_device_pm_callback_start(dev, info, state.event); in dpm_run_callback()
389 error = cb(dev); in dpm_run_callback()
390 trace_device_pm_callback_end(dev, error); in dpm_run_callback()
393 initcall_debug_report(dev, calltime, error, state, info); in dpm_run_callback()
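
dpm_run_callback() is the single funnel through which every phase invokes the callback it selected, adding the debug print, the tracepoints, and the optional initcall_debug timing. The callbacks themselves are plain int (*)(struct device *) functions that a driver normally supplies via dev_pm_ops; a hypothetical minimal driver side:

#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
        /* quiesce the hardware */
        return 0;
}

static int example_resume(struct device *dev)
{
        /* bring the hardware back up */
        return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
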
400 struct device *dev; member
420 dev_emerg(wd->dev, "**** DPM device timeout ****\n"); in dpm_watchdog_handler()
423 dev_driver_string(wd->dev), dev_name(wd->dev)); in dpm_watchdog_handler()
431 static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) in dpm_watchdog_set() argument
435 wd->dev = dev; in dpm_watchdog_set()
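
dpm_watchdog_set() arms a per-callback watchdog; if a suspend/resume callback stalls, dpm_watchdog_handler() names the offending device and crashes the machine so the hang is attributable. A hypothetical sketch of the pattern using the timer API of this kernel generation (the timeout value is illustrative):

#include <linux/timer.h>

static void example_wd_handler(unsigned long data)
{
        struct device *dev = (struct device *)data;

        dev_emerg(dev, "**** DPM device timeout ****\n");
        BUG();
}

static int example_run_guarded(struct device *dev, int (*cb)(struct device *))
{
        struct timer_list wd;
        int error;

        setup_timer_on_stack(&wd, example_wd_handler, (unsigned long)dev);
        mod_timer(&wd, jiffies + 120 * HZ);     /* illustrative timeout */
        error = cb(dev);
        del_timer_sync(&wd);
        destroy_timer_on_stack(&wd);
        return error;
}
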
474 static int device_resume_noirq(struct device *dev, pm_message_t state, bool async) in device_resume_noirq() argument
480 TRACE_DEVICE(dev); in device_resume_noirq()
483 if (dev->power.syscore || dev->power.direct_complete) in device_resume_noirq()
486 if (!dev->power.is_noirq_suspended) in device_resume_noirq()
489 dpm_wait(dev->parent, async); in device_resume_noirq()
491 if (dev->pm_domain) { in device_resume_noirq()
493 callback = pm_noirq_op(&dev->pm_domain->ops, state); in device_resume_noirq()
494 } else if (dev->type && dev->type->pm) { in device_resume_noirq()
496 callback = pm_noirq_op(dev->type->pm, state); in device_resume_noirq()
497 } else if (dev->class && dev->class->pm) { in device_resume_noirq()
499 callback = pm_noirq_op(dev->class->pm, state); in device_resume_noirq()
500 } else if (dev->bus && dev->bus->pm) { in device_resume_noirq()
502 callback = pm_noirq_op(dev->bus->pm, state); in device_resume_noirq()
505 if (!callback && dev->driver && dev->driver->pm) { in device_resume_noirq()
507 callback = pm_noirq_op(dev->driver->pm, state); in device_resume_noirq()
510 error = dpm_run_callback(callback, dev, state, info); in device_resume_noirq()
511 dev->power.is_noirq_suspended = false; in device_resume_noirq()
514 complete_all(&dev->power.completion); in device_resume_noirq()
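
device_resume_noirq(), like every phase handler below it, picks exactly one callback in a fixed precedence order: PM domain, then device type, then class, then bus, with the driver's own dev_pm_ops only as a fallback when none of the above provided one. Drivers publish noirq callbacks (run while the device's interrupt handlers are still disabled) roughly like this, with hypothetical example_ names:

static int example_suspend_noirq(struct device *dev)
{
        /* runs after device interrupt handlers have been disabled */
        return 0;
}

static int example_resume_noirq(struct device *dev)
{
        return 0;
}

static const struct dev_pm_ops example_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(example_suspend_noirq,
                                      example_resume_noirq)
};
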
519 static bool is_async(struct device *dev) in is_async() argument
521 return dev->power.async_suspend && pm_async_enabled in is_async()
527 struct device *dev = (struct device *)data; in async_resume_noirq() local
530 error = device_resume_noirq(dev, pm_transition, true); in async_resume_noirq()
532 pm_dev_err(dev, pm_transition, " async", error); in async_resume_noirq()
534 put_device(dev); in async_resume_noirq()
546 struct device *dev; in dpm_resume_noirq() local
558 list_for_each_entry(dev, &dpm_noirq_list, power.entry) { in dpm_resume_noirq()
559 reinit_completion(&dev->power.completion); in dpm_resume_noirq()
560 if (is_async(dev)) { in dpm_resume_noirq()
561 get_device(dev); in dpm_resume_noirq()
562 async_schedule(async_resume_noirq, dev); in dpm_resume_noirq()
567 dev = to_device(dpm_noirq_list.next); in dpm_resume_noirq()
568 get_device(dev); in dpm_resume_noirq()
569 list_move_tail(&dev->power.entry, &dpm_late_early_list); in dpm_resume_noirq()
572 if (!is_async(dev)) { in dpm_resume_noirq()
575 error = device_resume_noirq(dev, state, false); in dpm_resume_noirq()
579 dpm_save_failed_dev(dev_name(dev)); in dpm_resume_noirq()
580 pm_dev_err(dev, state, " noirq", error); in dpm_resume_noirq()
585 put_device(dev); in dpm_resume_noirq()
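
dpm_resume_noirq() makes two passes: the first schedules an async resume for every async-capable device so they all run in parallel, the second drains the list in order, resuming the synchronous devices itself. Handing a device to an async thread is always bracketed by get_device()/put_device(); a hypothetical condensed form of the pattern:

#include <linux/async.h>

static void example_async_fn(void *data, async_cookie_t cookie)
{
        struct device *dev = data;

        /* ... resume the device ... */
        put_device(dev);
}

static void example_schedule_resume(struct device *dev)
{
        get_device(dev);        /* keep dev alive until the handler runs */
        async_schedule(example_async_fn, dev);
}
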
604 static int device_resume_early(struct device *dev, pm_message_t state, bool async) in device_resume_early() argument
610 TRACE_DEVICE(dev); in device_resume_early()
613 if (dev->power.syscore || dev->power.direct_complete) in device_resume_early()
616 if (!dev->power.is_late_suspended) in device_resume_early()
619 dpm_wait(dev->parent, async); in device_resume_early()
621 if (dev->pm_domain) { in device_resume_early()
623 callback = pm_late_early_op(&dev->pm_domain->ops, state); in device_resume_early()
624 } else if (dev->type && dev->type->pm) { in device_resume_early()
626 callback = pm_late_early_op(dev->type->pm, state); in device_resume_early()
627 } else if (dev->class && dev->class->pm) { in device_resume_early()
629 callback = pm_late_early_op(dev->class->pm, state); in device_resume_early()
630 } else if (dev->bus && dev->bus->pm) { in device_resume_early()
632 callback = pm_late_early_op(dev->bus->pm, state); in device_resume_early()
635 if (!callback && dev->driver && dev->driver->pm) { in device_resume_early()
637 callback = pm_late_early_op(dev->driver->pm, state); in device_resume_early()
640 error = dpm_run_callback(callback, dev, state, info); in device_resume_early()
641 dev->power.is_late_suspended = false; in device_resume_early()
646 pm_runtime_enable(dev); in device_resume_early()
647 complete_all(&dev->power.completion); in device_resume_early()
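
The pm_runtime_enable() at the end of device_resume_early() balances the __pm_runtime_disable(dev, false) issued at the start of __device_suspend_late() below; runtime-PM disabling is counted, so the two calls must pair exactly. In condensed form:

/* __device_suspend_late(): runtime PM must not interfere from here on */
__pm_runtime_disable(dev, false);
/* ... system sleep ... */
/* device_resume_early(): balance the counted disable */
pm_runtime_enable(dev);
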
653 struct device *dev = (struct device *)data; in async_resume_early() local
656 error = device_resume_early(dev, pm_transition, true); in async_resume_early()
658 pm_dev_err(dev, pm_transition, " async", error); in async_resume_early()
660 put_device(dev); in async_resume_early()
669 struct device *dev; in dpm_resume_early() local
681 list_for_each_entry(dev, &dpm_late_early_list, power.entry) { in dpm_resume_early()
682 reinit_completion(&dev->power.completion); in dpm_resume_early()
683 if (is_async(dev)) { in dpm_resume_early()
684 get_device(dev); in dpm_resume_early()
685 async_schedule(async_resume_early, dev); in dpm_resume_early()
690 dev = to_device(dpm_late_early_list.next); in dpm_resume_early()
691 get_device(dev); in dpm_resume_early()
692 list_move_tail(&dev->power.entry, &dpm_suspended_list); in dpm_resume_early()
695 if (!is_async(dev)) { in dpm_resume_early()
698 error = device_resume_early(dev, state, false); in dpm_resume_early()
702 dpm_save_failed_dev(dev_name(dev)); in dpm_resume_early()
703 pm_dev_err(dev, state, " early", error); in dpm_resume_early()
707 put_device(dev); in dpm_resume_early()
732 static int device_resume(struct device *dev, pm_message_t state, bool async) in device_resume() argument
739 TRACE_DEVICE(dev); in device_resume()
742 if (dev->power.syscore) in device_resume()
745 if (dev->power.direct_complete) { in device_resume()
747 pm_runtime_enable(dev); in device_resume()
751 dpm_wait(dev->parent, async); in device_resume()
752 dpm_watchdog_set(&wd, dev); in device_resume()
753 device_lock(dev); in device_resume()
759 dev->power.is_prepared = false; in device_resume()
761 if (!dev->power.is_suspended) in device_resume()
764 if (dev->pm_domain) { in device_resume()
766 callback = pm_op(&dev->pm_domain->ops, state); in device_resume()
770 if (dev->type && dev->type->pm) { in device_resume()
772 callback = pm_op(dev->type->pm, state); in device_resume()
776 if (dev->class) { in device_resume()
777 if (dev->class->pm) { in device_resume()
779 callback = pm_op(dev->class->pm, state); in device_resume()
781 } else if (dev->class->resume) { in device_resume()
783 callback = dev->class->resume; in device_resume()
788 if (dev->bus) { in device_resume()
789 if (dev->bus->pm) { in device_resume()
791 callback = pm_op(dev->bus->pm, state); in device_resume()
792 } else if (dev->bus->resume) { in device_resume()
794 callback = dev->bus->resume; in device_resume()
800 if (!callback && dev->driver && dev->driver->pm) { in device_resume()
802 callback = pm_op(dev->driver->pm, state); in device_resume()
806 error = dpm_run_callback(callback, dev, state, info); in device_resume()
807 dev->power.is_suspended = false; in device_resume()
810 device_unlock(dev); in device_resume()
814 complete_all(&dev->power.completion); in device_resume()
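
device_resume() additionally honors the legacy, pre-dev_pm_ops hooks that old classes and bus types set directly on themselves (dev->class->resume, dev->bus->resume). Those legacy members, as struct bus_type carried them in kernels of this era (since removed), had these shapes:

int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
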
823 struct device *dev = (struct device *)data; in async_resume() local
826 error = device_resume(dev, pm_transition, true); in async_resume()
828 pm_dev_err(dev, pm_transition, " async", error); in async_resume()
829 put_device(dev); in async_resume()
841 struct device *dev; in dpm_resume() local
851 list_for_each_entry(dev, &dpm_suspended_list, power.entry) { in dpm_resume()
852 reinit_completion(&dev->power.completion); in dpm_resume()
853 if (is_async(dev)) { in dpm_resume()
854 get_device(dev); in dpm_resume()
855 async_schedule(async_resume, dev); in dpm_resume()
860 dev = to_device(dpm_suspended_list.next); in dpm_resume()
861 get_device(dev); in dpm_resume()
862 if (!is_async(dev)) { in dpm_resume()
867 error = device_resume(dev, state, false); in dpm_resume()
871 dpm_save_failed_dev(dev_name(dev)); in dpm_resume()
872 pm_dev_err(dev, state, "", error); in dpm_resume()
877 if (!list_empty(&dev->power.entry)) in dpm_resume()
878 list_move_tail(&dev->power.entry, &dpm_prepared_list); in dpm_resume()
879 put_device(dev); in dpm_resume()
894 static void device_complete(struct device *dev, pm_message_t state) in device_complete() argument
899 if (dev->power.syscore) in device_complete()
902 device_lock(dev); in device_complete()
904 if (dev->pm_domain) { in device_complete()
906 callback = dev->pm_domain->ops.complete; in device_complete()
907 } else if (dev->type && dev->type->pm) { in device_complete()
909 callback = dev->type->pm->complete; in device_complete()
910 } else if (dev->class && dev->class->pm) { in device_complete()
912 callback = dev->class->pm->complete; in device_complete()
913 } else if (dev->bus && dev->bus->pm) { in device_complete()
915 callback = dev->bus->pm->complete; in device_complete()
918 if (!callback && dev->driver && dev->driver->pm) { in device_complete()
920 callback = dev->driver->pm->complete; in device_complete()
924 pm_dev_dbg(dev, state, info); in device_complete()
925 callback(dev); in device_complete()
928 device_unlock(dev); in device_complete()
930 pm_runtime_put(dev); in device_complete()
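
device_complete() runs ->complete, the counterpart of ->prepare from device_prepare() below; unlike the other phase callbacks it returns void, since the transition is already over and nothing can be aborted. A hypothetical pair:

static int example_prepare(struct device *dev)
{
        return 0;       /* 0: proceed with the normal suspend callbacks */
}

static void example_complete(struct device *dev)
{
        /* undo whatever example_prepare() set up */
}

static const struct dev_pm_ops example_pm_ops = {
        .prepare  = example_prepare,
        .complete = example_complete,
};
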
950 struct device *dev = to_device(dpm_prepared_list.prev); in dpm_complete() local
952 get_device(dev); in dpm_complete()
953 dev->power.is_prepared = false; in dpm_complete()
954 list_move(&dev->power.entry, &list); in dpm_complete()
957 trace_device_pm_callback_start(dev, "", state.event); in dpm_complete()
958 device_complete(dev, state); in dpm_complete()
959 trace_device_pm_callback_end(dev, 0); in dpm_complete()
962 put_device(dev); in dpm_complete()
1016 static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) in __device_suspend_noirq() argument
1022 TRACE_DEVICE(dev); in __device_suspend_noirq()
1033 if (dev->power.syscore || dev->power.direct_complete) in __device_suspend_noirq()
1036 dpm_wait_for_children(dev, async); in __device_suspend_noirq()
1038 if (dev->pm_domain) { in __device_suspend_noirq()
1040 callback = pm_noirq_op(&dev->pm_domain->ops, state); in __device_suspend_noirq()
1041 } else if (dev->type && dev->type->pm) { in __device_suspend_noirq()
1043 callback = pm_noirq_op(dev->type->pm, state); in __device_suspend_noirq()
1044 } else if (dev->class && dev->class->pm) { in __device_suspend_noirq()
1046 callback = pm_noirq_op(dev->class->pm, state); in __device_suspend_noirq()
1047 } else if (dev->bus && dev->bus->pm) { in __device_suspend_noirq()
1049 callback = pm_noirq_op(dev->bus->pm, state); in __device_suspend_noirq()
1052 if (!callback && dev->driver && dev->driver->pm) { in __device_suspend_noirq()
1054 callback = pm_noirq_op(dev->driver->pm, state); in __device_suspend_noirq()
1057 error = dpm_run_callback(callback, dev, state, info); in __device_suspend_noirq()
1059 dev->power.is_noirq_suspended = true; in __device_suspend_noirq()
1064 complete_all(&dev->power.completion); in __device_suspend_noirq()
1071 struct device *dev = (struct device *)data; in async_suspend_noirq() local
1074 error = __device_suspend_noirq(dev, pm_transition, true); in async_suspend_noirq()
1076 dpm_save_failed_dev(dev_name(dev)); in async_suspend_noirq()
1077 pm_dev_err(dev, pm_transition, " async", error); in async_suspend_noirq()
1080 put_device(dev); in async_suspend_noirq()
1083 static int device_suspend_noirq(struct device *dev) in device_suspend_noirq() argument
1085 reinit_completion(&dev->power.completion); in device_suspend_noirq()
1087 if (is_async(dev)) { in device_suspend_noirq()
1088 get_device(dev); in device_suspend_noirq()
1089 async_schedule(async_suspend_noirq, dev); in device_suspend_noirq()
1092 return __device_suspend_noirq(dev, pm_transition, false); in device_suspend_noirq()
1116 struct device *dev = to_device(dpm_late_early_list.prev); in dpm_suspend_noirq() local
1118 get_device(dev); in dpm_suspend_noirq()
1121 error = device_suspend_noirq(dev); in dpm_suspend_noirq()
1125 pm_dev_err(dev, state, " noirq", error); in dpm_suspend_noirq()
1126 dpm_save_failed_dev(dev_name(dev)); in dpm_suspend_noirq()
1127 put_device(dev); in dpm_suspend_noirq()
1130 if (!list_empty(&dev->power.entry)) in dpm_suspend_noirq()
1131 list_move(&dev->power.entry, &dpm_noirq_list); in dpm_suspend_noirq()
1132 put_device(dev); in dpm_suspend_noirq()
1161 static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) in __device_suspend_late() argument
1167 TRACE_DEVICE(dev); in __device_suspend_late()
1170 __pm_runtime_disable(dev, false); in __device_suspend_late()
1180 if (dev->power.syscore || dev->power.direct_complete) in __device_suspend_late()
1183 dpm_wait_for_children(dev, async); in __device_suspend_late()
1185 if (dev->pm_domain) { in __device_suspend_late()
1187 callback = pm_late_early_op(&dev->pm_domain->ops, state); in __device_suspend_late()
1188 } else if (dev->type && dev->type->pm) { in __device_suspend_late()
1190 callback = pm_late_early_op(dev->type->pm, state); in __device_suspend_late()
1191 } else if (dev->class && dev->class->pm) { in __device_suspend_late()
1193 callback = pm_late_early_op(dev->class->pm, state); in __device_suspend_late()
1194 } else if (dev->bus && dev->bus->pm) { in __device_suspend_late()
1196 callback = pm_late_early_op(dev->bus->pm, state); in __device_suspend_late()
1199 if (!callback && dev->driver && dev->driver->pm) { in __device_suspend_late()
1201 callback = pm_late_early_op(dev->driver->pm, state); in __device_suspend_late()
1204 error = dpm_run_callback(callback, dev, state, info); in __device_suspend_late()
1206 dev->power.is_late_suspended = true; in __device_suspend_late()
1212 complete_all(&dev->power.completion); in __device_suspend_late()
1218 struct device *dev = (struct device *)data; in async_suspend_late() local
1221 error = __device_suspend_late(dev, pm_transition, true); in async_suspend_late()
1223 dpm_save_failed_dev(dev_name(dev)); in async_suspend_late()
1224 pm_dev_err(dev, pm_transition, " async", error); in async_suspend_late()
1226 put_device(dev); in async_suspend_late()
1229 static int device_suspend_late(struct device *dev) in device_suspend_late() argument
1231 reinit_completion(&dev->power.completion); in device_suspend_late()
1233 if (is_async(dev)) { in device_suspend_late()
1234 get_device(dev); in device_suspend_late()
1235 async_schedule(async_suspend_late, dev); in device_suspend_late()
1239 return __device_suspend_late(dev, pm_transition, false); in device_suspend_late()
1257 struct device *dev = to_device(dpm_suspended_list.prev); in dpm_suspend_late() local
1259 get_device(dev); in dpm_suspend_late()
1262 error = device_suspend_late(dev); in dpm_suspend_late()
1265 if (!list_empty(&dev->power.entry)) in dpm_suspend_late()
1266 list_move(&dev->power.entry, &dpm_late_early_list); in dpm_suspend_late()
1269 pm_dev_err(dev, state, " late", error); in dpm_suspend_late()
1270 dpm_save_failed_dev(dev_name(dev)); in dpm_suspend_late()
1271 put_device(dev); in dpm_suspend_late()
1274 put_device(dev); in dpm_suspend_late()
1321 static int legacy_suspend(struct device *dev, pm_message_t state, in legacy_suspend() argument
1322 int (*cb)(struct device *dev, pm_message_t state), in legacy_suspend() argument
1328 calltime = initcall_debug_start(dev); in legacy_suspend()
1330 trace_device_pm_callback_start(dev, info, state.event); in legacy_suspend()
1331 error = cb(dev, state); in legacy_suspend()
1332 trace_device_pm_callback_end(dev, error); in legacy_suspend()
1335 initcall_debug_report(dev, calltime, error, state, info); in legacy_suspend()
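
legacy_suspend() wraps the old two-argument suspend callbacks so they get the same tracing and initcall_debug timing as modern ones. The callback type it invokes looks like this (hypothetical body):

static int example_legacy_suspend(struct device *dev, pm_message_t state)
{
        /* state.event distinguishes suspend, hibernate, freeze, ... */
        return 0;
}
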
1346 static int __device_suspend(struct device *dev, pm_message_t state, bool async) in __device_suspend() argument
1353 TRACE_DEVICE(dev); in __device_suspend()
1356 dpm_wait_for_children(dev, async); in __device_suspend()
1367 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) in __device_suspend()
1368 pm_wakeup_event(dev, 0); in __device_suspend()
1375 if (dev->power.syscore) in __device_suspend()
1378 if (dev->power.direct_complete) { in __device_suspend()
1379 if (pm_runtime_status_suspended(dev)) { in __device_suspend()
1380 pm_runtime_disable(dev); in __device_suspend()
1381 if (pm_runtime_status_suspended(dev)) in __device_suspend()
1384 pm_runtime_enable(dev); in __device_suspend()
1386 dev->power.direct_complete = false; in __device_suspend()
1389 dpm_watchdog_set(&wd, dev); in __device_suspend()
1390 device_lock(dev); in __device_suspend()
1392 if (dev->pm_domain) { in __device_suspend()
1394 callback = pm_op(&dev->pm_domain->ops, state); in __device_suspend()
1398 if (dev->type && dev->type->pm) { in __device_suspend()
1400 callback = pm_op(dev->type->pm, state); in __device_suspend()
1404 if (dev->class) { in __device_suspend()
1405 if (dev->class->pm) { in __device_suspend()
1407 callback = pm_op(dev->class->pm, state); in __device_suspend()
1409 } else if (dev->class->suspend) { in __device_suspend()
1410 pm_dev_dbg(dev, state, "legacy class "); in __device_suspend()
1411 error = legacy_suspend(dev, state, dev->class->suspend, in __device_suspend()
1417 if (dev->bus) { in __device_suspend()
1418 if (dev->bus->pm) { in __device_suspend()
1420 callback = pm_op(dev->bus->pm, state); in __device_suspend()
1421 } else if (dev->bus->suspend) { in __device_suspend()
1422 pm_dev_dbg(dev, state, "legacy bus "); in __device_suspend()
1423 error = legacy_suspend(dev, state, dev->bus->suspend, in __device_suspend()
1430 if (!callback && dev->driver && dev->driver->pm) { in __device_suspend()
1432 callback = pm_op(dev->driver->pm, state); in __device_suspend()
1435 error = dpm_run_callback(callback, dev, state, info); in __device_suspend()
1439 struct device *parent = dev->parent; in __device_suspend()
1441 dev->power.is_suspended = true; in __device_suspend()
1445 dev->parent->power.direct_complete = false; in __device_suspend()
1446 if (dev->power.wakeup_path in __device_suspend()
1447 && !dev->parent->power.ignore_children) in __device_suspend()
1448 dev->parent->power.wakeup_path = true; in __device_suspend()
1454 device_unlock(dev); in __device_suspend()
1458 complete_all(&dev->power.completion); in __device_suspend()
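
__device_suspend() aborts the transition early if a wakeup event races with it: pm_runtime_barrier() returning nonzero on a wakeup-enabled device means a runtime resume was in flight, and that is reported through pm_wakeup_event(). Drivers use the same call to veto an in-progress suspend, e.g. from an interrupt handler (hypothetical):

#include <linux/interrupt.h>
#include <linux/pm_wakeup.h>

static irqreturn_t example_irq(int irq, void *data)
{
        struct device *dev = data;

        /* Register a wakeup event; a system suspend under way aborts. */
        pm_wakeup_event(dev, 0);
        return IRQ_HANDLED;
}
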
1468 struct device *dev = (struct device *)data; in async_suspend() local
1471 error = __device_suspend(dev, pm_transition, true); in async_suspend()
1473 dpm_save_failed_dev(dev_name(dev)); in async_suspend()
1474 pm_dev_err(dev, pm_transition, " async", error); in async_suspend()
1477 put_device(dev); in async_suspend()
1480 static int device_suspend(struct device *dev) in device_suspend() argument
1482 reinit_completion(&dev->power.completion); in device_suspend()
1484 if (is_async(dev)) { in device_suspend()
1485 get_device(dev); in device_suspend()
1486 async_schedule(async_suspend, dev); in device_suspend()
1490 return __device_suspend(dev, pm_transition, false); in device_suspend()
1511 struct device *dev = to_device(dpm_prepared_list.prev); in dpm_suspend() local
1513 get_device(dev); in dpm_suspend()
1516 error = device_suspend(dev); in dpm_suspend()
1520 pm_dev_err(dev, state, "", error); in dpm_suspend()
1521 dpm_save_failed_dev(dev_name(dev)); in dpm_suspend()
1522 put_device(dev); in dpm_suspend()
1525 if (!list_empty(&dev->power.entry)) in dpm_suspend()
1526 list_move(&dev->power.entry, &dpm_suspended_list); in dpm_suspend()
1527 put_device(dev); in dpm_suspend()
1552 static int device_prepare(struct device *dev, pm_message_t state) in device_prepare() argument
1558 if (dev->power.syscore) in device_prepare()
1567 pm_runtime_get_noresume(dev); in device_prepare()
1569 device_lock(dev); in device_prepare()
1571 dev->power.wakeup_path = device_may_wakeup(dev); in device_prepare()
1573 if (dev->pm_domain) { in device_prepare()
1575 callback = dev->pm_domain->ops.prepare; in device_prepare()
1576 } else if (dev->type && dev->type->pm) { in device_prepare()
1578 callback = dev->type->pm->prepare; in device_prepare()
1579 } else if (dev->class && dev->class->pm) { in device_prepare()
1581 callback = dev->class->pm->prepare; in device_prepare()
1582 } else if (dev->bus && dev->bus->pm) { in device_prepare()
1584 callback = dev->bus->pm->prepare; in device_prepare()
1587 if (!callback && dev->driver && dev->driver->pm) { in device_prepare()
1589 callback = dev->driver->pm->prepare; in device_prepare()
1593 ret = callback(dev); in device_prepare()
1595 device_unlock(dev); in device_prepare()
1599 pm_runtime_put(dev); in device_prepare()
1609 spin_lock_irq(&dev->power.lock); in device_prepare()
1610 dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND; in device_prepare()
1611 spin_unlock_irq(&dev->power.lock); in device_prepare()
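
device_prepare() records a positive ->prepare return value as power.direct_complete (only for PM_EVENT_SUSPEND): __device_suspend() will then leave a runtime-suspended device where it is instead of running the full suspend/resume callback chain. A hypothetical opt-in:

static int example_prepare(struct device *dev)
{
        /* Returning > 0 while runtime-suspended requests the
         * direct_complete fast path for this suspend cycle. */
        return pm_runtime_status_suspended(dev);
}
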
1630 struct device *dev = to_device(dpm_list.next); in dpm_prepare() local
1632 get_device(dev); in dpm_prepare()
1635 trace_device_pm_callback_start(dev, "", state.event); in dpm_prepare()
1636 error = device_prepare(dev, state); in dpm_prepare()
1637 trace_device_pm_callback_end(dev, error); in dpm_prepare()
1642 put_device(dev); in dpm_prepare()
1648 dev_name(dev), error); in dpm_prepare()
1649 put_device(dev); in dpm_prepare()
1652 dev->power.is_prepared = true; in dpm_prepare()
1653 if (!list_empty(&dev->power.entry)) in dpm_prepare()
1654 list_move_tail(&dev->power.entry, &dpm_prepared_list); in dpm_prepare()
1655 put_device(dev); in dpm_prepare()
1695 int device_pm_wait_for_dev(struct device *subordinate, struct device *dev) in device_pm_wait_for_dev() argument
1697 dpm_wait(dev, subordinate->power.async_suspend); in device_pm_wait_for_dev()
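
device_pm_wait_for_dev() lets a driver serialize against another device outside the parent/child relation: the subordinate waits until the other device's current transition completes. Hypothetical usage, where struct example_priv and its supplier pointer are made-up names:

static int example_resume(struct device *dev)
{
        struct example_priv *priv = dev_get_drvdata(dev);

        /* Do not touch the hardware before the supplier is back up. */
        device_pm_wait_for_dev(dev, priv->supplier);
        return 0;
}
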
1712 struct device *dev; in dpm_for_each_dev() local
1718 list_for_each_entry(dev, &dpm_list, power.entry) in dpm_for_each_dev()
1719 fn(dev, data); in dpm_for_each_dev()
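
dpm_for_each_dev() walks dpm_list under the PM list lock and applies a caller-supplied function to every device known to the PM core. A hypothetical user that just counts them:

static void example_count_one(struct device *dev, void *data)
{
        (*(int *)data)++;
}

static int example_count_pm_devices(void)
{
        int count = 0;

        dpm_for_each_dev(&count, example_count_one);
        return count;
}
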