Lines matching refs:dev (identifier cross-reference into the Linux runtime PM core, drivers/base/power/runtime.c; each entry carries its line number in that file).

19 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)  in __rpm_get_callback()  argument
24 if (dev->pm_domain) in __rpm_get_callback()
25 ops = &dev->pm_domain->ops; in __rpm_get_callback()
26 else if (dev->type && dev->type->pm) in __rpm_get_callback()
27 ops = dev->type->pm; in __rpm_get_callback()
28 else if (dev->class && dev->class->pm) in __rpm_get_callback()
29 ops = dev->class->pm; in __rpm_get_callback()
30 else if (dev->bus && dev->bus->pm) in __rpm_get_callback()
31 ops = dev->bus->pm; in __rpm_get_callback()
40 if (!cb && dev->driver && dev->driver->pm) in __rpm_get_callback()
41 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset); in __rpm_get_callback()
46 #define RPM_GET_CALLBACK(dev, callback) \ argument
47 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
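RPM_GET_CALLBACK() resolves a callback by its offsetof() into struct dev_pm_ops, so one helper serves runtime_suspend, runtime_resume and runtime_idle alike; the lookup order above (pm_domain, then device type, class, bus) makes the driver's own ops (lines 40-41) only a fallback. A minimal sketch of the driver side this lookup lands on, with hypothetical foo_* names:

static int foo_runtime_suspend(struct device *dev) { return 0; /* gate hw */ }
static int foo_runtime_resume(struct device *dev)  { return 0; /* ungate hw */ }

static const struct dev_pm_ops foo_pm_ops = {
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};
/* assigned to foo_driver.driver.pm; any pm_domain/type/class/bus ops
 * present take precedence per the search order above */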
49 static int rpm_resume(struct device *dev, int rpmflags);
50 static int rpm_suspend(struct device *dev, int rpmflags);
63 void update_pm_runtime_accounting(struct device *dev) in update_pm_runtime_accounting() argument
68 delta = now - dev->power.accounting_timestamp; in update_pm_runtime_accounting()
70 dev->power.accounting_timestamp = now; in update_pm_runtime_accounting()
72 if (dev->power.disable_depth > 0) in update_pm_runtime_accounting()
75 if (dev->power.runtime_status == RPM_SUSPENDED) in update_pm_runtime_accounting()
76 dev->power.suspended_jiffies += delta; in update_pm_runtime_accounting()
78 dev->power.active_jiffies += delta; in update_pm_runtime_accounting()
81 static void __update_runtime_status(struct device *dev, enum rpm_status status) in __update_runtime_status() argument
83 update_pm_runtime_accounting(dev); in __update_runtime_status()
84 dev->power.runtime_status = status; in __update_runtime_status()
91 static void pm_runtime_deactivate_timer(struct device *dev) in pm_runtime_deactivate_timer() argument
93 if (dev->power.timer_expires > 0) { in pm_runtime_deactivate_timer()
94 del_timer(&dev->power.suspend_timer); in pm_runtime_deactivate_timer()
95 dev->power.timer_expires = 0; in pm_runtime_deactivate_timer()
103 static void pm_runtime_cancel_pending(struct device *dev) in pm_runtime_cancel_pending() argument
105 pm_runtime_deactivate_timer(dev); in pm_runtime_cancel_pending()
110 dev->power.request = RPM_REQ_NONE; in pm_runtime_cancel_pending()
125 unsigned long pm_runtime_autosuspend_expiration(struct device *dev) in pm_runtime_autosuspend_expiration() argument
132 if (!dev->power.use_autosuspend) in pm_runtime_autosuspend_expiration()
135 autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay); in pm_runtime_autosuspend_expiration()
139 last_busy = ACCESS_ONCE(dev->power.last_busy); in pm_runtime_autosuspend_expiration()
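pm_runtime_autosuspend_expiration() reads autosuspend_delay and last_busy lock-free (the ACCESS_ONCE() pairs above) and yields last_busy plus the delay in jiffies, or 0 when autosuspend is disabled or the delay is negative. The driver-side pattern that feeds it, sketched:

/* after completing I/O, refresh last_busy and drop the reference with
 * the autosuspend variant so rpm_suspend() honours the delay */
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);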
160 static int dev_memalloc_noio(struct device *dev, void *data) in dev_memalloc_noio() argument
162 return dev->power.memalloc_noio; in dev_memalloc_noio()
193 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable) in pm_runtime_set_memalloc_noio() argument
202 spin_lock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
203 enabled = dev->power.memalloc_noio; in pm_runtime_set_memalloc_noio()
204 dev->power.memalloc_noio = enable; in pm_runtime_set_memalloc_noio()
205 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_memalloc_noio()
214 dev = dev->parent; in pm_runtime_set_memalloc_noio()
221 if (!dev || (!enable && in pm_runtime_set_memalloc_noio()
222 device_for_each_child(dev, NULL, in pm_runtime_set_memalloc_noio()
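pm_runtime_set_memalloc_noio() propagates the flag up the parent chain (the dev = dev->parent walk at line 214) and, when clearing it, stops as soon as device_for_each_child() finds a child that still needs it. Devices on the block-I/O path set it so their PM callbacks run without I/O-triggering allocations (see rpm_callback() below). A probe-time sketch, foo_probe() hypothetical:

static int foo_probe(struct device *dev)
{
	/* resume may be needed to complete writeback, so forbid
	 * I/O-triggering allocations inside the PM callbacks */
	pm_runtime_set_memalloc_noio(dev, true);
	return 0;
}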
234 static int rpm_check_suspend_allowed(struct device *dev) in rpm_check_suspend_allowed() argument
238 if (dev->power.runtime_error) in rpm_check_suspend_allowed()
240 else if (dev->power.disable_depth > 0) in rpm_check_suspend_allowed()
242 else if (atomic_read(&dev->power.usage_count) > 0) in rpm_check_suspend_allowed()
244 else if (!pm_children_suspended(dev)) in rpm_check_suspend_allowed()
248 else if ((dev->power.deferred_resume in rpm_check_suspend_allowed()
249 && dev->power.runtime_status == RPM_SUSPENDING) in rpm_check_suspend_allowed()
250 || (dev->power.request_pending in rpm_check_suspend_allowed()
251 && dev->power.request == RPM_REQ_RESUME)) in rpm_check_suspend_allowed()
253 else if (__dev_pm_qos_read_value(dev) < 0) in rpm_check_suspend_allowed()
255 else if (dev->power.runtime_status == RPM_SUSPENDED) in rpm_check_suspend_allowed()
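The listing keeps only the condition lines of rpm_check_suspend_allowed(); for reference, the return values they select, paraphrased from the mainline source of this era:

/* runtime_error set             -> -EINVAL
 * disable_depth > 0             -> -EACCES
 * usage_count > 0               -> -EAGAIN
 * active children               -> -EBUSY
 * resume deferred or requested  -> -EAGAIN
 * PM QoS constraint < 0         -> -EPERM
 * already RPM_SUSPENDED         ->  1   (nothing to do)
 * otherwise                     ->  0   (suspend allowed)
 */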
266 static int __rpm_callback(int (*cb)(struct device *), struct device *dev) in __rpm_callback() argument
267 __releases(&dev->power.lock) __acquires(&dev->power.lock) in __rpm_callback()
271 if (dev->power.irq_safe) in __rpm_callback()
272 spin_unlock(&dev->power.lock); in __rpm_callback()
274 spin_unlock_irq(&dev->power.lock); in __rpm_callback()
276 retval = cb(dev); in __rpm_callback()
278 if (dev->power.irq_safe) in __rpm_callback()
279 spin_lock(&dev->power.lock); in __rpm_callback()
281 spin_lock_irq(&dev->power.lock); in __rpm_callback()
299 static int rpm_idle(struct device *dev, int rpmflags) in rpm_idle() argument
304 trace_rpm_idle(dev, rpmflags); in rpm_idle()
305 retval = rpm_check_suspend_allowed(dev); in rpm_idle()
310 else if (dev->power.runtime_status != RPM_ACTIVE) in rpm_idle()
317 else if (dev->power.request_pending && in rpm_idle()
318 dev->power.request > RPM_REQ_IDLE) in rpm_idle()
322 else if (dev->power.idle_notification) in rpm_idle()
328 dev->power.request = RPM_REQ_NONE; in rpm_idle()
330 if (dev->power.no_callbacks) in rpm_idle()
335 dev->power.request = RPM_REQ_IDLE; in rpm_idle()
336 if (!dev->power.request_pending) { in rpm_idle()
337 dev->power.request_pending = true; in rpm_idle()
338 queue_work(pm_wq, &dev->power.work); in rpm_idle()
340 trace_rpm_return_int(dev, _THIS_IP_, 0); in rpm_idle()
344 dev->power.idle_notification = true; in rpm_idle()
346 callback = RPM_GET_CALLBACK(dev, runtime_idle); in rpm_idle()
349 retval = __rpm_callback(callback, dev); in rpm_idle()
351 dev->power.idle_notification = false; in rpm_idle()
352 wake_up_all(&dev->power.wait_queue); in rpm_idle()
355 trace_rpm_return_int(dev, _THIS_IP_, retval); in rpm_idle()
356 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO); in rpm_idle()
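When the runtime_idle callback returns 0, rpm_idle() falls through to rpm_suspend() with RPM_AUTO (line 356); a nonzero return vetoes the transition. A sketch of a cooperating callback, foo_busy() hypothetical:

static int foo_runtime_idle(struct device *dev)
{
	if (foo_busy(dev))
		return -EBUSY;	/* keep the device active */
	return 0;		/* proceed to rpm_suspend(RPM_AUTO) */
}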
364 static int rpm_callback(int (*cb)(struct device *), struct device *dev) in rpm_callback() argument
371 if (dev->power.memalloc_noio) { in rpm_callback()
384 retval = __rpm_callback(cb, dev); in rpm_callback()
387 retval = __rpm_callback(cb, dev); in rpm_callback()
390 dev->power.runtime_error = retval; in rpm_callback()
415 static int rpm_suspend(struct device *dev, int rpmflags) in rpm_suspend() argument
416 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_suspend()
422 trace_rpm_suspend(dev, rpmflags); in rpm_suspend()
425 retval = rpm_check_suspend_allowed(dev); in rpm_suspend()
431 else if (dev->power.runtime_status == RPM_RESUMING && in rpm_suspend()
439 && dev->power.runtime_status != RPM_SUSPENDING) { in rpm_suspend()
440 unsigned long expires = pm_runtime_autosuspend_expiration(dev); in rpm_suspend()
444 dev->power.request = RPM_REQ_NONE; in rpm_suspend()
453 if (!(dev->power.timer_expires && time_before_eq( in rpm_suspend()
454 dev->power.timer_expires, expires))) { in rpm_suspend()
455 dev->power.timer_expires = expires; in rpm_suspend()
456 mod_timer(&dev->power.suspend_timer, expires); in rpm_suspend()
458 dev->power.timer_autosuspends = 1; in rpm_suspend()
464 pm_runtime_cancel_pending(dev); in rpm_suspend()
466 if (dev->power.runtime_status == RPM_SUSPENDING) { in rpm_suspend()
474 if (dev->power.irq_safe) { in rpm_suspend()
475 spin_unlock(&dev->power.lock); in rpm_suspend()
479 spin_lock(&dev->power.lock); in rpm_suspend()
485 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_suspend()
487 if (dev->power.runtime_status != RPM_SUSPENDING) in rpm_suspend()
490 spin_unlock_irq(&dev->power.lock); in rpm_suspend()
494 spin_lock_irq(&dev->power.lock); in rpm_suspend()
496 finish_wait(&dev->power.wait_queue, &wait); in rpm_suspend()
500 if (dev->power.no_callbacks) in rpm_suspend()
505 dev->power.request = (rpmflags & RPM_AUTO) ? in rpm_suspend()
507 if (!dev->power.request_pending) { in rpm_suspend()
508 dev->power.request_pending = true; in rpm_suspend()
509 queue_work(pm_wq, &dev->power.work); in rpm_suspend()
514 __update_runtime_status(dev, RPM_SUSPENDING); in rpm_suspend()
516 callback = RPM_GET_CALLBACK(dev, runtime_suspend); in rpm_suspend()
518 dev_pm_enable_wake_irq(dev); in rpm_suspend()
519 retval = rpm_callback(callback, dev); in rpm_suspend()
524 __update_runtime_status(dev, RPM_SUSPENDED); in rpm_suspend()
525 pm_runtime_deactivate_timer(dev); in rpm_suspend()
527 if (dev->parent) { in rpm_suspend()
528 parent = dev->parent; in rpm_suspend()
531 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
533 if (dev->power.deferred_resume) { in rpm_suspend()
534 dev->power.deferred_resume = false; in rpm_suspend()
535 rpm_resume(dev, 0); in rpm_suspend()
541 if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { in rpm_suspend()
542 spin_unlock(&dev->power.lock); in rpm_suspend()
548 spin_lock(&dev->power.lock); in rpm_suspend()
552 trace_rpm_return_int(dev, _THIS_IP_, retval); in rpm_suspend()
557 dev_pm_disable_wake_irq(dev); in rpm_suspend()
558 __update_runtime_status(dev, RPM_ACTIVE); in rpm_suspend()
559 dev->power.deferred_resume = false; in rpm_suspend()
560 wake_up_all(&dev->power.wait_queue); in rpm_suspend()
563 dev->power.runtime_error = 0; in rpm_suspend()
572 pm_runtime_autosuspend_expiration(dev) != 0) in rpm_suspend()
575 pm_runtime_cancel_pending(dev); in rpm_suspend()
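The callback invoked at line 519 is whatever RPM_GET_CALLBACK(dev, runtime_suspend) resolved; a negative return takes the fail path (status back to RPM_ACTIVE at line 558), and -EAGAIN/-EBUSY additionally clear runtime_error and may retry via autosuspend (lines 563-572). A sketch of a typical callback, assuming a clock-gated device with a hypothetical foo_priv:

static int foo_runtime_suspend(struct device *dev)
{
	struct foo_priv *priv = dev_get_drvdata(dev);

	clk_disable_unprepare(priv->clk);	/* power down the block */
	return 0;	/* core marks the device RPM_SUSPENDED */
}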
597 static int rpm_resume(struct device *dev, int rpmflags) in rpm_resume() argument
598 __releases(&dev->power.lock) __acquires(&dev->power.lock) in rpm_resume()
604 trace_rpm_resume(dev, rpmflags); in rpm_resume()
607 if (dev->power.runtime_error) in rpm_resume()
609 else if (dev->power.disable_depth == 1 && dev->power.is_suspended in rpm_resume()
610 && dev->power.runtime_status == RPM_ACTIVE) in rpm_resume()
612 else if (dev->power.disable_depth > 0) in rpm_resume()
623 dev->power.request = RPM_REQ_NONE; in rpm_resume()
624 if (!dev->power.timer_autosuspends) in rpm_resume()
625 pm_runtime_deactivate_timer(dev); in rpm_resume()
627 if (dev->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
632 if (dev->power.runtime_status == RPM_RESUMING in rpm_resume()
633 || dev->power.runtime_status == RPM_SUSPENDING) { in rpm_resume()
637 if (dev->power.runtime_status == RPM_SUSPENDING) in rpm_resume()
638 dev->power.deferred_resume = true; in rpm_resume()
644 if (dev->power.irq_safe) { in rpm_resume()
645 spin_unlock(&dev->power.lock); in rpm_resume()
649 spin_lock(&dev->power.lock); in rpm_resume()
655 prepare_to_wait(&dev->power.wait_queue, &wait, in rpm_resume()
657 if (dev->power.runtime_status != RPM_RESUMING in rpm_resume()
658 && dev->power.runtime_status != RPM_SUSPENDING) in rpm_resume()
661 spin_unlock_irq(&dev->power.lock); in rpm_resume()
665 spin_lock_irq(&dev->power.lock); in rpm_resume()
667 finish_wait(&dev->power.wait_queue, &wait); in rpm_resume()
676 if (dev->power.no_callbacks && !parent && dev->parent) { in rpm_resume()
677 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING); in rpm_resume()
678 if (dev->parent->power.disable_depth > 0 in rpm_resume()
679 || dev->parent->power.ignore_children in rpm_resume()
680 || dev->parent->power.runtime_status == RPM_ACTIVE) { in rpm_resume()
681 atomic_inc(&dev->parent->power.child_count); in rpm_resume()
682 spin_unlock(&dev->parent->power.lock); in rpm_resume()
686 spin_unlock(&dev->parent->power.lock); in rpm_resume()
691 dev->power.request = RPM_REQ_RESUME; in rpm_resume()
692 if (!dev->power.request_pending) { in rpm_resume()
693 dev->power.request_pending = true; in rpm_resume()
694 queue_work(pm_wq, &dev->power.work); in rpm_resume()
700 if (!parent && dev->parent) { in rpm_resume()
706 parent = dev->parent; in rpm_resume()
707 if (dev->power.irq_safe) in rpm_resume()
709 spin_unlock(&dev->power.lock); in rpm_resume()
726 spin_lock(&dev->power.lock); in rpm_resume()
733 if (dev->power.no_callbacks) in rpm_resume()
736 __update_runtime_status(dev, RPM_RESUMING); in rpm_resume()
738 callback = RPM_GET_CALLBACK(dev, runtime_resume); in rpm_resume()
740 dev_pm_disable_wake_irq(dev); in rpm_resume()
741 retval = rpm_callback(callback, dev); in rpm_resume()
743 __update_runtime_status(dev, RPM_SUSPENDED); in rpm_resume()
744 pm_runtime_cancel_pending(dev); in rpm_resume()
745 dev_pm_enable_wake_irq(dev); in rpm_resume()
748 __update_runtime_status(dev, RPM_ACTIVE); in rpm_resume()
749 pm_runtime_mark_last_busy(dev); in rpm_resume()
753 wake_up_all(&dev->power.wait_queue); in rpm_resume()
756 rpm_idle(dev, RPM_ASYNC); in rpm_resume()
759 if (parent && !dev->power.irq_safe) { in rpm_resume()
760 spin_unlock_irq(&dev->power.lock); in rpm_resume()
764 spin_lock_irq(&dev->power.lock); in rpm_resume()
767 trace_rpm_return_int(dev, _THIS_IP_, retval); in rpm_resume()
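rpm_resume() is what a driver's hot path reaches through the get/put helpers; the canonical usage bracketing hardware access, foo_xmit() hypothetical:

static int foo_xmit(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* synchronous rpm_resume() */

	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* rebalance usage_count */
		return ret;
	}
	/* ... touch the hardware ... */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* async rpm_suspend(RPM_AUTO) */
	return 0;
}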
781 struct device *dev = container_of(work, struct device, power.work); in pm_runtime_work() local
784 spin_lock_irq(&dev->power.lock); in pm_runtime_work()
786 if (!dev->power.request_pending) in pm_runtime_work()
789 req = dev->power.request; in pm_runtime_work()
790 dev->power.request = RPM_REQ_NONE; in pm_runtime_work()
791 dev->power.request_pending = false; in pm_runtime_work()
797 rpm_idle(dev, RPM_NOWAIT); in pm_runtime_work()
800 rpm_suspend(dev, RPM_NOWAIT); in pm_runtime_work()
803 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO); in pm_runtime_work()
806 rpm_resume(dev, RPM_NOWAIT); in pm_runtime_work()
811 spin_unlock_irq(&dev->power.lock); in pm_runtime_work()
822 struct device *dev = (struct device *)data; in pm_suspend_timer_fn() local
826 spin_lock_irqsave(&dev->power.lock, flags); in pm_suspend_timer_fn()
828 expires = dev->power.timer_expires; in pm_suspend_timer_fn()
831 dev->power.timer_expires = 0; in pm_suspend_timer_fn()
832 rpm_suspend(dev, dev->power.timer_autosuspends ? in pm_suspend_timer_fn()
836 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_suspend_timer_fn()
844 int pm_schedule_suspend(struct device *dev, unsigned int delay) in pm_schedule_suspend() argument
849 spin_lock_irqsave(&dev->power.lock, flags); in pm_schedule_suspend()
852 retval = rpm_suspend(dev, RPM_ASYNC); in pm_schedule_suspend()
856 retval = rpm_check_suspend_allowed(dev); in pm_schedule_suspend()
861 pm_runtime_cancel_pending(dev); in pm_schedule_suspend()
863 dev->power.timer_expires = jiffies + msecs_to_jiffies(delay); in pm_schedule_suspend()
864 dev->power.timer_expires += !dev->power.timer_expires; in pm_schedule_suspend()
865 dev->power.timer_autosuspends = 0; in pm_schedule_suspend()
866 mod_timer(&dev->power.suspend_timer, dev->power.timer_expires); in pm_schedule_suspend()
869 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_schedule_suspend()
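Usage sketch for the scheduler above (a delay of 0 takes the immediate RPM_ASYNC branch at line 852; foo_done() is hypothetical):

static void foo_done(struct device *dev)
{
	int ret = pm_schedule_suspend(dev, 500);	/* ms */

	if (ret < 0)
		dev_dbg(dev, "suspend not allowed: %d\n", ret);
}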
887 int __pm_runtime_idle(struct device *dev, int rpmflags) in __pm_runtime_idle() argument
892 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_idle()
895 if (!atomic_dec_and_test(&dev->power.usage_count)) in __pm_runtime_idle()
899 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_idle()
900 retval = rpm_idle(dev, rpmflags); in __pm_runtime_idle()
901 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_idle()
919 int __pm_runtime_suspend(struct device *dev, int rpmflags) in __pm_runtime_suspend() argument
924 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_suspend()
927 if (!atomic_dec_and_test(&dev->power.usage_count)) in __pm_runtime_suspend()
931 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_suspend()
932 retval = rpm_suspend(dev, rpmflags); in __pm_runtime_suspend()
933 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_suspend()
950 int __pm_runtime_resume(struct device *dev, int rpmflags) in __pm_runtime_resume() argument
955 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe); in __pm_runtime_resume()
958 atomic_inc(&dev->power.usage_count); in __pm_runtime_resume()
960 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_resume()
961 retval = rpm_resume(dev, rpmflags); in __pm_runtime_resume()
962 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_resume()
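The atomic_dec_and_test()/atomic_inc() lines above implement the RPM_GET_PUT flag; the familiar <linux/pm_runtime.h> helpers are thin wrappers over these three entry points. A reference mapping, as defined in this kernel generation:

/* pm_runtime_idle(dev)            == __pm_runtime_idle(dev, 0)
 * pm_runtime_put(dev)             == __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC)
 * pm_runtime_suspend(dev)         == __pm_runtime_suspend(dev, 0)
 * pm_runtime_put_autosuspend(dev) == __pm_runtime_suspend(dev,
 *                                        RPM_GET_PUT | RPM_ASYNC | RPM_AUTO)
 * pm_runtime_resume(dev)          == __pm_runtime_resume(dev, 0)
 * pm_runtime_get_sync(dev)        == __pm_runtime_resume(dev, RPM_GET_PUT)
 */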
985 int __pm_runtime_set_status(struct device *dev, unsigned int status) in __pm_runtime_set_status() argument
987 struct device *parent = dev->parent; in __pm_runtime_set_status()
995 spin_lock_irqsave(&dev->power.lock, flags); in __pm_runtime_set_status()
997 if (!dev->power.runtime_error && !dev->power.disable_depth) { in __pm_runtime_set_status()
1002 if (dev->power.runtime_status == status) in __pm_runtime_set_status()
1026 else if (dev->power.runtime_status == RPM_SUSPENDED) in __pm_runtime_set_status()
1036 __update_runtime_status(dev, status); in __pm_runtime_set_status()
1037 dev->power.runtime_error = 0; in __pm_runtime_set_status()
1039 spin_unlock_irqrestore(&dev->power.lock, flags); in __pm_runtime_set_status()
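__pm_runtime_set_status() backs pm_runtime_set_active() and pm_runtime_set_suspended(); note it only accepts a change while runtime PM is disabled (the disable_depth check at line 997 otherwise yields -EAGAIN). Hence the usual probe sequence for hardware that comes up powered, sketched with a hypothetical bar_probe():

static int bar_probe(struct device *dev)
{
	pm_runtime_set_active(dev);	/* status := RPM_ACTIVE, no callbacks run */
	pm_runtime_enable(dev);		/* drop the initial disable_depth of 1 */
	return 0;
}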
1057 static void __pm_runtime_barrier(struct device *dev) in __pm_runtime_barrier() argument
1059 pm_runtime_deactivate_timer(dev); in __pm_runtime_barrier()
1061 if (dev->power.request_pending) { in __pm_runtime_barrier()
1062 dev->power.request = RPM_REQ_NONE; in __pm_runtime_barrier()
1063 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1065 cancel_work_sync(&dev->power.work); in __pm_runtime_barrier()
1067 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1068 dev->power.request_pending = false; in __pm_runtime_barrier()
1071 if (dev->power.runtime_status == RPM_SUSPENDING in __pm_runtime_barrier()
1072 || dev->power.runtime_status == RPM_RESUMING in __pm_runtime_barrier()
1073 || dev->power.idle_notification) { in __pm_runtime_barrier()
1078 prepare_to_wait(&dev->power.wait_queue, &wait, in __pm_runtime_barrier()
1080 if (dev->power.runtime_status != RPM_SUSPENDING in __pm_runtime_barrier()
1081 && dev->power.runtime_status != RPM_RESUMING in __pm_runtime_barrier()
1082 && !dev->power.idle_notification) in __pm_runtime_barrier()
1084 spin_unlock_irq(&dev->power.lock); in __pm_runtime_barrier()
1088 spin_lock_irq(&dev->power.lock); in __pm_runtime_barrier()
1090 finish_wait(&dev->power.wait_queue, &wait); in __pm_runtime_barrier()
1108 int pm_runtime_barrier(struct device *dev) in pm_runtime_barrier() argument
1112 pm_runtime_get_noresume(dev); in pm_runtime_barrier()
1113 spin_lock_irq(&dev->power.lock); in pm_runtime_barrier()
1115 if (dev->power.request_pending in pm_runtime_barrier()
1116 && dev->power.request == RPM_REQ_RESUME) { in pm_runtime_barrier()
1117 rpm_resume(dev, 0); in pm_runtime_barrier()
1121 __pm_runtime_barrier(dev); in pm_runtime_barrier()
1123 spin_unlock_irq(&dev->power.lock); in pm_runtime_barrier()
1124 pm_runtime_put_noidle(dev); in pm_runtime_barrier()
1144 void __pm_runtime_disable(struct device *dev, bool check_resume) in __pm_runtime_disable() argument
1146 spin_lock_irq(&dev->power.lock); in __pm_runtime_disable()
1148 if (dev->power.disable_depth > 0) { in __pm_runtime_disable()
1149 dev->power.disable_depth++; in __pm_runtime_disable()
1158 if (check_resume && dev->power.request_pending in __pm_runtime_disable()
1159 && dev->power.request == RPM_REQ_RESUME) { in __pm_runtime_disable()
1164 pm_runtime_get_noresume(dev); in __pm_runtime_disable()
1166 rpm_resume(dev, 0); in __pm_runtime_disable()
1168 pm_runtime_put_noidle(dev); in __pm_runtime_disable()
1171 if (!dev->power.disable_depth++) in __pm_runtime_disable()
1172 __pm_runtime_barrier(dev); in __pm_runtime_disable()
1175 spin_unlock_irq(&dev->power.lock); in __pm_runtime_disable()
1183 void pm_runtime_enable(struct device *dev) in pm_runtime_enable() argument
1187 spin_lock_irqsave(&dev->power.lock, flags); in pm_runtime_enable()
1189 if (dev->power.disable_depth > 0) in pm_runtime_enable()
1190 dev->power.disable_depth--; in pm_runtime_enable()
1192 dev_warn(dev, "Unbalanced %s!\n", __func__); in pm_runtime_enable()
1194 spin_unlock_irqrestore(&dev->power.lock, flags); in pm_runtime_enable()
1206 void pm_runtime_forbid(struct device *dev) in pm_runtime_forbid() argument
1208 spin_lock_irq(&dev->power.lock); in pm_runtime_forbid()
1209 if (!dev->power.runtime_auto) in pm_runtime_forbid()
1212 dev->power.runtime_auto = false; in pm_runtime_forbid()
1213 atomic_inc(&dev->power.usage_count); in pm_runtime_forbid()
1214 rpm_resume(dev, 0); in pm_runtime_forbid()
1217 spin_unlock_irq(&dev->power.lock); in pm_runtime_forbid()
1227 void pm_runtime_allow(struct device *dev) in pm_runtime_allow() argument
1229 spin_lock_irq(&dev->power.lock); in pm_runtime_allow()
1230 if (dev->power.runtime_auto) in pm_runtime_allow()
1233 dev->power.runtime_auto = true; in pm_runtime_allow()
1234 if (atomic_dec_and_test(&dev->power.usage_count)) in pm_runtime_allow()
1235 rpm_idle(dev, RPM_AUTO); in pm_runtime_allow()
1238 spin_unlock_irq(&dev->power.lock); in pm_runtime_allow()
1250 void pm_runtime_no_callbacks(struct device *dev) in pm_runtime_no_callbacks() argument
1252 spin_lock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1253 dev->power.no_callbacks = 1; in pm_runtime_no_callbacks()
1254 spin_unlock_irq(&dev->power.lock); in pm_runtime_no_callbacks()
1255 if (device_is_registered(dev)) in pm_runtime_no_callbacks()
1256 rpm_sysfs_remove(dev); in pm_runtime_no_callbacks()
1271 void pm_runtime_irq_safe(struct device *dev) in pm_runtime_irq_safe() argument
1273 if (dev->parent) in pm_runtime_irq_safe()
1274 pm_runtime_get_sync(dev->parent); in pm_runtime_irq_safe()
1275 spin_lock_irq(&dev->power.lock); in pm_runtime_irq_safe()
1276 dev->power.irq_safe = 1; in pm_runtime_irq_safe()
1277 spin_unlock_irq(&dev->power.lock); in pm_runtime_irq_safe()
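pm_runtime_irq_safe() pins the parent (the pm_runtime_get_sync(dev->parent) above) and flips the device onto the spin_lock()-only paths seen in __rpm_callback(), rpm_suspend() and rpm_resume(), so the synchronous helpers become callable from interrupt context; the trade-off is that the device's callbacks may no longer sleep. Sketch, foo_irq() hypothetical:

static irqreturn_t foo_irq(int irq, void *data)
{
	struct device *dev = data;

	pm_runtime_get_sync(dev);	/* legal here only because irq_safe */
	/* ... acknowledge and service the interrupt ... */
	pm_runtime_put(dev);
	return IRQ_HANDLED;
}
/* in probe: pm_runtime_irq_safe(dev); */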
1292 static void update_autosuspend(struct device *dev, int old_delay, int old_use) in update_autosuspend() argument
1294 int delay = dev->power.autosuspend_delay; in update_autosuspend()
1297 if (dev->power.use_autosuspend && delay < 0) { in update_autosuspend()
1301 atomic_inc(&dev->power.usage_count); in update_autosuspend()
1302 rpm_resume(dev, 0); in update_autosuspend()
1311 atomic_dec(&dev->power.usage_count); in update_autosuspend()
1314 rpm_idle(dev, RPM_AUTO); in update_autosuspend()
1327 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay) in pm_runtime_set_autosuspend_delay() argument
1331 spin_lock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1332 old_delay = dev->power.autosuspend_delay; in pm_runtime_set_autosuspend_delay()
1333 old_use = dev->power.use_autosuspend; in pm_runtime_set_autosuspend_delay()
1334 dev->power.autosuspend_delay = delay; in pm_runtime_set_autosuspend_delay()
1335 update_autosuspend(dev, old_delay, old_use); in pm_runtime_set_autosuspend_delay()
1336 spin_unlock_irq(&dev->power.lock); in pm_runtime_set_autosuspend_delay()
1348 void __pm_runtime_use_autosuspend(struct device *dev, bool use) in __pm_runtime_use_autosuspend() argument
1352 spin_lock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
1353 old_delay = dev->power.autosuspend_delay; in __pm_runtime_use_autosuspend()
1354 old_use = dev->power.use_autosuspend; in __pm_runtime_use_autosuspend()
1355 dev->power.use_autosuspend = use; in __pm_runtime_use_autosuspend()
1356 update_autosuspend(dev, old_delay, old_use); in __pm_runtime_use_autosuspend()
1357 spin_unlock_irq(&dev->power.lock); in __pm_runtime_use_autosuspend()
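Typical probe-time wiring of the two setters above, sketched (2000 ms is an arbitrary example value; pm_runtime_use_autosuspend() is the public wrapper passing use == true):

static int foo_attach(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* ms */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
	return 0;
}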
1365 void pm_runtime_init(struct device *dev) in pm_runtime_init() argument
1367 dev->power.runtime_status = RPM_SUSPENDED; in pm_runtime_init()
1368 dev->power.idle_notification = false; in pm_runtime_init()
1370 dev->power.disable_depth = 1; in pm_runtime_init()
1371 atomic_set(&dev->power.usage_count, 0); in pm_runtime_init()
1373 dev->power.runtime_error = 0; in pm_runtime_init()
1375 atomic_set(&dev->power.child_count, 0); in pm_runtime_init()
1376 pm_suspend_ignore_children(dev, false); in pm_runtime_init()
1377 dev->power.runtime_auto = true; in pm_runtime_init()
1379 dev->power.request_pending = false; in pm_runtime_init()
1380 dev->power.request = RPM_REQ_NONE; in pm_runtime_init()
1381 dev->power.deferred_resume = false; in pm_runtime_init()
1382 dev->power.accounting_timestamp = jiffies; in pm_runtime_init()
1383 INIT_WORK(&dev->power.work, pm_runtime_work); in pm_runtime_init()
1385 dev->power.timer_expires = 0; in pm_runtime_init()
1386 setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn, in pm_runtime_init()
1387 (unsigned long)dev); in pm_runtime_init()
1389 init_waitqueue_head(&dev->power.wait_queue); in pm_runtime_init()
1396 void pm_runtime_remove(struct device *dev) in pm_runtime_remove() argument
1398 __pm_runtime_disable(dev, false); in pm_runtime_remove()
1401 if (dev->power.runtime_status == RPM_ACTIVE) in pm_runtime_remove()
1402 pm_runtime_set_suspended(dev); in pm_runtime_remove()
1403 if (dev->power.irq_safe && dev->parent) in pm_runtime_remove()
1404 pm_runtime_put(dev->parent); in pm_runtime_remove()
1419 int pm_runtime_force_suspend(struct device *dev) in pm_runtime_force_suspend() argument
1424 pm_runtime_disable(dev); in pm_runtime_force_suspend()
1425 if (pm_runtime_status_suspended(dev)) in pm_runtime_force_suspend()
1428 callback = RPM_GET_CALLBACK(dev, runtime_suspend); in pm_runtime_force_suspend()
1435 ret = callback(dev); in pm_runtime_force_suspend()
1439 pm_runtime_set_suspended(dev); in pm_runtime_force_suspend()
1442 pm_runtime_enable(dev); in pm_runtime_force_suspend()
1459 int pm_runtime_force_resume(struct device *dev) in pm_runtime_force_resume() argument
1464 callback = RPM_GET_CALLBACK(dev, runtime_resume); in pm_runtime_force_resume()
1471 ret = pm_runtime_set_active(dev); in pm_runtime_force_resume()
1475 ret = callback(dev); in pm_runtime_force_resume()
1477 pm_runtime_set_suspended(dev); in pm_runtime_force_resume()
1481 pm_runtime_mark_last_busy(dev); in pm_runtime_force_resume()
1483 pm_runtime_enable(dev); in pm_runtime_force_resume()
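The intended pairing of pm_runtime_force_suspend()/pm_runtime_force_resume() is as system-sleep callbacks for drivers whose runtime PM callbacks already do all the work; a sketch of that documented pattern, foo_* hypothetical:

static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};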