This source file includes the following definitions.
- __rpm_get_callback
- update_pm_runtime_accounting
- __update_runtime_status
- rpm_get_accounted_time
- pm_runtime_active_time
- pm_runtime_suspended_time
- pm_runtime_deactivate_timer
- pm_runtime_cancel_pending
- pm_runtime_autosuspend_expiration
- dev_memalloc_noio
- pm_runtime_set_memalloc_noio
- rpm_check_suspend_allowed
- rpm_get_suppliers
- rpm_put_suppliers
- __rpm_callback
- rpm_idle
- rpm_callback
- rpm_suspend
- rpm_resume
- pm_runtime_work
- pm_suspend_timer_fn
- pm_schedule_suspend
- __pm_runtime_idle
- __pm_runtime_suspend
- __pm_runtime_resume
- pm_runtime_get_if_in_use
- __pm_runtime_set_status
- __pm_runtime_barrier
- pm_runtime_barrier
- __pm_runtime_disable
- pm_runtime_enable
- pm_runtime_forbid
- pm_runtime_allow
- pm_runtime_no_callbacks
- pm_runtime_irq_safe
- update_autosuspend
- pm_runtime_set_autosuspend_delay
- __pm_runtime_use_autosuspend
- pm_runtime_init
- pm_runtime_reinit
- pm_runtime_remove
- pm_runtime_clean_up_links
- pm_runtime_get_suppliers
- pm_runtime_put_suppliers
- pm_runtime_new_link
- pm_runtime_drop_link
- pm_runtime_need_not_resume
- pm_runtime_force_suspend
- pm_runtime_force_resume
1
2
3
4
5
6
7
8 #include <linux/sched/mm.h>
9 #include <linux/ktime.h>
10 #include <linux/hrtimer.h>
11 #include <linux/export.h>
12 #include <linux/pm_runtime.h>
13 #include <linux/pm_wakeirq.h>
14 #include <trace/events/rpm.h>
15
16 #include "../base.h"
17 #include "power.h"
18
19 typedef int (*pm_callback_t)(struct device *);
20
21 static pm_callback_t __rpm_get_callback(struct device *dev, size_t cb_offset)
22 {
23 pm_callback_t cb;
24 const struct dev_pm_ops *ops;
25
26 if (dev->pm_domain)
27 ops = &dev->pm_domain->ops;
28 else if (dev->type && dev->type->pm)
29 ops = dev->type->pm;
30 else if (dev->class && dev->class->pm)
31 ops = dev->class->pm;
32 else if (dev->bus && dev->bus->pm)
33 ops = dev->bus->pm;
34 else
35 ops = NULL;
36
37 if (ops)
38 cb = *(pm_callback_t *)((void *)ops + cb_offset);
39 else
40 cb = NULL;
41
42 if (!cb && dev->driver && dev->driver->pm)
43 cb = *(pm_callback_t *)((void *)dev->driver->pm + cb_offset);
44
45 return cb;
46 }
47
48 #define RPM_GET_CALLBACK(dev, callback) \
49 __rpm_get_callback(dev, offsetof(struct dev_pm_ops, callback))
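A minimal sketch of how this lookup is used (illustrative; it mirrors the pattern in rpm_suspend()/rpm_resume() and pm_runtime_force_suspend() below rather than quoting them):

	int (*callback)(struct device *);

	callback = RPM_GET_CALLBACK(dev, runtime_suspend);
	retval = callback ? callback(dev) : 0;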
50
51 static int rpm_resume(struct device *dev, int rpmflags);
52 static int rpm_suspend(struct device *dev, int rpmflags);
53
54
55 /**
56 * update_pm_runtime_accounting - Update the time accounting of power states.
57 * @dev: Device to update the accounting for.
58 *
59 * In order to be able to have time accounting of the various power states
60 * (as used by programs such as PowerTOP to show the effectiveness of runtime
61 * PM), we need to track the time spent in each state.  This function must be
62 * called before the runtime_status field is updated, so that the time spent
63 * in the previous state is accounted for correctly.
64 */
65 static void update_pm_runtime_accounting(struct device *dev)
66 {
67 u64 now, last, delta;
68
69 if (dev->power.disable_depth > 0)
70 return;
71
72 last = dev->power.accounting_timestamp;
73
74 now = ktime_get_mono_fast_ns();
75 dev->power.accounting_timestamp = now;
76
77 /*
78 * Because ktime_get_mono_fast_ns() is not monotonic across timekeeping
79 * updates, 'now' may occasionally be behind the last saved timestamp;
80 * skip the update in that case to avoid a negative delta.
81 */
82 if (now < last)
83 return;
84
85 delta = now - last;
86
87 if (dev->power.runtime_status == RPM_SUSPENDED)
88 dev->power.suspended_time += delta;
89 else
90 dev->power.active_time += delta;
91 }
92
93 static void __update_runtime_status(struct device *dev, enum rpm_status status)
94 {
95 update_pm_runtime_accounting(dev);
96 dev->power.runtime_status = status;
97 }
98
99 static u64 rpm_get_accounted_time(struct device *dev, bool suspended)
100 {
101 u64 time;
102 unsigned long flags;
103
104 spin_lock_irqsave(&dev->power.lock, flags);
105
106 update_pm_runtime_accounting(dev);
107 time = suspended ? dev->power.suspended_time : dev->power.active_time;
108
109 spin_unlock_irqrestore(&dev->power.lock, flags);
110
111 return time;
112 }
113
114 u64 pm_runtime_active_time(struct device *dev)
115 {
116 return rpm_get_accounted_time(dev, false);
117 }
118
119 u64 pm_runtime_suspended_time(struct device *dev)
120 {
121 return rpm_get_accounted_time(dev, true);
122 }
123 EXPORT_SYMBOL_GPL(pm_runtime_suspended_time);
124
125 /**
126 * pm_runtime_deactivate_timer - Deactivate given device's suspend timer.
127 * @dev: Device to handle.
128 */
129 static void pm_runtime_deactivate_timer(struct device *dev)
130 {
131 if (dev->power.timer_expires > 0) {
132 hrtimer_try_to_cancel(&dev->power.suspend_timer);
133 dev->power.timer_expires = 0;
134 }
135 }
136
137 /**
138 * pm_runtime_cancel_pending - Deactivate suspend timer and cancel requests.
139 * @dev: Device to handle.
140 */
141 static void pm_runtime_cancel_pending(struct device *dev)
142 {
143 pm_runtime_deactivate_timer(dev);
144
145 /*
146 * Make sure a pending request's work function returns without doing anything.
147 */
148 dev->power.request = RPM_REQ_NONE;
149 }
150
151 /**
152 * pm_runtime_autosuspend_expiration - Get a device's autosuspend-delay expiration time.
153 * @dev: Device.
154 *
155 * Compute the autosuspend-delay expiration time based on the device's
156 * power.last_busy time.  If the delay has already expired or is disabled
157 * (negative) or the power.use_autosuspend flag isn't set, return 0.
158 * Otherwise return the expiration time in nanoseconds.
159 *
160 * This function may be called either with or without dev->power.lock held.
161 * Either way it can be racy, since power.last_busy may be updated at any time.
162 */
163 u64 pm_runtime_autosuspend_expiration(struct device *dev)
164 {
165 int autosuspend_delay;
166 u64 expires;
167
168 if (!dev->power.use_autosuspend)
169 return 0;
170
171 autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay);
172 if (autosuspend_delay < 0)
173 return 0;
174
175 expires = READ_ONCE(dev->power.last_busy);
176 expires += (u64)autosuspend_delay * NSEC_PER_MSEC;
177 if (expires > ktime_get_mono_fast_ns())
178 return expires;
179
180 return 0;
181 }
182 EXPORT_SYMBOL_GPL(pm_runtime_autosuspend_expiration);
183
184 static int dev_memalloc_noio(struct device *dev, void *data)
185 {
186 return dev->power.memalloc_noio;
187 }
188
189
190 /**
191 * pm_runtime_set_memalloc_noio - Set a device's memalloc_noio flag.
192 * @dev: Device to handle.
193 * @enable: True for setting the flag and False for clearing the flag.
194 *
195 * Set the flag for all devices in the path from the device to the
196 * root device in the device tree if @enable is true, otherwise clear
197 * the flag for devices in the path whose siblings don't set the flag.
198 *
199 * The function should only be called by block device, or network
200 * device drivers for solving the deadlock problem during runtime
201 * resume/suspend:
202 *
203 *     If memory allocation with GFP_KERNEL is called inside the runtime
204 *     resume/suspend callback of any one of its ancestors (or the
205 *     block device itself), the deadlock may be triggered inside the
206 *     memory allocation since it might not complete until the block
207 *     device becomes active and the involved page I/O finishes.
208 *     Network devices can be involved in a similar way through iSCSI.
209 *
210 * dev_hotplug_mutex is held in the function to handle hotplug races,
211 * because pm_runtime_set_memalloc_noio() may be called in an async
212 * probe().
213 *
214 * The function should be called between device_add() and device_del()
215 * on the affected device (hotplug/remove).
216 */
217 void pm_runtime_set_memalloc_noio(struct device *dev, bool enable)
218 {
219 static DEFINE_MUTEX(dev_hotplug_mutex);
220
221 mutex_lock(&dev_hotplug_mutex);
222 for (;;) {
223 bool enabled;
224
225
226 spin_lock_irq(&dev->power.lock);
227 enabled = dev->power.memalloc_noio;
228 dev->power.memalloc_noio = enable;
229 spin_unlock_irq(&dev->power.lock);
230
231
232
233
234
235 if (enabled && enable)
236 break;
237
238 dev = dev->parent;
239
240
241
242
243
244
245 if (!dev || (!enable &&
246 device_for_each_child(dev, NULL,
247 dev_memalloc_noio)))
248 break;
249 }
250 mutex_unlock(&dev_hotplug_mutex);
251 }
252 EXPORT_SYMBOL_GPL(pm_runtime_set_memalloc_noio);
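A hedged usage sketch (not part of this file): a block or network device driver whose runtime PM callbacks may be needed to complete I/O would bracket the device's registered lifetime like this, with dev standing for the driver's struct device:

	/* after device_add(dev) in the probe path */
	pm_runtime_set_memalloc_noio(dev, true);

	/* before device_del(dev) in the remove path */
	pm_runtime_set_memalloc_noio(dev, false);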
253
254 /**
255 * rpm_check_suspend_allowed - Test whether a device may be suspended.
256 * @dev: Device to test.
257 */
258 static int rpm_check_suspend_allowed(struct device *dev)
259 {
260 int retval = 0;
261
262 if (dev->power.runtime_error)
263 retval = -EINVAL;
264 else if (dev->power.disable_depth > 0)
265 retval = -EACCES;
266 else if (atomic_read(&dev->power.usage_count) > 0)
267 retval = -EAGAIN;
268 else if (!dev->power.ignore_children &&
269 atomic_read(&dev->power.child_count))
270 retval = -EBUSY;
271
272
273 else if ((dev->power.deferred_resume
274 && dev->power.runtime_status == RPM_SUSPENDING)
275 || (dev->power.request_pending
276 && dev->power.request == RPM_REQ_RESUME))
277 retval = -EAGAIN;
278 else if (__dev_pm_qos_resume_latency(dev) == 0)
279 retval = -EPERM;
280 else if (dev->power.runtime_status == RPM_SUSPENDED)
281 retval = 1;
282
283 return retval;
284 }
285
286 static int rpm_get_suppliers(struct device *dev)
287 {
288 struct device_link *link;
289
290 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
291 device_links_read_lock_held()) {
292 int retval;
293
294 if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
295 READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
296 continue;
297
298 retval = pm_runtime_get_sync(link->supplier);
299
300 if (retval < 0 && retval != -EACCES) {
301 pm_runtime_put_noidle(link->supplier);
302 return retval;
303 }
304 refcount_inc(&link->rpm_active);
305 }
306 return 0;
307 }
308
309 static void rpm_put_suppliers(struct device *dev)
310 {
311 struct device_link *link;
312
313 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
314 device_links_read_lock_held()) {
315 if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
316 continue;
317
318 while (refcount_dec_not_one(&link->rpm_active))
319 pm_runtime_put(link->supplier);
320 }
321 }
322
323 /**
324 * __rpm_callback - Run a given runtime PM callback for a given device.
325 * @cb: Runtime PM callback to run.
326 * @dev: Device to run the callback for.
327 */
328 static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
329 __releases(&dev->power.lock) __acquires(&dev->power.lock)
330 {
331 int retval, idx;
332 bool use_links = dev->power.links_count > 0;
333
334 if (dev->power.irq_safe) {
335 spin_unlock(&dev->power.lock);
336 } else {
337 spin_unlock_irq(&dev->power.lock);
338
339
340 /*
341 * Resume suppliers if necessary.
342 *
343 * The device's runtime PM status cannot change until this routine
344 * returns, so it is safe to read the status outside of the lock.
345 */
346 if (use_links && dev->power.runtime_status == RPM_RESUMING) {
347 idx = device_links_read_lock();
348
349 retval = rpm_get_suppliers(dev);
350 if (retval)
351 goto fail;
352
353 device_links_read_unlock(idx);
354 }
355 }
356
357 retval = cb(dev);
358
359 if (dev->power.irq_safe) {
360 spin_lock(&dev->power.lock);
361 } else {
362
363 /*
364 * If the device is suspending and the callback has returned success,
365 * drop the usage counters of the suppliers that have been reference
366 * counted on its resume.
367 * Do that if the resume fails too.
368 */
369 if (use_links
370 && ((dev->power.runtime_status == RPM_SUSPENDING && !retval)
371 || (dev->power.runtime_status == RPM_RESUMING && retval))) {
372 idx = device_links_read_lock();
373
374 fail:
375 rpm_put_suppliers(dev);
376
377 device_links_read_unlock(idx);
378 }
379
380 spin_lock_irq(&dev->power.lock);
381 }
382
383 return retval;
384 }
385
386
387 /**
388 * rpm_idle - Notify device bus type if the device can be suspended.
389 * @dev: Device to notify the bus type about.
390 * @rpmflags: Flag bits.
391 *
392 * Check if the device's runtime PM status allows it to be suspended.  If
393 * another idle notification has been started earlier, return immediately.  If
394 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
395 * run the ->runtime_idle() callback directly and, on success, try to suspend.
396 *
397 * This function must be called under dev->power.lock with interrupts disabled.
398 */
399 static int rpm_idle(struct device *dev, int rpmflags)
400 {
401 int (*callback)(struct device *);
402 int retval;
403
404 trace_rpm_idle_rcuidle(dev, rpmflags);
405 retval = rpm_check_suspend_allowed(dev);
406 if (retval < 0)
407 ;
408
409
410 else if (dev->power.runtime_status != RPM_ACTIVE)
411 retval = -EAGAIN;
412
413
414
415
416
417 else if (dev->power.request_pending &&
418 dev->power.request > RPM_REQ_IDLE)
419 retval = -EAGAIN;
420
421
422 else if (dev->power.idle_notification)
423 retval = -EINPROGRESS;
424 if (retval)
425 goto out;
426
427
428 dev->power.request = RPM_REQ_NONE;
429
430 if (dev->power.no_callbacks)
431 goto out;
432
433
434 if (rpmflags & RPM_ASYNC) {
435 dev->power.request = RPM_REQ_IDLE;
436 if (!dev->power.request_pending) {
437 dev->power.request_pending = true;
438 queue_work(pm_wq, &dev->power.work);
439 }
440 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, 0);
441 return 0;
442 }
443
444 dev->power.idle_notification = true;
445
446 callback = RPM_GET_CALLBACK(dev, runtime_idle);
447
448 if (callback)
449 retval = __rpm_callback(callback, dev);
450
451 dev->power.idle_notification = false;
452 wake_up_all(&dev->power.wait_queue);
453
454 out:
455 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
456 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
457 }
458
459 /**
460 * rpm_callback - Run a given runtime PM callback for a given device.
461 * @cb: Runtime PM callback to run.
462 * @dev: Device to run the callback for.
463 */
464 static int rpm_callback(int (*cb)(struct device *), struct device *dev)
465 {
466 int retval;
467
468 if (!cb)
469 return -ENOSYS;
470
471 if (dev->power.memalloc_noio) {
472 unsigned int noio_flag;
473
474 /*
475 * Deadlock might be caused if memory allocation with GFP_KERNEL
476 * happens inside the runtime_suspend and runtime_resume callbacks
477 * of one of the block device's ancestors, or of the block device
478 * itself.  Network devices may effectively be part of an iSCSI
479 * block device, so network devices and their ancestors should be
480 * marked memalloc_noio too.  In that case run the callback with
481 * I/O-triggering allocations suppressed.
482 */
483 noio_flag = memalloc_noio_save();
484 retval = __rpm_callback(cb, dev);
485 memalloc_noio_restore(noio_flag);
486 } else {
487 retval = __rpm_callback(cb, dev);
488 }
489
490 dev->power.runtime_error = retval;
491 return retval != -EACCES ? retval : -EIO;
492 }
493
494
495 /**
496 * rpm_suspend - Carry out runtime suspend of given device.
497 * @dev: Device to suspend.
498 * @rpmflags: Flag bits.
499 *
500 * Check if the device's runtime PM status allows it to be suspended.
501 * Cancel a pending idle notification, autosuspend or suspend.  If
502 * another suspend has been started earlier, either return immediately
503 * or wait for it to finish, depending on the RPM_NOWAIT and RPM_ASYNC
504 * flags.  If the RPM_ASYNC flag is set then queue a suspend request;
505 * otherwise run the ->runtime_suspend() callback directly.  When the
506 * callback succeeds, carry out a deferred resume if one was requested
507 * while it was running, otherwise send an asynchronous idle notification
508 * for the device's parent (unless parent->power.ignore_children or
509 * dev->power.irq_safe is set).  If the callback fails with -EAGAIN or
510 * -EBUSY, and the RPM_AUTO flag is set and the next autosuspend-delay
511 * expiration time is in the future, schedule another autosuspend attempt.
512 *
513 * This function must be called under dev->power.lock with interrupts disabled.
514 */
515 static int rpm_suspend(struct device *dev, int rpmflags)
516 __releases(&dev->power.lock) __acquires(&dev->power.lock)
517 {
518 int (*callback)(struct device *);
519 struct device *parent = NULL;
520 int retval;
521
522 trace_rpm_suspend_rcuidle(dev, rpmflags);
523
524 repeat:
525 retval = rpm_check_suspend_allowed(dev);
526
527 if (retval < 0)
528 ;
529
530
531 else if (dev->power.runtime_status == RPM_RESUMING &&
532 !(rpmflags & RPM_ASYNC))
533 retval = -EAGAIN;
534 if (retval)
535 goto out;
536
537
538 if ((rpmflags & RPM_AUTO)
539 && dev->power.runtime_status != RPM_SUSPENDING) {
540 u64 expires = pm_runtime_autosuspend_expiration(dev);
541
542 if (expires != 0) {
543
544 dev->power.request = RPM_REQ_NONE;
545
546
547 /*
548 * Optimization: if the timer is already running and set to expire
549 * at or before the autosuspend delay, avoid the overhead of
550 * resetting it.  Just let it expire; pm_suspend_timer_fn() will
551 * take care of the rest.
552 */
553 if (!(dev->power.timer_expires &&
554 dev->power.timer_expires <= expires)) {
555 /*
556 * We add a slack of 25% to gather wakeups
557 * without sacrificing the granularity.
558 */
559 u64 slack = (u64)READ_ONCE(dev->power.autosuspend_delay) *
560 (NSEC_PER_MSEC >> 2);
561
562 dev->power.timer_expires = expires;
563 hrtimer_start_range_ns(&dev->power.suspend_timer,
564 ns_to_ktime(expires),
565 slack,
566 HRTIMER_MODE_ABS);
567 }
568 dev->power.timer_autosuspends = 1;
569 goto out;
570 }
571 }
572
573
574 pm_runtime_cancel_pending(dev);
575
576 if (dev->power.runtime_status == RPM_SUSPENDING) {
577 DEFINE_WAIT(wait);
578
579 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
580 retval = -EINPROGRESS;
581 goto out;
582 }
583
584 if (dev->power.irq_safe) {
585 spin_unlock(&dev->power.lock);
586
587 cpu_relax();
588
589 spin_lock(&dev->power.lock);
590 goto repeat;
591 }
592
593
594 for (;;) {
595 prepare_to_wait(&dev->power.wait_queue, &wait,
596 TASK_UNINTERRUPTIBLE);
597 if (dev->power.runtime_status != RPM_SUSPENDING)
598 break;
599
600 spin_unlock_irq(&dev->power.lock);
601
602 schedule();
603
604 spin_lock_irq(&dev->power.lock);
605 }
606 finish_wait(&dev->power.wait_queue, &wait);
607 goto repeat;
608 }
609
610 if (dev->power.no_callbacks)
611 goto no_callback;
612
613
614 if (rpmflags & RPM_ASYNC) {
615 dev->power.request = (rpmflags & RPM_AUTO) ?
616 RPM_REQ_AUTOSUSPEND : RPM_REQ_SUSPEND;
617 if (!dev->power.request_pending) {
618 dev->power.request_pending = true;
619 queue_work(pm_wq, &dev->power.work);
620 }
621 goto out;
622 }
623
624 __update_runtime_status(dev, RPM_SUSPENDING);
625
626 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
627
628 dev_pm_enable_wake_irq_check(dev, true);
629 retval = rpm_callback(callback, dev);
630 if (retval)
631 goto fail;
632
633 no_callback:
634 __update_runtime_status(dev, RPM_SUSPENDED);
635 pm_runtime_deactivate_timer(dev);
636
637 if (dev->parent) {
638 parent = dev->parent;
639 atomic_add_unless(&parent->power.child_count, -1, 0);
640 }
641 wake_up_all(&dev->power.wait_queue);
642
643 if (dev->power.deferred_resume) {
644 dev->power.deferred_resume = false;
645 rpm_resume(dev, 0);
646 retval = -EAGAIN;
647 goto out;
648 }
649
650
651 if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
652 spin_unlock(&dev->power.lock);
653
654 spin_lock(&parent->power.lock);
655 rpm_idle(parent, RPM_ASYNC);
656 spin_unlock(&parent->power.lock);
657
658 spin_lock(&dev->power.lock);
659 }
660
661 out:
662 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
663
664 return retval;
665
666 fail:
667 dev_pm_disable_wake_irq_check(dev);
668 __update_runtime_status(dev, RPM_ACTIVE);
669 dev->power.deferred_resume = false;
670 wake_up_all(&dev->power.wait_queue);
671
672 if (retval == -EAGAIN || retval == -EBUSY) {
673 dev->power.runtime_error = 0;
674
675 /*
676 * If the callback routine failed an autosuspend, and if the
677 * last_busy time has been updated so that there is a new
678 * autosuspend expiration time, automatically reschedule
679 * another autosuspend.
680 */
681 if ((rpmflags & RPM_AUTO) &&
682 pm_runtime_autosuspend_expiration(dev) != 0)
683 goto repeat;
684 } else {
685 pm_runtime_cancel_pending(dev);
686 }
687 goto out;
688 }
689
690
691 /**
692 * rpm_resume - Carry out runtime resume of given device.
693 * @dev: Device to resume.
694 * @rpmflags: Flag bits.
695 *
696 * Check if the device's runtime PM status allows it to be resumed.  Cancel
697 * any scheduled or pending requests.  If another resume has been started
698 * earlier, either return immediately or wait for it to finish, depending on
699 * whether the RPM_NOWAIT and RPM_ASYNC flags are set.  Similarly, if a
700 * suspend is running in parallel with this function, either mark it for a
701 * "deferred resume" or wait for it to finish.  If the RPM_ASYNC flag is set
702 * then queue a resume request; otherwise run the ->runtime_resume() callback
703 * directly and queue an idle notification for the device if it succeeded.
704 *
705 * This function must be called under dev->power.lock with interrupts disabled.
706 */
707 static int rpm_resume(struct device *dev, int rpmflags)
708 __releases(&dev->power.lock) __acquires(&dev->power.lock)
709 {
710 int (*callback)(struct device *);
711 struct device *parent = NULL;
712 int retval = 0;
713
714 trace_rpm_resume_rcuidle(dev, rpmflags);
715
716 repeat:
717 if (dev->power.runtime_error)
718 retval = -EINVAL;
719 else if (dev->power.disable_depth == 1 && dev->power.is_suspended
720 && dev->power.runtime_status == RPM_ACTIVE)
721 retval = 1;
722 else if (dev->power.disable_depth > 0)
723 retval = -EACCES;
724 if (retval)
725 goto out;
726
727
728 /*
729 * Other scheduled or pending requests need to be canceled.  Small
730 * optimization: if an autosuspend timer is running, leave it running
731 * rather than cancelling it now only to restart it again shortly.
732 */
733 dev->power.request = RPM_REQ_NONE;
734 if (!dev->power.timer_autosuspends)
735 pm_runtime_deactivate_timer(dev);
736
737 if (dev->power.runtime_status == RPM_ACTIVE) {
738 retval = 1;
739 goto out;
740 }
741
742 if (dev->power.runtime_status == RPM_RESUMING
743 || dev->power.runtime_status == RPM_SUSPENDING) {
744 DEFINE_WAIT(wait);
745
746 if (rpmflags & (RPM_ASYNC | RPM_NOWAIT)) {
747 if (dev->power.runtime_status == RPM_SUSPENDING)
748 dev->power.deferred_resume = true;
749 else
750 retval = -EINPROGRESS;
751 goto out;
752 }
753
754 if (dev->power.irq_safe) {
755 spin_unlock(&dev->power.lock);
756
757 cpu_relax();
758
759 spin_lock(&dev->power.lock);
760 goto repeat;
761 }
762
763
764 for (;;) {
765 prepare_to_wait(&dev->power.wait_queue, &wait,
766 TASK_UNINTERRUPTIBLE);
767 if (dev->power.runtime_status != RPM_RESUMING
768 && dev->power.runtime_status != RPM_SUSPENDING)
769 break;
770
771 spin_unlock_irq(&dev->power.lock);
772
773 schedule();
774
775 spin_lock_irq(&dev->power.lock);
776 }
777 finish_wait(&dev->power.wait_queue, &wait);
778 goto repeat;
779 }
780
781 /*
782 * See if we can skip waking up the parent.  This is safe only if
783 * power.no_callbacks is set, because otherwise we don't know whether
784 * the resume will actually succeed.
785 */
786 if (dev->power.no_callbacks && !parent && dev->parent) {
787 spin_lock_nested(&dev->parent->power.lock, SINGLE_DEPTH_NESTING);
788 if (dev->parent->power.disable_depth > 0
789 || dev->parent->power.ignore_children
790 || dev->parent->power.runtime_status == RPM_ACTIVE) {
791 atomic_inc(&dev->parent->power.child_count);
792 spin_unlock(&dev->parent->power.lock);
793 retval = 1;
794 goto no_callback;
795 }
796 spin_unlock(&dev->parent->power.lock);
797 }
798
799
800 if (rpmflags & RPM_ASYNC) {
801 dev->power.request = RPM_REQ_RESUME;
802 if (!dev->power.request_pending) {
803 dev->power.request_pending = true;
804 queue_work(pm_wq, &dev->power.work);
805 }
806 retval = 0;
807 goto out;
808 }
809
810 if (!parent && dev->parent) {
811 /*
812 * Increment the parent's usage counter and resume it if necessary.
813 * Not needed if dev is irq-safe; then the parent is permanently
814 * resumed.
815 */
816 parent = dev->parent;
817 if (dev->power.irq_safe)
818 goto skip_parent;
819 spin_unlock(&dev->power.lock);
820
821 pm_runtime_get_noresume(parent);
822
823 spin_lock(&parent->power.lock);
824
825
826
827
828 if (!parent->power.disable_depth
829 && !parent->power.ignore_children) {
830 rpm_resume(parent, 0);
831 if (parent->power.runtime_status != RPM_ACTIVE)
832 retval = -EBUSY;
833 }
834 spin_unlock(&parent->power.lock);
835
836 spin_lock(&dev->power.lock);
837 if (retval)
838 goto out;
839 goto repeat;
840 }
841 skip_parent:
842
843 if (dev->power.no_callbacks)
844 goto no_callback;
845
846 __update_runtime_status(dev, RPM_RESUMING);
847
848 callback = RPM_GET_CALLBACK(dev, runtime_resume);
849
850 dev_pm_disable_wake_irq_check(dev);
851 retval = rpm_callback(callback, dev);
852 if (retval) {
853 __update_runtime_status(dev, RPM_SUSPENDED);
854 pm_runtime_cancel_pending(dev);
855 dev_pm_enable_wake_irq_check(dev, false);
856 } else {
857 no_callback:
858 __update_runtime_status(dev, RPM_ACTIVE);
859 pm_runtime_mark_last_busy(dev);
860 if (parent)
861 atomic_inc(&parent->power.child_count);
862 }
863 wake_up_all(&dev->power.wait_queue);
864
865 if (retval >= 0)
866 rpm_idle(dev, RPM_ASYNC);
867
868 out:
869 if (parent && !dev->power.irq_safe) {
870 spin_unlock_irq(&dev->power.lock);
871
872 pm_runtime_put(parent);
873
874 spin_lock_irq(&dev->power.lock);
875 }
876
877 trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
878
879 return retval;
880 }
881
882 /**
883 * pm_runtime_work - Universal runtime PM work function.
884 * @work: Work structure used for scheduling the execution of this function.
885 *
886 * Use @work to get the device object the work is to be done for, determine
887 * what is to be done and execute the appropriate runtime PM function.
888 */
889 static void pm_runtime_work(struct work_struct *work)
890 {
891 struct device *dev = container_of(work, struct device, power.work);
892 enum rpm_request req;
893
894 spin_lock_irq(&dev->power.lock);
895
896 if (!dev->power.request_pending)
897 goto out;
898
899 req = dev->power.request;
900 dev->power.request = RPM_REQ_NONE;
901 dev->power.request_pending = false;
902
903 switch (req) {
904 case RPM_REQ_NONE:
905 break;
906 case RPM_REQ_IDLE:
907 rpm_idle(dev, RPM_NOWAIT);
908 break;
909 case RPM_REQ_SUSPEND:
910 rpm_suspend(dev, RPM_NOWAIT);
911 break;
912 case RPM_REQ_AUTOSUSPEND:
913 rpm_suspend(dev, RPM_NOWAIT | RPM_AUTO);
914 break;
915 case RPM_REQ_RESUME:
916 rpm_resume(dev, RPM_NOWAIT);
917 break;
918 }
919
920 out:
921 spin_unlock_irq(&dev->power.lock);
922 }
923
924 /**
925 * pm_suspend_timer_fn - Timer function for pm_schedule_suspend().
926 * @timer: hrtimer used by pm_schedule_suspend().
927 *
928 * Check if the time is right and queue a suspend request.
929 */
930 static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer)
931 {
932 struct device *dev = container_of(timer, struct device, power.suspend_timer);
933 unsigned long flags;
934 u64 expires;
935
936 spin_lock_irqsave(&dev->power.lock, flags);
937
938 expires = dev->power.timer_expires;
939
940
941
942
943 if (expires > 0 && expires < ktime_get_mono_fast_ns()) {
944 dev->power.timer_expires = 0;
945 rpm_suspend(dev, dev->power.timer_autosuspends ?
946 (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC);
947 }
948
949 spin_unlock_irqrestore(&dev->power.lock, flags);
950
951 return HRTIMER_NORESTART;
952 }
953
954 /**
955 * pm_schedule_suspend - Set up a timer to submit a suspend request in future.
956 * @dev: Device to suspend.
957 * @delay: Time to wait before submitting a suspend request, in milliseconds.
958 */
959 int pm_schedule_suspend(struct device *dev, unsigned int delay)
960 {
961 unsigned long flags;
962 u64 expires;
963 int retval;
964
965 spin_lock_irqsave(&dev->power.lock, flags);
966
967 if (!delay) {
968 retval = rpm_suspend(dev, RPM_ASYNC);
969 goto out;
970 }
971
972 retval = rpm_check_suspend_allowed(dev);
973 if (retval)
974 goto out;
975
976
977 pm_runtime_cancel_pending(dev);
978
979 expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC;
980 dev->power.timer_expires = expires;
981 dev->power.timer_autosuspends = 0;
982 hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS);
983
984 out:
985 spin_unlock_irqrestore(&dev->power.lock, flags);
986
987 return retval;
988 }
989 EXPORT_SYMBOL_GPL(pm_schedule_suspend);
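An illustrative call (a sketch, not code from this file): a driver that expects its device to stay idle can ask for an asynchronous suspend attempt a little later; the 100 ms delay is an arbitrary example value:

	retval = pm_schedule_suspend(dev, 100);	/* delay in milliseconds */
	if (retval < 0)
		dev_dbg(dev, "suspend not scheduled: %d\n", retval);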
990
991 /**
992 * __pm_runtime_idle - Entry point for runtime idle operations.
993 * @dev: Device to send idle notification for.
994 * @rpmflags: Flag bits.
995 *
996 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
997 * return immediately if it is larger than zero.  Then carry out an idle
998 * notification, either synchronous or asynchronous.
999 *
1000 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1001 * or if pm_runtime_irq_safe() has been called.
1002 */
1003 int __pm_runtime_idle(struct device *dev, int rpmflags)
1004 {
1005 unsigned long flags;
1006 int retval;
1007
1008 if (rpmflags & RPM_GET_PUT) {
1009 if (!atomic_dec_and_test(&dev->power.usage_count))
1010 return 0;
1011 }
1012
1013 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1014
1015 spin_lock_irqsave(&dev->power.lock, flags);
1016 retval = rpm_idle(dev, rpmflags);
1017 spin_unlock_irqrestore(&dev->power.lock, flags);
1018
1019 return retval;
1020 }
1021 EXPORT_SYMBOL_GPL(__pm_runtime_idle);
1022
1023 /**
1024 * __pm_runtime_suspend - Entry point for runtime put/suspend operations.
1025 * @dev: Device to suspend.
1026 * @rpmflags: Flag bits.
1027 *
1028 * If the RPM_GET_PUT flag is set, decrement the device's usage count and
1029 * return immediately if it is larger than zero.  Then carry out a suspend,
1030 * either synchronous or asynchronous.
1031 *
1032 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1033 * or if pm_runtime_irq_safe() has been called.
1034 */
1035 int __pm_runtime_suspend(struct device *dev, int rpmflags)
1036 {
1037 unsigned long flags;
1038 int retval;
1039
1040 if (rpmflags & RPM_GET_PUT) {
1041 if (!atomic_dec_and_test(&dev->power.usage_count))
1042 return 0;
1043 }
1044
1045 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
1046
1047 spin_lock_irqsave(&dev->power.lock, flags);
1048 retval = rpm_suspend(dev, rpmflags);
1049 spin_unlock_irqrestore(&dev->power.lock, flags);
1050
1051 return retval;
1052 }
1053 EXPORT_SYMBOL_GPL(__pm_runtime_suspend);
1054
1055 /**
1056 * __pm_runtime_resume - Entry point for runtime resume operations.
1057 * @dev: Device to resume.
1058 * @rpmflags: Flag bits.
1059 *
1060 * If the RPM_GET_PUT flag is set, increment the device's usage count.  Then
1061 * carry out a resume, either synchronous or asynchronous.
1062 *
1063 * This routine may be called in atomic context if the RPM_ASYNC flag is set,
1064 * or if pm_runtime_irq_safe() has been called.
1065 */
1066 int __pm_runtime_resume(struct device *dev, int rpmflags)
1067 {
1068 unsigned long flags;
1069 int retval;
1070
1071 might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
1072 dev->power.runtime_status != RPM_ACTIVE);
1073
1074 if (rpmflags & RPM_GET_PUT)
1075 atomic_inc(&dev->power.usage_count);
1076
1077 spin_lock_irqsave(&dev->power.lock, flags);
1078 retval = rpm_resume(dev, rpmflags);
1079 spin_unlock_irqrestore(&dev->power.lock, flags);
1080
1081 return retval;
1082 }
1083 EXPORT_SYMBOL_GPL(__pm_runtime_resume);
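These entry points are normally reached through the static inline wrappers declared in include/linux/pm_runtime.h (pm_runtime_get_sync(), pm_runtime_put_autosuspend() and friends). A typical driver I/O path, sketched under the assumption that autosuspend is in use, looks roughly like this:

	retval = pm_runtime_get_sync(dev);
	if (retval < 0) {
		pm_runtime_put_noidle(dev);
		return retval;
	}

	/* ... access the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);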
1084
1085 /**
1086 * pm_runtime_get_if_in_use - Conditionally bump up the device's usage counter.
1087 * @dev: Device to handle.
1088 *
1089 * Return -EINVAL if runtime PM is disabled for the device.
1090 *
1091 * Otherwise return 1 if the device's runtime PM status is RPM_ACTIVE, in
1092 * which case its usage counter is incremented, or 0 otherwise (in which
1093 * case the usage counter is left alone).
1094 */
1095 int pm_runtime_get_if_in_use(struct device *dev)
1096 {
1097 unsigned long flags;
1098 int retval;
1099
1100 spin_lock_irqsave(&dev->power.lock, flags);
1101 retval = dev->power.disable_depth > 0 ? -EINVAL :
1102 dev->power.runtime_status == RPM_ACTIVE
1103 && atomic_inc_not_zero(&dev->power.usage_count);
1104 spin_unlock_irqrestore(&dev->power.lock, flags);
1105 return retval;
1106 }
1107 EXPORT_SYMBOL_GPL(pm_runtime_get_if_in_use);
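A hedged sketch of the intended calling pattern (not from this file): a hot path that only wants to touch the hardware if it is already powered up can use the conditional get, and must balance a successful get with a put:

	if (pm_runtime_get_if_in_use(dev) <= 0)
		return;		/* not active (or runtime PM disabled), skip the access */

	/* ... read or poke registers ... */

	pm_runtime_put(dev);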
1108
1109
1110 /**
1111 * __pm_runtime_set_status - Set runtime PM status of a device.
1112 * @dev: Device to handle.
1113 * @status: New runtime PM status of the device.
1114 *
1115 * If runtime PM of the device is disabled or its power.runtime_error field is
1116 * different from zero, the status may be changed either to RPM_ACTIVE, or to
1117 * RPM_SUSPENDED, as long as that reflects the actual state of the device.
1118 * However, if the device has a parent and the parent is not active, and the
1119 * parent's power.ignore_children flag is unset, the device's status cannot be
1120 * set to RPM_ACTIVE, so -EBUSY is returned in that case.
1121 *
1122 * If successful, __pm_runtime_set_status() clears the power.runtime_error
1123 * field of the device and returns 0.
1124 *
1125 * Note that runtime PM of the device must be disabled (or in the error
1126 * state) when this function is called; the disable depth is incremented
1127 * internally and dropped again via pm_runtime_enable() before returning.
1128 *
1129 * If the new status is RPM_ACTIVE, the device's suppliers are resumed and
1130 * reference counted before the change; if it is RPM_SUSPENDED, those
1131 * supplier references are dropped after the change.
1132 */
1133 int __pm_runtime_set_status(struct device *dev, unsigned int status)
1134 {
1135 struct device *parent = dev->parent;
1136 bool notify_parent = false;
1137 int error = 0;
1138
1139 if (status != RPM_ACTIVE && status != RPM_SUSPENDED)
1140 return -EINVAL;
1141
1142 spin_lock_irq(&dev->power.lock);
1143
1144
1145
1146
1147
1148 if (dev->power.runtime_error || dev->power.disable_depth)
1149 dev->power.disable_depth++;
1150 else
1151 error = -EAGAIN;
1152
1153 spin_unlock_irq(&dev->power.lock);
1154
1155 if (error)
1156 return error;
1157
1158
1159
1160
1161
1162
1163
1164 if (status == RPM_ACTIVE) {
1165 int idx = device_links_read_lock();
1166
1167 error = rpm_get_suppliers(dev);
1168 if (error)
1169 status = RPM_SUSPENDED;
1170
1171 device_links_read_unlock(idx);
1172 }
1173
1174 spin_lock_irq(&dev->power.lock);
1175
1176 if (dev->power.runtime_status == status || !parent)
1177 goto out_set;
1178
1179 if (status == RPM_SUSPENDED) {
1180 atomic_add_unless(&parent->power.child_count, -1, 0);
1181 notify_parent = !parent->power.ignore_children;
1182 } else {
1183 spin_lock_nested(&parent->power.lock, SINGLE_DEPTH_NESTING);
1184
1185
1186
1187
1188
1189
1190 if (!parent->power.disable_depth
1191 && !parent->power.ignore_children
1192 && parent->power.runtime_status != RPM_ACTIVE) {
1193 dev_err(dev, "runtime PM trying to activate child device %s but parent (%s) is not active\n",
1194 dev_name(dev),
1195 dev_name(parent));
1196 error = -EBUSY;
1197 } else if (dev->power.runtime_status == RPM_SUSPENDED) {
1198 atomic_inc(&parent->power.child_count);
1199 }
1200
1201 spin_unlock(&parent->power.lock);
1202
1203 if (error) {
1204 status = RPM_SUSPENDED;
1205 goto out;
1206 }
1207 }
1208
1209 out_set:
1210 __update_runtime_status(dev, status);
1211 if (!error)
1212 dev->power.runtime_error = 0;
1213
1214 out:
1215 spin_unlock_irq(&dev->power.lock);
1216
1217 if (notify_parent)
1218 pm_request_idle(parent);
1219
1220 if (status == RPM_SUSPENDED) {
1221 int idx = device_links_read_lock();
1222
1223 rpm_put_suppliers(dev);
1224
1225 device_links_read_unlock(idx);
1226 }
1227
1228 pm_runtime_enable(dev);
1229
1230 return error;
1231 }
1232 EXPORT_SYMBOL_GPL(__pm_runtime_set_status);
1233
1234 /**
1235 * __pm_runtime_barrier - Cancel pending requests and wait for completions.
1236 * @dev: Device to handle.
1237 *
1238 * Flush all pending requests for the device from pm_wq and wait for all
1239 * runtime PM operations involving the device in progress to complete.
1240 *
1241 * Should be called under dev->power.lock with interrupts disabled.
1242 */
1243 static void __pm_runtime_barrier(struct device *dev)
1244 {
1245 pm_runtime_deactivate_timer(dev);
1246
1247 if (dev->power.request_pending) {
1248 dev->power.request = RPM_REQ_NONE;
1249 spin_unlock_irq(&dev->power.lock);
1250
1251 cancel_work_sync(&dev->power.work);
1252
1253 spin_lock_irq(&dev->power.lock);
1254 dev->power.request_pending = false;
1255 }
1256
1257 if (dev->power.runtime_status == RPM_SUSPENDING
1258 || dev->power.runtime_status == RPM_RESUMING
1259 || dev->power.idle_notification) {
1260 DEFINE_WAIT(wait);
1261
1262
1263 for (;;) {
1264 prepare_to_wait(&dev->power.wait_queue, &wait,
1265 TASK_UNINTERRUPTIBLE);
1266 if (dev->power.runtime_status != RPM_SUSPENDING
1267 && dev->power.runtime_status != RPM_RESUMING
1268 && !dev->power.idle_notification)
1269 break;
1270 spin_unlock_irq(&dev->power.lock);
1271
1272 schedule();
1273
1274 spin_lock_irq(&dev->power.lock);
1275 }
1276 finish_wait(&dev->power.wait_queue, &wait);
1277 }
1278 }
1279
1280 /**
1281 * pm_runtime_barrier - Flush pending requests and wait for completions.
1282 * @dev: Device to handle.
1283 *
1284 * Prevent the device from being suspended by incrementing its usage counter
1285 * and if there's a pending resume request for the device, wake the device up.
1286 * Next, make sure that all pending requests for the device have been flushed
1287 * from pm_wq and wait for all runtime PM operations involving the device in
1288 * progress to complete.
1289 *
1290 * Return value:
1291 * 1, if there was a resume request pending and the device had to be woken up,
1292 * 0, otherwise
1293 */
1294 int pm_runtime_barrier(struct device *dev)
1295 {
1296 int retval = 0;
1297
1298 pm_runtime_get_noresume(dev);
1299 spin_lock_irq(&dev->power.lock);
1300
1301 if (dev->power.request_pending
1302 && dev->power.request == RPM_REQ_RESUME) {
1303 rpm_resume(dev, 0);
1304 retval = 1;
1305 }
1306
1307 __pm_runtime_barrier(dev);
1308
1309 spin_unlock_irq(&dev->power.lock);
1310 pm_runtime_put_noidle(dev);
1311
1312 return retval;
1313 }
1314 EXPORT_SYMBOL_GPL(pm_runtime_barrier);
1315
1316 /**
1317 * __pm_runtime_disable - Disable runtime PM of a device.
1318 * @dev: Device to handle.
1319 * @check_resume: If set, check if there's a resume request for the device.
1320 *
1321 * Increment power.disable_depth for the device and if it was zero previously,
1322 * cancel all pending runtime PM requests for the device and wait for all
1323 * operations in progress to complete.  The device can be either active or
1324 * suspended after its runtime PM has been disabled.
1325 *
1326 * If @check_resume is set and there's a resume request pending when
1327 * __pm_runtime_disable() is called and power.disable_depth is zero, the
1328 * function will wake up the device before disabling its runtime PM.
1329 */
1330 void __pm_runtime_disable(struct device *dev, bool check_resume)
1331 {
1332 spin_lock_irq(&dev->power.lock);
1333
1334 if (dev->power.disable_depth > 0) {
1335 dev->power.disable_depth++;
1336 goto out;
1337 }
1338
1339
1340
1341
1342
1343
1344 if (check_resume && dev->power.request_pending
1345 && dev->power.request == RPM_REQ_RESUME) {
1346
1347
1348
1349
1350 pm_runtime_get_noresume(dev);
1351
1352 rpm_resume(dev, 0);
1353
1354 pm_runtime_put_noidle(dev);
1355 }
1356
1357
1358 update_pm_runtime_accounting(dev);
1359
1360 if (!dev->power.disable_depth++)
1361 __pm_runtime_barrier(dev);
1362
1363 out:
1364 spin_unlock_irq(&dev->power.lock);
1365 }
1366 EXPORT_SYMBOL_GPL(__pm_runtime_disable);
1367
1368 /**
1369 * pm_runtime_enable - Enable runtime PM of a device.
1370 * @dev: Device to handle.
1371 */
1372 void pm_runtime_enable(struct device *dev)
1373 {
1374 unsigned long flags;
1375
1376 spin_lock_irqsave(&dev->power.lock, flags);
1377
1378 if (dev->power.disable_depth > 0) {
1379 dev->power.disable_depth--;
1380
1381
1382 if (!dev->power.disable_depth)
1383 dev->power.accounting_timestamp = ktime_get_mono_fast_ns();
1384 } else {
1385 dev_warn(dev, "Unbalanced %s!\n", __func__);
1386 }
1387
1388 WARN(!dev->power.disable_depth &&
1389 dev->power.runtime_status == RPM_SUSPENDED &&
1390 !dev->power.ignore_children &&
1391 atomic_read(&dev->power.child_count) > 0,
1392 "Enabling runtime PM for inactive device (%s) with active children\n",
1393 dev_name(dev));
1394
1395 spin_unlock_irqrestore(&dev->power.lock, flags);
1396 }
1397 EXPORT_SYMBOL_GPL(pm_runtime_enable);
1398
1399 /**
1400 * pm_runtime_forbid - Block runtime PM of a device.
1401 * @dev: Device to handle.
1402 *
1403 * Increase the device's usage count and clear its power.runtime_auto flag,
1404 * so that it cannot be suspended at run time until pm_runtime_allow() is
1405 * called for it.
1406 */
1407 void pm_runtime_forbid(struct device *dev)
1408 {
1409 spin_lock_irq(&dev->power.lock);
1410 if (!dev->power.runtime_auto)
1411 goto out;
1412
1413 dev->power.runtime_auto = false;
1414 atomic_inc(&dev->power.usage_count);
1415 rpm_resume(dev, 0);
1416
1417 out:
1418 spin_unlock_irq(&dev->power.lock);
1419 }
1420 EXPORT_SYMBOL_GPL(pm_runtime_forbid);
1421
1422 /**
1423 * pm_runtime_allow - Unblock runtime PM of a device.
1424 * @dev: Device to handle.
1425 *
1426 * Decrease the device's usage count and set its power.runtime_auto flag.
1427 */
1428 void pm_runtime_allow(struct device *dev)
1429 {
1430 spin_lock_irq(&dev->power.lock);
1431 if (dev->power.runtime_auto)
1432 goto out;
1433
1434 dev->power.runtime_auto = true;
1435 if (atomic_dec_and_test(&dev->power.usage_count))
1436 rpm_idle(dev, RPM_AUTO | RPM_ASYNC);
1437
1438 out:
1439 spin_unlock_irq(&dev->power.lock);
1440 }
1441 EXPORT_SYMBOL_GPL(pm_runtime_allow);
1442
1443 /**
1444 * pm_runtime_no_callbacks - Ignore runtime PM callbacks for a device.
1445 * @dev: Device to handle.
1446 *
1447 * Set the power.no_callbacks flag, which tells the PM core that this
1448 * device is power-managed through its parent and has no runtime PM
1449 * callbacks of its own; its runtime sysfs attributes will be removed.
1450 */
1451 void pm_runtime_no_callbacks(struct device *dev)
1452 {
1453 spin_lock_irq(&dev->power.lock);
1454 dev->power.no_callbacks = 1;
1455 spin_unlock_irq(&dev->power.lock);
1456 if (device_is_registered(dev))
1457 rpm_sysfs_remove(dev);
1458 }
1459 EXPORT_SYMBOL_GPL(pm_runtime_no_callbacks);
1460
1461 /**
1462 * pm_runtime_irq_safe - Leave interrupts disabled during callbacks.
1463 * @dev: Device to handle.
1464 *
1465 * Set the power.irq_safe flag, which tells the PM core that the
1466 * ->runtime_suspend() and ->runtime_resume() callbacks for this device should
1467 * always be invoked with the spinlock held and interrupts disabled.  It also
1468 * causes the parent's usage counter to be permanently incremented, preventing
1469 * the parent from being runtime suspended, so that an irq-safe child never
1470 * has to wait for a non-irq-safe parent.
1471 */
1472 void pm_runtime_irq_safe(struct device *dev)
1473 {
1474 if (dev->parent)
1475 pm_runtime_get_sync(dev->parent);
1476 spin_lock_irq(&dev->power.lock);
1477 dev->power.irq_safe = 1;
1478 spin_unlock_irq(&dev->power.lock);
1479 }
1480 EXPORT_SYMBOL_GPL(pm_runtime_irq_safe);
1481
1482 /**
1483 * update_autosuspend - Handle a change to a device's autosuspend settings.
1484 * @dev: Device to handle.
1485 * @old_delay: The former autosuspend_delay value.
1486 * @old_use: The former use_autosuspend value.
1487 *
1488 * Prevent runtime suspend if the new delay is negative and use_autosuspend is
1489 * set; otherwise allow it.  Send an idle notification if suspends are allowed.
1490 *
1491 * This function must be called under dev->power.lock with interrupts disabled.
1492 */
1493 static void update_autosuspend(struct device *dev, int old_delay, int old_use)
1494 {
1495 int delay = dev->power.autosuspend_delay;
1496
1497
1498 if (dev->power.use_autosuspend && delay < 0) {
1499
1500
1501 if (!old_use || old_delay >= 0) {
1502 atomic_inc(&dev->power.usage_count);
1503 rpm_resume(dev, 0);
1504 }
1505 }
1506
1507
1508 else {
1509
1510
1511 if (old_use && old_delay < 0)
1512 atomic_dec(&dev->power.usage_count);
1513
1514
1515 rpm_idle(dev, RPM_AUTO);
1516 }
1517 }
1518
1519 /**
1520 * pm_runtime_set_autosuspend_delay - Set a device's autosuspend_delay value.
1521 * @dev: Device to handle.
1522 * @delay: Value of the new delay in milliseconds.
1523 *
1524 * Set the device's power.autosuspend_delay value.  If it changes to negative
1525 * and the power.use_autosuspend flag is set, prevent runtime suspends.  If it
1526 * changes the other way, allow runtime suspends.
1527 */
1528 void pm_runtime_set_autosuspend_delay(struct device *dev, int delay)
1529 {
1530 int old_delay, old_use;
1531
1532 spin_lock_irq(&dev->power.lock);
1533 old_delay = dev->power.autosuspend_delay;
1534 old_use = dev->power.use_autosuspend;
1535 dev->power.autosuspend_delay = delay;
1536 update_autosuspend(dev, old_delay, old_use);
1537 spin_unlock_irq(&dev->power.lock);
1538 }
1539 EXPORT_SYMBOL_GPL(pm_runtime_set_autosuspend_delay);
1540
1541 /**
1542 * __pm_runtime_use_autosuspend - Set a device's use_autosuspend flag.
1543 * @dev: Device to handle.
1544 * @use: New value for use_autosuspend.
1545 *
1546 * Set the device's power.use_autosuspend flag, and allow or prevent runtime
1547 * suspends as needed.
1548 */
1549 void __pm_runtime_use_autosuspend(struct device *dev, bool use)
1550 {
1551 int old_delay, old_use;
1552
1553 spin_lock_irq(&dev->power.lock);
1554 old_delay = dev->power.autosuspend_delay;
1555 old_use = dev->power.use_autosuspend;
1556 dev->power.use_autosuspend = use;
1557 update_autosuspend(dev, old_delay, old_use);
1558 spin_unlock_irq(&dev->power.lock);
1559 }
1560 EXPORT_SYMBOL_GPL(__pm_runtime_use_autosuspend);
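A sketch of the usual probe-time setup that funnels into the two helpers above through the pm_runtime_use_autosuspend() wrapper from include/linux/pm_runtime.h (illustrative; the 2000 ms delay is an arbitrary example value):

	pm_runtime_set_autosuspend_delay(dev, 2000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);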
1561
1562 /**
1563 * pm_runtime_init - Initialize runtime PM fields in given device object.
1564 * @dev: Device object to initialize.
1565 */
1566 void pm_runtime_init(struct device *dev)
1567 {
1568 dev->power.runtime_status = RPM_SUSPENDED;
1569 dev->power.idle_notification = false;
1570
1571 dev->power.disable_depth = 1;
1572 atomic_set(&dev->power.usage_count, 0);
1573
1574 dev->power.runtime_error = 0;
1575
1576 atomic_set(&dev->power.child_count, 0);
1577 pm_suspend_ignore_children(dev, false);
1578 dev->power.runtime_auto = true;
1579
1580 dev->power.request_pending = false;
1581 dev->power.request = RPM_REQ_NONE;
1582 dev->power.deferred_resume = false;
1583 INIT_WORK(&dev->power.work, pm_runtime_work);
1584
1585 dev->power.timer_expires = 0;
1586 hrtimer_init(&dev->power.suspend_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1587 dev->power.suspend_timer.function = pm_suspend_timer_fn;
1588
1589 init_waitqueue_head(&dev->power.wait_queue);
1590 }
1591
1592 /**
1593 * pm_runtime_reinit - Re-initialize runtime PM fields in given device object.
1594 * @dev: Device object to re-initialize.
1595 */
1596 void pm_runtime_reinit(struct device *dev)
1597 {
1598 if (!pm_runtime_enabled(dev)) {
1599 if (dev->power.runtime_status == RPM_ACTIVE)
1600 pm_runtime_set_suspended(dev);
1601 if (dev->power.irq_safe) {
1602 spin_lock_irq(&dev->power.lock);
1603 dev->power.irq_safe = 0;
1604 spin_unlock_irq(&dev->power.lock);
1605 if (dev->parent)
1606 pm_runtime_put(dev->parent);
1607 }
1608 }
1609 }
1610
1611 /**
1612 * pm_runtime_remove - Prepare for removing a device from device hierarchy.
1613 * @dev: Device object being removed from device hierarchy.
1614 */
1615 void pm_runtime_remove(struct device *dev)
1616 {
1617 __pm_runtime_disable(dev, false);
1618 pm_runtime_reinit(dev);
1619 }
1620
1621
1622 /**
1623 * pm_runtime_clean_up_links - Prepare links to consumers for driver removal.
1624 * @dev: Device whose driver is going to be removed.
1625 *
1626 * Check links from this device to any consumers and if any of them have active
1627 * runtime PM references to the device, drop the usage counter of the device
1628 * (as many times as needed).
1629 *
1630 * Links with the DL_FLAG_MANAGED flag unset are ignored.
1631 *
1632 * For each managed link, the rpm_active refcount is brought back down to
1633 * one, and every reference dropped that way releases one runtime PM usage
1634 * count of the device (pm_runtime_put_noidle()), so that unbinding the
1635 * driver does not leave the device's usage counter elevated on behalf of
1636 * its consumers.
1637 */
1638 void pm_runtime_clean_up_links(struct device *dev)
1639 {
1640 struct device_link *link;
1641 int idx;
1642
1643 idx = device_links_read_lock();
1644
1645 list_for_each_entry_rcu(link, &dev->links.consumers, s_node,
1646 device_links_read_lock_held()) {
1647 if (!(link->flags & DL_FLAG_MANAGED))
1648 continue;
1649
1650 while (refcount_dec_not_one(&link->rpm_active))
1651 pm_runtime_put_noidle(dev);
1652 }
1653
1654 device_links_read_unlock(idx);
1655 }
1656
1657 /**
1658 * pm_runtime_get_suppliers - Resume and reference-count supplier devices.
1659 * @dev: Consumer device.
1660 */
1661 void pm_runtime_get_suppliers(struct device *dev)
1662 {
1663 struct device_link *link;
1664 int idx;
1665
1666 idx = device_links_read_lock();
1667
1668 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1669 device_links_read_lock_held())
1670 if (link->flags & DL_FLAG_PM_RUNTIME) {
1671 link->supplier_preactivated = true;
1672 refcount_inc(&link->rpm_active);
1673 pm_runtime_get_sync(link->supplier);
1674 }
1675
1676 device_links_read_unlock(idx);
1677 }
1678
1679 /**
1680 * pm_runtime_put_suppliers - Drop references to supplier devices.
1681 * @dev: Consumer device.
1682 */
1683 void pm_runtime_put_suppliers(struct device *dev)
1684 {
1685 struct device_link *link;
1686 int idx;
1687
1688 idx = device_links_read_lock();
1689
1690 list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
1691 device_links_read_lock_held())
1692 if (link->supplier_preactivated) {
1693 link->supplier_preactivated = false;
1694 if (refcount_dec_not_one(&link->rpm_active))
1695 pm_runtime_put(link->supplier);
1696 }
1697
1698 device_links_read_unlock(idx);
1699 }
1700
1701 void pm_runtime_new_link(struct device *dev)
1702 {
1703 spin_lock_irq(&dev->power.lock);
1704 dev->power.links_count++;
1705 spin_unlock_irq(&dev->power.lock);
1706 }
1707
1708 void pm_runtime_drop_link(struct device *dev)
1709 {
1710 spin_lock_irq(&dev->power.lock);
1711 WARN_ON(dev->power.links_count == 0);
1712 dev->power.links_count--;
1713 spin_unlock_irq(&dev->power.lock);
1714 }
1715
1716 static bool pm_runtime_need_not_resume(struct device *dev)
1717 {
1718 return atomic_read(&dev->power.usage_count) <= 1 &&
1719 (atomic_read(&dev->power.child_count) == 0 ||
1720 dev->power.ignore_children);
1721 }
1722
1723
1724 /**
1725 * pm_runtime_force_suspend - Force a device into suspend state if needed.
1726 * @dev: Device to suspend.
1727 *
1728 * Disable runtime PM so we safely can check the device's runtime PM status
1729 * and if it is active, invoke its ->runtime_suspend callback to suspend it
1730 * and change its runtime PM status field to RPM_SUSPENDED.  Also, if the
1731 * device's usage and children counters indicate that the device was not in
1732 * use before the system-wide transition under way, the status is set with
1733 * pm_runtime_set_suspended() so that the parent's children counter is
1734 * updated too; otherwise only the status field itself is changed.
1735 *
1736 * Typically this function may be invoked from a system suspend callback to
1737 * make sure the device is put into low power state and it should only be
1738 * used during system-wide PM transitions to sleep states.
1739 */
1740 int pm_runtime_force_suspend(struct device *dev)
1741 {
1742 int (*callback)(struct device *);
1743 int ret;
1744
1745 pm_runtime_disable(dev);
1746 if (pm_runtime_status_suspended(dev))
1747 return 0;
1748
1749 callback = RPM_GET_CALLBACK(dev, runtime_suspend);
1750
1751 ret = callback ? callback(dev) : 0;
1752 if (ret)
1753 goto err;
1754
1755
1756
1757
1758
1759
1760
1761 if (pm_runtime_need_not_resume(dev))
1762 pm_runtime_set_suspended(dev);
1763 else
1764 __update_runtime_status(dev, RPM_SUSPENDED);
1765
1766 return 0;
1767
1768 err:
1769 pm_runtime_enable(dev);
1770 return ret;
1771 }
1772 EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
1773
1774 /**
1775 * pm_runtime_force_resume - Force a device into resume state if needed.
1776 * @dev: Device to resume.
1777 *
1778 * Prior to invoking this function we expect the user to have brought the
1779 * device into low power state by a call to pm_runtime_force_suspend().  Here
1780 * we reverse that, by calling its ->runtime_resume callback and updating the
1781 * runtime PM status to RPM_ACTIVE, unless the device is not suspended or
1782 * does not need resuming at all (see pm_runtime_need_not_resume()).
1783 *
1784 * Typically this function may be invoked from a system resume callback.
1785 */
1786 int pm_runtime_force_resume(struct device *dev)
1787 {
1788 int (*callback)(struct device *);
1789 int ret = 0;
1790
1791 if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev))
1792 goto out;
1793
1794
1795
1796
1797
1798 __update_runtime_status(dev, RPM_ACTIVE);
1799
1800 callback = RPM_GET_CALLBACK(dev, runtime_resume);
1801
1802 ret = callback ? callback(dev) : 0;
1803 if (ret) {
1804 pm_runtime_set_suspended(dev);
1805 goto out;
1806 }
1807
1808 pm_runtime_mark_last_busy(dev);
1809 out:
1810 pm_runtime_enable(dev);
1811 return ret;
1812 }
1813 EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
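A hedged sketch of how these two helpers are commonly wired up (illustrative; my_runtime_suspend(), my_runtime_resume() and my_driver_pm_ops are hypothetical names): a driver whose system-sleep handling matches its runtime PM handling can reuse them directly as system sleep callbacks:

	static const struct dev_pm_ops my_driver_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
					pm_runtime_force_resume)
		SET_RUNTIME_PM_OPS(my_runtime_suspend, my_runtime_resume, NULL)
	};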