This source file includes the following definitions (a brief usage sketch follows the list):
- control_show
- control_store
- runtime_active_time_show
- runtime_suspended_time_show
- runtime_status_show
- autosuspend_delay_ms_show
- autosuspend_delay_ms_store
- pm_qos_resume_latency_us_show
- pm_qos_resume_latency_us_store
- pm_qos_latency_tolerance_us_show
- pm_qos_latency_tolerance_us_store
- pm_qos_no_power_off_show
- pm_qos_no_power_off_store
- wakeup_show
- wakeup_store
- wakeup_count_show
- wakeup_active_count_show
- wakeup_abort_count_show
- wakeup_expire_count_show
- wakeup_active_show
- wakeup_total_time_ms_show
- wakeup_max_time_ms_show
- wakeup_last_time_ms_show
- wakeup_prevent_sleep_time_ms_show
- runtime_usage_show
- runtime_active_kids_show
- runtime_enabled_show
- async_show
- async_store
- dpm_sysfs_add
- wakeup_sysfs_add
- wakeup_sysfs_remove
- pm_qos_sysfs_add_resume_latency
- pm_qos_sysfs_remove_resume_latency
- pm_qos_sysfs_add_flags
- pm_qos_sysfs_remove_flags
- pm_qos_sysfs_add_latency_tolerance
- pm_qos_sysfs_remove_latency_tolerance
- rpm_sysfs_remove
- dpm_sysfs_remove
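
These attributes appear under /sys/devices/.../power/ once dpm_sysfs_add() runs at device registration time. As a rough orientation, here is a minimal sketch (not part of this file; foo_probe(), foo_remove() and the 2000 ms delay are hypothetical) of how a driver might set things up in its probe path so that the control, autosuspend_delay_ms and wakeup files become functional:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;

        /* Report the hardware as active before runtime PM is enabled. */
        pm_runtime_set_active(dev);
        /* Opt in to autosuspend; otherwise autosuspend_delay_ms returns -EIO. */
        pm_runtime_set_autosuspend_delay(dev, 2000);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_enable(dev);

        /* Declare wakeup capability so the wakeup* attributes are populated. */
        device_init_wakeup(dev, true);

        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        device_init_wakeup(&pdev->dev, false);
        pm_runtime_disable(&pdev->dev);
        return 0;
}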
#include <linux/device.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"

const char power_group_name[] = "power";
EXPORT_SYMBOL_GPL(power_group_name);

static const char ctrl_auto[] = "auto";
static const char ctrl_on[] = "on";

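/*
 * control - report and change the runtime PM setting of the device:
 * "auto" allows the device to be runtime power managed, "on" keeps it
 * permanently active (runtime PM forbidden).
 */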
static ssize_t control_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "%s\n",
                       dev->power.runtime_auto ? ctrl_auto : ctrl_on);
}

static ssize_t control_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t n)
{
        device_lock(dev);
        if (sysfs_streq(buf, ctrl_auto))
                pm_runtime_allow(dev);
        else if (sysfs_streq(buf, ctrl_on))
                pm_runtime_forbid(dev);
        else
                n = -EINVAL;
        device_unlock(dev);
        return n;
}

static DEVICE_ATTR_RW(control);

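/*
 * runtime_active_time / runtime_suspended_time - accumulated time, in
 * milliseconds, that the device has spent runtime-active and
 * runtime-suspended, respectively.
 */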
static ssize_t runtime_active_time_show(struct device *dev,
                                        struct device_attribute *attr, char *buf)
{
        int ret;
        u64 tmp = pm_runtime_active_time(dev);
        do_div(tmp, NSEC_PER_MSEC);
        ret = sprintf(buf, "%llu\n", tmp);
        return ret;
}

static DEVICE_ATTR_RO(runtime_active_time);

static ssize_t runtime_suspended_time_show(struct device *dev,
                                           struct device_attribute *attr, char *buf)
{
        int ret;
        u64 tmp = pm_runtime_suspended_time(dev);
        do_div(tmp, NSEC_PER_MSEC);
        ret = sprintf(buf, "%llu\n", tmp);
        return ret;
}

static DEVICE_ATTR_RO(runtime_suspended_time);

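/*
 * runtime_status - current runtime PM status of the device: "error" after a
 * failed runtime PM callback, "unsupported" while runtime PM is disabled,
 * otherwise "suspended", "suspending", "resuming" or "active".
 */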
static ssize_t runtime_status_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        const char *p;

        if (dev->power.runtime_error) {
                p = "error\n";
        } else if (dev->power.disable_depth) {
                p = "unsupported\n";
        } else {
                switch (dev->power.runtime_status) {
                case RPM_SUSPENDED:
                        p = "suspended\n";
                        break;
                case RPM_SUSPENDING:
                        p = "suspending\n";
                        break;
                case RPM_RESUMING:
                        p = "resuming\n";
                        break;
                case RPM_ACTIVE:
                        p = "active\n";
                        break;
                default:
                        return -EIO;
                }
        }
        return sprintf(buf, p);
}

static DEVICE_ATTR_RO(runtime_status);

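/*
 * autosuspend_delay_ms - runtime PM autosuspend delay in milliseconds.
 * Reads and writes fail with -EIO unless the driver has opted in with
 * pm_runtime_use_autosuspend().
 */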
static ssize_t autosuspend_delay_ms_show(struct device *dev,
                                         struct device_attribute *attr, char *buf)
{
        if (!dev->power.use_autosuspend)
                return -EIO;
        return sprintf(buf, "%d\n", dev->power.autosuspend_delay);
}

static ssize_t autosuspend_delay_ms_store(struct device *dev,
                                          struct device_attribute *attr, const char *buf, size_t n)
{
        long delay;

        if (!dev->power.use_autosuspend)
                return -EIO;

        if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay)
                return -EINVAL;

        device_lock(dev);
        pm_runtime_set_autosuspend_delay(dev, delay);
        device_unlock(dev);
        return n;
}

static DEVICE_ATTR_RW(autosuspend_delay_ms);

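/*
 * pm_qos_resume_latency_us - PM QoS resume latency limit in microseconds.
 * Writing "0" is translated into "no constraint"
 * (PM_QOS_RESUME_LATENCY_NO_CONSTRAINT), while writing "n/a" requests a
 * zero latency limit, i.e. no resume latency is acceptable at all.
 */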
static ssize_t pm_qos_resume_latency_us_show(struct device *dev,
                                             struct device_attribute *attr,
                                             char *buf)
{
        s32 value = dev_pm_qos_requested_resume_latency(dev);

        if (value == 0)
                return sprintf(buf, "n/a\n");
        if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
                value = 0;

        return sprintf(buf, "%d\n", value);
}

static ssize_t pm_qos_resume_latency_us_store(struct device *dev,
                                              struct device_attribute *attr,
                                              const char *buf, size_t n)
{
        s32 value;
        int ret;

        if (!kstrtos32(buf, 0, &value)) {
                /*
                 * Reject negative values and direct writes of the
                 * "no constraint" magic value.
                 */
                if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT)
                        return -EINVAL;

                if (value == 0)
                        value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
        } else if (sysfs_streq(buf, "n/a")) {
                value = 0;
        } else {
                return -EINVAL;
        }

        ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req,
                                        value);
        return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_resume_latency_us);

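/*
 * pm_qos_latency_tolerance_us - device latency tolerance in microseconds.
 * "auto" selects the kernel-managed default (no user constraint) and "any"
 * means any latency is acceptable.  The group containing this attribute is
 * only merged for devices that provide a set_latency_tolerance() callback
 * (see dpm_sysfs_add()).
 */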
static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev,
                                                struct device_attribute *attr,
                                                char *buf)
{
        s32 value = dev_pm_qos_get_user_latency_tolerance(dev);

        if (value < 0)
                return sprintf(buf, "auto\n");
        if (value == PM_QOS_LATENCY_ANY)
                return sprintf(buf, "any\n");

        return sprintf(buf, "%d\n", value);
}

static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev,
                                                 struct device_attribute *attr,
                                                 const char *buf, size_t n)
{
        s32 value;
        int ret;

        if (kstrtos32(buf, 0, &value) == 0) {
                /* Negative values cannot be written directly; use "auto" or "any". */
                if (value < 0)
                        return -EINVAL;
        } else {
                if (sysfs_streq(buf, "auto"))
                        value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
                else if (sysfs_streq(buf, "any"))
                        value = PM_QOS_LATENCY_ANY;
                else
                        return -EINVAL;
        }
        ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
        return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us);

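/*
 * pm_qos_no_power_off - "1" if power must not be removed from the device
 * (PM_QOS_FLAG_NO_POWER_OFF is requested), "0" otherwise.
 */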
static ssize_t pm_qos_no_power_off_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        return sprintf(buf, "%d\n", !!(dev_pm_qos_requested_flags(dev)
                                       & PM_QOS_FLAG_NO_POWER_OFF));
}

static ssize_t pm_qos_no_power_off_store(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t n)
{
        int ret;

        if (kstrtoint(buf, 0, &ret))
                return -EINVAL;

        if (ret != 0 && ret != 1)
                return -EINVAL;

        ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, ret);
        return ret < 0 ? ret : n;
}

static DEVICE_ATTR_RW(pm_qos_no_power_off);

#ifdef CONFIG_PM_SLEEP
static const char _enabled[] = "enabled";
static const char _disabled[] = "disabled";

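/*
 * Wakeup attributes.  "wakeup" enables or disables the device as a system
 * wakeup source; the remaining wakeup_* files report statistics from the
 * device's wakeup source object, or an empty line if it has none.
 */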
static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        return sprintf(buf, "%s\n", device_can_wakeup(dev)
                       ? (device_may_wakeup(dev) ? _enabled : _disabled)
                       : "");
}

static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t n)
{
        if (!device_can_wakeup(dev))
                return -EINVAL;

        if (sysfs_streq(buf, _enabled))
                device_set_wakeup_enable(dev, 1);
        else if (sysfs_streq(buf, _disabled))
                device_set_wakeup_enable(dev, 0);
        else
                return -EINVAL;
        return n;
}

static DEVICE_ATTR_RW(wakeup);

static ssize_t wakeup_count_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        unsigned long count = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                /* Number of signaled wakeup events, not the abort count. */
                count = dev->power.wakeup->event_count;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_count);

static ssize_t wakeup_active_count_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        unsigned long count = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                count = dev->power.wakeup->active_count;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_active_count);

static ssize_t wakeup_abort_count_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        unsigned long count = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                count = dev->power.wakeup->wakeup_count;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_abort_count);

static ssize_t wakeup_expire_count_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        unsigned long count = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                count = dev->power.wakeup->expire_count;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_expire_count);

static ssize_t wakeup_active_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        unsigned int active = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                active = dev->power.wakeup->active;
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_active);

static ssize_t wakeup_total_time_ms_show(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
{
        s64 msec = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                msec = ktime_to_ms(dev->power.wakeup->total_time);
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_total_time_ms);

static ssize_t wakeup_max_time_ms_show(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        s64 msec = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                msec = ktime_to_ms(dev->power.wakeup->max_time);
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_max_time_ms);

static ssize_t wakeup_last_time_ms_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        s64 msec = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                msec = ktime_to_ms(dev->power.wakeup->last_time);
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_last_time_ms);

#ifdef CONFIG_PM_AUTOSLEEP
static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev,
                                                 struct device_attribute *attr,
                                                 char *buf)
{
        s64 msec = 0;
        bool enabled = false;

        spin_lock_irq(&dev->power.lock);
        if (dev->power.wakeup) {
                msec = ktime_to_ms(dev->power.wakeup->prevent_sleep_time);
                enabled = true;
        }
        spin_unlock_irq(&dev->power.lock);
        return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms);
#endif /* CONFIG_PM_AUTOSLEEP */
#endif /* CONFIG_PM_SLEEP */

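/*
 * Debug-only attributes (CONFIG_PM_ADVANCED_DEBUG) exposing runtime PM
 * bookkeeping: the usage count, the number of active children, whether
 * runtime PM is enabled and whether async suspend/resume is used.
 */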
#ifdef CONFIG_PM_ADVANCED_DEBUG
static ssize_t runtime_usage_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count));
}
static DEVICE_ATTR_RO(runtime_usage);

static ssize_t runtime_active_kids_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        return sprintf(buf, "%d\n", dev->power.ignore_children ?
                       0 : atomic_read(&dev->power.child_count));
}
static DEVICE_ATTR_RO(runtime_active_kids);

static ssize_t runtime_enabled_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        if (dev->power.disable_depth && (dev->power.runtime_auto == false))
                return sprintf(buf, "disabled & forbidden\n");
        if (dev->power.disable_depth)
                return sprintf(buf, "disabled\n");
        if (dev->power.runtime_auto == false)
                return sprintf(buf, "forbidden\n");
        return sprintf(buf, "enabled\n");
}
static DEVICE_ATTR_RO(runtime_enabled);

#ifdef CONFIG_PM_SLEEP
static ssize_t async_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%s\n",
                       device_async_suspend_enabled(dev) ?
                       _enabled : _disabled);
}

static ssize_t async_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t n)
{
        if (sysfs_streq(buf, _enabled))
                device_enable_async_suspend(dev);
        else if (sysfs_streq(buf, _disabled))
                device_disable_async_suspend(dev);
        else
                return -EINVAL;
        return n;
}

static DEVICE_ATTR_RW(async);

#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM_ADVANCED_DEBUG */

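/*
 * Attribute groups.  They all share the "power" directory name: the basic
 * group is created by dpm_sysfs_add() and the remaining ones are merged
 * into the same directory as needed.
 */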
static struct attribute *power_attrs[] = {
#ifdef CONFIG_PM_ADVANCED_DEBUG
#ifdef CONFIG_PM_SLEEP
        &dev_attr_async.attr,
#endif
        &dev_attr_runtime_status.attr,
        &dev_attr_runtime_usage.attr,
        &dev_attr_runtime_active_kids.attr,
        &dev_attr_runtime_enabled.attr,
#endif
        NULL,
};
static const struct attribute_group pm_attr_group = {
        .name = power_group_name,
        .attrs = power_attrs,
};

static struct attribute *wakeup_attrs[] = {
#ifdef CONFIG_PM_SLEEP
        &dev_attr_wakeup.attr,
        &dev_attr_wakeup_count.attr,
        &dev_attr_wakeup_active_count.attr,
        &dev_attr_wakeup_abort_count.attr,
        &dev_attr_wakeup_expire_count.attr,
        &dev_attr_wakeup_active.attr,
        &dev_attr_wakeup_total_time_ms.attr,
        &dev_attr_wakeup_max_time_ms.attr,
        &dev_attr_wakeup_last_time_ms.attr,
#ifdef CONFIG_PM_AUTOSLEEP
        &dev_attr_wakeup_prevent_sleep_time_ms.attr,
#endif
#endif
        NULL,
};
static const struct attribute_group pm_wakeup_attr_group = {
        .name = power_group_name,
        .attrs = wakeup_attrs,
};

static struct attribute *runtime_attrs[] = {
#ifndef CONFIG_PM_ADVANCED_DEBUG
        &dev_attr_runtime_status.attr,
#endif
        &dev_attr_control.attr,
        &dev_attr_runtime_suspended_time.attr,
        &dev_attr_runtime_active_time.attr,
        &dev_attr_autosuspend_delay_ms.attr,
        NULL,
};
static const struct attribute_group pm_runtime_attr_group = {
        .name = power_group_name,
        .attrs = runtime_attrs,
};

static struct attribute *pm_qos_resume_latency_attrs[] = {
        &dev_attr_pm_qos_resume_latency_us.attr,
        NULL,
};
static const struct attribute_group pm_qos_resume_latency_attr_group = {
        .name = power_group_name,
        .attrs = pm_qos_resume_latency_attrs,
};

static struct attribute *pm_qos_latency_tolerance_attrs[] = {
        &dev_attr_pm_qos_latency_tolerance_us.attr,
        NULL,
};
static const struct attribute_group pm_qos_latency_tolerance_attr_group = {
        .name = power_group_name,
        .attrs = pm_qos_latency_tolerance_attrs,
};

static struct attribute *pm_qos_flags_attrs[] = {
        &dev_attr_pm_qos_no_power_off.attr,
        NULL,
};
static const struct attribute_group pm_qos_flags_attr_group = {
        .name = power_group_name,
        .attrs = pm_qos_flags_attrs,
};

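/*
 * dpm_sysfs_add - create the power/ directory for a device and populate it
 * with the attribute groups that apply to it, rolling everything back on
 * failure.
 */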
int dpm_sysfs_add(struct device *dev)
{
        int rc;

        /* No need to create PM sysfs if explicitly disabled. */
        if (device_pm_not_required(dev))
                return 0;

        rc = sysfs_create_group(&dev->kobj, &pm_attr_group);
        if (rc)
                return rc;

        if (pm_runtime_callbacks_present(dev)) {
                rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group);
                if (rc)
                        goto err_out;
        }
        if (device_can_wakeup(dev)) {
                rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
                if (rc)
                        goto err_runtime;
        }
        if (dev->power.set_latency_tolerance) {
                rc = sysfs_merge_group(&dev->kobj,
                                       &pm_qos_latency_tolerance_attr_group);
                if (rc)
                        goto err_wakeup;
        }
        rc = pm_wakeup_source_sysfs_add(dev);
        if (rc)
                goto err_latency;
        return 0;

 err_latency:
        sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
 err_wakeup:
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
 err_runtime:
        sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
 err_out:
        sysfs_remove_group(&dev->kobj, &pm_attr_group);
        return rc;
}

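/*
 * The helpers below add or remove individual attribute groups after the
 * device has been registered, e.g. when wakeup capability or device PM QoS
 * requests appear or go away at run time.
 */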
int wakeup_sysfs_add(struct device *dev)
{
        return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group);
}

void wakeup_sysfs_remove(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
}

int pm_qos_sysfs_add_resume_latency(struct device *dev)
{
        return sysfs_merge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}

void pm_qos_sysfs_remove_resume_latency(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_qos_resume_latency_attr_group);
}

int pm_qos_sysfs_add_flags(struct device *dev)
{
        return sysfs_merge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

void pm_qos_sysfs_remove_flags(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group);
}

int pm_qos_sysfs_add_latency_tolerance(struct device *dev)
{
        return sysfs_merge_group(&dev->kobj,
                                 &pm_qos_latency_tolerance_attr_group);
}

void pm_qos_sysfs_remove_latency_tolerance(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
}

void rpm_sysfs_remove(struct device *dev)
{
        sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group);
}

void dpm_sysfs_remove(struct device *dev)
{
        if (device_pm_not_required(dev))
                return;
        sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group);
        dev_pm_qos_constraints_destroy(dev);
        rpm_sysfs_remove(dev);
        sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group);
        sysfs_remove_group(&dev->kobj, &pm_attr_group);
}