This source file includes the following definitions:
- pm_show_wakelocks
- wakelocks_limit_exceeded
- increment_wakelocks_number
- decrement_wakelocks_number
- wakelocks_limit_exceeded
- increment_wakelocks_number
- decrement_wakelocks_number
- wakelocks_lru_add
- wakelocks_lru_most_recent
- __wakelocks_gc
- wakelocks_gc
- wakelocks_lru_add
- wakelocks_lru_most_recent
- wakelocks_gc
- wakelock_lookup_add
- pm_wake_lock
- pm_wake_unlock
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

#include "power.h"

static DEFINE_MUTEX(wakelocks_lock);

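/*
 * Each user-space wakelock is represented by a struct wakelock: name is the
 * rbtree key, ws is the backing wakeup_source, and lru links the entry into
 * the garbage-collection list when CONFIG_PM_WAKELOCKS_GC is enabled.
 */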
struct wakelock {
	char *name;
	struct rb_node node;
	struct wakeup_source *ws;
#ifdef CONFIG_PM_WAKELOCKS_GC
	struct list_head lru;
#endif
};

static struct rb_root wakelocks_tree = RB_ROOT;

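/*
 * Print the names of either the active or the inactive wakelocks into buf
 * (at most PAGE_SIZE bytes), separated by spaces and ending with a newline.
 */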
ssize_t pm_show_wakelocks(char *buf, bool show_active)
{
	struct rb_node *node;
	struct wakelock *wl;
	char *str = buf;
	char *end = buf + PAGE_SIZE;

	mutex_lock(&wakelocks_lock);

	for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
		wl = rb_entry(node, struct wakelock, node);
		if (wl->ws->active == show_active)
			str += scnprintf(str, end - str, "%s ", wl->name);
	}
	if (str > buf)
		str--;

	str += scnprintf(str, end - str, "\n");

	mutex_unlock(&wakelocks_lock);
	return (str - buf);
}

#if CONFIG_PM_WAKELOCKS_LIMIT > 0
static unsigned int number_of_wakelocks;

static inline bool wakelocks_limit_exceeded(void)
{
	return number_of_wakelocks > CONFIG_PM_WAKELOCKS_LIMIT;
}

static inline void increment_wakelocks_number(void)
{
	number_of_wakelocks++;
}

static inline void decrement_wakelocks_number(void)
{
	number_of_wakelocks--;
}
#else
static inline bool wakelocks_limit_exceeded(void) { return false; }
static inline void increment_wakelocks_number(void) {}
static inline void decrement_wakelocks_number(void) {}
#endif

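/*
 * Optional garbage collection of user-space wakelocks: after more than
 * WL_GC_COUNT_MAX unlock operations, a work item scans the LRU list and
 * frees wakelocks that have been inactive for at least WL_GC_TIME_SEC
 * seconds.
 */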
#ifdef CONFIG_PM_WAKELOCKS_GC
#define WL_GC_COUNT_MAX 100
#define WL_GC_TIME_SEC 300

static void __wakelocks_gc(struct work_struct *work);
static LIST_HEAD(wakelocks_lru_list);
static DECLARE_WORK(wakelock_work, __wakelocks_gc);
static unsigned int wakelocks_gc_count;

static inline void wakelocks_lru_add(struct wakelock *wl)
{
	list_add(&wl->lru, &wakelocks_lru_list);
}

static inline void wakelocks_lru_most_recent(struct wakelock *wl)
{
	list_move(&wl->lru, &wakelocks_lru_list);
}

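/*
 * Walk the LRU list from the oldest entry: stop at the first wakelock used
 * within the last WL_GC_TIME_SEC seconds and free the older, inactive ones.
 */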
static void __wakelocks_gc(struct work_struct *work)
{
	struct wakelock *wl, *aux;
	ktime_t now;

	mutex_lock(&wakelocks_lock);

	now = ktime_get();
	list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
		u64 idle_time_ns;
		bool active;

		spin_lock_irq(&wl->ws->lock);
		idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws->last_time));
		active = wl->ws->active;
		spin_unlock_irq(&wl->ws->lock);

		if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
			break;

		if (!active) {
			wakeup_source_unregister(wl->ws);
			rb_erase(&wl->node, &wakelocks_tree);
			list_del(&wl->lru);
			kfree(wl->name);
			kfree(wl);
			decrement_wakelocks_number();
		}
	}
	wakelocks_gc_count = 0;

	mutex_unlock(&wakelocks_lock);
}

static void wakelocks_gc(void)
{
	if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
		return;

	schedule_work(&wakelock_work);
}
#else
static inline void wakelocks_lru_add(struct wakelock *wl) {}
static inline void wakelocks_lru_most_recent(struct wakelock *wl) {}
static inline void wakelocks_gc(void) {}
#endif

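/*
 * Look up a wakelock by name (the first @len bytes of @name) in the rbtree.
 * If it is not found and @add_if_not_found is set, allocate a new entry and
 * register a wakeup source for it; otherwise return an ERR_PTR() value.
 */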
static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
					    bool add_if_not_found)
{
	struct rb_node **node = &wakelocks_tree.rb_node;
	struct rb_node *parent = *node;
	struct wakelock *wl;

	while (*node) {
		int diff;

		parent = *node;
		wl = rb_entry(*node, struct wakelock, node);
		diff = strncmp(name, wl->name, len);
		if (diff == 0) {
			/* First len bytes match; a longer wl->name sorts after name. */
			if (wl->name[len])
				diff = -1;
			else
				return wl;
		}
		if (diff < 0)
			node = &(*node)->rb_left;
		else
			node = &(*node)->rb_right;
	}
	if (!add_if_not_found)
		return ERR_PTR(-EINVAL);

	if (wakelocks_limit_exceeded())
		return ERR_PTR(-ENOSPC);

	/* Not found, so add a new one. */
	wl = kzalloc(sizeof(*wl), GFP_KERNEL);
	if (!wl)
		return ERR_PTR(-ENOMEM);

	wl->name = kstrndup(name, len, GFP_KERNEL);
	if (!wl->name) {
		kfree(wl);
		return ERR_PTR(-ENOMEM);
	}

	wl->ws = wakeup_source_register(NULL, wl->name);
	if (!wl->ws) {
		kfree(wl->name);
		kfree(wl);
		return ERR_PTR(-ENOMEM);
	}
	wl->ws->last_time = ktime_get();

	rb_link_node(&wl->node, parent, node);
	rb_insert_color(&wl->node, &wakelocks_tree);
	wakelocks_lru_add(wl);
	increment_wakelocks_number();
	return wl;
}

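/*
 * pm_wake_lock - activate a wakeup source on behalf of user space.
 * @buf: "<name>" or "<name> <timeout_ns>"; with a timeout, the wakeup source
 * is only activated for that many nanoseconds (rounded up to milliseconds).
 * Requires CAP_BLOCK_SUSPEND.
 */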
int pm_wake_lock(const char *buf)
{
	const char *str = buf;
	struct wakelock *wl;
	u64 timeout_ns = 0;
	size_t len;
	int ret = 0;

	if (!capable(CAP_BLOCK_SUSPEND))
		return -EPERM;

	while (*str && !isspace(*str))
		str++;

	len = str - buf;
	if (!len)
		return -EINVAL;

	if (*str && *str != '\n') {
		/* Find out if there's a valid timeout string appended. */
		ret = kstrtou64(skip_spaces(str), 10, &timeout_ns);
		if (ret)
			return -EINVAL;
	}

	mutex_lock(&wakelocks_lock);

	wl = wakelock_lookup_add(buf, len, true);
	if (IS_ERR(wl)) {
		ret = PTR_ERR(wl);
		goto out;
	}
	if (timeout_ns) {
		u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;

		do_div(timeout_ms, NSEC_PER_MSEC);
		__pm_wakeup_event(wl->ws, timeout_ms);
	} else {
		__pm_stay_awake(wl->ws);
	}

	wakelocks_lru_most_recent(wl);

 out:
	mutex_unlock(&wakelocks_lock);
	return ret;
}

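/*
 * pm_wake_unlock - deactivate the wakeup source with the given name.
 * @buf: wakelock name, optionally followed by a trailing newline.
 * Requires CAP_BLOCK_SUSPEND; an unknown name returns -EINVAL.
 */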
int pm_wake_unlock(const char *buf)
{
	struct wakelock *wl;
	size_t len;
	int ret = 0;

	if (!capable(CAP_BLOCK_SUSPEND))
		return -EPERM;

	len = strlen(buf);
	if (!len)
		return -EINVAL;

	if (buf[len-1] == '\n')
		len--;

	if (!len)
		return -EINVAL;

	mutex_lock(&wakelocks_lock);

	wl = wakelock_lookup_add(buf, len, false);
	if (IS_ERR(wl)) {
		ret = PTR_ERR(wl);
		goto out;
	}
	__pm_relax(wl->ws);

	wakelocks_lru_most_recent(wl);
	wakelocks_gc();

 out:
	mutex_unlock(&wakelocks_lock);
	return ret;
}
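
For context, pm_wake_lock() and pm_wake_unlock() are reached from user space through the documented /sys/power/wake_lock and /sys/power/wake_unlock sysfs attributes (available when CONFIG_PM_WAKELOCKS is set, and requiring CAP_BLOCK_SUSPEND). The following is a minimal user-space sketch of that path; the lock name and timeout value are illustrative, not taken from this file.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Illustrative helper: write one string to a sysfs attribute. */
static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Acquire "example_lock" with a 5 s (5e9 ns) timeout, then release it. */
	if (write_str("/sys/power/wake_lock", "example_lock 5000000000"))
		perror("wake_lock");
	if (write_str("/sys/power/wake_unlock", "example_lock"))
		perror("wake_unlock");
	return 0;
}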