/*
 * drivers/base/power/wakeup.c - System wakeup events framework
 *
 * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pm_wakeirq.h>
#include <trace/events/power.h>

#include "power.h"

/*
 * If set, the suspend/hibernate code will abort transitions to a sleep state
 * if wakeup events are registered during or immediately before the transition.
 */
bool events_check_enabled __read_mostly;

/* First wakeup IRQ seen by the kernel in the last cycle. */
unsigned int pm_wakeup_irq __read_mostly;

/* If set and the system is suspending, terminate the suspend. */
static bool pm_abort_suspend __read_mostly;

/*
 * Combined counters of registered wakeup events and wakeup events in progress.
 * They need to be modified together atomically, so it's better to use one
 * atomic variable to hold them both.
 */
static atomic_t combined_event_count = ATOMIC_INIT(0);

#define IN_PROGRESS_BITS	(sizeof(int) * 4)
#define MAX_IN_PROGRESS		((1 << IN_PROGRESS_BITS) - 1)
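
/*
 * With a 32-bit int, the layout works out to the upper 16 bits holding the
 * number of registered (already completed) wakeup events and the lower 16
 * bits holding the number of events still in progress.  Incrementing the
 * combined value by 1 therefore starts a new event, while adding
 * MAX_IN_PROGRESS (2^16 - 1) retires one: it carries a unit from the
 * "in progress" half into the "registered" half in a single atomic
 * operation, which is what wakeup_source_deactivate() relies on.
 */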

static void split_counters(unsigned int *cnt, unsigned int *inpr)
{
	unsigned int comb = atomic_read(&combined_event_count);

	*cnt = (comb >> IN_PROGRESS_BITS);
	*inpr = comb & MAX_IN_PROGRESS;
}

/* A preserved old value of the events counter. */
static unsigned int saved_count;

static DEFINE_SPINLOCK(events_lock);

static void pm_wakeup_timer_fn(unsigned long data);

static LIST_HEAD(wakeup_sources);

static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);

static struct wakeup_source deleted_ws = {
	.name = "deleted",
	.lock =  __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
};

/**
 * wakeup_source_prepare - Prepare a new wakeup source for initialization.
 * @ws: Wakeup source to prepare.
 * @name: Pointer to the name of the new wakeup source.
 *
 * Callers must ensure that the @name string won't be freed when @ws is still in
 * use.
 */
void wakeup_source_prepare(struct wakeup_source *ws, const char *name)
{
	if (ws) {
		memset(ws, 0, sizeof(*ws));
		ws->name = name;
	}
}
EXPORT_SYMBOL_GPL(wakeup_source_prepare);

/**
 * wakeup_source_create - Create a struct wakeup_source object.
 * @name: Name of the new wakeup source.
 */
struct wakeup_source *wakeup_source_create(const char *name)
{
	struct wakeup_source *ws;

	ws = kmalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return NULL;

	wakeup_source_prepare(ws, name ? kstrdup_const(name, GFP_KERNEL) : NULL);
	return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);

/**
 * wakeup_source_drop - Prepare a struct wakeup_source object for destruction.
 * @ws: Wakeup source to prepare for destruction.
 *
 * Callers must ensure that __pm_stay_awake() or __pm_wakeup_event() will never
 * be run in parallel with this function for the same wakeup source object.
 */
void wakeup_source_drop(struct wakeup_source *ws)
{
	if (!ws)
		return;

	del_timer_sync(&ws->timer);
	__pm_relax(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_drop);

/*
 * Record wakeup_source statistics being deleted into a dummy wakeup_source.
 */
static void wakeup_source_record(struct wakeup_source *ws)
{
	unsigned long flags;

	spin_lock_irqsave(&deleted_ws.lock, flags);

	if (ws->event_count) {
		deleted_ws.total_time =
			ktime_add(deleted_ws.total_time, ws->total_time);
		deleted_ws.prevent_sleep_time =
			ktime_add(deleted_ws.prevent_sleep_time,
				  ws->prevent_sleep_time);
		deleted_ws.max_time =
			ktime_compare(deleted_ws.max_time, ws->max_time) > 0 ?
				deleted_ws.max_time : ws->max_time;
		deleted_ws.event_count += ws->event_count;
		deleted_ws.active_count += ws->active_count;
		deleted_ws.relax_count += ws->relax_count;
		deleted_ws.expire_count += ws->expire_count;
		deleted_ws.wakeup_count += ws->wakeup_count;
	}

	spin_unlock_irqrestore(&deleted_ws.lock, flags);
}

/**
 * wakeup_source_destroy - Destroy a struct wakeup_source object.
 * @ws: Wakeup source to destroy.
 *
 * Use only for wakeup source objects created with wakeup_source_create().
 */
void wakeup_source_destroy(struct wakeup_source *ws)
{
	if (!ws)
		return;

	wakeup_source_drop(ws);
	wakeup_source_record(ws);
	kfree_const(ws->name);
	kfree(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);

/**
 * wakeup_source_add - Add given object to the list of wakeup sources.
 * @ws: Wakeup source object to add to the list.
 */
void wakeup_source_add(struct wakeup_source *ws)
{
	unsigned long flags;

	if (WARN_ON(!ws))
		return;

	spin_lock_init(&ws->lock);
	setup_timer(&ws->timer, pm_wakeup_timer_fn, (unsigned long)ws);
	ws->active = false;
	ws->last_time = ktime_get();

	spin_lock_irqsave(&events_lock, flags);
	list_add_rcu(&ws->entry, &wakeup_sources);
	spin_unlock_irqrestore(&events_lock, flags);
}
EXPORT_SYMBOL_GPL(wakeup_source_add);

/**
 * wakeup_source_remove - Remove given object from the wakeup sources list.
 * @ws: Wakeup source object to remove from the list.
 */
void wakeup_source_remove(struct wakeup_source *ws)
{
	unsigned long flags;

	if (WARN_ON(!ws))
		return;

	spin_lock_irqsave(&events_lock, flags);
	list_del_rcu(&ws->entry);
	spin_unlock_irqrestore(&events_lock, flags);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);

/**
 * wakeup_source_register - Create wakeup source and add it to the list.
 * @name: Name of the wakeup source to register.
 */
struct wakeup_source *wakeup_source_register(const char *name)
{
	struct wakeup_source *ws;

	ws = wakeup_source_create(name);
	if (ws)
		wakeup_source_add(ws);

	return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_register);

/**
 * wakeup_source_unregister - Remove wakeup source from the list and destroy it.
 * @ws: Wakeup source object to unregister.
 */
void wakeup_source_unregister(struct wakeup_source *ws)
{
	if (ws) {
		wakeup_source_remove(ws);
		wakeup_source_destroy(ws);
	}
}
EXPORT_SYMBOL_GPL(wakeup_source_unregister);
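
/*
 * Illustrative usage sketch only (not part of the framework itself): a driver
 * that needs a wakeup source not tied to any struct device might do the
 * following, where "my_ws" and the surrounding driver code are hypothetical:
 *
 *	struct wakeup_source *my_ws;
 *
 *	my_ws = wakeup_source_register("my_ws");
 *	if (my_ws)
 *		__pm_wakeup_event(my_ws, 50);	(on each event, 50 ms grace)
 *
 *	wakeup_source_unregister(my_ws);	(on driver teardown)
 */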

/**
 * device_wakeup_attach - Attach a wakeup source object to a device object.
 * @dev: Device to handle.
 * @ws: Wakeup source object to attach to @dev.
 *
 * This causes @dev to be treated as a wakeup device.
 */
static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws)
{
	spin_lock_irq(&dev->power.lock);
	if (dev->power.wakeup) {
		spin_unlock_irq(&dev->power.lock);
		return -EEXIST;
	}
	dev->power.wakeup = ws;
	spin_unlock_irq(&dev->power.lock);
	return 0;
}

/**
 * device_wakeup_enable - Enable given device to be a wakeup source.
 * @dev: Device to handle.
 *
 * Create a wakeup source object, register it and attach it to @dev.
 */
int device_wakeup_enable(struct device *dev)
{
	struct wakeup_source *ws;
	int ret;

	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	ws = wakeup_source_register(dev_name(dev));
	if (!ws)
		return -ENOMEM;

	ret = device_wakeup_attach(dev, ws);
	if (ret)
		wakeup_source_unregister(ws);

	return ret;
}
EXPORT_SYMBOL_GPL(device_wakeup_enable);

/**
 * device_wakeup_attach_irq - Attach a wakeirq to a wakeup source
 * @dev: Device to handle
 * @wakeirq: Device specific wakeirq entry
 *
 * Attach a device wakeirq to the wakeup source so the device
 * wake IRQ can be configured automatically for suspend and
 * resume.
 *
 * Call under the device's power.lock lock.
 */
int device_wakeup_attach_irq(struct device *dev,
			     struct wake_irq *wakeirq)
{
	struct wakeup_source *ws;

	ws = dev->power.wakeup;
	if (!ws) {
		dev_err(dev, "forgot to call device_init_wakeup?\n");
		return -EINVAL;
	}

	if (ws->wakeirq)
		return -EEXIST;

	ws->wakeirq = wakeirq;
	return 0;
}

/**
 * device_wakeup_detach_irq - Detach a wakeirq from a wakeup source
 * @dev: Device to handle
 *
 * Removes a device wakeirq from the wakeup source.
 *
 * Call under the device's power.lock lock.
 */
void device_wakeup_detach_irq(struct device *dev)
{
	struct wakeup_source *ws;

	ws = dev->power.wakeup;
	if (ws)
		ws->wakeirq = NULL;
}

/**
 * device_wakeup_arm_wake_irqs(void)
 *
 * Iterates over the list of device wakeirqs to arm them.
 */
void device_wakeup_arm_wake_irqs(void)
{
	struct wakeup_source *ws;

	rcu_read_lock();
	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
		if (ws->wakeirq)
			dev_pm_arm_wake_irq(ws->wakeirq);
	}
	rcu_read_unlock();
}

/**
 * device_wakeup_disarm_wake_irqs(void)
 *
 * Iterates over the list of device wakeirqs to disarm them.
 */
void device_wakeup_disarm_wake_irqs(void)
{
	struct wakeup_source *ws;

	rcu_read_lock();
	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
		if (ws->wakeirq)
			dev_pm_disarm_wake_irq(ws->wakeirq);
	}
	rcu_read_unlock();
}

/**
 * device_wakeup_detach - Detach a device's wakeup source object from it.
 * @dev: Device to detach the wakeup source object from.
 *
 * After it returns, @dev will not be treated as a wakeup device any more.
 */
static struct wakeup_source *device_wakeup_detach(struct device *dev)
{
	struct wakeup_source *ws;

	spin_lock_irq(&dev->power.lock);
	ws = dev->power.wakeup;
	dev->power.wakeup = NULL;
	spin_unlock_irq(&dev->power.lock);
	return ws;
}

/**
 * device_wakeup_disable - Do not regard a device as a wakeup source any more.
 * @dev: Device to handle.
 *
 * Detach the @dev's wakeup source object from it, unregister this wakeup source
 * object and destroy it.
 */
int device_wakeup_disable(struct device *dev)
{
	struct wakeup_source *ws;

	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	ws = device_wakeup_detach(dev);
	if (ws)
		wakeup_source_unregister(ws);

	return 0;
}
EXPORT_SYMBOL_GPL(device_wakeup_disable);

/**
 * device_set_wakeup_capable - Set/reset device wakeup capability flag.
 * @dev: Device to handle.
 * @capable: Whether or not @dev is capable of waking up the system from sleep.
 *
 * If @capable is set, set the @dev's power.can_wakeup flag and add its
 * wakeup-related attributes to sysfs.  Otherwise, unset the @dev's
 * power.can_wakeup flag and remove its wakeup-related attributes from sysfs.
 *
 * This function may sleep and it can't be called from any context where
 * sleeping is not allowed.
 */
void device_set_wakeup_capable(struct device *dev, bool capable)
{
	if (!!dev->power.can_wakeup == !!capable)
		return;

	if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
		if (capable) {
			if (wakeup_sysfs_add(dev))
				return;
		} else {
			wakeup_sysfs_remove(dev);
		}
	}
	dev->power.can_wakeup = capable;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);

/**
 * device_init_wakeup - Device wakeup initialization.
 * @dev: Device to handle.
 * @enable: Whether or not to enable @dev as a wakeup device.
 *
 * By default, most devices should leave wakeup disabled.  The exceptions are
 * devices that everyone expects to be wakeup sources: keyboards, power buttons,
 * possibly network interfaces, etc.  Also, devices that don't generate their
 * own wakeup requests but merely forward requests from one bus to another
 * (like PCI bridges) should have wakeup enabled by default.
 */
int device_init_wakeup(struct device *dev, bool enable)
{
	int ret = 0;

	if (!dev)
		return -EINVAL;

	if (enable) {
		device_set_wakeup_capable(dev, true);
		ret = device_wakeup_enable(dev);
	} else {
		if (dev->power.can_wakeup)
			device_wakeup_disable(dev);

		device_set_wakeup_capable(dev, false);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(device_init_wakeup);
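
/*
 * Illustrative sketch only: a typical driver probe() enables wakeup once the
 * hardware is known to support it and disables it again in remove().  The
 * my_probe()/my_remove() names below are hypothetical.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		...
 *		device_init_wakeup(&pdev->dev, true);
 *		return 0;
 *	}
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		device_init_wakeup(&pdev->dev, false);
 *		...
 *		return 0;
 *	}
 */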

/**
 * device_set_wakeup_enable - Enable or disable a device to wake up the system.
 * @dev: Device to handle.
 */
int device_set_wakeup_enable(struct device *dev, bool enable)
{
	if (!dev || !dev->power.can_wakeup)
		return -EINVAL;

	return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev);
}
EXPORT_SYMBOL_GPL(device_set_wakeup_enable);

/**
 * wakeup_source_not_registered - validate the given wakeup source.
 * @ws: Wakeup source to be validated.
 */
static bool wakeup_source_not_registered(struct wakeup_source *ws)
{
	/*
	 * Use timer struct to check if the given source is initialized
	 * by wakeup_source_add.
	 */
	return ws->timer.function != pm_wakeup_timer_fn ||
		   ws->timer.data != (unsigned long)ws;
}

/*
 * The functions below use the observation that each wakeup event starts a
 * period in which the system should not be suspended.  The moment this period
 * will end depends on how the wakeup event is going to be processed after being
 * detected and all of the possible cases can be divided into two distinct
 * groups.
 *
 * First, a wakeup event may be detected by the same functional unit that will
 * carry out the entire processing of it and possibly will pass it to user space
 * for further processing.  In that case the functional unit that has detected
 * the event may later "close" the "no suspend" period associated with it
 * directly as soon as it has been dealt with.  The pair of pm_stay_awake() and
 * pm_relax(), balanced with each other, is supposed to be used in such
 * situations.
 *
 * Second, a wakeup event may be detected by one functional unit and processed
 * by another one.  In that case the unit that has detected it cannot really
 * "close" the "no suspend" period associated with it, unless it knows in
 * advance what's going to happen to the event during processing.  This
 * knowledge, however, may not be available to it, so it can simply specify time
 * to wait before the system can be suspended and pass it as the second
 * argument of pm_wakeup_event().
 *
 * It is valid to call pm_relax() after pm_wakeup_event(), in which case the
 * "no suspend" period will be ended either by the pm_relax(), or by the timer
 * function executed when the timer expires, whichever comes first.
 */
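
/*
 * For illustration only (not part of the framework): a driver whose interrupt
 * handler both detects and fully processes its wakeup events might pair the
 * calls as below; my_irq_handler() and struct my_chip are hypothetical names.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_chip *chip = data;
 *
 *		pm_stay_awake(chip->dev);
 *		...process the event...
 *		pm_relax(chip->dev);
 *		return IRQ_HANDLED;
 *	}
 *
 * A handler that merely queues the event for another unit to process would
 * call pm_wakeup_event(chip->dev, 100) instead and rely on the 100 ms grace
 * period (or an explicit pm_relax() issued by the consumer, whichever comes
 * first) to end the "no suspend" period.
 */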

/**
 * wakeup_source_activate - Mark given wakeup source as active.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and, if @ws has just been activated, notify the PM
 * core of the event by incrementing the counter of wakeup events being
 * processed.
 */
static void wakeup_source_activate(struct wakeup_source *ws)
{
	unsigned int cec;

	if (WARN_ONCE(wakeup_source_not_registered(ws),
			"unregistered wakeup source\n"))
		return;

	/*
	 * active wakeup source should bring the system
	 * out of PM_SUSPEND_FREEZE state
	 */
	freeze_wake();

	ws->active = true;
	ws->active_count++;
	ws->last_time = ktime_get();
	if (ws->autosleep_enabled)
		ws->start_prevent_time = ws->last_time;

	/* Increment the counter of events in progress. */
	cec = atomic_inc_return(&combined_event_count);

	trace_wakeup_source_activate(ws->name, cec);
}

/**
 * wakeup_source_report_event - Report wakeup event using the given source.
 * @ws: Wakeup source to report the event for.
 */
static void wakeup_source_report_event(struct wakeup_source *ws)
{
	ws->event_count++;
	/* This is racy, but the counter is approximate anyway. */
	if (events_check_enabled)
		ws->wakeup_count++;

	if (!ws->active)
		wakeup_source_activate(ws);
}

/**
 * __pm_stay_awake - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_stay_awake(struct wakeup_source *ws)
{
	unsigned long flags;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);

	wakeup_source_report_event(ws);
	del_timer(&ws->timer);
	ws->timer_expires = 0;

	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_stay_awake);

/**
 * pm_stay_awake - Notify the PM core that a wakeup event is being processed.
 * @dev: Device the wakeup event is related to.
 *
 * Notify the PM core of a wakeup event (signaled by @dev) by calling
 * __pm_stay_awake for the @dev's wakeup source object.
 *
 * Call this function after detecting a wakeup event if pm_relax() is going
 * to be called directly after processing the event (and possibly passing it to
 * user space for further processing).
 */
void pm_stay_awake(struct device *dev)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_stay_awake(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_stay_awake);

#ifdef CONFIG_PM_AUTOSLEEP
static void update_prevent_sleep_time(struct wakeup_source *ws, ktime_t now)
{
	ktime_t delta = ktime_sub(now, ws->start_prevent_time);
	ws->prevent_sleep_time = ktime_add(ws->prevent_sleep_time, delta);
}
#else
static inline void update_prevent_sleep_time(struct wakeup_source *ws,
					     ktime_t now) {}
#endif

/**
 * wakeup_source_deactivate - Mark given wakeup source as inactive.
 * @ws: Wakeup source to handle.
 *
 * Update the @ws' statistics and notify the PM core that the wakeup source has
 * become inactive by decrementing the counter of wakeup events being processed
 * and incrementing the counter of registered wakeup events.
 */
static void wakeup_source_deactivate(struct wakeup_source *ws)
{
	unsigned int cnt, inpr, cec;
	ktime_t duration;
	ktime_t now;

	ws->relax_count++;
	/*
	 * __pm_relax() may be called directly or from a timer function.
	 * If it is called directly right after the timer function has been
	 * started, but before the timer function calls __pm_relax(), it is
	 * possible that __pm_stay_awake() will be called in the meantime and
	 * will set ws->active.  Then, ws->active may be cleared immediately
	 * by the __pm_relax() called from the timer function, but in such a
	 * case ws->relax_count will be different from ws->active_count.
	 */
	if (ws->relax_count != ws->active_count) {
		ws->relax_count--;
		return;
	}

	ws->active = false;

	now = ktime_get();
	duration = ktime_sub(now, ws->last_time);
	ws->total_time = ktime_add(ws->total_time, duration);
	if (ktime_to_ns(duration) > ktime_to_ns(ws->max_time))
		ws->max_time = duration;

	ws->last_time = now;
	del_timer(&ws->timer);
	ws->timer_expires = 0;

	if (ws->autosleep_enabled)
		update_prevent_sleep_time(ws, now);

	/*
	 * Increment the counter of registered wakeup events and decrement the
	 * counter of wakeup events in progress simultaneously.
	 */
	cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
	trace_wakeup_source_deactivate(ws->name, cec);

	split_counters(&cnt, &inpr);
	if (!inpr && waitqueue_active(&wakeup_count_wait_queue))
		wake_up(&wakeup_count_wait_queue);
}

/**
 * __pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @ws: Wakeup source object associated with the source of the event.
 *
 * Call this function for wakeup events whose processing started with calling
 * __pm_stay_awake().
 *
 * It is safe to call it from interrupt context.
 */
void __pm_relax(struct wakeup_source *ws)
{
	unsigned long flags;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);
	if (ws->active)
		wakeup_source_deactivate(ws);
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_relax);

/**
 * pm_relax - Notify the PM core that processing of a wakeup event has ended.
 * @dev: Device that signaled the event.
 *
 * Execute __pm_relax() for the @dev's wakeup source object.
 */
void pm_relax(struct device *dev)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_relax(dev->power.wakeup);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_relax);

/**
 * pm_wakeup_timer_fn - Delayed finalization of a wakeup event.
 * @data: Address of the wakeup source object associated with the event source.
 *
 * Call wakeup_source_deactivate() for the wakeup source whose address is stored
 * in @data if it is currently active, its timer has not been canceled, and the
 * timer's expiration time is not in the future.
 */
static void pm_wakeup_timer_fn(unsigned long data)
{
	struct wakeup_source *ws = (struct wakeup_source *)data;
	unsigned long flags;

	spin_lock_irqsave(&ws->lock, flags);

	if (ws->active && ws->timer_expires
	    && time_after_eq(jiffies, ws->timer_expires)) {
		wakeup_source_deactivate(ws);
		ws->expire_count++;
	}

	spin_unlock_irqrestore(&ws->lock, flags);
}

/**
 * __pm_wakeup_event - Notify the PM core of a wakeup event.
 * @ws: Wakeup source object associated with the event source.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Notify the PM core of a wakeup event whose source is @ws that will take
 * approximately @msec milliseconds to be processed by the kernel.  If @ws is
 * not active, activate it.  If @msec is nonzero, set up the @ws' timer to
 * execute pm_wakeup_timer_fn() in the future.
 *
 * It is safe to call this function from interrupt context.
 */
void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
{
	unsigned long flags;
	unsigned long expires;

	if (!ws)
		return;

	spin_lock_irqsave(&ws->lock, flags);

	wakeup_source_report_event(ws);

	if (!msec) {
		wakeup_source_deactivate(ws);
		goto unlock;
	}

	expires = jiffies + msecs_to_jiffies(msec);
	if (!expires)
		expires = 1;

	if (!ws->timer_expires || time_after(expires, ws->timer_expires)) {
		mod_timer(&ws->timer, expires);
		ws->timer_expires = expires;
	}

 unlock:
	spin_unlock_irqrestore(&ws->lock, flags);
}
EXPORT_SYMBOL_GPL(__pm_wakeup_event);

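/*
 * Note (illustrative): repeated __pm_wakeup_event() calls for the same source
 * only ever push the deadline further out; a call with a shorter timeout than
 * one already pending leaves the existing timer alone.  For example, assuming
 * a hypothetical ws, calling
 *
 *	__pm_wakeup_event(ws, 500);
 *	__pm_wakeup_event(ws, 100);
 *
 * keeps the "no suspend" period at roughly 500 ms, because the second call's
 * earlier expiry does not rearm the timer.
 */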

/**
 * pm_wakeup_event - Notify the PM core of a wakeup event.
 * @dev: Device the wakeup event is related to.
 * @msec: Anticipated event processing time (in milliseconds).
 *
 * Call __pm_wakeup_event() for the @dev's wakeup source object.
 */
void pm_wakeup_event(struct device *dev, unsigned int msec)
{
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->power.lock, flags);
	__pm_wakeup_event(dev->power.wakeup, msec);
	spin_unlock_irqrestore(&dev->power.lock, flags);
}
EXPORT_SYMBOL_GPL(pm_wakeup_event);

void pm_print_active_wakeup_sources(void)
{
	struct wakeup_source *ws;
	int active = 0;
	struct wakeup_source *last_activity_ws = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
		if (ws->active) {
			pr_info("active wakeup source: %s\n", ws->name);
			active = 1;
		} else if (!active &&
			   (!last_activity_ws ||
			    ktime_to_ns(ws->last_time) >
			    ktime_to_ns(last_activity_ws->last_time))) {
			last_activity_ws = ws;
		}
	}

	if (!active && last_activity_ws)
		pr_info("last active wakeup source: %s\n",
			last_activity_ws->name);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);

/**
 * pm_wakeup_pending - Check if power transition in progress should be aborted.
 *
 * Compare the current number of registered wakeup events with its preserved
 * value from the past and return true if new wakeup events have been registered
 * since the old value was stored.  Also return true if the current number of
 * wakeup events being processed is different from zero.
 */
bool pm_wakeup_pending(void)
{
	unsigned long flags;
	bool ret = false;

	spin_lock_irqsave(&events_lock, flags);
	if (events_check_enabled) {
		unsigned int cnt, inpr;

		split_counters(&cnt, &inpr);
		ret = (cnt != saved_count || inpr > 0);
		events_check_enabled = !ret;
	}
	spin_unlock_irqrestore(&events_lock, flags);

	if (ret) {
		pr_info("PM: Wakeup pending, aborting suspend\n");
		pm_print_active_wakeup_sources();
	}

	return ret || pm_abort_suspend;
}

void pm_system_wakeup(void)
{
	pm_abort_suspend = true;
	freeze_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);

void pm_wakeup_clear(void)
{
	pm_abort_suspend = false;
	pm_wakeup_irq = 0;
}

void pm_system_irq_wakeup(unsigned int irq_number)
{
	if (pm_wakeup_irq == 0) {
		pm_wakeup_irq = irq_number;
		pm_system_wakeup();
	}
}

/**
 * pm_get_wakeup_count - Read the number of registered wakeup events.
 * @count: Address to store the value at.
 * @block: Whether or not to block.
 *
 * Store the number of registered wakeup events at the address in @count.  If
 * @block is set, block until the current number of wakeup events being
 * processed is zero.
 *
 * Return 'false' if the current number of wakeup events being processed is
 * nonzero.  Otherwise return 'true'.
 */
bool pm_get_wakeup_count(unsigned int *count, bool block)
{
	unsigned int cnt, inpr;

	if (block) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(&wakeup_count_wait_queue, &wait,
					TASK_INTERRUPTIBLE);
			split_counters(&cnt, &inpr);
			if (inpr == 0 || signal_pending(current))
				break;

			schedule();
		}
		finish_wait(&wakeup_count_wait_queue, &wait);
	}

	split_counters(&cnt, &inpr);
	*count = cnt;
	return !inpr;
}

/**
 * pm_save_wakeup_count - Save the current number of registered wakeup events.
 * @count: Value to compare with the current number of registered wakeup events.
 *
 * If @count is equal to the current number of registered wakeup events and the
 * current number of wakeup events being processed is zero, store @count as the
 * old number of registered wakeup events for pm_wakeup_pending(), enable
 * wakeup events detection and return 'true'.  Otherwise disable wakeup events
 * detection and return 'false'.
 */
bool pm_save_wakeup_count(unsigned int count)
{
	unsigned int cnt, inpr;
	unsigned long flags;

	events_check_enabled = false;
	spin_lock_irqsave(&events_lock, flags);
	split_counters(&cnt, &inpr);
	if (cnt == count && inpr == 0) {
		saved_count = count;
		events_check_enabled = true;
	}
	spin_unlock_irqrestore(&events_lock, flags);
	return events_check_enabled;
}
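
/*
 * Illustrative sketch of the user space side of this counter (these two
 * functions back the /sys/power/wakeup_count interface): a power manager
 * reads the count, writes the same value back, and only then initiates
 * suspend, so that any wakeup event registered in between makes the write
 * (or the subsequent suspend) fail instead of being lost.  Roughly:
 *
 *	char buf[32];
 *	int fd = open("/sys/power/wakeup_count", O_RDWR);
 *
 *	read(fd, buf, sizeof(buf));		(pm_get_wakeup_count())
 *	if (write(fd, buf, strlen(buf)) > 0)	(pm_save_wakeup_count())
 *		write_to("/sys/power/state", "mem");
 *
 * where write_to() is a hypothetical helper that opens the file and writes
 * the string.
 */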

#ifdef CONFIG_PM_AUTOSLEEP
/**
 * pm_wakep_autosleep_enabled - Modify autosleep_enabled for all wakeup sources.
 * @set: Whether to set or to clear the autosleep_enabled flags.
 */
void pm_wakep_autosleep_enabled(bool set)
{
	struct wakeup_source *ws;
	ktime_t now = ktime_get();

	rcu_read_lock();
	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
		spin_lock_irq(&ws->lock);
		if (ws->autosleep_enabled != set) {
			ws->autosleep_enabled = set;
			if (ws->active) {
				if (set)
					ws->start_prevent_time = now;
				else
					update_prevent_sleep_time(ws, now);
			}
		}
		spin_unlock_irq(&ws->lock);
	}
	rcu_read_unlock();
}
#endif /* CONFIG_PM_AUTOSLEEP */

static struct dentry *wakeup_sources_stats_dentry;

/**
 * print_wakeup_source_stats - Print wakeup source statistics information.
 * @m: seq_file to print the statistics into.
 * @ws: Wakeup source object to print the statistics for.
 */
static int print_wakeup_source_stats(struct seq_file *m,
				     struct wakeup_source *ws)
{
	unsigned long flags;
	ktime_t total_time;
	ktime_t max_time;
	unsigned long active_count;
	ktime_t active_time;
	ktime_t prevent_sleep_time;

	spin_lock_irqsave(&ws->lock, flags);

	total_time = ws->total_time;
	max_time = ws->max_time;
	prevent_sleep_time = ws->prevent_sleep_time;
	active_count = ws->active_count;
	if (ws->active) {
		ktime_t now = ktime_get();

		active_time = ktime_sub(now, ws->last_time);
		total_time = ktime_add(total_time, active_time);
		if (active_time.tv64 > max_time.tv64)
			max_time = active_time;

		if (ws->autosleep_enabled)
			prevent_sleep_time = ktime_add(prevent_sleep_time,
				ktime_sub(now, ws->start_prevent_time));
	} else {
		active_time = ktime_set(0, 0);
	}

	seq_printf(m, "%-12s\t%lu\t\t%lu\t\t%lu\t\t%lu\t\t%lld\t\t%lld\t\t%lld\t\t%lld\t\t%lld\n",
		   ws->name, active_count, ws->event_count,
		   ws->wakeup_count, ws->expire_count,
		   ktime_to_ms(active_time), ktime_to_ms(total_time),
		   ktime_to_ms(max_time), ktime_to_ms(ws->last_time),
		   ktime_to_ms(prevent_sleep_time));

	spin_unlock_irqrestore(&ws->lock, flags);

	return 0;
}

/**
 * wakeup_sources_stats_show - Print wakeup sources statistics information.
 * @m: seq_file to print the statistics into.
 */
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
	struct wakeup_source *ws;

	seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
		"expire_count\tactive_since\ttotal_time\tmax_time\t"
		"last_change\tprevent_suspend_time\n");

	rcu_read_lock();
	list_for_each_entry_rcu(ws, &wakeup_sources, entry)
		print_wakeup_source_stats(m, ws);
	rcu_read_unlock();

	print_wakeup_source_stats(m, &deleted_ws);

	return 0;
}

static int wakeup_sources_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, wakeup_sources_stats_show, NULL);
}

static const struct file_operations wakeup_sources_stats_fops = {
	.owner = THIS_MODULE,
	.open = wakeup_sources_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init wakeup_sources_debugfs_init(void)
{
	wakeup_sources_stats_dentry = debugfs_create_file("wakeup_sources",
			S_IRUGO, NULL, NULL, &wakeup_sources_stats_fops);
	return 0;
}

postcore_initcall(wakeup_sources_debugfs_init);