/*
 *	scsi_pm.c	Copyright (C) 2010 Alan Stern
 *
 *	SCSI dynamic Power Management
 *		Initial version: Alan Stern <stern@rowland.harvard.edu>
 */

#include <linux/pm_runtime.h>
#include <linux/export.h>
#include <linux/async.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"

#ifdef CONFIG_PM_SLEEP

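/*
 * The do_scsi_*() helpers below dispatch a system-sleep transition to the
 * corresponding dev_pm_ops callback of the attached upper-level driver.
 * A missing driver or callback is treated as success.
 */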
static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->suspend ? pm->suspend(dev) : 0;
}

static int do_scsi_freeze(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->freeze ? pm->freeze(dev) : 0;
}

static int do_scsi_poweroff(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->poweroff ? pm->poweroff(dev) : 0;
}

static int do_scsi_resume(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->resume ? pm->resume(dev) : 0;
}

static int do_scsi_thaw(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->thaw ? pm->thaw(dev) : 0;
}

static int do_scsi_restore(struct device *dev, const struct dev_pm_ops *pm)
{
	return pm && pm->restore ? pm->restore(dev) : 0;
}

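/*
 * Quiesce the device, then run the driver's suspend-side callback.  If the
 * callback fails, undo the quiesce so the device is left usable.
 */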
static int scsi_dev_type_suspend(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int err;

	/* flush pending in-flight resume operations, suspend is synchronous */
	async_synchronize_full_domain(&scsi_sd_pm_domain);

	err = scsi_device_quiesce(to_scsi_device(dev));
	if (err == 0) {
		err = cb(dev, pm);
		if (err)
			scsi_device_resume(to_scsi_device(dev));
	}
	dev_dbg(dev, "scsi suspend: %d\n", err);
	return err;
}

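/*
 * Run the driver's resume-side callback and unquiesce the device.  On
 * success, force the runtime-PM status back to "active" so that it matches
 * the hardware state after the system transition.
 */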
static int scsi_dev_type_resume(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int err = 0;

	err = cb(dev, pm);
	scsi_device_resume(to_scsi_device(dev));
	dev_dbg(dev, "scsi resume: %d\n", err);

	if (err == 0) {
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}

	return err;
}

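/*
 * System-sleep entry point shared by suspend, freeze, and poweroff.  Only
 * sdev devices are handled here; other devices on the SCSI bus simply
 * report success.
 */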
static int scsi_bus_suspend_common(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	int err = 0;

	if (scsi_is_sdev_device(dev)) {
		/*
		 * All the high-level SCSI drivers that implement runtime
		 * PM treat runtime suspend, system suspend, and system
		 * hibernate nearly identically. In all cases the requirements
		 * for runtime suspension are stricter.
		 */
		if (pm_runtime_suspended(dev))
			return 0;

		err = scsi_dev_type_suspend(dev, cb);
	}

	return err;
}

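/*
 * Thin wrappers so that resume, thaw, and restore can be scheduled in the
 * scsi_sd_pm_domain async domain instead of blocking the PM core.
 */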
static void async_sdev_resume(void *dev, async_cookie_t cookie)
{
	scsi_dev_type_resume(dev, do_scsi_resume);
}

static void async_sdev_thaw(void *dev, async_cookie_t cookie)
{
	scsi_dev_type_resume(dev, do_scsi_thaw);
}

static void async_sdev_restore(void *dev, async_cookie_t cookie)
{
	scsi_dev_type_resume(dev, do_scsi_restore);
}

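/*
 * Resume-side counterpart of scsi_bus_suspend_common().  sdev devices are
 * resumed asynchronously; for everything else only the runtime-PM status
 * is brought back in sync with the (now active) hardware state.
 */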
static int scsi_bus_resume_common(struct device *dev,
		int (*cb)(struct device *, const struct dev_pm_ops *))
{
	async_func_t fn;

	if (!scsi_is_sdev_device(dev))
		fn = NULL;
	else if (cb == do_scsi_resume)
		fn = async_sdev_resume;
	else if (cb == do_scsi_thaw)
		fn = async_sdev_thaw;
	else if (cb == do_scsi_restore)
		fn = async_sdev_restore;
	else
		fn = NULL;

	if (fn) {
		async_schedule_domain(fn, dev, &scsi_sd_pm_domain);

		/*
		 * If a user has disabled async probing, a likely reason is a
		 * storage enclosure that does not inject staggered spin-ups.
		 * For safety, make resume synchronous as well in that case.
		 */
		if (strncmp(scsi_scan_type, "async", 5) != 0)
			async_synchronize_full_domain(&scsi_sd_pm_domain);
	} else {
		pm_runtime_disable(dev);
		pm_runtime_set_active(dev);
		pm_runtime_enable(dev);
	}
	return 0;
}

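/*
 * Before the system-wide transition starts, wait for any asynchronous
 * probing or scanning that may still be touching the device to finish.
 */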
static int scsi_bus_prepare(struct device *dev)
{
	if (scsi_is_sdev_device(dev)) {
		/* sd probing uses async_schedule.  Wait until it finishes. */
		async_synchronize_full_domain(&scsi_sd_probe_domain);

	} else if (scsi_is_host_device(dev)) {
		/* Wait until async scanning is finished */
		scsi_complete_async_scans();
	}
	return 0;
}

static int scsi_bus_suspend(struct device *dev)
{
	return scsi_bus_suspend_common(dev, do_scsi_suspend);
}

static int scsi_bus_resume(struct device *dev)
{
	return scsi_bus_resume_common(dev, do_scsi_resume);
}

static int scsi_bus_freeze(struct device *dev)
{
	return scsi_bus_suspend_common(dev, do_scsi_freeze);
}

static int scsi_bus_thaw(struct device *dev)
{
	return scsi_bus_resume_common(dev, do_scsi_thaw);
}

static int scsi_bus_poweroff(struct device *dev)
{
	return scsi_bus_suspend_common(dev, do_scsi_poweroff);
}

static int scsi_bus_restore(struct device *dev)
{
	return scsi_bus_resume_common(dev, do_scsi_restore);
}

#else /* CONFIG_PM_SLEEP */

#define scsi_bus_prepare		NULL
#define scsi_bus_suspend		NULL
#define scsi_bus_resume			NULL
#define scsi_bus_freeze			NULL
#define scsi_bus_thaw			NULL
#define scsi_bus_poweroff		NULL
#define scsi_bus_restore		NULL

#endif /* CONFIG_PM_SLEEP */

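/*
 * Runtime PM for an sdev brackets the driver callback with the block
 * layer's runtime-PM hooks, so the request queue is only marked suspended
 * when the driver callback actually succeeded.
 */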
static int sdev_runtime_suspend(struct device *dev)
{
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	struct scsi_device *sdev = to_scsi_device(dev);
	int err = 0;

	if (pm && pm->runtime_suspend) {
		err = blk_pre_runtime_suspend(sdev->request_queue);
		if (err)
			return err;
		err = pm->runtime_suspend(dev);
		blk_post_runtime_suspend(sdev->request_queue, err);
	}
	return err;
}

static int scsi_runtime_suspend(struct device *dev)
{
	int err = 0;

	dev_dbg(dev, "scsi_runtime_suspend\n");
	if (scsi_is_sdev_device(dev))
		err = sdev_runtime_suspend(dev);

	/* Insert hooks here for targets, hosts, and transport classes */

	return err;
}

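/*
 * Mirror image of sdev_runtime_suspend(): tell the block layer a resume is
 * starting, run the driver callback, then report the outcome so the queue
 * can resume dispatching requests.
 */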
static int sdev_runtime_resume(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int err = 0;

	if (pm && pm->runtime_resume) {
		blk_pre_runtime_resume(sdev->request_queue);
		err = pm->runtime_resume(dev);
		blk_post_runtime_resume(sdev->request_queue, err);
	}
	return err;
}

static int scsi_runtime_resume(struct device *dev)
{
	int err = 0;

	dev_dbg(dev, "scsi_runtime_resume\n");
	if (scsi_is_sdev_device(dev))
		err = sdev_runtime_resume(dev);

	/* Insert hooks here for targets, hosts, and transport classes */

	return err;
}

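/*
 * For sdev devices, schedule a delayed autosuspend instead of letting the
 * PM core suspend the device right away; the -EBUSY return tells the core
 * not to go ahead with an immediate suspend.
 */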
static int scsi_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "scsi_runtime_idle\n");

	/* Insert hooks here for targets, hosts, and transport classes */

	if (scsi_is_sdev_device(dev)) {
		pm_runtime_mark_last_busy(dev);
		pm_runtime_autosuspend(dev);
		return -EBUSY;
	}

	return 0;
}

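/**
 * scsi_autopm_get_device - increment the usage count of and resume an sdev
 * @sdev: SCSI device to resume
 *
 * A failure other than -EACCES drops the usage count again and is reported
 * to the caller; -EACCES means runtime PM is disabled for the device, so it
 * is assumed to be at full power already and the call is treated as success.
 *
 * A typical caller pairs this with scsi_autopm_put_device(), roughly as in
 * the sketch below (illustrative only, error handling elided):
 *
 *	if (scsi_autopm_get_device(sdev))
 *		return -ENODEV;
 *	... issue commands to sdev ...
 *	scsi_autopm_put_device(sdev);
 */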
int scsi_autopm_get_device(struct scsi_device *sdev)
{
	int	err;

	err = pm_runtime_get_sync(&sdev->sdev_gendev);
	if (err < 0 && err != -EACCES)
		pm_runtime_put_sync(&sdev->sdev_gendev);
	else
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(scsi_autopm_get_device);

void scsi_autopm_put_device(struct scsi_device *sdev)
{
	pm_runtime_put_sync(&sdev->sdev_gendev);
}
EXPORT_SYMBOL_GPL(scsi_autopm_put_device);

void scsi_autopm_get_target(struct scsi_target *starget)
{
	pm_runtime_get_sync(&starget->dev);
}

void scsi_autopm_put_target(struct scsi_target *starget)
{
	pm_runtime_put_sync(&starget->dev);
}

int scsi_autopm_get_host(struct Scsi_Host *shost)
{
	int	err;

	err = pm_runtime_get_sync(&shost->shost_gendev);
	if (err < 0 && err != -EACCES)
		pm_runtime_put_sync(&shost->shost_gendev);
	else
		err = 0;
	return err;
}

void scsi_autopm_put_host(struct Scsi_Host *shost)
{
	pm_runtime_put_sync(&shost->shost_gendev);
}

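/*
 * These callbacks are installed as the PM operations of the SCSI bus type
 * (scsi_bus_type in scsi_sysfs.c), so they cover every device registered
 * on the SCSI bus.
 */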
const struct dev_pm_ops scsi_bus_pm_ops = {
	.prepare =		scsi_bus_prepare,
	.suspend =		scsi_bus_suspend,
	.resume =		scsi_bus_resume,
	.freeze =		scsi_bus_freeze,
	.thaw =			scsi_bus_thaw,
	.poweroff =		scsi_bus_poweroff,
	.restore =		scsi_bus_restore,
	.runtime_suspend =	scsi_runtime_suspend,
	.runtime_resume =	scsi_runtime_resume,
	.runtime_idle =		scsi_runtime_idle,
};