/*
 *  bus driver for ccw devices
 *
 *    Copyright IBM Corp. 2002, 2008
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
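/* Delays between recovery attempts, in seconds; they are converted to
 * jiffies (multiplied by HZ) when the recovery timer is armed below. */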
static const unsigned long recovery_delay[] = { 3, 30, 300 };

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel subsystem per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match (struct device * dev, struct device_driver * drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
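/* For example, a (hypothetical) device with control unit type/model
 * 0x3990/0xe9 and device type/model 0x3390/0x0a yields the alias
 * "ccw:t3990mE9dt3390dm0A". */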
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len >= size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}

/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}
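
/*
 * With the hypothetical ids from the snprint_alias() example above, the
 * resulting environment would be CU_TYPE=3990, CU_MODEL=E9, DEV_TYPE=3390,
 * DEV_MODEL=0A and MODALIAS=ccw:t3990mE9dt3390dm0A.
 */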

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(unsigned long data);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, io_subchannel_ids);

static int io_subchannel_prepare(struct subchannel *sch)
{
	struct ccw_device *cdev;
	/*
	 * Don't allow suspend while a ccw device registration
	 * is still outstanding.
	 */
	cdev = sch_get_cdev(sch);
	if (cdev && !device_is_registered(&cdev->dev))
		return -EAGAIN;
	return 0;
}

static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	setup_timer(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}


/************************ device handling **************************/

/*
 * A ccw_device has some interfaces in sysfs in addition to the
 * standard ones.
 * The following entries are designed to export the information which
 * resided in 2.4 in /proc/subchannels. Subchannel and device number
 * are obvious, so they don't have an entry :)
 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
 */
static ssize_t
chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int chp;
	int mask;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf (buf+ret, "\n");
	return min((ssize_t)PAGE_SIZE, ret);
}

static ssize_t
pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf (buf, "%02x %02x %02x\n",
			pmcw->pim, pmcw->pam, pmcw->pom);
}

static ssize_t
devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
				id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warning("%s: The device entered boxed state while "
			   "being set offline\n", dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warning("%s: The device stopped operating while "
			   "being set offline\n", dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warning("%s: Setting the device online failed "
				   "because it is boxed\n",
				   dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warning("%s: Setting the device online failed "
				   "because it is not operational\n",
				   dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}

static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing. */
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
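	/* Accepted input: the string "force", or an online value in hex. */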
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}

static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warning("Logging for subchannel 0.%x.%04x failed with "
			   "errno=%d\n",
			   sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}
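
/*
 * Triggered by writing any value to the subchannel's "logging" sysfs
 * attribute, e.g. (hypothetical subchannel bus id):
 *	echo 1 > /sys/bus/css/devices/0.0.0015/logging
 */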

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

static int ccw_device_add(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;

	dev->bus = &ccw_bus_type;
	return device_add(dev);
}

static int match_dev_id(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found, its reference count is increased and the device
 *  is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
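
/*
 * Usage sketch (hypothetical device number); the caller owns the returned
 * reference and must drop it with put_device() when done:
 *
 *	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x1234 };
 *	struct ccw_device *cdev = get_ccwdev_by_dev_id(&dev_id);
 *
 *	if (cdev) {
 *		// ... use cdev ...
 *		put_device(&cdev->dev);
 *	}
 */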

static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (cdev) {
		cdev->private = kzalloc(sizeof(struct ccw_device_private),
					GFP_KERNEL | GFP_DMA);
		if (cdev->private)
			return cdev;
	}
	kfree(cdev);
	return ERR_PTR(-ENOMEM);
}

static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	struct ccw_device_private *priv = cdev->private;
	int ret;

	priv->cdev = cdev;
	priv->int_class = IRQIO_CIO;
	priv->state = DEV_STATE_NOT_OPER;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;
	priv->schid = sch->schid;

	INIT_WORK(&priv->todo_work, ccw_device_todo);
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	init_timer(&priv->timer);

	atomic_set(&priv->onoff, 0);
	cdev->ccwlock = sch->lock;
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		goto out_put;
	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	priv->flags.initialized = 1;
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	return 0;

out_put:
	/* Release reference from device_initialize(). */
	put_device(&cdev->dev);
	return ret;
}

static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}

/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	dev_set_uevent_suppress(&sch->dev, 0);
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	/* make it known to the system */
	ret = ccw_device_add(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}

static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->private->schid = sch->schid;
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}

/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		cdev = sch_get_cdev(sch);
		rc = ccw_device_add(cdev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}

static int
io_subchannel_remove (struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;
	io_subchannel_quiesce(sch);
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irq(cdev->ccwlock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irq(cdev->ccwlock);
	ccw_device_unregister(cdev);
out_free:
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}

static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(4, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(unsigned long data)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

static void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(4, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}

static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted() - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}

void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}

enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
};
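
/*
 * The IO_SCH_ORPH_* actions move an existing ccw device to the orphanage,
 * the *_ATTACH actions attach a matching ccw device to the subchannel
 * afterwards, IO_SCH_UNREG* unregister, IO_SCH_DISC marks the device
 * disconnected, IO_SCH_REPROBE restarts device recognition and
 * IO_SCH_VERIFY triggers path verification.
 */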

static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online && !cdev->private->flags.resuming)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}

/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(sch->lock, flags);
		if (cdev->private->flags.resuming) {
			/* Device will be handled later. */
			rc = 0;
			goto out_unlock;
		}
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		if (!cdev || !cdev->private->flags.resuming)
			css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}

static void ccw_device_set_int_class(struct ccw_device *cdev)
{
	struct ccw_driver *cdrv = cdev->drv;

	/* Note: we interpret class 0 in this context as an uninitialized
	 * field since it translates to a non-I/O interrupt class. */
	if (cdrv->int_class != 0)
		cdev->private->int_class = cdrv->int_class;
	else
		cdev->private->int_class = IRQIO_CIO;
}

#ifdef CONFIG_CCW_CONSOLE
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}

struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv) {
		put_device(&sch->dev);
		return ERR_PTR(-ENOMEM);
	}
	set_io_private(sch, io_priv);
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	cdev->drv = drv;
	ccw_device_set_int_class(cdev);
	return cdev;
}

void __init ccw_device_destroy_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct io_subchannel_private *io_priv = to_io_private(sch);

	set_io_private(sch, NULL);
	put_device(&sch->dev);
	put_device(&cdev->dev);
	kfree(io_priv);
}

/**
 * ccw_device_wait_idle() - busy wait for device to become idle
 * @cdev: ccw device
 *
 * Poll until activity control is zero, that is, no function or data
 * transfer is pending/active.
 * Called with device lock being held.
 */
void ccw_device_wait_idle(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	while (1) {
		cio_tsch(sch);
		if (sch->schib.scsw.cmd.actl == 0)
			break;
		udelay_simple(100);
	}
}

static int ccw_device_pm_restore(struct device *dev);

int ccw_device_force_console(struct ccw_device *cdev)
{
	return ccw_device_pm_restore(&cdev->dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif

/*
 * get ccw_device matching the busid, but only if owned by cdrv
 */
static int
__ccwdev_check_busid(struct device *dev, void *id)
{
	char *bus_id;

	bus_id = id;

	return (strcmp(bus_id, dev_name(dev)) == 0);
}


/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a match is found, the reference count of the found device is increased
 *  and the device is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;

	dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id,
				 __ccwdev_check_busid);

	return dev ? to_ccwdev(dev) : NULL;
}

/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
static int
ccw_device_probe (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	ccw_device_set_int_class(cdev);
	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
	if (ret) {
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	return 0;
}

static int ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	spin_unlock_irq(cdev->ccwlock);
	return 0;
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	disable_cmf(cdev);
}

static int ccw_device_pm_prepare(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (work_pending(&cdev->private->todo_work))
		return -EAGAIN;
	/* Fail while device is being set online/offline. */
	if (atomic_read(&cdev->private->onoff))
		return -EAGAIN;

	if (cdev->online && cdev->drv && cdev->drv->prepare)
		return cdev->drv->prepare(cdev);

	return 0;
}

static void ccw_device_pm_complete(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (cdev->online && cdev->drv && cdev->drv->complete)
		cdev->drv->complete(cdev);
}

static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in transitional state. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Don't let the channel subsystem write measurement data
		 * to memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}

static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}

static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		goto out_unlock;
	}
	/*
	 * While we were sleeping, devices may have gone or become
	 * available again. Kick re-detection.
	 */
	cdev->private->flags.resuming = 1;
	cdev->private->path_new_mask = LPM_ANYPATH;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);
	spin_unlock_irq(sch->lock);
	css_wait_for_slow_path();

	/* cdev may have been moved to a different subchannel. */
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;

	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);
	spin_lock_irq(sch->lock);

out_unlock:
	cdev->private->flags.resuming = 0;
	spin_unlock_irq(sch->lock);
}

static int resume_handle_boxed(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_BOXED;
	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int resume_handle_disc(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int ret = 0;

	__ccw_device_pm_restore(cdev);
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid))
		goto out_restore;

	/* check recognition results */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
	case DEV_STATE_ONLINE:
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	default:
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	/* check if the device type has changed */
	if (!ccw_device_test_sense_data(cdev)) {
		ccw_device_update_sense_data(cdev);
		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online)
		goto out_unlock;

	if (ccw_device_online(cdev)) {
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	spin_lock_irq(sch->lock);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		ret = -ENODEV;
		goto out_unlock;
	}

	/* reenable cmf, if needed */
	if (cdev->private->cmb) {
		spin_unlock_irq(sch->lock);
		ret = ccw_set_cmf(cdev, 1);
		spin_lock_irq(sch->lock);
		if (ret) {
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	spin_unlock_irq(sch->lock);
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
	return ret;

out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}

static const struct dev_pm_ops ccw_pm_ops = {
	.prepare = ccw_device_pm_prepare,
	.complete = ccw_device_pm_complete,
	.freeze = ccw_device_pm_freeze,
	.thaw = ccw_device_pm_thaw,
	.restore = ccw_device_pm_restore,
};

static struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
	.pm = &ccw_pm_ops,
};

/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;

	return driver_register(drv);
}
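
/*
 * Minimal registration sketch for a hypothetical driver; the ids, names
 * and callbacks are illustrative only:
 *
 *	static struct ccw_device_id foo_ids[] = {
 *		{ CCW_DEVICE(0x3990, 0xe9) },
 *		{ },	// end of list
 *	};
 *
 *	static struct ccw_driver foo_driver = {
 *		.driver = {
 *			.name	= "foo",
 *			.owner	= THIS_MODULE,
 *		},
 *		.ids		= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *		.set_online	= foo_set_online,
 *		.set_offline	= foo_set_offline,
 *	};
 *
 *	ret = ccw_driver_register(&foo_driver);
 */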

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}

static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		/* fall-through */
	case CDEV_TODO_UNREG:
		if (sch_is_pseudo_sch(sch))
			ccw_device_unregister(cdev);
		else
			ccw_device_call_sch_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}

/**
 * ccw_device_sched_todo() - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
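	/* enum cdev_todo values are ordered by ascending priority. */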
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}

/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the channel
 * subsystem.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);