/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 0;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

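/* Suspend request callback. Signal that the request queue has been flushed
 * so that a pending sclp_freeze() can continue. */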
static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

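/* Parse the sclp_con_pages= kernel parameter. Values below
 * SCLP_CONSOLE_PAGES are ignored. */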
static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

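/* Parse the sclp_con_drop= kernel parameter. Any non-zero value enables
 * dropping of buffer pages on buffer full condition. */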
static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc && drop)
		sclp_console_drop = 1;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
	int cc = 4; /* Initialize for program check handling */

	asm volatile(
		"0:	.insn	rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
		"1:	ipm	%0\n"
		"	srl	%0,28\n"
		"2:\n"
		EX_TABLE(0b, 2b)
		EX_TABLE(1b, 2b)
		: "+&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	if (cc == 4)
		return -EINVAL;
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}


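/* Queue a read event data request at the head of the request queue unless
 * a read is already pending. Called while sclp_lock is locked. */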
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		   (time_before(req->queue_expires, expires_next)))
				expires_next = req->queue_expires;
	}
	return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(unsigned long data)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();
		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		if (!req->sccb)
			goto do_post;
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
do_post:
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

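/* Return non-zero if the request may be added to the queue in the current
 * driver state, zero otherwise. Called while sclp_lock is locked. */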
static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
				return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync &= 0xffff00a0;
	cr0_sync |= 0x00000200;
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock_fast() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

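/* Layout of a state change event buffer as delivered by the SCLP. */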
struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp_facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);
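
/*
 * Illustrative registration sketch (not part of the driver; the event type
 * mask constants are defined in sclp.h):
 *
 *	static void my_receiver_fn(struct evbuf_header *evbuf)
 *	{
 *		... handle the event buffer ...
 *	}
 *
 *	static struct sclp_register my_event = {
 *		.receive_mask	= <an EVTYP_*_MASK not claimed elsewhere>,
 *		.receiver_fn	= my_receiver_fn,
 *	};
 *
 *	rc = sclp_register(&my_event);
 *
 * sclp_register() returns -EBUSY if another listener has already claimed one
 * of the requested event masks.
 */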

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */

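/* Notify registered listeners of a power management event. Without rollback,
 * each listener is notified once and pm_event_posted is set. With rollback,
 * only listeners that have already been notified are called again (e.g. to
 * thaw after a failed freeze) and pm_event_posted is cleared. */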
static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
	struct sclp_register *reg;
	unsigned long flags;

	if (!rollback) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list)
			reg->pm_event_posted = 0;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list) {
			if (rollback && reg->pm_event_posted)
				goto found;
			if (!rollback && !reg->pm_event_posted)
				goto found;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
found:
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg->pm_event_fn)
			reg->pm_event_fn(reg, sclp_pm_event);
		reg->pm_event_posted = rollback ? 0 : 1;
	} while (1);
}

/*
 * Suspend/resume callbacks for platform device
 */

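/* Freeze callback: notify listeners, block new requests, flush the request
 * queue by means of sclp_suspend_req and deactivate the interface. On error,
 * the previous state is restored and listeners are thawed. */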
static int sclp_freeze(struct device *dev)
{
	unsigned long flags;
	int rc;

	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_suspended;
	spin_unlock_irqrestore(&sclp_lock, flags);

	/* Init suspend data */
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	sclp_suspend_req.status = SCLP_REQ_FILLED;
	init_completion(&sclp_request_queue_flushed);

	rc = sclp_add_request(&sclp_suspend_req);
	if (rc == 0)
		wait_for_completion(&sclp_request_queue_flushed);
	else if (rc != -ENODATA)
		goto fail_thaw;

	rc = sclp_deactivate();
	if (rc)
		goto fail_thaw;
	return 0;

fail_thaw:
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
	return rc;
}

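/* Common resume path: reactivate the interface, accept new requests again
 * and forward the given power management event to all listeners. */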
static int sclp_undo_suspend(enum sclp_pm_event event)
{
	unsigned long flags;
	int rc;

	rc = sclp_reactivate();
	if (rc)
		return rc;

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);

	sclp_pm_event(event, 0);
	return 0;
}

static int sclp_thaw(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
	.freeze		= sclp_freeze,
	.thaw		= sclp_thaw,
	.restore	= sclp_restore,
};

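/* Read-only driver attributes exposing the console page setting, the drop
 * flag and the number of dropped-buffer events. */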
static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL);

static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL);

static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};
static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name	= "sclp",
		.pm	= &sclp_pm_ops,
		.groups = sclp_drv_attr_groups,
	},
};

static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	init_timer(&sclp_queue_timer);
	sclp_queue_timer.function = sclp_req_queue_timeout;
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
			     unsigned long event, void *data)
{
	if (sclp_suspend_state == sclp_suspend_state_suspended)
		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
	return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
	.notifier_call = sclp_panic_notify,
	.priority = SCLP_PANIC_PRIO,
};

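/* Early initialization: register the platform driver and device, hook the
 * panic notifier and initialize the SCLP core. */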
static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = PTR_ERR_OR_ZERO(sclp_pdev);
	if (rc)
		goto fail_platform_driver_unregister;

	rc = atomic_notifier_chain_register(&panic_notifier_list,
					    &sclp_on_panic_nb);
	if (rc)
		goto fail_platform_device_unregister;

	return sclp_init();

fail_platform_device_unregister:
	platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&sclp_pdrv);
	return rc;
}

arch_initcall(sclp_initcall);