1/*
2 * IUCV network driver
3 *
4 * Copyright IBM Corp. 2001, 2009
5 *
6 * Author(s):
7 *	Original netiucv driver:
8 *		Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
9 *	Sysfs integration and all bugs therein:
10 *		Cornelia Huck (cornelia.huck@de.ibm.com)
11 *	PM functions:
12 *		Ursula Braun (ursula.braun@de.ibm.com)
13 *
14 * Documentation used:
15 *  the source of the original IUCV driver by:
16 *    Stefan Hegewald <hegewald@de.ibm.com>
17 *    Hartmut Penner <hpenner@de.ibm.com>
18 *    Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
19 *    Martin Schwidefsky (schwidefsky@de.ibm.com)
20 *    Alan Altmark (Alan_Altmark@us.ibm.com)  Sept. 2000
21 *
22 * This program is free software; you can redistribute it and/or modify
23 * it under the terms of the GNU General Public License as published by
24 * the Free Software Foundation; either version 2, or (at your option)
25 * any later version.
26 *
27 * This program is distributed in the hope that it will be useful,
28 * but WITHOUT ANY WARRANTY; without even the implied warranty of
29 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
30 * GNU General Public License for more details.
31 *
32 * You should have received a copy of the GNU General Public License
33 * along with this program; if not, write to the Free Software
34 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
35 *
36 */
37
38#define KMSG_COMPONENT "netiucv"
39#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
40
41#undef DEBUG
42
43#include <linux/module.h>
44#include <linux/init.h>
45#include <linux/kernel.h>
46#include <linux/slab.h>
47#include <linux/errno.h>
48#include <linux/types.h>
49#include <linux/interrupt.h>
50#include <linux/timer.h>
51#include <linux/bitops.h>
52
53#include <linux/signal.h>
54#include <linux/string.h>
55#include <linux/device.h>
56
57#include <linux/ip.h>
58#include <linux/if_arp.h>
59#include <linux/tcp.h>
60#include <linux/skbuff.h>
61#include <linux/ctype.h>
62#include <net/dst.h>
63
64#include <asm/io.h>
65#include <asm/uaccess.h>
66#include <asm/ebcdic.h>
67
68#include <net/iucv/iucv.h>
69#include "fsm.h"
70
71MODULE_AUTHOR
72    ("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
73MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
74
75/**
76 * Debug Facility stuff
77 */
78#define IUCV_DBF_SETUP_NAME "iucv_setup"
79#define IUCV_DBF_SETUP_LEN 64
80#define IUCV_DBF_SETUP_PAGES 2
81#define IUCV_DBF_SETUP_NR_AREAS 1
82#define IUCV_DBF_SETUP_LEVEL 3
83
84#define IUCV_DBF_DATA_NAME "iucv_data"
85#define IUCV_DBF_DATA_LEN 128
86#define IUCV_DBF_DATA_PAGES 2
87#define IUCV_DBF_DATA_NR_AREAS 1
88#define IUCV_DBF_DATA_LEVEL 2
89
90#define IUCV_DBF_TRACE_NAME "iucv_trace"
91#define IUCV_DBF_TRACE_LEN 16
92#define IUCV_DBF_TRACE_PAGES 4
93#define IUCV_DBF_TRACE_NR_AREAS 1
94#define IUCV_DBF_TRACE_LEVEL 3
95
/*
 * IUCV_DBF_TEXT - log a fixed string into one of the s390 debug areas.
 * @name:  debug area suffix (setup, data or trace)
 * @level: debug level of this event
 * @text:  string to record
 */
#define IUCV_DBF_TEXT(name,level,text) \
	do { \
		debug_text_event(iucv_dbf_##name,level,text); \
	} while (0)

/*
 * IUCV_DBF_HEX - log a hex dump of @len bytes starting at @addr
 * into the debug area selected by @name.
 */
#define IUCV_DBF_HEX(name,level,addr,len) \
	do { \
		debug_event(iucv_dbf_##name,level,(void*)(addr),len); \
	} while (0)
105
106DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
107
/*
 * IUCV_DBF_TEXT_ - log a printf-style formatted message.
 *
 * The message is formatted into the 256-byte per-CPU scratch buffer
 * iucv_dbf_txt_buf first, because debug_text_event() expects a
 * ready-made string.  Use snprintf bounded to the buffer size: the
 * previous sprintf could silently overrun the per-CPU buffer if a
 * caller ever passed an oversized format/arguments combination.
 */
#define IUCV_DBF_TEXT_(name, level, text...) \
	do { \
		if (debug_level_enabled(iucv_dbf_##name, level)) { \
			char* __buf = get_cpu_var(iucv_dbf_txt_buf); \
			snprintf(__buf, 256, text); \
			debug_text_event(iucv_dbf_##name, level, __buf); \
			put_cpu_var(iucv_dbf_txt_buf); \
		} \
	} while (0)
117
/*
 * IUCV_DBF_SPRINTF - log a printf-style message to the trace area.
 * @name is accepted for symmetry with the other IUCV_DBF macros but
 * all output goes to iucv_dbf_trace.
 *
 * Fix: the macro previously invoked debug_sprintf_event() twice with
 * the same arguments, recording every message twice (and evaluating
 * the arguments twice).  One call suffices; the ##text form also
 * handles an empty argument list.
 */
#define IUCV_DBF_SPRINTF(name,level,text...) \
	do { \
		debug_sprintf_event(iucv_dbf_trace, level, ##text ); \
	} while (0)
123
124/**
125 * some more debug stuff
126 */
127#define PRINTK_HEADER " iucv: "       /* for debugging */
128
129/* dummy device to make sure netiucv_pm functions are called */
130static struct device *netiucv_dev;
131
132static int netiucv_pm_prepare(struct device *);
133static void netiucv_pm_complete(struct device *);
134static int netiucv_pm_freeze(struct device *);
135static int netiucv_pm_restore_thaw(struct device *);
136
/* Power management callbacks; thaw and restore share one handler. */
static const struct dev_pm_ops netiucv_pm_ops = {
	.prepare = netiucv_pm_prepare,
	.complete = netiucv_pm_complete,
	.freeze = netiucv_pm_freeze,
	.thaw = netiucv_pm_restore_thaw,
	.restore = netiucv_pm_restore_thaw,
};
144
/* Driver registered on the IUCV bus; carries the PM ops above. */
static struct device_driver netiucv_driver = {
	.owner = THIS_MODULE,
	.name = "netiucv",
	.bus  = &iucv_bus,
	.pm = &netiucv_pm_ops,
};
151
152static int netiucv_callback_connreq(struct iucv_path *,
153				    u8 ipvmid[8], u8 ipuser[16]);
154static void netiucv_callback_connack(struct iucv_path *, u8 ipuser[16]);
155static void netiucv_callback_connrej(struct iucv_path *, u8 ipuser[16]);
156static void netiucv_callback_connsusp(struct iucv_path *, u8 ipuser[16]);
157static void netiucv_callback_connres(struct iucv_path *, u8 ipuser[16]);
158static void netiucv_callback_rx(struct iucv_path *, struct iucv_message *);
159static void netiucv_callback_txdone(struct iucv_path *, struct iucv_message *);
160
/*
 * Dispatch table hooking the low-level IUCV callbacks into the
 * wrapper functions, which translate them into FSM events.
 */
static struct iucv_handler netiucv_handler = {
	.path_pending	  = netiucv_callback_connreq,
	.path_complete	  = netiucv_callback_connack,
	.path_severed	  = netiucv_callback_connrej,
	.path_quiesced	  = netiucv_callback_connsusp,
	.path_resumed	  = netiucv_callback_connres,
	.message_pending  = netiucv_callback_rx,
	.message_complete = netiucv_callback_txdone
};
170
171/**
172 * Per connection profiling data
173 */
struct connection_profile {
	unsigned long maxmulti;		/* largest collect_len ever bundled */
	unsigned long maxcqueue;	/* max skbs merged into one send */
	unsigned long doios_single;	/* number of single-skb sends */
	unsigned long doios_multi;	/* number of multi-skb sends */
	unsigned long txlen;		/* total bytes handed to IUCV */
	unsigned long tx_time;		/* accumulated tx time (jiffies?) - TODO confirm */
	unsigned long send_stamp;	/* jiffies when last send was issued */
	unsigned long tx_pending;	/* sends awaiting TXDONE confirmation */
	unsigned long tx_max_pending;	/* peak of tx_pending */
};
185
186/**
187 * Representation of one iucv connection
188 */
struct iucv_connection {
	struct list_head	  list;		/* entry in iucv_connection_list */
	struct iucv_path	  *path;	/* low-level IUCV path, NULL when down */
	struct sk_buff            *rx_buff;	/* receive assembly buffer */
	struct sk_buff            *tx_buff;	/* transmit assembly buffer */
	struct sk_buff_head       collect_queue; /* skbs waiting to be bundled */
	struct sk_buff_head	  commit_queue;	/* skbs sent, awaiting completion */
	spinlock_t                collect_lock;	/* guards collect_queue/collect_len */
	int                       collect_len;	/* bytes queued on collect_queue */
	int                       max_buffsize;	/* largest acceptable IUCV message */
	fsm_timer                 timer;	/* connection setup timeout */
	fsm_instance              *fsm;		/* connection statemachine */
	struct net_device         *netdev;	/* owning network device */
	struct connection_profile prof;		/* transmit statistics */
	char                      userid[9];	/* peer VM userid, blank padded */
	char			  userdata[17];	/* IUCV user data; compared against
						 * iucvMagic_ebcdic, see
						 * netiucv_printuser() */
};
206
207/**
208 * Linked list of all connection structs.
209 */
210static LIST_HEAD(iucv_connection_list);
211static DEFINE_RWLOCK(iucv_connection_rwlock);
212
213/**
214 * Representation of event-data for the
215 * connection state machine.
216 */
struct iucv_event {
	struct iucv_connection *conn;	/* connection the event belongs to */
	void                   *data;	/* event payload (iucv_message/path) */
};
221
222/**
223 * Private part of the network device structure
224 */
struct netiucv_priv {
	struct net_device_stats stats;	/* interface statistics */
	unsigned long           tbusy;	/* bit 0: transmitter busy flag */
	fsm_instance            *fsm;	/* interface statemachine */
        struct iucv_connection  *conn;	/* the underlying IUCV connection */
	struct device           *dev;	/* sysfs device */
	int			 pm_state; /* state saved across suspend/resume */
};
233
234/**
235 * Link level header for a packet.
236 */
struct ll_header {
	u16 next;	/* offset of the next header in the buffer; 0 = end */
};
240
241#define NETIUCV_HDRLEN		 (sizeof(struct ll_header))
242#define NETIUCV_BUFSIZE_MAX	 65537
243#define NETIUCV_BUFSIZE_DEFAULT  NETIUCV_BUFSIZE_MAX
244#define NETIUCV_MTU_MAX          (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
245#define NETIUCV_MTU_DEFAULT      9216
246#define NETIUCV_QUEUELEN_DEFAULT 50
247#define NETIUCV_TIMEOUT_5SEC     5000
248
249/**
250 * Compatibility macros for busy handling
251 * of network devices.
252 */
253static inline void netiucv_clear_busy(struct net_device *dev)
254{
255	struct netiucv_priv *priv = netdev_priv(dev);
256	clear_bit(0, &priv->tbusy);
257	netif_wake_queue(dev);
258}
259
260static inline int netiucv_test_and_set_busy(struct net_device *dev)
261{
262	struct netiucv_priv *priv = netdev_priv(dev);
263	netif_stop_queue(dev);
264	return test_and_set_bit(0, &priv->tbusy);
265}
266
/*
 * Default IUCV "user data" marker ("0       0       ") in ASCII and
 * EBCDIC.  A connection whose userdata matches iucvMagic_ebcdic is
 * apparently using the default - see netiucv_printuser().
 */
static u8 iucvMagic_ascii[16] = {
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
	0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
};

static u8 iucvMagic_ebcdic[16] = {
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
	0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
276
277/**
278 * Convert an iucv userId to its printable
279 * form (strip whitespace at end).
280 *
281 * @param An iucv userId
282 *
283 * @returns The printable string (static data!!)
284 */
/*
 * Convert an IUCV userId to its printable form: copy up to @len bytes
 * and truncate at the first whitespace character.
 *
 * @name: the (possibly blank-padded) id to convert
 * @len:  number of bytes to copy; clamped to 16 to protect tmp[]
 *
 * Returns a pointer to a static buffer (not reentrant; the result is
 * overwritten by the next call).
 */
static char *netiucv_printname(char *name, int len)
{
	static char tmp[17];
	char *p = tmp;

	if (len > 16)	/* defend the static buffer against bad callers */
		len = 16;
	memcpy(tmp, name, len);
	tmp[len] = '\0';
	/* Cast to unsigned char: passing a negative plain char to
	 * isspace() is undefined behaviour (CERT STR37-C). */
	while (*p && ((p - tmp) < len) && (!isspace((unsigned char)*p)))
		p++;
	*p = '\0';
	return tmp;
}
296
297static char *netiucv_printuser(struct iucv_connection *conn)
298{
299	static char tmp_uid[9];
300	static char tmp_udat[17];
301	static char buf[100];
302
303	if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
304		tmp_uid[8] = '\0';
305		tmp_udat[16] = '\0';
306		memcpy(tmp_uid, conn->userid, 8);
307		memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
308		memcpy(tmp_udat, conn->userdata, 16);
309		EBCASC(tmp_udat, 16);
310		memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
311		sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
312		return buf;
313	} else
314		return netiucv_printname(conn->userid, 8);
315}
316
317/**
318 * States of the interface statemachine.
319 */
enum dev_states {
	DEV_STATE_STOPPED,
	DEV_STATE_STARTWAIT,
	DEV_STATE_STOPWAIT,
	DEV_STATE_RUNNING,
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_STATES
};

/* Printable names, indexed by enum dev_states - keep in sync. */
static const char *dev_state_names[] = {
	"Stopped",
	"StartWait",
	"StopWait",
	"Running",
};
337
338/**
339 * Events of the interface statemachine.
340 */
enum dev_events {
	DEV_EVENT_START,
	DEV_EVENT_STOP,
	DEV_EVENT_CONUP,
	DEV_EVENT_CONDOWN,
	/**
	 * MUST be always the last element!!
	 */
	NR_DEV_EVENTS
};

/* Printable names, indexed by enum dev_events - keep in sync. */
static const char *dev_event_names[] = {
	"Start",
	"Stop",
	"Connection up",
	"Connection down",
};
358
359/**
360 * Events of the connection statemachine
361 */
enum conn_events {
	/**
	 * Events, representing callbacks from
	 * lowlevel iucv layer)
	 */
	CONN_EVENT_CONN_REQ,
	CONN_EVENT_CONN_ACK,
	CONN_EVENT_CONN_REJ,
	CONN_EVENT_CONN_SUS,
	CONN_EVENT_CONN_RES,
	CONN_EVENT_RX,
	CONN_EVENT_TXDONE,

	/**
	 * Events, representing errors return codes from
	 * calls to lowlevel iucv layer
	 */

	/**
	 * Event, representing timer expiry.
	 */
	CONN_EVENT_TIMER,

	/**
	 * Events, representing commands from upper levels.
	 */
	CONN_EVENT_START,
	CONN_EVENT_STOP,

	/**
	 * MUST be always the last element!!
	 */
	NR_CONN_EVENTS,
};

/* Printable names, indexed by enum conn_events - keep in sync. */
static const char *conn_event_names[] = {
	"Remote connection request",
	"Remote connection acknowledge",
	"Remote connection reject",
	"Connection suspended",
	"Connection resumed",
	"Data received",
	"Data sent",

	"Timer",

	"Start",
	"Stop",
};
411
412/**
413 * States of the connection statemachine.
414 */
enum conn_states {
	/**
	 * Connection not assigned to any device,
	 * initial state, invalid
	 */
	CONN_STATE_INVALID,

	/**
	 * Userid assigned but not operating
	 */
	CONN_STATE_STOPPED,

	/**
	 * Connection registered,
	 * no connection request sent yet,
	 * no connection request received
	 */
	CONN_STATE_STARTWAIT,

	/**
	 * Connection registered and connection request sent,
	 * no acknowledge and no connection request received yet.
	 */
	CONN_STATE_SETUPWAIT,

	/**
	 * Connection up and running idle
	 */
	CONN_STATE_IDLE,

	/**
	 * Data sent, awaiting CONN_EVENT_TXDONE
	 */
	CONN_STATE_TX,

	/**
	 * Error during registration.
	 */
	CONN_STATE_REGERR,

	/**
	 * Error during connection setup.
	 */
	CONN_STATE_CONNERR,

	/**
	 * MUST be always the last element!!
	 */
	NR_CONN_STATES,
};
465
/*
 * Printable names, indexed by enum conn_states - keep in sync.
 *
 * Fix: the array contained a stale "Terminating" entry with no
 * matching enum state (the enum has 8 states before NR_CONN_STATES),
 * so CONN_STATE_REGERR printed as "Terminating" and CONN_STATE_CONNERR
 * as "Registration error".
 */
static const char *conn_state_names[] = {
	"Invalid",
	"Stopped",
	"StartWait",
	"SetupWait",
	"Idle",
	"TX",
	"Registration error",
	"Connect error",
};
477
478
479/**
480 * Debug Facility Stuff
481 */
482static debug_info_t *iucv_dbf_setup = NULL;
483static debug_info_t *iucv_dbf_data = NULL;
484static debug_info_t *iucv_dbf_trace = NULL;
485
486DEFINE_PER_CPU(char[256], iucv_dbf_txt_buf);
487
/*
 * Tear down all three debug areas.  Also called from the error path
 * of iucv_register_dbf_views(), where some pointers may still be NULL.
 */
static void iucv_unregister_dbf_views(void)
{
	debug_unregister(iucv_dbf_setup);
	debug_unregister(iucv_dbf_data);
	debug_unregister(iucv_dbf_trace);
}
494static int iucv_register_dbf_views(void)
495{
496	iucv_dbf_setup = debug_register(IUCV_DBF_SETUP_NAME,
497					IUCV_DBF_SETUP_PAGES,
498					IUCV_DBF_SETUP_NR_AREAS,
499					IUCV_DBF_SETUP_LEN);
500	iucv_dbf_data = debug_register(IUCV_DBF_DATA_NAME,
501				       IUCV_DBF_DATA_PAGES,
502				       IUCV_DBF_DATA_NR_AREAS,
503				       IUCV_DBF_DATA_LEN);
504	iucv_dbf_trace = debug_register(IUCV_DBF_TRACE_NAME,
505					IUCV_DBF_TRACE_PAGES,
506					IUCV_DBF_TRACE_NR_AREAS,
507					IUCV_DBF_TRACE_LEN);
508
509	if ((iucv_dbf_setup == NULL) || (iucv_dbf_data == NULL) ||
510	    (iucv_dbf_trace == NULL)) {
511		iucv_unregister_dbf_views();
512		return -ENOMEM;
513	}
514	debug_register_view(iucv_dbf_setup, &debug_hex_ascii_view);
515	debug_set_level(iucv_dbf_setup, IUCV_DBF_SETUP_LEVEL);
516
517	debug_register_view(iucv_dbf_data, &debug_hex_ascii_view);
518	debug_set_level(iucv_dbf_data, IUCV_DBF_DATA_LEVEL);
519
520	debug_register_view(iucv_dbf_trace, &debug_hex_ascii_view);
521	debug_set_level(iucv_dbf_trace, IUCV_DBF_TRACE_LEVEL);
522
523	return 0;
524}
525
526/*
527 * Callback-wrappers, called from lowlevel iucv layer.
528 */
529
530static void netiucv_callback_rx(struct iucv_path *path,
531				struct iucv_message *msg)
532{
533	struct iucv_connection *conn = path->private;
534	struct iucv_event ev;
535
536	ev.conn = conn;
537	ev.data = msg;
538	fsm_event(conn->fsm, CONN_EVENT_RX, &ev);
539}
540
541static void netiucv_callback_txdone(struct iucv_path *path,
542				    struct iucv_message *msg)
543{
544	struct iucv_connection *conn = path->private;
545	struct iucv_event ev;
546
547	ev.conn = conn;
548	ev.data = msg;
549	fsm_event(conn->fsm, CONN_EVENT_TXDONE, &ev);
550}
551
/* Path establishment completed: feed CONN_EVENT_CONN_ACK to the FSM. */
static void netiucv_callback_connack(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_ACK, conn);
}
558
559static int netiucv_callback_connreq(struct iucv_path *path,
560				    u8 ipvmid[8], u8 ipuser[16])
561{
562	struct iucv_connection *conn = path->private;
563	struct iucv_event ev;
564	static char tmp_user[9];
565	static char tmp_udat[17];
566	int rc;
567
568	rc = -EINVAL;
569	memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
570	memcpy(tmp_udat, ipuser, 16);
571	EBCASC(tmp_udat, 16);
572	read_lock_bh(&iucv_connection_rwlock);
573	list_for_each_entry(conn, &iucv_connection_list, list) {
574		if (strncmp(ipvmid, conn->userid, 8) ||
575		    strncmp(ipuser, conn->userdata, 16))
576			continue;
577		/* Found a matching connection for this path. */
578		conn->path = path;
579		ev.conn = conn;
580		ev.data = path;
581		fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
582		rc = 0;
583	}
584	IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
585		       tmp_user, netiucv_printname(tmp_udat, 16));
586	read_unlock_bh(&iucv_connection_rwlock);
587	return rc;
588}
589
/* Peer severed the path: feed CONN_EVENT_CONN_REJ to the FSM. */
static void netiucv_callback_connrej(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_REJ, conn);
}

/* Peer quiesced the path: feed CONN_EVENT_CONN_SUS to the FSM. */
static void netiucv_callback_connsusp(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_SUS, conn);
}

/* Peer resumed the path: feed CONN_EVENT_CONN_RES to the FSM. */
static void netiucv_callback_connres(struct iucv_path *path, u8 ipuser[16])
{
	struct iucv_connection *conn = path->private;

	fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
}
610
611/**
612 * NOP action for statemachines
613 */
/* Intentionally empty: used for state/event pairs that need no action. */
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
{
}
617
618/*
619 * Actions of the connection statemachine
620 */
621
622/**
623 * netiucv_unpack_skb
624 * @conn: The connection where this skb has been received.
625 * @pskb: The received skb.
626 *
627 * Unpack a just received skb and hand it over to upper layers.
628 * Helper function for conn_action_rx.
629 */
static void netiucv_unpack_skb(struct iucv_connection *conn,
			       struct sk_buff *pskb)
{
	struct net_device     *dev = conn->netdev;
	struct netiucv_priv   *privptr = netdev_priv(dev);
	u16 offset = 0;

	/* Expose the first ll_header; the receive path left pskb->data
	 * pointing at it with len 0. */
	skb_put(pskb, NETIUCV_HDRLEN);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_NONE;
	/* NOTE(review): ntohs and htons are identical on big-endian
	 * s390, but htons would express the intent (host->network). */
	pskb->protocol = ntohs(ETH_P_IP);

	/* Walk the chain of packed packets.  Each ll_header.next holds
	 * the offset of the following header within the buffer; a zero
	 * 'next' terminates the chain. */
	while (1) {
		struct sk_buff *skb;
		struct ll_header *header = (struct ll_header *) pskb->data;

		if (!header->next)
			break;

		skb_pull(pskb, NETIUCV_HDRLEN);
		/* Convert the absolute offset into this packet's payload
		 * length, then remember the new running offset. */
		header->next -= offset;
		offset += header->next;
		header->next -= NETIUCV_HDRLEN;
		if (skb_tailroom(pskb) < header->next) {
			IUCV_DBF_TEXT_(data, 2, "Illegal next field: %d > %d\n",
				header->next, skb_tailroom(pskb));
			return;
		}
		skb_put(pskb, header->next);
		skb_reset_mac_header(pskb);
		/* Copy the payload into a fresh skb for the stack; pskb
		 * itself is the reusable assembly buffer. */
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			IUCV_DBF_TEXT(data, 2,
				"Out of memory in netiucv_unpack_skb\n");
			privptr->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		/* NOTE(review): this sets ip_summed on the assembly buffer
		 * (pskb), not on the skb handed upstream - confirm whether
		 * skb->ip_summed was intended here. */
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		privptr->stats.rx_packets++;
		privptr->stats.rx_bytes += skb->len;
		/*
		 * Since receiving is always initiated from a tasklet (in iucv.c),
		 * we must use netif_rx_ni() instead of netif_rx()
		 */
		netif_rx_ni(skb);
		/* Advance to the next packed packet and re-expose its header. */
		skb_pull(pskb, header->next);
		skb_put(pskb, NETIUCV_HDRLEN);
	}
}
684
/*
 * conn_action_rx - CONN_EVENT_RX: receive one pending IUCV message
 * into the connection's rx buffer and unpack it for the stack.
 * Oversized or orphaned messages are rejected back to IUCV.
 */
static void conn_action_rx(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct netiucv_priv *privptr = netdev_priv(conn->netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn->netdev) {
		iucv_message_reject(conn->path, msg);
		IUCV_DBF_TEXT(data, 2,
			      "Received data for unlinked connection\n");
		return;
	}
	if (msg->length > conn->max_buffsize) {
		iucv_message_reject(conn->path, msg);
		privptr->stats.rx_dropped++;
		IUCV_DBF_TEXT_(data, 2, "msglen %d > max_buffsize %d\n",
			       msg->length, conn->max_buffsize);
		return;
	}
	/* Rewind the reusable receive buffer to empty before filling it. */
	conn->rx_buff->data = conn->rx_buff->head;
	skb_reset_tail_pointer(conn->rx_buff);
	conn->rx_buff->len = 0;
	rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data,
				  msg->length, NULL);
	/* NOTE(review): 5 looks like the minimum size of a valid packed
	 * buffer (header + payload + terminator) - confirm. */
	if (rc || msg->length < 5) {
		privptr->stats.rx_errors++;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_receive\n", rc);
		return;
	}
	netiucv_unpack_skb(conn, conn->rx_buff);
}
720
/*
 * conn_action_txdone - CONN_EVENT_TXDONE: a send completed.
 * Release the completed skb (if the message carried a single one),
 * then bundle everything that piled up on the collect queue into the
 * tx buffer and send it as one multi-packet IUCV message.
 */
static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_message *msg = ev->data;
	struct iucv_message txmsg;
	struct netiucv_priv *privptr = NULL;
	/* Non-zero tag: the completed message carried a single skb that
	 * is waiting on the commit queue (multi sends use tag 0, see
	 * below) - TODO confirm against the tx path. */
	u32 single_flag = msg->tag;
	u32 txbytes = 0;
	u32 txpackets = 0;
	u32 stat_maxcq = 0;
	struct sk_buff *skb;
	unsigned long saveflags;
	struct ll_header header;
	int rc;

	IUCV_DBF_TEXT(trace, 4, __func__);

	if (!conn || !conn->netdev) {
		IUCV_DBF_TEXT(data, 2,
			      "Send confirmation for unlinked connection\n");
		return;
	}
	privptr = netdev_priv(conn->netdev);
	conn->prof.tx_pending--;
	if (single_flag) {
		/* Drop the reference taken when the skb was committed,
		 * account it and free it. */
		if ((skb = skb_dequeue(&conn->commit_queue))) {
			atomic_dec(&skb->users);
			if (privptr) {
				privptr->stats.tx_packets++;
				privptr->stats.tx_bytes +=
					(skb->len - NETIUCV_HDRLEN
						  - NETIUCV_HDRLEN);
			}
			dev_kfree_skb_any(skb);
		}
	}
	/* Rewind the reusable tx buffer to empty. */
	conn->tx_buff->data = conn->tx_buff->head;
	skb_reset_tail_pointer(conn->tx_buff);
	conn->tx_buff->len = 0;
	/* Move all collected skbs into the tx buffer, each prefixed by
	 * an ll_header whose 'next' points at the following header. */
	spin_lock_irqsave(&conn->collect_lock, saveflags);
	while ((skb = skb_dequeue(&conn->collect_queue))) {
		header.next = conn->tx_buff->len + skb->len + NETIUCV_HDRLEN;
		memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header,
		       NETIUCV_HDRLEN);
		skb_copy_from_linear_data(skb,
					  skb_put(conn->tx_buff, skb->len),
					  skb->len);
		txbytes += skb->len;
		txpackets++;
		stat_maxcq++;
		atomic_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
	if (conn->collect_len > conn->prof.maxmulti)
		conn->prof.maxmulti = conn->collect_len;
	conn->collect_len = 0;
	spin_unlock_irqrestore(&conn->collect_lock, saveflags);
	if (conn->tx_buff->len == 0) {
		/* Nothing was waiting - connection goes idle. */
		fsm_newstate(fi, CONN_STATE_IDLE);
		return;
	}

	/* Terminating header (next == 0 ends the chain), then send. */
	header.next = 0;
	memcpy(skb_put(conn->tx_buff, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
	conn->prof.send_stamp = jiffies;
	txmsg.class = 0;
	txmsg.tag = 0;
	rc = iucv_message_send(conn->path, &txmsg, 0, 0,
			       conn->tx_buff->data, conn->tx_buff->len);
	conn->prof.doios_multi++;
	conn->prof.txlen += conn->tx_buff->len;
	conn->prof.tx_pending++;
	if (conn->prof.tx_pending > conn->prof.tx_max_pending)
		conn->prof.tx_max_pending = conn->prof.tx_pending;
	if (rc) {
		conn->prof.tx_pending--;
		fsm_newstate(fi, CONN_STATE_IDLE);
		if (privptr)
			privptr->stats.tx_errors += txpackets;
		IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
	} else {
		if (privptr) {
			privptr->stats.tx_packets += txpackets;
			privptr->stats.tx_bytes += txbytes;
		}
		if (stat_maxcq > conn->prof.maxcqueue)
			conn->prof.maxcqueue = stat_maxcq;
	}
}
811
/*
 * conn_action_connaccept - CONN_EVENT_CONN_REQ in STARTWAIT/SETUPWAIT:
 * adopt the pending path, accept it and signal DEV_EVENT_CONUP upward.
 */
static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct iucv_path *path = ev->data;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);
	int rc;

	IUCV_DBF_TEXT(trace, 3, __func__);

	conn->path = path;
	path->msglim = NETIUCV_QUEUELEN_DEFAULT;
	path->flags = 0;
	rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
	if (rc) {
		IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
		return;
	}
	fsm_newstate(fi, CONN_STATE_IDLE);
	/* Limit the device queue to the negotiated IUCV message limit. */
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
835
836static void conn_action_connreject(fsm_instance *fi, int event, void *arg)
837{
838	struct iucv_event *ev = arg;
839	struct iucv_path *path = ev->data;
840
841	IUCV_DBF_TEXT(trace, 3, __func__);
842	iucv_path_sever(path, NULL);
843}
844
/*
 * conn_action_connack - CONN_EVENT_CONN_ACK in SETUPWAIT: our connect
 * was acknowledged.  Stop the setup timer, go idle and report
 * DEV_EVENT_CONUP to the interface statemachine.
 */
static void conn_action_connack(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_IDLE);
	/* Limit the device queue to the negotiated IUCV message limit. */
	netdev->tx_queue_len = conn->path->msglim;
	fsm_event(privptr->fsm, DEV_EVENT_CONUP, netdev);
}
857
/*
 * conn_action_conntimsev - CONN_EVENT_TIMER in SETUPWAIT: the connect
 * attempt timed out.  Sever the half-open path and fall back to
 * STARTWAIT so a later start (or incoming request) can retry.
 */
static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;

	IUCV_DBF_TEXT(trace, 3, __func__);
	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, conn->userdata);
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
867
/*
 * conn_action_connsever - CONN_EVENT_CONN_REJ: the peer dropped the
 * connection.  Sever our side, return to STARTWAIT and propagate
 * DEV_EVENT_CONDOWN to the interface statemachine.
 */
static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	iucv_path_sever(conn->path, conn->userdata);
	dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
			       "connection\n", netiucv_printuser(conn));
	IUCV_DBF_TEXT(data, 2,
		      "conn_action_connsever: Remote dropped connection\n");
	fsm_newstate(fi, CONN_STATE_STARTWAIT);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
885
886static void conn_action_start(fsm_instance *fi, int event, void *arg)
887{
888	struct iucv_connection *conn = arg;
889	struct net_device *netdev = conn->netdev;
890	struct netiucv_priv *privptr = netdev_priv(netdev);
891	int rc;
892
893	IUCV_DBF_TEXT(trace, 3, __func__);
894
895	fsm_newstate(fi, CONN_STATE_STARTWAIT);
896
897	/*
898	 * We must set the state before calling iucv_connect because the
899	 * callback handler could be called at any point after the connection
900	 * request is sent
901	 */
902
903	fsm_newstate(fi, CONN_STATE_SETUPWAIT);
904	conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
905	IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
906		netdev->name, netiucv_printuser(conn));
907
908	rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
909			       NULL, conn->userdata, conn);
910	switch (rc) {
911	case 0:
912		netdev->tx_queue_len = conn->path->msglim;
913		fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC,
914			     CONN_EVENT_TIMER, conn);
915		return;
916	case 11:
917		dev_warn(privptr->dev,
918			"The IUCV device failed to connect to z/VM guest %s\n",
919			netiucv_printname(conn->userid, 8));
920		fsm_newstate(fi, CONN_STATE_STARTWAIT);
921		break;
922	case 12:
923		dev_warn(privptr->dev,
924			"The IUCV device failed to connect to the peer on z/VM"
925			" guest %s\n", netiucv_printname(conn->userid, 8));
926		fsm_newstate(fi, CONN_STATE_STARTWAIT);
927		break;
928	case 13:
929		dev_err(privptr->dev,
930			"Connecting the IUCV device would exceed the maximum"
931			" number of IUCV connections\n");
932		fsm_newstate(fi, CONN_STATE_CONNERR);
933		break;
934	case 14:
935		dev_err(privptr->dev,
936			"z/VM guest %s has too many IUCV connections"
937			" to connect with the IUCV device\n",
938			netiucv_printname(conn->userid, 8));
939		fsm_newstate(fi, CONN_STATE_CONNERR);
940		break;
941	case 15:
942		dev_err(privptr->dev,
943			"The IUCV device cannot connect to a z/VM guest with no"
944			" IUCV authorization\n");
945		fsm_newstate(fi, CONN_STATE_CONNERR);
946		break;
947	default:
948		dev_err(privptr->dev,
949			"Connecting the IUCV device failed with error %d\n",
950			rc);
951		fsm_newstate(fi, CONN_STATE_CONNERR);
952		break;
953	}
954	IUCV_DBF_TEXT_(setup, 5, "iucv_connect rc is %d\n", rc);
955	kfree(conn->path);
956	conn->path = NULL;
957}
958
959static void netiucv_purge_skb_queue(struct sk_buff_head *q)
960{
961	struct sk_buff *skb;
962
963	while ((skb = skb_dequeue(q))) {
964		atomic_dec(&skb->users);
965		dev_kfree_skb_any(skb);
966	}
967}
968
/*
 * conn_action_stop - CONN_EVENT_STOP: tear the connection down.
 * Cancels the timer, drops all queued skbs, severs and frees the
 * path, then reports DEV_EVENT_CONDOWN upward.
 */
static void conn_action_stop(fsm_instance *fi, int event, void *arg)
{
	struct iucv_event *ev = arg;
	struct iucv_connection *conn = ev->conn;
	struct net_device *netdev = conn->netdev;
	struct netiucv_priv *privptr = netdev_priv(netdev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	fsm_deltimer(&conn->timer);
	fsm_newstate(fi, CONN_STATE_STOPPED);
	netiucv_purge_skb_queue(&conn->collect_queue);
	if (conn->path) {
		IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
		iucv_path_sever(conn->path, conn->userdata);
		kfree(conn->path);
		conn->path = NULL;
	}
	netiucv_purge_skb_queue(&conn->commit_queue);
	fsm_event(privptr->fsm, DEV_EVENT_CONDOWN, netdev);
}
990
/*
 * conn_action_inval - CONN_EVENT_START on an unassigned (INVALID)
 * connection: nothing to do except log the misuse.
 */
static void conn_action_inval(fsm_instance *fi, int event, void *arg)
{
	struct iucv_connection *conn = arg;
	struct net_device *netdev = conn->netdev;

	IUCV_DBF_TEXT_(data, 2, "%s('%s'): conn_action_inval called\n",
		netdev->name, conn->userid);
}
999
1000static const fsm_node conn_fsm[] = {
1001	{ CONN_STATE_INVALID,   CONN_EVENT_START,    conn_action_inval      },
1002	{ CONN_STATE_STOPPED,   CONN_EVENT_START,    conn_action_start      },
1003
1004	{ CONN_STATE_STOPPED,   CONN_EVENT_STOP,     conn_action_stop       },
1005	{ CONN_STATE_STARTWAIT, CONN_EVENT_STOP,     conn_action_stop       },
1006	{ CONN_STATE_SETUPWAIT, CONN_EVENT_STOP,     conn_action_stop       },
1007	{ CONN_STATE_IDLE,      CONN_EVENT_STOP,     conn_action_stop       },
1008	{ CONN_STATE_TX,        CONN_EVENT_STOP,     conn_action_stop       },
1009	{ CONN_STATE_REGERR,    CONN_EVENT_STOP,     conn_action_stop       },
1010	{ CONN_STATE_CONNERR,   CONN_EVENT_STOP,     conn_action_stop       },
1011
1012	{ CONN_STATE_STOPPED,   CONN_EVENT_CONN_REQ, conn_action_connreject },
1013        { CONN_STATE_STARTWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1014	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REQ, conn_action_connaccept },
1015	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REQ, conn_action_connreject },
1016	{ CONN_STATE_TX,        CONN_EVENT_CONN_REQ, conn_action_connreject },
1017
1018	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_ACK, conn_action_connack    },
1019	{ CONN_STATE_SETUPWAIT, CONN_EVENT_TIMER,    conn_action_conntimsev },
1020
1021	{ CONN_STATE_SETUPWAIT, CONN_EVENT_CONN_REJ, conn_action_connsever  },
1022	{ CONN_STATE_IDLE,      CONN_EVENT_CONN_REJ, conn_action_connsever  },
1023	{ CONN_STATE_TX,        CONN_EVENT_CONN_REJ, conn_action_connsever  },
1024
1025	{ CONN_STATE_IDLE,      CONN_EVENT_RX,       conn_action_rx         },
1026	{ CONN_STATE_TX,        CONN_EVENT_RX,       conn_action_rx         },
1027
1028	{ CONN_STATE_TX,        CONN_EVENT_TXDONE,   conn_action_txdone     },
1029	{ CONN_STATE_IDLE,      CONN_EVENT_TXDONE,   conn_action_txdone     },
1030};
1031
1032static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
1033
1034
1035/*
1036 * Actions for interface - statemachine.
1037 */
1038
1039/**
1040 * dev_action_start
1041 * @fi: An instance of an interface statemachine.
1042 * @event: The event, just happened.
1043 * @arg: Generic pointer, casted from struct net_device * upon call.
1044 *
1045 * Startup connection by sending CONN_EVENT_START to it.
1046 */
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
	struct net_device   *dev = arg;
	struct netiucv_priv *privptr = netdev_priv(dev);

	IUCV_DBF_TEXT(trace, 3, __func__);

	/* Enter STARTWAIT before kicking the connection; the connection
	 * FSM will answer asynchronously with DEV_EVENT_CONUP/CONDOWN. */
	fsm_newstate(fi, DEV_STATE_STARTWAIT);
	fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
}
1057
1058/**
1059 * Shutdown connection by sending CONN_EVENT_STOP to it.
1060 *
1061 * @param fi    An instance of an interface statemachine.
1062 * @param event The event, just happened.
1063 * @param arg   Generic pointer, casted from struct net_device * upon call.
1064 */
1065static void
1066dev_action_stop(fsm_instance *fi, int event, void *arg)
1067{
1068	struct net_device   *dev = arg;
1069	struct netiucv_priv *privptr = netdev_priv(dev);
1070	struct iucv_event   ev;
1071
1072	IUCV_DBF_TEXT(trace, 3, __func__);
1073
1074	ev.conn = privptr->conn;
1075
1076	fsm_newstate(fi, DEV_STATE_STOPWAIT);
1077	fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
1078}
1079
1080/**
1081 * Called from connection statemachine
1082 * when a connection is up and running.
1083 *
1084 * @param fi    An instance of an interface statemachine.
1085 * @param event The event, just happened.
1086 * @param arg   Generic pointer, casted from struct net_device * upon call.
1087 */
1088static void
1089dev_action_connup(fsm_instance *fi, int event, void *arg)
1090{
1091	struct net_device   *dev = arg;
1092	struct netiucv_priv *privptr = netdev_priv(dev);
1093
1094	IUCV_DBF_TEXT(trace, 3, __func__);
1095
1096	switch (fsm_getstate(fi)) {
1097		case DEV_STATE_STARTWAIT:
1098			fsm_newstate(fi, DEV_STATE_RUNNING);
1099			dev_info(privptr->dev,
1100				"The IUCV device has been connected"
1101				" successfully to %s\n",
1102				netiucv_printuser(privptr->conn));
1103			IUCV_DBF_TEXT(setup, 3,
1104				"connection is up and running\n");
1105			break;
1106		case DEV_STATE_STOPWAIT:
1107			IUCV_DBF_TEXT(data, 2,
1108				"dev_action_connup: in DEV_STATE_STOPWAIT\n");
1109			break;
1110	}
1111}
1112
1113/**
1114 * Called from connection statemachine
1115 * when a connection has been shutdown.
1116 *
1117 * @param fi    An instance of an interface statemachine.
1118 * @param event The event, just happened.
1119 * @param arg   Generic pointer, casted from struct net_device * upon call.
1120 */
1121static void
1122dev_action_conndown(fsm_instance *fi, int event, void *arg)
1123{
1124	IUCV_DBF_TEXT(trace, 3, __func__);
1125
1126	switch (fsm_getstate(fi)) {
1127		case DEV_STATE_RUNNING:
1128			fsm_newstate(fi, DEV_STATE_STARTWAIT);
1129			break;
1130		case DEV_STATE_STOPWAIT:
1131			fsm_newstate(fi, DEV_STATE_STOPPED);
1132			IUCV_DBF_TEXT(setup, 3, "connection is down\n");
1133			break;
1134	}
1135}
1136
/*
 * Transition table of the interface statemachine:
 * { current state, incoming event, action to run }.
 */
static const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,    DEV_EVENT_START,   dev_action_start    },

	{ DEV_STATE_STOPWAIT,   DEV_EVENT_START,   dev_action_start    },
	{ DEV_STATE_STOPWAIT,   DEV_EVENT_CONDOWN, dev_action_conndown },

	{ DEV_STATE_STARTWAIT,  DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_STARTWAIT,  DEV_EVENT_CONUP,   dev_action_connup   },

	{ DEV_STATE_RUNNING,    DEV_EVENT_STOP,    dev_action_stop     },
	{ DEV_STATE_RUNNING,    DEV_EVENT_CONDOWN, dev_action_conndown },
	{ DEV_STATE_RUNNING,    DEV_EVENT_CONUP,   netiucv_action_nop  },
};
1150
1151static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
1152
1153/**
1154 * Transmit a packet.
1155 * This is a helper function for netiucv_tx().
1156 *
1157 * @param conn Connection to be used for sending.
1158 * @param skb Pointer to struct sk_buff of packet to send.
1159 *            The linklevel header has already been set up
1160 *            by netiucv_tx().
1161 *
1162 * @return 0 on success, -ERRNO on failure. (Never fails.)
1163 */
1164static int netiucv_transmit_skb(struct iucv_connection *conn,
1165				struct sk_buff *skb)
1166{
1167	struct iucv_message msg;
1168	unsigned long saveflags;
1169	struct ll_header header;
1170	int rc;
1171
1172	if (fsm_getstate(conn->fsm) != CONN_STATE_IDLE) {
1173		int l = skb->len + NETIUCV_HDRLEN;
1174
1175		spin_lock_irqsave(&conn->collect_lock, saveflags);
1176		if (conn->collect_len + l >
1177		    (conn->max_buffsize - NETIUCV_HDRLEN)) {
1178			rc = -EBUSY;
1179			IUCV_DBF_TEXT(data, 2,
1180				      "EBUSY from netiucv_transmit_skb\n");
1181		} else {
1182			atomic_inc(&skb->users);
1183			skb_queue_tail(&conn->collect_queue, skb);
1184			conn->collect_len += l;
1185			rc = 0;
1186		}
1187		spin_unlock_irqrestore(&conn->collect_lock, saveflags);
1188	} else {
1189		struct sk_buff *nskb = skb;
1190		/**
1191		 * Copy the skb to a new allocated skb in lowmem only if the
1192		 * data is located above 2G in memory or tailroom is < 2.
1193		 */
1194		unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) +
1195				    NETIUCV_HDRLEN)) >> 31;
1196		int copied = 0;
1197		if (hi || (skb_tailroom(skb) < 2)) {
1198			nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
1199					 NETIUCV_HDRLEN, GFP_ATOMIC | GFP_DMA);
1200			if (!nskb) {
1201				IUCV_DBF_TEXT(data, 2, "alloc_skb failed\n");
1202				rc = -ENOMEM;
1203				return rc;
1204			} else {
1205				skb_reserve(nskb, NETIUCV_HDRLEN);
1206				memcpy(skb_put(nskb, skb->len),
1207				       skb->data, skb->len);
1208			}
1209			copied = 1;
1210		}
1211		/**
1212		 * skb now is below 2G and has enough room. Add headers.
1213		 */
1214		header.next = nskb->len + NETIUCV_HDRLEN;
1215		memcpy(skb_push(nskb, NETIUCV_HDRLEN), &header, NETIUCV_HDRLEN);
1216		header.next = 0;
1217		memcpy(skb_put(nskb, NETIUCV_HDRLEN), &header,  NETIUCV_HDRLEN);
1218
1219		fsm_newstate(conn->fsm, CONN_STATE_TX);
1220		conn->prof.send_stamp = jiffies;
1221
1222		msg.tag = 1;
1223		msg.class = 0;
1224		rc = iucv_message_send(conn->path, &msg, 0, 0,
1225				       nskb->data, nskb->len);
1226		conn->prof.doios_single++;
1227		conn->prof.txlen += skb->len;
1228		conn->prof.tx_pending++;
1229		if (conn->prof.tx_pending > conn->prof.tx_max_pending)
1230			conn->prof.tx_max_pending = conn->prof.tx_pending;
1231		if (rc) {
1232			struct netiucv_priv *privptr;
1233			fsm_newstate(conn->fsm, CONN_STATE_IDLE);
1234			conn->prof.tx_pending--;
1235			privptr = netdev_priv(conn->netdev);
1236			if (privptr)
1237				privptr->stats.tx_errors++;
1238			if (copied)
1239				dev_kfree_skb(nskb);
1240			else {
1241				/**
1242				 * Remove our headers. They get added
1243				 * again on retransmit.
1244				 */
1245				skb_pull(skb, NETIUCV_HDRLEN);
1246				skb_trim(skb, skb->len - NETIUCV_HDRLEN);
1247			}
1248			IUCV_DBF_TEXT_(data, 2, "rc %d from iucv_send\n", rc);
1249		} else {
1250			if (copied)
1251				dev_kfree_skb(skb);
1252			atomic_inc(&nskb->users);
1253			skb_queue_tail(&conn->commit_queue, nskb);
1254		}
1255	}
1256
1257	return rc;
1258}
1259
1260/*
1261 * Interface API for upper network layers
1262 */
1263
1264/**
1265 * Open an interface.
1266 * Called from generic network layer when ifconfig up is run.
1267 *
1268 * @param dev Pointer to interface struct.
1269 *
1270 * @return 0 on success, -ERRNO on failure. (Never fails.)
1271 */
1272static int netiucv_open(struct net_device *dev)
1273{
1274	struct netiucv_priv *priv = netdev_priv(dev);
1275
1276	fsm_event(priv->fsm, DEV_EVENT_START, dev);
1277	return 0;
1278}
1279
1280/**
1281 * Close an interface.
1282 * Called from generic network layer when ifconfig down is run.
1283 *
1284 * @param dev Pointer to interface struct.
1285 *
1286 * @return 0 on success, -ERRNO on failure. (Never fails.)
1287 */
1288static int netiucv_close(struct net_device *dev)
1289{
1290	struct netiucv_priv *priv = netdev_priv(dev);
1291
1292	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
1293	return 0;
1294}
1295
/* PM prepare callback: nothing to prepare, only trace the call. */
static int netiucv_pm_prepare(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	return 0;
}
1301
1302static void netiucv_pm_complete(struct device *dev)
1303{
1304	IUCV_DBF_TEXT(trace, 3, __func__);
1305	return;
1306}
1307
1308/**
1309 * netiucv_pm_freeze() - Freeze PM callback
1310 * @dev:	netiucv device
1311 *
1312 * close open netiucv interfaces
1313 */
1314static int netiucv_pm_freeze(struct device *dev)
1315{
1316	struct netiucv_priv *priv = dev_get_drvdata(dev);
1317	struct net_device *ndev = NULL;
1318	int rc = 0;
1319
1320	IUCV_DBF_TEXT(trace, 3, __func__);
1321	if (priv && priv->conn)
1322		ndev = priv->conn->netdev;
1323	if (!ndev)
1324		goto out;
1325	netif_device_detach(ndev);
1326	priv->pm_state = fsm_getstate(priv->fsm);
1327	rc = netiucv_close(ndev);
1328out:
1329	return rc;
1330}
1331
1332/**
1333 * netiucv_pm_restore_thaw() - Thaw and restore PM callback
1334 * @dev:	netiucv device
1335 *
1336 * re-open netiucv interfaces closed during freeze
1337 */
1338static int netiucv_pm_restore_thaw(struct device *dev)
1339{
1340	struct netiucv_priv *priv = dev_get_drvdata(dev);
1341	struct net_device *ndev = NULL;
1342	int rc = 0;
1343
1344	IUCV_DBF_TEXT(trace, 3, __func__);
1345	if (priv && priv->conn)
1346		ndev = priv->conn->netdev;
1347	if (!ndev)
1348		goto out;
1349	switch (priv->pm_state) {
1350	case DEV_STATE_RUNNING:
1351	case DEV_STATE_STARTWAIT:
1352		rc = netiucv_open(ndev);
1353		break;
1354	default:
1355		break;
1356	}
1357	netif_device_attach(ndev);
1358out:
1359	return rc;
1360}
1361
1362/**
1363 * Start transmission of a packet.
1364 * Called from generic network device layer.
1365 *
1366 * @param skb Pointer to buffer containing the packet.
1367 * @param dev Pointer to interface struct.
1368 *
1369 * @return 0 if packet consumed, !0 if packet rejected.
1370 *         Note: If we return !0, then the packet is free'd by
1371 *               the generic network layer.
1372 */
1373static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
1374{
1375	struct netiucv_priv *privptr = netdev_priv(dev);
1376	int rc;
1377
1378	IUCV_DBF_TEXT(trace, 4, __func__);
1379	/**
1380	 * Some sanity checks ...
1381	 */
1382	if (skb == NULL) {
1383		IUCV_DBF_TEXT(data, 2, "netiucv_tx: skb is NULL\n");
1384		privptr->stats.tx_dropped++;
1385		return NETDEV_TX_OK;
1386	}
1387	if (skb_headroom(skb) < NETIUCV_HDRLEN) {
1388		IUCV_DBF_TEXT(data, 2,
1389			"netiucv_tx: skb_headroom < NETIUCV_HDRLEN\n");
1390		dev_kfree_skb(skb);
1391		privptr->stats.tx_dropped++;
1392		return NETDEV_TX_OK;
1393	}
1394
1395	/**
1396	 * If connection is not running, try to restart it
1397	 * and throw away packet.
1398	 */
1399	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
1400		dev_kfree_skb(skb);
1401		privptr->stats.tx_dropped++;
1402		privptr->stats.tx_errors++;
1403		privptr->stats.tx_carrier_errors++;
1404		return NETDEV_TX_OK;
1405	}
1406
1407	if (netiucv_test_and_set_busy(dev)) {
1408		IUCV_DBF_TEXT(data, 2, "EBUSY from netiucv_tx\n");
1409		return NETDEV_TX_BUSY;
1410	}
1411	dev->trans_start = jiffies;
1412	rc = netiucv_transmit_skb(privptr->conn, skb);
1413	netiucv_clear_busy(dev);
1414	return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
1415}
1416
1417/**
1418 * netiucv_stats
1419 * @dev: Pointer to interface struct.
1420 *
1421 * Returns interface statistics of a device.
1422 *
1423 * Returns pointer to stats struct of this interface.
1424 */
1425static struct net_device_stats *netiucv_stats (struct net_device * dev)
1426{
1427	struct netiucv_priv *priv = netdev_priv(dev);
1428
1429	IUCV_DBF_TEXT(trace, 5, __func__);
1430	return &priv->stats;
1431}
1432
1433/**
1434 * netiucv_change_mtu
1435 * @dev: Pointer to interface struct.
1436 * @new_mtu: The new MTU to use for this interface.
1437 *
1438 * Sets MTU of an interface.
1439 *
1440 * Returns 0 on success, -EINVAL if MTU is out of valid range.
1441 *         (valid range is 576 .. NETIUCV_MTU_MAX).
1442 */
1443static int netiucv_change_mtu(struct net_device * dev, int new_mtu)
1444{
1445	IUCV_DBF_TEXT(trace, 3, __func__);
1446	if (new_mtu < 576 || new_mtu > NETIUCV_MTU_MAX) {
1447		IUCV_DBF_TEXT(setup, 2, "given MTU out of valid range\n");
1448		return -EINVAL;
1449	}
1450	dev->mtu = new_mtu;
1451	return 0;
1452}
1453
1454/*
1455 * attributes in sysfs
1456 */
1457
1458static ssize_t user_show(struct device *dev, struct device_attribute *attr,
1459			 char *buf)
1460{
1461	struct netiucv_priv *priv = dev_get_drvdata(dev);
1462
1463	IUCV_DBF_TEXT(trace, 5, __func__);
1464	return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
1465}
1466
/**
 * netiucv_check_user - parse and validate a "username[.userdata]" string
 * @buf:      input as written to the sysfs attribute (may end in '\n')
 * @count:    number of bytes in @buf
 * @username: output, 9 bytes: 8-char blank-padded user ID plus NUL
 * @userdata: output, 17 bytes: 16-char blank-padded field plus NUL,
 *            converted to EBCDIC
 *
 * Returns 0 on success or -EINVAL on malformed input.
 */
static int netiucv_check_user(const char *buf, size_t count, char *username,
			      char *userdata)
{
	const char *p;
	int i;

	/* Length check: at most 8 chars before the dot, 16 after it. */
	p = strchr(buf, '.');
	if ((p && ((count > 26) ||
		   ((p - buf) > 8) ||
		   (buf + count - p > 18))) ||
	    (!p && (count > 9))) {
		IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
		return -EINVAL;
	}

	/* User ID part: alphanumerics and '$' only, forced to upper case. */
	for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
		if (isalnum(*p) || *p == '$') {
			username[i] = toupper(*p);
			continue;
		}
		if (*p == '\n')
			/* trailing lf, grr */
			break;
		IUCV_DBF_TEXT_(setup, 2,
			       "conn_write: invalid character %02x\n", *p);
		return -EINVAL;
	}
	/* Blank-pad to the fixed 8-character user ID length. */
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	if (*p == '.') {
		p++;
		for (i = 0; i < 16 && *p; i++, p++) {
			if (*p == '\n')
				break;
			userdata[i] = toupper(*p);
		}
		/* NOTE(review): when the dot is immediately followed by
		 * '\n' (i == 0), this loop does not pad, leaving
		 * userdata[0..15] unwritten — confirm whether empty
		 * userdata is intended to be valid input here. */
		while (i > 0 && i < 16)
			userdata[i++] = ' ';
	} else
		memcpy(userdata, iucvMagic_ascii, 16);
	userdata[16] = '\0';
	ASCEBC(userdata, 16);

	return 0;
}
1514
/**
 * user_write - sysfs store: set the peer "username[.userdata]"
 * @dev:   the netiucv pseudo device
 * @attr:  the "user" attribute
 * @buf:   new peer specification, parsed by netiucv_check_user()
 * @count: length of @buf
 *
 * Refused with -EPERM while the interface is active and with -EEXIST
 * when another interface already serves the same peer.
 *
 * Returns @count on success or a negative errno.
 */
static ssize_t user_write(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct netiucv_priv *priv = dev_get_drvdata(dev);
	struct net_device *ndev = priv->conn->netdev;
	char	username[9];
	char	userdata[17];
	int	rc;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	if (memcmp(username, priv->conn->userid, 9) &&
	    (ndev->flags & (IFF_UP | IFF_RUNNING))) {
		/* username changed while the interface is active. */
		IUCV_DBF_TEXT(setup, 2, "user_write: device active\n");
		return -EPERM;
	}
	/* Reject a peer already served by a different interface. */
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		   !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
				"already exists\n", netiucv_printuser(cp));
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);
	memcpy(priv->conn->userid, username, 9);
	memcpy(priv->conn->userdata, userdata, 17);
	return count;
}

static DEVICE_ATTR(user, 0644, user_show, user_write);
1553
1554static ssize_t buffer_show (struct device *dev, struct device_attribute *attr,
1555			    char *buf)
1556{
1557	struct netiucv_priv *priv = dev_get_drvdata(dev);
1558
1559	IUCV_DBF_TEXT(trace, 5, __func__);
1560	return sprintf(buf, "%d\n", priv->conn->max_buffsize);
1561}
1562
1563static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
1564			     const char *buf, size_t count)
1565{
1566	struct netiucv_priv *priv = dev_get_drvdata(dev);
1567	struct net_device *ndev = priv->conn->netdev;
1568	char         *e;
1569	int          bs1;
1570
1571	IUCV_DBF_TEXT(trace, 3, __func__);
1572	if (count >= 39)
1573		return -EINVAL;
1574
1575	bs1 = simple_strtoul(buf, &e, 0);
1576
1577	if (e && (!isspace(*e))) {
1578		IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
1579			*e);
1580		return -EINVAL;
1581	}
1582	if (bs1 > NETIUCV_BUFSIZE_MAX) {
1583		IUCV_DBF_TEXT_(setup, 2,
1584			"buffer_write: buffer size %d too large\n",
1585			bs1);
1586		return -EINVAL;
1587	}
1588	if ((ndev->flags & IFF_RUNNING) &&
1589	    (bs1 < (ndev->mtu + NETIUCV_HDRLEN + 2))) {
1590		IUCV_DBF_TEXT_(setup, 2,
1591			"buffer_write: buffer size %d too small\n",
1592			bs1);
1593		return -EINVAL;
1594	}
1595	if (bs1 < (576 + NETIUCV_HDRLEN + NETIUCV_HDRLEN)) {
1596		IUCV_DBF_TEXT_(setup, 2,
1597			"buffer_write: buffer size %d too small\n",
1598			bs1);
1599		return -EINVAL;
1600	}
1601
1602	priv->conn->max_buffsize = bs1;
1603	if (!(ndev->flags & IFF_RUNNING))
1604		ndev->mtu = bs1 - NETIUCV_HDRLEN - NETIUCV_HDRLEN;
1605
1606	return count;
1607
1608}
1609
1610static DEVICE_ATTR(buffer, 0644, buffer_show, buffer_write);
1611
1612static ssize_t dev_fsm_show (struct device *dev, struct device_attribute *attr,
1613			     char *buf)
1614{
1615	struct netiucv_priv *priv = dev_get_drvdata(dev);
1616
1617	IUCV_DBF_TEXT(trace, 5, __func__);
1618	return sprintf(buf, "%s\n", fsm_getstate_str(priv->fsm));
1619}
1620
1621static DEVICE_ATTR(device_fsm_state, 0444, dev_fsm_show, NULL);
1622
1623static ssize_t conn_fsm_show (struct device *dev,
1624			      struct device_attribute *attr, char *buf)
1625{
1626	struct netiucv_priv *priv = dev_get_drvdata(dev);
1627
1628	IUCV_DBF_TEXT(trace, 5, __func__);
1629	return sprintf(buf, "%s\n", fsm_getstate_str(priv->conn->fsm));
1630}
1631
1632static DEVICE_ATTR(connection_fsm_state, 0444, conn_fsm_show, NULL);
1633
1634static ssize_t maxmulti_show (struct device *dev,
1635			      struct device_attribute *attr, char *buf)
1636{
1637	struct netiucv_priv *priv = dev_get_drvdata(dev);
1638
1639	IUCV_DBF_TEXT(trace, 5, __func__);
1640	return sprintf(buf, "%ld\n", priv->conn->prof.maxmulti);
1641}
1642
1643static ssize_t maxmulti_write (struct device *dev,
1644			       struct device_attribute *attr,
1645			       const char *buf, size_t count)
1646{
1647	struct netiucv_priv *priv = dev_get_drvdata(dev);
1648
1649	IUCV_DBF_TEXT(trace, 4, __func__);
1650	priv->conn->prof.maxmulti = 0;
1651	return count;
1652}
1653
1654static DEVICE_ATTR(max_tx_buffer_used, 0644, maxmulti_show, maxmulti_write);
1655
1656static ssize_t maxcq_show (struct device *dev, struct device_attribute *attr,
1657			   char *buf)
1658{
1659	struct netiucv_priv *priv = dev_get_drvdata(dev);
1660
1661	IUCV_DBF_TEXT(trace, 5, __func__);
1662	return sprintf(buf, "%ld\n", priv->conn->prof.maxcqueue);
1663}
1664
1665static ssize_t maxcq_write (struct device *dev, struct device_attribute *attr,
1666			    const char *buf, size_t count)
1667{
1668	struct netiucv_priv *priv = dev_get_drvdata(dev);
1669
1670	IUCV_DBF_TEXT(trace, 4, __func__);
1671	priv->conn->prof.maxcqueue = 0;
1672	return count;
1673}
1674
1675static DEVICE_ATTR(max_chained_skbs, 0644, maxcq_show, maxcq_write);
1676
1677static ssize_t sdoio_show (struct device *dev, struct device_attribute *attr,
1678			   char *buf)
1679{
1680	struct netiucv_priv *priv = dev_get_drvdata(dev);
1681
1682	IUCV_DBF_TEXT(trace, 5, __func__);
1683	return sprintf(buf, "%ld\n", priv->conn->prof.doios_single);
1684}
1685
1686static ssize_t sdoio_write (struct device *dev, struct device_attribute *attr,
1687			    const char *buf, size_t count)
1688{
1689	struct netiucv_priv *priv = dev_get_drvdata(dev);
1690
1691	IUCV_DBF_TEXT(trace, 4, __func__);
1692	priv->conn->prof.doios_single = 0;
1693	return count;
1694}
1695
1696static DEVICE_ATTR(tx_single_write_ops, 0644, sdoio_show, sdoio_write);
1697
1698static ssize_t mdoio_show (struct device *dev, struct device_attribute *attr,
1699			   char *buf)
1700{
1701	struct netiucv_priv *priv = dev_get_drvdata(dev);
1702
1703	IUCV_DBF_TEXT(trace, 5, __func__);
1704	return sprintf(buf, "%ld\n", priv->conn->prof.doios_multi);
1705}
1706
1707static ssize_t mdoio_write (struct device *dev, struct device_attribute *attr,
1708			    const char *buf, size_t count)
1709{
1710	struct netiucv_priv *priv = dev_get_drvdata(dev);
1711
1712	IUCV_DBF_TEXT(trace, 5, __func__);
1713	priv->conn->prof.doios_multi = 0;
1714	return count;
1715}
1716
1717static DEVICE_ATTR(tx_multi_write_ops, 0644, mdoio_show, mdoio_write);
1718
1719static ssize_t txlen_show (struct device *dev, struct device_attribute *attr,
1720			   char *buf)
1721{
1722	struct netiucv_priv *priv = dev_get_drvdata(dev);
1723
1724	IUCV_DBF_TEXT(trace, 5, __func__);
1725	return sprintf(buf, "%ld\n", priv->conn->prof.txlen);
1726}
1727
1728static ssize_t txlen_write (struct device *dev, struct device_attribute *attr,
1729			    const char *buf, size_t count)
1730{
1731	struct netiucv_priv *priv = dev_get_drvdata(dev);
1732
1733	IUCV_DBF_TEXT(trace, 4, __func__);
1734	priv->conn->prof.txlen = 0;
1735	return count;
1736}
1737
1738static DEVICE_ATTR(netto_bytes, 0644, txlen_show, txlen_write);
1739
1740static ssize_t txtime_show (struct device *dev, struct device_attribute *attr,
1741			    char *buf)
1742{
1743	struct netiucv_priv *priv = dev_get_drvdata(dev);
1744
1745	IUCV_DBF_TEXT(trace, 5, __func__);
1746	return sprintf(buf, "%ld\n", priv->conn->prof.tx_time);
1747}
1748
1749static ssize_t txtime_write (struct device *dev, struct device_attribute *attr,
1750			     const char *buf, size_t count)
1751{
1752	struct netiucv_priv *priv = dev_get_drvdata(dev);
1753
1754	IUCV_DBF_TEXT(trace, 4, __func__);
1755	priv->conn->prof.tx_time = 0;
1756	return count;
1757}
1758
1759static DEVICE_ATTR(max_tx_io_time, 0644, txtime_show, txtime_write);
1760
1761static ssize_t txpend_show (struct device *dev, struct device_attribute *attr,
1762			    char *buf)
1763{
1764	struct netiucv_priv *priv = dev_get_drvdata(dev);
1765
1766	IUCV_DBF_TEXT(trace, 5, __func__);
1767	return sprintf(buf, "%ld\n", priv->conn->prof.tx_pending);
1768}
1769
1770static ssize_t txpend_write (struct device *dev, struct device_attribute *attr,
1771			     const char *buf, size_t count)
1772{
1773	struct netiucv_priv *priv = dev_get_drvdata(dev);
1774
1775	IUCV_DBF_TEXT(trace, 4, __func__);
1776	priv->conn->prof.tx_pending = 0;
1777	return count;
1778}
1779
1780static DEVICE_ATTR(tx_pending, 0644, txpend_show, txpend_write);
1781
1782static ssize_t txmpnd_show (struct device *dev, struct device_attribute *attr,
1783			    char *buf)
1784{
1785	struct netiucv_priv *priv = dev_get_drvdata(dev);
1786
1787	IUCV_DBF_TEXT(trace, 5, __func__);
1788	return sprintf(buf, "%ld\n", priv->conn->prof.tx_max_pending);
1789}
1790
1791static ssize_t txmpnd_write (struct device *dev, struct device_attribute *attr,
1792			     const char *buf, size_t count)
1793{
1794	struct netiucv_priv *priv = dev_get_drvdata(dev);
1795
1796	IUCV_DBF_TEXT(trace, 4, __func__);
1797	priv->conn->prof.tx_max_pending = 0;
1798	return count;
1799}
1800
1801static DEVICE_ATTR(tx_max_pending, 0644, txmpnd_show, txmpnd_write);
1802
/* Per-device configuration attributes ("buffer", "user"). */
static struct attribute *netiucv_attrs[] = {
	&dev_attr_buffer.attr,
	&dev_attr_user.attr,
	NULL,
};

static struct attribute_group netiucv_attr_group = {
	.attrs = netiucv_attrs,
};

/* Per-device statistics attributes, grouped under "stats/". */
static struct attribute *netiucv_stat_attrs[] = {
	&dev_attr_device_fsm_state.attr,
	&dev_attr_connection_fsm_state.attr,
	&dev_attr_max_tx_buffer_used.attr,
	&dev_attr_max_chained_skbs.attr,
	&dev_attr_tx_single_write_ops.attr,
	&dev_attr_tx_multi_write_ops.attr,
	&dev_attr_netto_bytes.attr,
	&dev_attr_max_tx_io_time.attr,
	&dev_attr_tx_pending.attr,
	&dev_attr_tx_max_pending.attr,
	NULL,
};

static struct attribute_group netiucv_stat_attr_group = {
	.name  = "stats",
	.attrs = netiucv_stat_attrs,
};

/* All attribute groups, assigned to the device in
 * netiucv_register_device(). */
static const struct attribute_group *netiucv_attr_groups[] = {
	&netiucv_stat_attr_group,
	&netiucv_attr_group,
	NULL,
};
1837
1838static int netiucv_register_device(struct net_device *ndev)
1839{
1840	struct netiucv_priv *priv = netdev_priv(ndev);
1841	struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL);
1842	int ret;
1843
1844	IUCV_DBF_TEXT(trace, 3, __func__);
1845
1846	if (dev) {
1847		dev_set_name(dev, "net%s", ndev->name);
1848		dev->bus = &iucv_bus;
1849		dev->parent = iucv_root;
1850		dev->groups = netiucv_attr_groups;
1851		/*
1852		 * The release function could be called after the
1853		 * module has been unloaded. It's _only_ task is to
1854		 * free the struct. Therefore, we specify kfree()
1855		 * directly here. (Probably a little bit obfuscating
1856		 * but legitime ...).
1857		 */
1858		dev->release = (void (*)(struct device *))kfree;
1859		dev->driver = &netiucv_driver;
1860	} else
1861		return -ENOMEM;
1862
1863	ret = device_register(dev);
1864	if (ret) {
1865		put_device(dev);
1866		return ret;
1867	}
1868	priv->dev = dev;
1869	dev_set_drvdata(dev, priv);
1870	return 0;
1871}
1872
/* Counterpart of netiucv_register_device(): unregister the pseudo device.
 * The struct itself is freed by the kfree() release callback. */
static void netiucv_unregister_device(struct device *dev)
{
	IUCV_DBF_TEXT(trace, 3, __func__);
	device_unregister(dev);
}
1878
1879/**
1880 * Allocate and initialize a new connection structure.
1881 * Add it to the list of netiucv connections;
1882 */
1883static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
1884						      char *username,
1885						      char *userdata)
1886{
1887	struct iucv_connection *conn;
1888
1889	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
1890	if (!conn)
1891		goto out;
1892	skb_queue_head_init(&conn->collect_queue);
1893	skb_queue_head_init(&conn->commit_queue);
1894	spin_lock_init(&conn->collect_lock);
1895	conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT;
1896	conn->netdev = dev;
1897
1898	conn->rx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1899	if (!conn->rx_buff)
1900		goto out_conn;
1901	conn->tx_buff = alloc_skb(conn->max_buffsize, GFP_KERNEL | GFP_DMA);
1902	if (!conn->tx_buff)
1903		goto out_rx;
1904	conn->fsm = init_fsm("netiucvconn", conn_state_names,
1905			     conn_event_names, NR_CONN_STATES,
1906			     NR_CONN_EVENTS, conn_fsm, CONN_FSM_LEN,
1907			     GFP_KERNEL);
1908	if (!conn->fsm)
1909		goto out_tx;
1910
1911	fsm_settimer(conn->fsm, &conn->timer);
1912	fsm_newstate(conn->fsm, CONN_STATE_INVALID);
1913
1914	if (userdata)
1915		memcpy(conn->userdata, userdata, 17);
1916	if (username) {
1917		memcpy(conn->userid, username, 9);
1918		fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
1919	}
1920
1921	write_lock_bh(&iucv_connection_rwlock);
1922	list_add_tail(&conn->list, &iucv_connection_list);
1923	write_unlock_bh(&iucv_connection_rwlock);
1924	return conn;
1925
1926out_tx:
1927	kfree_skb(conn->tx_buff);
1928out_rx:
1929	kfree_skb(conn->rx_buff);
1930out_conn:
1931	kfree(conn);
1932out:
1933	return NULL;
1934}
1935
1936/**
1937 * Release a connection structure and remove it from the
1938 * list of netiucv connections.
1939 */
1940static void netiucv_remove_connection(struct iucv_connection *conn)
1941{
1942
1943	IUCV_DBF_TEXT(trace, 3, __func__);
1944	write_lock_bh(&iucv_connection_rwlock);
1945	list_del_init(&conn->list);
1946	write_unlock_bh(&iucv_connection_rwlock);
1947	fsm_deltimer(&conn->timer);
1948	netiucv_purge_skb_queue(&conn->collect_queue);
1949	if (conn->path) {
1950		iucv_path_sever(conn->path, conn->userdata);
1951		kfree(conn->path);
1952		conn->path = NULL;
1953	}
1954	netiucv_purge_skb_queue(&conn->commit_queue);
1955	kfree_fsm(conn->fsm);
1956	kfree_skb(conn->rx_buff);
1957	kfree_skb(conn->tx_buff);
1958}
1959
1960/**
1961 * Release everything of a net device.
1962 */
1963static void netiucv_free_netdevice(struct net_device *dev)
1964{
1965	struct netiucv_priv *privptr = netdev_priv(dev);
1966
1967	IUCV_DBF_TEXT(trace, 3, __func__);
1968
1969	if (!dev)
1970		return;
1971
1972	if (privptr) {
1973		if (privptr->conn)
1974			netiucv_remove_connection(privptr->conn);
1975		if (privptr->fsm)
1976			kfree_fsm(privptr->fsm);
1977		privptr->conn = NULL; privptr->fsm = NULL;
1978		/* privptr gets freed by free_netdev() */
1979	}
1980	free_netdev(dev);
1981}
1982
1983/**
1984 * Initialize a net device. (Called from kernel in alloc_netdev())
1985 */
1986static const struct net_device_ops netiucv_netdev_ops = {
1987	.ndo_open		= netiucv_open,
1988	.ndo_stop		= netiucv_close,
1989	.ndo_get_stats		= netiucv_stats,
1990	.ndo_start_xmit		= netiucv_tx,
1991	.ndo_change_mtu	   	= netiucv_change_mtu,
1992};
1993
1994static void netiucv_setup_netdevice(struct net_device *dev)
1995{
1996	dev->mtu	         = NETIUCV_MTU_DEFAULT;
1997	dev->destructor          = netiucv_free_netdevice;
1998	dev->hard_header_len     = NETIUCV_HDRLEN;
1999	dev->addr_len            = 0;
2000	dev->type                = ARPHRD_SLIP;
2001	dev->tx_queue_len        = NETIUCV_QUEUELEN_DEFAULT;
2002	dev->flags	         = IFF_POINTOPOINT | IFF_NOARP;
2003	dev->netdev_ops		 = &netiucv_netdev_ops;
2004}
2005
2006/**
2007 * Allocate and initialize everything of a net device.
2008 */
2009static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
2010{
2011	struct netiucv_priv *privptr;
2012	struct net_device *dev;
2013
2014	dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
2015			   NET_NAME_UNKNOWN, netiucv_setup_netdevice);
2016	if (!dev)
2017		return NULL;
2018	rtnl_lock();
2019	if (dev_alloc_name(dev, dev->name) < 0)
2020		goto out_netdev;
2021
2022	privptr = netdev_priv(dev);
2023	privptr->fsm = init_fsm("netiucvdev", dev_state_names,
2024				dev_event_names, NR_DEV_STATES, NR_DEV_EVENTS,
2025				dev_fsm, DEV_FSM_LEN, GFP_KERNEL);
2026	if (!privptr->fsm)
2027		goto out_netdev;
2028
2029	privptr->conn = netiucv_new_connection(dev, username, userdata);
2030	if (!privptr->conn) {
2031		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
2032		goto out_fsm;
2033	}
2034	fsm_newstate(privptr->fsm, DEV_STATE_STOPPED);
2035	return dev;
2036
2037out_fsm:
2038	kfree_fsm(privptr->fsm);
2039out_netdev:
2040	rtnl_unlock();
2041	free_netdev(dev);
2042	return NULL;
2043}
2044
/**
 * conn_write - driver sysfs store: create a new IUCV interface
 * @drv:   the netiucv driver
 * @buf:   "username[.userdata]" of the peer, parsed by
 *         netiucv_check_user()
 * @count: length of @buf
 *
 * Returns @count on success or a negative errno (-EEXIST when a
 * connection to that peer already exists).
 */
static ssize_t conn_write(struct device_driver *drv,
			  const char *buf, size_t count)
{
	char username[9];
	char userdata[17];
	int rc;
	struct net_device *dev;
	struct netiucv_priv *priv;
	struct iucv_connection *cp;

	IUCV_DBF_TEXT(trace, 3, __func__);
	rc = netiucv_check_user(buf, count, username, userdata);
	if (rc)
		return rc;

	/* Refuse a duplicate connection to the same peer. */
	read_lock_bh(&iucv_connection_rwlock);
	list_for_each_entry(cp, &iucv_connection_list, list) {
		if (!strncmp(username, cp->userid, 9) &&
		    !strncmp(userdata, cp->userdata, 17)) {
			read_unlock_bh(&iucv_connection_rwlock);
			IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
				"already exists\n", netiucv_printuser(cp));
			return -EEXIST;
		}
	}
	read_unlock_bh(&iucv_connection_rwlock);

	/* On success netiucv_init_netdevice() returns with the RTNL lock
	 * held; it is released below, after register_netdevice(). */
	dev = netiucv_init_netdevice(username, userdata);
	if (!dev) {
		IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
		return -ENODEV;
	}

	rc = netiucv_register_device(dev);
	if (rc) {
		rtnl_unlock();
		IUCV_DBF_TEXT_(setup, 2,
			"ret %d from netiucv_register_device\n", rc);
		goto out_free_ndev;
	}

	/* sysfs magic */
	priv = netdev_priv(dev);
	SET_NETDEV_DEV(dev, priv->dev);

	rc = register_netdevice(dev);
	rtnl_unlock();
	if (rc)
		goto out_unreg;

	dev_info(priv->dev, "The IUCV interface to %s has been established "
			    "successfully\n",
		netiucv_printuser(priv->conn));

	return count;

out_unreg:
	netiucv_unregister_device(priv->dev);
out_free_ndev:
	netiucv_free_netdevice(dev);
	return rc;
}

static DRIVER_ATTR(connection, 0200, NULL, conn_write);
2109
2110static ssize_t remove_write (struct device_driver *drv,
2111			     const char *buf, size_t count)
2112{
2113	struct iucv_connection *cp;
2114        struct net_device *ndev;
2115        struct netiucv_priv *priv;
2116        struct device *dev;
2117        char name[IFNAMSIZ];
2118	const char *p;
2119        int i;
2120
2121	IUCV_DBF_TEXT(trace, 3, __func__);
2122
2123        if (count >= IFNAMSIZ)
2124                count = IFNAMSIZ - 1;
2125
2126	for (i = 0, p = buf; i < count && *p; i++, p++) {
2127		if (*p == '\n' || *p == ' ')
2128                        /* trailing lf, grr */
2129                        break;
2130		name[i] = *p;
2131        }
2132        name[i] = '\0';
2133
2134	read_lock_bh(&iucv_connection_rwlock);
2135	list_for_each_entry(cp, &iucv_connection_list, list) {
2136		ndev = cp->netdev;
2137		priv = netdev_priv(ndev);
2138                dev = priv->dev;
2139		if (strncmp(name, ndev->name, count))
2140			continue;
2141		read_unlock_bh(&iucv_connection_rwlock);
2142                if (ndev->flags & (IFF_UP | IFF_RUNNING)) {
2143			dev_warn(dev, "The IUCV device is connected"
2144				" to %s and cannot be removed\n",
2145				priv->conn->userid);
2146			IUCV_DBF_TEXT(data, 2, "remove_write: still active\n");
2147			return -EPERM;
2148                }
2149                unregister_netdev(ndev);
2150                netiucv_unregister_device(dev);
2151                return count;
2152        }
2153	read_unlock_bh(&iucv_connection_rwlock);
2154	IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n");
2155        return -EINVAL;
2156}
2157
2158static DRIVER_ATTR(remove, 0200, NULL, remove_write);
2159
2160static struct attribute * netiucv_drv_attrs[] = {
2161	&driver_attr_connection.attr,
2162	&driver_attr_remove.attr,
2163	NULL,
2164};
2165
2166static struct attribute_group netiucv_drv_attr_group = {
2167	.attrs = netiucv_drv_attrs,
2168};
2169
/* NULL-terminated list of attribute groups hooked into the driver core
 * via netiucv_driver.groups in netiucv_init(). */
static const struct attribute_group *netiucv_drv_attr_groups[] = {
	&netiucv_drv_attr_group,
	NULL,
};
2174
/* Print the one-line load notice after successful initialization. */
static void netiucv_banner(void)
{
	pr_info("driver initialized\n");
}
2179
2180static void __exit netiucv_exit(void)
2181{
2182	struct iucv_connection *cp;
2183	struct net_device *ndev;
2184	struct netiucv_priv *priv;
2185	struct device *dev;
2186
2187	IUCV_DBF_TEXT(trace, 3, __func__);
2188	while (!list_empty(&iucv_connection_list)) {
2189		cp = list_entry(iucv_connection_list.next,
2190				struct iucv_connection, list);
2191		ndev = cp->netdev;
2192		priv = netdev_priv(ndev);
2193		dev = priv->dev;
2194
2195		unregister_netdev(ndev);
2196		netiucv_unregister_device(dev);
2197	}
2198
2199	device_unregister(netiucv_dev);
2200	driver_unregister(&netiucv_driver);
2201	iucv_unregister(&netiucv_handler, 1);
2202	iucv_unregister_dbf_views();
2203
2204	pr_info("driver unloaded\n");
2205	return;
2206}
2207
2208static int __init netiucv_init(void)
2209{
2210	int rc;
2211
2212	rc = iucv_register_dbf_views();
2213	if (rc)
2214		goto out;
2215	rc = iucv_register(&netiucv_handler, 1);
2216	if (rc)
2217		goto out_dbf;
2218	IUCV_DBF_TEXT(trace, 3, __func__);
2219	netiucv_driver.groups = netiucv_drv_attr_groups;
2220	rc = driver_register(&netiucv_driver);
2221	if (rc) {
2222		IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc);
2223		goto out_iucv;
2224	}
2225	/* establish dummy device */
2226	netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL);
2227	if (!netiucv_dev) {
2228		rc = -ENOMEM;
2229		goto out_driver;
2230	}
2231	dev_set_name(netiucv_dev, "netiucv");
2232	netiucv_dev->bus = &iucv_bus;
2233	netiucv_dev->parent = iucv_root;
2234	netiucv_dev->release = (void (*)(struct device *))kfree;
2235	netiucv_dev->driver = &netiucv_driver;
2236	rc = device_register(netiucv_dev);
2237	if (rc) {
2238		put_device(netiucv_dev);
2239		goto out_driver;
2240	}
2241	netiucv_banner();
2242	return rc;
2243
2244out_driver:
2245	driver_unregister(&netiucv_driver);
2246out_iucv:
2247	iucv_unregister(&netiucv_handler, 1);
2248out_dbf:
2249	iucv_unregister_dbf_views();
2250out:
2251	return rc;
2252}
2253
/* Module entry/exit points and license.  NOTE(review): MODULE_AUTHOR/
 * MODULE_DESCRIPTION are presumably declared earlier in the file —
 * confirm before adding them here. */
module_init(netiucv_init);
module_exit(netiucv_exit);
MODULE_LICENSE("GPL");
2257