/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface.

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly into sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain when
   you have linked lists which are shared by the 680x0 and the
   i596.
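
   For example, a pointer value the 680x0 sees as 0x12345678 must be
   stored in shared memory as 0x56781234 (the two 16-bit halves
   swapped) before the i596 can follow it; the WSWAP* macros below
   perform exactly this halfword swap.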

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency. This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
   Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

static char version[] __initdata =
	"82596.c $Revision: 1.5 $\n";

#define DRV_NAME	"82596"

/* DEBUG flags */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff


#define DEB(x,y)	if (i596_debug & (x)) y
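/* Note: DEB() expands to a bare if statement, so the second argument
 * is evaluated only when the corresponding debug bit is set, and a
 * DEB() must never be followed directly by an else.
 */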


#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
#define ENABLE_MVME16x_NET
#endif
#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
#define ENABLE_BVME6000_NET
#endif

#ifdef ENABLE_MVME16x_NET
#include <asm/mvme16xhw.h>
#endif
#ifdef ENABLE_BVME6000_NET
#include <asm/bvme6000hw.h>
#endif

/*
 * Define various macros for Channel Attention, word swapping etc., dependent
 * on architecture.  MVME and BVME are 680x0 based, otherwise it is Intel.
 */

#ifdef __mc68000__
#define WSWAPrfd(x)  ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPrbd(x)  ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPscb(x)  ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPcmd(x)  ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPtbd(x)  ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPchar(x) ((char *)            (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define ISCP_BUSY	0x00010000
#else
#error 82596.c: unknown architecture
#endif
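
/*
 * For example, if virt_to_bus() yields 0x07801234 for an rfd, then
 * WSWAPrfd(0x07801234) evaluates to 0x12340780, which is the form the
 * i596 expects to find in shared memory when running in 'big-endian'
 * mode.
 */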

/*
 * These were the intel versions, left here for reference. There
 * are currently no x86 users of this legacy i82596 chip.
 */
#if 0
#define WSWAPrfd(x)     ((struct i596_rfd *)((long)x))
#define WSWAPrbd(x)     ((struct i596_rbd *)((long)x))
#define WSWAPiscp(x)    ((struct i596_iscp *)((long)x))
#define WSWAPscb(x)     ((struct i596_scb *)((long)x))
#define WSWAPcmd(x)     ((struct i596_cmd *)((long)x))
#define WSWAPtbd(x)     ((struct i596_tbd *)((long)x))
#define WSWAPchar(x)    ((char *)((long)x))
#define ISCP_BUSY	0x0001
#endif

/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */
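
/*
 * A port command word is the 4-bit command code OR-ed into the low
 * bits of a bus address, e.g. PORT_ALTSCP | virt_to_bus(&lp->scp)
 * (the SCP here is page-allocated, so the low bits are clear).
 * MPU_PORT() below presents the word to the chip in two 16-bit
 * halves.
 */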

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

MODULE_AUTHOR("Richard Hirst");
MODULE_DESCRIPTION("i82596 driver");
MODULE_LICENSE("GPL");

module_param(i596_debug, int, 0);
MODULE_PARM_DESC(i596_debug, "i82596 debug mask");


/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;
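
/* Frames of rx_copybreak bytes or fewer are copied into a freshly
 * allocated skb by i596_rx(); longer frames are passed up in the ring
 * buffer's own skb, and the ring slot is refilled with a new
 * full-sized one.
 */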

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define I596_TOTAL_SIZE 17

#define I596_NULL ((void *)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set to 1 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define CUC_START	0x0100
#define CUC_RESUME	0x0200
#define CUC_SUSPEND	0x0300
#define CUC_ABORT	0x0400
#define RX_START	0x0010
#define RX_RESUME	0x0020
#define RX_SUSPEND	0x0030
#define RX_ABORT	0x0040

#define TX_TIMEOUT	(HZ/20)


struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	unsigned long ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	struct i596_tbd *next;
	char *data;
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596,
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */
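
/* A rough sketch of the two views of the command chain:
 *
 *   CPU view:   cmd0.v_next --> cmd1.v_next --> I596_NULL
 *   i596 view:  cmd0.b_next --> &cmd1.status  (as a word-swapped bus
 *               address; see i596_add_cmd())
 */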

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPU's viewpoint */
	unsigned short status;
	unsigned short command;
	struct i596_cmd *b_next;	/* Address from i596 viewpoint */
};

struct tx_cmd {
	struct i596_cmd cmd;
	struct i596_tbd *tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;	/* So we can free it after tx */
};

struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	struct i596_rfd *b_next;	/* Address from i596 viewpoint */
	struct i596_rbd *rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPU's viewpoint */
	struct i596_rfd *v_prev;
};

struct i596_rbd {
	unsigned short count;
	unsigned short zero1;
	struct i596_rbd *b_next;
	unsigned char *b_data;		/* Address from i596 viewpoint */
	unsigned short size;
	unsigned short zero2;
	struct sk_buff *skb;
	struct i596_rbd *v_next;
	struct i596_rbd *b_addr;	/* This rbd addr from i596 view */
	unsigned char *v_data;		/* Address from CPU's viewpoint */
};
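
/* Note: b_addr holds this descriptor's own (word-swapped) bus address;
 * i596_rx() compares it with rfd->rbd to verify that the RBD chain is
 * intact before using the buffer.
 */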

#define TX_RING_SIZE 64
#define RX_RING_SIZE 16

struct i596_scb {
	unsigned short status;
	unsigned short command;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	unsigned long crc_err;
	unsigned long align_err;
	unsigned long resource_err;
	unsigned long over_err;
	unsigned long rcvdt_err;
	unsigned long short_err;
	unsigned short t_on;
	unsigned short t_off;
};

struct i596_iscp {
	unsigned long stat;
	struct i596_scb *scb;
};

struct i596_scp {
	unsigned long sysbus;
	unsigned long pad;
	struct i596_iscp *iscp;
};

struct i596_private {
	volatile struct i596_scp scp;
	volatile struct i596_iscp iscp;
	volatile struct i596_scb scb;
	struct sa_cmd sa_cmd;
	struct cf_cmd cf_cmd;
	struct tdr_cmd tdr_cmd;
	struct mc_cmd mc_cmd;
	unsigned long stat;
	int last_restart __attribute__((aligned(4)));
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	unsigned long last_cmd;
	struct i596_rfd rfds[RX_RING_SIZE];
	struct i596_rbd rbds[RX_RING_SIZE];
	struct tx_cmd tx_cmds[TX_RING_SIZE];
	struct i596_tbd tbds[TX_RING_SIZE];
	int next_tx_cmd;
	spinlock_t lock;
};

static char init_setup[] =
{
	0x8E,			/* length, prefetch on */
	0xC8,			/* fifo to 8, monitor off */
#ifdef CONFIG_VME
	0xc0,			/* don't save bad frames */
#else
	0x80,			/* don't save bad frames */
#endif
	0x2E,			/* No source address insertion, 8 byte preamble */
	0x00,			/* priority and backoff defaults */
	0x60,			/* interframe spacing */
	0x00,			/* slot time LSB */
	0xf2,			/* slot time and retries */
	0x00,			/* promiscuous mode */
	0x00,			/* collision detect */
	0x40,			/* minimum frame length */
	0xff,
	0x00,
	0x7f /*  *multi IA */ };
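
/* These 14 bytes form the parameter block for the i596 CONFIGURE
 * command; init_i596_mem() copies them into cf_cmd.i596_config, and
 * set_multicast_list() later patches bytes 8 and 11 to toggle
 * promiscuous and multicast behaviour.
 */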

static int i596_open(struct net_device *dev);
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 25;
static int max_cmd_backlog = TX_RING_SIZE-1;


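/* Issue a Channel Attention, which makes the 82596 examine the SCB
 * command word.  On MVME16x this is a write to the board's CA
 * register; on BVME6000 a read of the board's base address serves the
 * same purpose.
 */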
static inline void CA(struct net_device *dev)
{
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		((struct i596_reg *) dev->base_addr)->ca = 1;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile u32 i;

		i = *(volatile u32 *) (dev->base_addr);
	}
#endif
}


static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
{
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
		p->porthi = ((c) | (u32) (x)) & 0xffff;
		p->portlo = ((c) | (u32) (x)) >> 16;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		u32 v = (u32) (c) | (u32) (x);
		v = ((u32) (v) << 16) | ((u32) (v) >> 16);
		*(volatile u32 *) dev->base_addr = v;
		udelay(1);
		*(volatile u32 *) dev->base_addr = v;
	}
#endif
}


static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	while (--delcnt && lp->iscp.stat)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		     dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	while (--delcnt && lp->scb.command)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		     dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}


static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
{
	volatile struct i596_cmd *c = cmd;

	while (--delcnt && c->command)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s.\n", dev->name, str);
		return -1;
	}
	else
		return 0;
}


static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
	       &lp->scp, lp->scp.sysbus, lp->scp.iscp);
	printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n",
	       &lp->iscp, lp->iscp.stat, lp->iscp.scb);
	printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x,"
		" .cmd = %p, .rfd = %p\n",
	       &lp->scb, lp->scb.status, lp->scb.command,
		lp->scb.cmd, lp->scb.rfd);
	printk(KERN_ERR "   errors: crc %lx, align %lx, resource %lx,"
		" over %lx, rcvdt %lx, short %lx\n",
		lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
		lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
	cmd = lp->cmd_head;
	while (cmd != I596_NULL) {
		printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
		  cmd, cmd->status, cmd->command, cmd->b_next);
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk(KERN_ERR "rfd_head = %p\n", rfd);
	do {
		printk(KERN_ERR "   %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
			" count %04x\n",
			rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
			rfd->count);
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk(KERN_ERR "rbd_head = %p\n", rbd);
	do {
		printk(KERN_ERR "   %p .count %04x, b_next %p, b_data %p, size %04x\n",
			rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
}

#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
static irqreturn_t i596_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		pcc2[0x28] = 1;
		pcc2[0x2b] = 0x1d;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
		*ethirq = 3;
	}
#endif
	printk(KERN_ERR "%s: Error interrupt\n", dev->name);
	i596_display_data(dev);
	return IRQ_HANDLED;
}
#endif

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dev_kfree_skb(rbd->skb);
		rbd->skb = NULL;
	}
}

static inline int init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

		if (skb == NULL) {
			remove_rx_bufs(dev);
			return -ENOMEM;
		}

		rbd->v_next = rbd+1;
		rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
		rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
		rbd->size = PKT_BUF_SZ;
#ifdef __mc68000__
		cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
#endif
	}
	lp->rbd_head = lp->rbds;
	rbd = lp->rbds + rx_ring_size - 1;
	rbd->v_next = lp->rbds;
	rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
		rfd->cmd = CMD_FLEX;
	}
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
	rfd = lp->rfds;
	rfd->rbd = lp->rbd_head;
	rfd->v_prev = lp->rfds + rx_ring_size - 1;
	rfd = lp->rfds + rx_ring_size - 1;
	rfd->v_next = lp->rfds;
	rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
	rfd->cmd = CMD_EOL|CMD_FLEX;

	return 0;
}
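
/* After init_rx_bufs() the receive side consists of two circular
 * lists: rx_ring_size RFDs (frame descriptors) and rx_ring_size RBDs
 * (buffer descriptors, one sk_buff each).  Only the first RFD points
 * at the RBD list, and the last RFD carries CMD_EOL, so the i596
 * stops there until i596_rx() advances the ring.
 */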


static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		lp->rfds[i].rbd = I596_NULL;
		lp->rfds[i].cmd = CMD_FLEX;
	}
	lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
	lp->rbd_head = lp->rbds;
	lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
}


static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	unsigned long flags;

	MPU_PORT(dev, PORT_RESET, NULL);

	udelay(100);		/* Wait 100us - seems to help */

#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Disable all ints for now */
		pcc2[0x28] = 1;
		pcc2[0x2a] = 0x48;
		/* Following disables snooping.  Snooping is not required
		 * as we make appropriate use of non-cached pages for
		 * shared data, and cache_push/cache_clear.
		 */
		pcc2[0x2b] = 0x08;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
	}
#endif

	/* change the scp address */

	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));

#endif

	lp->last_cmd = jiffies;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x)
		lp->scp.sysbus = 0x00000054;
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000)
		lp->scp.sysbus = 0x0000004c;
#endif

	lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
	lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
	lp->iscp.stat = ISCP_BUSY;
	lp->cmd_backlog = 0;

	lp->cmd_head = lp->scb.cmd = I596_NULL;

#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		lp->scb.t_on  = 7 * 25;
		lp->scb.t_off = 1 * 25;
	}
#endif

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

	CA(dev);

	if (wait_istat(dev,lp,1000,"initialization timed out"))
		goto failed;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);
	lp->scb.command = 0;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Enable ints, etc. now */
		pcc2[0x2a] = 0x55;	/* Edge sensitive */
		pcc2[0x2b] = 0x15;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 3;
	}
#endif


	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
	memcpy(lp->cf_cmd.i596_config, init_setup, 14);
	lp->cf_cmd.cmd.command = CmdConfigure;
	i596_add_cmd(dev, &lp->cf_cmd.cmd);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, ETH_ALEN);
	lp->sa_cmd.cmd.command = CmdSASetup;
	i596_add_cmd(dev, &lp->sa_cmd.cmd);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	lp->tdr_cmd.cmd.command = CmdTDR;
	i596_add_cmd(dev, &lp->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
		goto failed;
	}
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	lp->scb.command = RX_START;
	CA(dev);

	spin_unlock_irqrestore (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"RX_START not processed"))
		goto failed;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
	return 0;

failed:
	printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
	MPU_PORT(dev, PORT_RESET, NULL);
	return -1;
}

static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n",
			lp->rfd_head, lp->rbd_head));

	rfd = lp->rfd_head;		/* Ref next frame to check */

	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = I596_NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr)
			rbd = lp->rbd_head;
		else {
			printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = I596_NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG "  rfd %p, rfd.rbd %p, rfd.stat %04x\n",
			rfd, rfd->rbd, rfd->stat));

		if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
			/* a good frame */
			int pkt_len = rbd->count & SIZE_MASK;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;

				/* Get fresh skbuff to replace filled one. */
				newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}
				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				rbd->v_data = newskb->data;
				rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
#ifdef __mc68000__
				cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
#endif
			} else {
				skb = netdev_alloc_skb(dev, pkt_len + 2);
			}
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				dev->stats.rx_dropped++;
			} else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					skb_reserve(skb, 2);
					memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
				}
				skb->protocol=eth_type_trans(skb,dev);
				skb->len = pkt_len;
#ifdef __mc68000__
				cache_clear(virt_to_phys(rbd->skb->data),
						pkt_len);
#endif
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes+=pkt_len;
			}
		}
		else {
			DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
					dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if ((rfd->stat) & 0x0001)
				dev->stats.collisions++;
			if ((rfd->stat) & 0x0080)
				dev->stats.rx_length_errors++;
			if ((rfd->stat) & 0x0100)
				dev->stats.rx_over_errors++;
			if ((rfd->stat) & 0x0200)
				dev->stats.rx_fifo_errors++;
			if ((rfd->stat) & 0x0400)
				dev->stats.rx_frame_errors++;
			if ((rfd->stat) & 0x0800)
				dev->stats.rx_crc_errors++;
			if ((rfd->stat) & 0x1000)
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != I596_NULL && (rbd->count & 0x4000)) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = CMD_EOL|CMD_FLEX;
		rfd->count = 0;

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = CMD_FLEX;

		/* Update record of next frame descriptor to process */

		lp->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		rfd = lp->rfd_head;
	}

	DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}


static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != I596_NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch ((ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				dev_kfree_skb(skb);

				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = ptr->b_next = I596_NULL;
		}
	}

	wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
	lp->scb.cmd = I596_NULL;
}

static void i596_reset(struct net_device *dev, struct i596_private *lp,
			int ioaddr)
{
	unsigned long flags;

	DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	wait_cmd(dev,lp,100,"i596_reset timed out");

	netif_stop_queue(dev);

	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);

	/* wait for shutdown */
	wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	i596_cleanup_cmd(dev,lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}

static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	unsigned long flags;

	DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n"));

	cmd->status = 0;
	cmd->command |= (CMD_EOL | CMD_INTR);
	cmd->v_next = cmd->b_next = I596_NULL;

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != I596_NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev,lp,100,"i596_add_cmd timed out");
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
		lp->scb.command = CUC_START;
		CA(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);

		i596_reset(dev, lp, ioaddr);
	}
}

static int i596_open(struct net_device *dev)
{
	int res = 0;

	DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		return -EAGAIN;
	}
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) {
			res = -EAGAIN;
			goto err_irq_dev;
		}
	}
#endif
	res = init_rx_bufs(dev);
	if (res)
		goto err_irq_56;

	netif_start_queue(dev);

	if (init_i596_mem(dev)) {
		res = -EAGAIN;
		goto err_queue;
	}

	return 0;

err_queue:
	netif_stop_queue(dev);
	remove_rx_bufs(dev);
err_irq_56:
#ifdef ENABLE_MVME16x_NET
	free_irq(0x56, dev);
err_irq_dev:
#endif
	free_irq(dev->irq, dev);

	return res;
}

static void i596_tx_timeout (struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
			dev->name));

	dev->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == dev->stats.tx_packets) {
		DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp, ioaddr);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n"));
		lp->scb.command = CUC_START | RX_START;
		CA (dev);
		lp->last_restart = dev->stats.tx_packets;
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue (dev);
}

static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;

	DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}
	netif_stop_queue(dev);

	tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
	tbd = lp->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
				dev->name);
		dev->stats.tx_dropped++;

		dev_kfree_skb(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = CMD_FLEX | CmdTx;
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = EOF | length;

		tbd->data = WSWAPchar(virt_to_bus(skb->data));

#ifdef __mc68000__
		cache_push(virt_to_phys(skb->data), length);
#endif
		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return NETDEV_TX_OK;
}

static void print_eth(unsigned char *add, char *str)
{
	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
	       add, add + 6, add, add[12], add[13], str);
}

static int io = 0x300;
static int irq = 10;

static const struct net_device_ops i596_netdev_ops = {
	.ndo_open		= i596_open,
	.ndo_stop		= i596_close,
	.ndo_start_xmit		= i596_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= i596_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

struct net_device * __init i82596_probe(int unit)
{
	struct net_device *dev;
	int i;
	struct i596_private *lp;
	char eth_addr[8];
	static int probed;
	int err;

	if (probed)
		return ERR_PTR(-ENODEV);
	probed++;

	dev = alloc_etherdev(0);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	} else {
		dev->base_addr = io;
		dev->irq = irq;
	}

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
			printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n");
			err = -ENODEV;
			goto out;
		}
		memcpy(eth_addr, (void *) 0xfffc1f2c, ETH_ALEN);	/* YUCK! Get addr from NOVRAM */
		dev->base_addr = MVME_I596_BASE;
		dev->irq = (unsigned) MVME16x_IRQ_I596;
		goto found;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
		unsigned char msr = rtc[3];
		int i;

		rtc[3] |= 0x80;
		for (i = 0; i < 6; i++)
			eth_addr[i] = rtc[i * 4 + 7];	/* Stored in RTC RAM at offset 1 */
		rtc[3] = msr;
		dev->base_addr = BVME_I596_BASE;
		dev->irq = (unsigned) BVME_IRQ_I596;
		goto found;
	}
#endif
	err = -ENODEV;
	goto out;

found:
	dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
	if (!dev->mem_start) {
		err = -ENOMEM;
		goto out1;
	}

	DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));

	for (i = 0; i < 6; i++)
		DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));

	DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));

	DEB(DEB_PROBE,printk(KERN_INFO "%s", version));

	/* The 82596-specific entries in the device structure. */
	dev->netdev_ops = &i596_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ml_priv = (void *)(dev->mem_start);

	lp = dev->ml_priv;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), "
			"lp->scb at 0x%08lx\n",
			dev->name, (unsigned long)lp,
			sizeof(struct i596_private), (unsigned long)&lp->scb));
	memset((void *) lp, 0, sizeof(struct i596_private));

#ifdef __mc68000__
	cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
	cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
	kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
#endif
	lp->scb.command = 0;
	lp->scb.cmd = I596_NULL;
	lp->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	err = register_netdev(dev);
	if (err)
		goto out2;
	return dev;
out2:
#ifdef __mc68000__
	/* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
	 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
	 */
	kernel_set_cachemode((void *)(dev->mem_start), 4096,
			IOMAP_FULL_CACHING);
#endif
	free_page ((u32)(dev->mem_start));
out1:
out:
	free_netdev(dev);
	return ERR_PTR(err);
}

static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	short ioaddr;
	unsigned short status, ack_cmd = 0;
	int handled = 0;

#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
			i596_error(irq, dev_id);
			return IRQ_HANDLED;
		}
	}
#endif
	if (dev == NULL) {
		printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq);
		return IRQ_NONE;
	}

	ioaddr = dev->base_addr;
	lp = dev->ml_priv;

	spin_lock (&lp->lock);

	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	status = lp->scb.status;

	DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			dev->name, irq, status));

	ack_cmd = status & 0xf000;

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		handled = 1;
		if ((status & 0x8000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));

		while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
			ptr = lp->cmd_head;

			DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n",
				       lp->cmd_head->status, lp->cmd_head->command));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch ((ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if ((ptr->status) & STAT_OK) {
					DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
				} else {
					dev->stats.tx_errors++;
					if ((ptr->status) & 0x0020)
						dev->stats.collisions++;
					if (!((ptr->status) & 0x0040))
						dev->stats.tx_heartbeat_errors++;
					if ((ptr->status) & 0x0400)
						dev->stats.tx_carrier_errors++;
					if ((ptr->status) & 0x0800)
						dev->stats.collisions++;
					if ((ptr->status) & 0x1000)
						dev->stats.tx_aborted_errors++;
				}

				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = ((struct tdr_cmd *)ptr)->status;

				if (status & 0x8000) {
					DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR "%s: Transceiver problem.\n", dev->name);
					if (status & 0x2000)
						printk(KERN_ERR "%s: Termination problem.\n", dev->name);
					if (status & 0x1000)
						printk(KERN_ERR "%s: Short circuit.\n", dev->name);

					DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
			case CmdMulticastList:
				/* Zap command so set_multicast_list() knows it is free */
				ptr->command = 0;
				break;
			}
			ptr->v_next = ptr->b_next = I596_NULL;
			lp->last_cmd = jiffies;
		}

		ptr = lp->cmd_head;
		while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
			ptr->command &= 0x1fff;
			ptr = ptr->v_next;
		}

		if ((lp->cmd_head != I596_NULL))
			ack_cmd |= CUC_START;
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
				ack_cmd |= RX_START;
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	lp->scb.command = ack_cmd;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		/* Ack the interrupt */

		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		pcc2[0x2a] |= 0x08;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
		*ethirq = 3;
	}
#endif
	CA(dev);

	DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock (&lp->lock);
	return IRQ_RETVAL(handled);
}

static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
		       dev->name, lp->scb.status));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev,lp,100,"close1 timed out");
	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);

	wait_cmd(dev,lp,100,"close2 timed out");

	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT,i596_display_data(dev));
	i596_cleanup_cmd(dev,lp);

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Disable all ints */
		pcc2[0x28] = 1;
		pcc2[0x2a] = 0x40;
		pcc2[0x2b] = 0x40;	/* Set snooping bits now! */
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
	}
#endif

#ifdef ENABLE_MVME16x_NET
	free_irq(0x56, dev);
#endif
	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}

/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int config = 0, cnt;

	DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
		dev->name, netdev_mc_count(dev),
		dev->flags & IFF_PROMISC  ? "ON" : "OFF",
		dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out"))
		return;

	if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		lp->cf_cmd.cmd.command = CmdConfigure;
		i596_add_cmd(dev, &lp->cf_cmd.cmd);
	}

	cnt = netdev_mc_count(dev);
	if (cnt > MAX_MC_CNT) {
		cnt = MAX_MC_CNT;
		printk(KERN_ERR "%s: Only %d multicast addresses supported\n",
			dev->name, cnt);
	}

	if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		unsigned char *cp;
		struct mc_cmd *cmd;

		if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out"))
			return;
		cmd = &lp->mc_cmd;
		cmd->cmd.command = CmdMulticastList;
		cmd->mc_cnt = cnt * ETH_ALEN;
		cp = cmd->mc_addrs;
		netdev_for_each_mc_addr(ha, dev) {
			if (!cnt--)
				break;
			memcpy(cp, ha->addr, ETH_ALEN);
			if (i596_debug > 1)
				DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
						dev->name, cp));
			cp += ETH_ALEN;
		}
		i596_add_cmd(dev, &cmd->cmd);
	}
}

#ifdef MODULE
static struct net_device *dev_82596;

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "i82596 debug mask");

int __init init_module(void)
{
	if (debug >= 0)
		i596_debug = debug;
	dev_82596 = i82596_probe(-1);
	return PTR_ERR_OR_ZERO(dev_82596);
}

void __exit cleanup_module(void)
{
	unregister_netdev(dev_82596);
#ifdef __mc68000__
	/* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
	 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
	 */

	kernel_set_cachemode((void *)(dev_82596->mem_start), 4096,
			IOMAP_FULL_CACHING);
#endif
	free_page ((u32)(dev_82596->mem_start));
	free_netdev(dev_82596);
}

#endif				/* MODULE */
