/*	tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.

	Copyright 2000,2001  The Linux Kernel Team
	Written/copyright 1994-2001 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	Please submit bugs to http://bugzilla.kernel.org/ .
*/

#define pr_fmt(fmt) "tulip: " fmt

#define DRV_NAME	"tulip"
#ifdef CONFIG_TULIP_NAPI
#define DRV_VERSION    "1.1.15-NAPI" /* Keep at least for test */
#else
#define DRV_VERSION	"1.1.15"
#endif
#define DRV_RELDATE	"Feb 27, 2007"


#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "tulip.h"
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/prom.h>
#endif

static char version[] =
	"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";

/* A few user-configurable values. */

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static unsigned int max_interrupt_work = 25;

#define MAX_UNITS 8
/* Used to pass the full-duplex flag, etc. */
static int full_duplex[MAX_UNITS];
static int options[MAX_UNITS];
static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */

/*  The possible media types that can be set in options[] are: */
const char * const medianame[32] = {
	"10baseT", "10base2", "AUI", "100baseTx",
	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
	"","","","", "","","","",  "","","","Transceiver reset",
};

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif

/*
  Set the bus performance register.
	Typical: Set 16 longword cache alignment, no burst limit.
	Cache alignment bits 15:14	     Burst length 13:8
		0000	No alignment  0x00000000 unlimited		0800 8 longwords
		4000	8  longwords		0100 1 longword		1000 16 longwords
		8000	16 longwords		0200 2 longwords	2000 32 longwords
		C000	32  longwords		0400 4 longwords
	Warning: many older 486 systems are broken and require setting 0x00A04800
	   8 longword cache alignment, 8 longword burst.
	ToDo: Non-Intel setting could be better.
*/

#if defined(__alpha__) || defined(__ia64__)
static int csr0 = 0x01A00000 | 0xE000;
#elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
static int csr0 = 0x01A00000 | 0x8000;
#elif defined(CONFIG_SPARC) || defined(__hppa__)
/* The UltraSparc PCI controllers will disconnect at every 64-byte
 * crossing anyways so it makes no sense to tell Tulip to burst
 * any more than that.
 */
static int csr0 = 0x01A00000 | 0x9000;
#elif defined(__arm__) || defined(__sh__)
static int csr0 = 0x01A00000 | 0x4800;
#elif defined(__mips__)
static int csr0 = 0x00200000 | 0x4000;
#else
#warning Processor architecture undefined!
static int csr0 = 0x00A00000 | 0x4800;
#endif
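
/*
 * Worked example (illustration only, not used by the code): assuming the
 * MWI/MRL/MRM and CALShift/BurstLenShift definitions in tulip.h, the x86
 * default above decomposes per the table in the comment block as
 *
 *	0x01A00000	MWI | MRL | MRM PCI transaction modes
 *	0x8000		cache alignment field = 16 longwords
 *	burst field 0	unlimited burst
 *
 * i.e. "16 longword cache alignment, no burst limit".  The Alpha/IA-64
 * value ORs in 0xE000 instead: 32-longword alignment (0xC000) plus a
 * 32-longword burst limit (0x2000).
 */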

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (4*HZ)


MODULE_AUTHOR("The Linux Kernel Team");
MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_param(tulip_debug, int, 0);
module_param(max_interrupt_work, int, 0);
module_param(rx_copybreak, int, 0);
module_param(csr0, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);

#ifdef TULIP_DEBUG
int tulip_debug = TULIP_DEBUG;
#else
int tulip_debug = 1;
#endif

static void tulip_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct tulip_private *tp = netdev_priv(dev);

	if (netif_running(dev))
		schedule_work(&tp->media_work);
}

/*
 * This table is used during operation for capabilities and media timer.
 *
 * It is indexed via the values in 'enum chips'
 */

struct tulip_chip_table tulip_tbl[] = {
  { }, /* placeholder for array, slot unused currently */
  { }, /* placeholder for array, slot unused currently */

  /* DC21140 */
  { "Digital DS21140 Tulip", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
	tulip_media_task },

  /* DC21142, DC21143 */
  { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
	| HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },

  /* LC82C168 */
  { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
	HAS_MII | HAS_PNICNWAY, pnic_timer, },

  /* MX98713 */
  { "Macronix 98713 PMAC", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },

  /* MX98715 */
  { "Macronix 98715 PMAC", 256, 0x0001ebef,
	HAS_MEDIA_TABLE, mxic_timer, },

  /* MX98725 */
  { "Macronix 98725 PMAC", 256, 0x0001ebef,
	HAS_MEDIA_TABLE, mxic_timer, },

  /* AX88140 */
  { "ASIX AX88140", 128, 0x0001fbff,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
	| IS_ASIX, tulip_timer, tulip_media_task },

  /* PNIC2 */
  { "Lite-On PNIC-II", 256, 0x0801fbff,
	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },

  /* COMET */
  { "ADMtek Comet", 256, 0x0001abef,
	HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },

  /* COMPEX9881 */
  { "Compex 9881 PMAC", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },

  /* I21145 */
  { "Intel DS21145 Tulip", 128, 0x0801fbff,
	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
	| HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },

  /* DM910X */
#ifdef CONFIG_TULIP_DM910X
  { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
	tulip_timer, tulip_media_task },
#else
  { NULL },
#endif

  /* RS7112 */
  { "Conexant LANfinity", 256, 0x0001ebef,
	HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },

};


static const struct pci_device_id tulip_pci_tbl[] = {
	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
/*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
#ifdef CONFIG_TULIP_DM910X
	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
#endif
	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
	{ 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
	{ 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Microsoft MN-120 */
	{ 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
	{ } /* terminate list */
};
MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);


/* A full-duplex map for media types. */
const char tulip_media_cap[32] =
{0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
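
/*
 * Each entry above is, per the uses elsewhere in this driver, an OR of the
 * MediaIs* capability flags from tulip.h for the medianame[] entry of the
 * same index (full duplex, always-full-duplex, 100 Mbit, reached via MII,
 * and so on).  tulip_media_cap[dev->if_port] is what the rest of the code
 * tests with MediaIsMII, MediaAlwaysFD, MediaIs100, etc.
 */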

static void tulip_tx_timeout(struct net_device *dev);
static void tulip_init_ring(struct net_device *dev);
static void tulip_free_ring(struct net_device *dev);
static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
					  struct net_device *dev);
static int tulip_open(struct net_device *dev);
static int tulip_close(struct net_device *dev);
static void tulip_up(struct net_device *dev);
static void tulip_down(struct net_device *dev);
static struct net_device_stats *tulip_get_stats(struct net_device *dev);
static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void set_rx_mode(struct net_device *dev);
static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_tulip(struct net_device *dev);
#endif

static void tulip_set_power_state (struct tulip_private *tp,
				   int sleep, int snooze)
{
	if (tp->flags & HAS_ACPI) {
		u32 tmp, newtmp;
		pci_read_config_dword (tp->pdev, CFDD, &tmp);
		newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
		if (sleep)
			newtmp |= CFDD_Sleep;
		else if (snooze)
			newtmp |= CFDD_Snooze;
		if (tmp != newtmp)
			pci_write_config_dword (tp->pdev, CFDD, newtmp);
	}

}


static void tulip_up(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int next_tick = 3*HZ;
	u32 reg;
	int i;

#ifdef CONFIG_TULIP_NAPI
	napi_enable(&tp->napi);
#endif

	/* Wake the chip from sleep/snooze mode. */
	tulip_set_power_state (tp, 0, 0);

	/* Disable all WOL events */
	pci_enable_wake(tp->pdev, PCI_D3hot, 0);
	pci_enable_wake(tp->pdev, PCI_D3cold, 0);
	tulip_set_wolopts(tp->pdev, 0);

	/* On some chip revs we must set the MII/SYM port before the reset!? */
	if (tp->mii_cnt  ||  (tp->mtable  &&  tp->mtable->has_mii))
		iowrite32(0x00040000, ioaddr + CSR6);

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	iowrite32(0x00000001, ioaddr + CSR0);
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
	udelay(100);

	/* Deassert reset.
	   Wait the specified 50 PCI cycles after a reset by initializing
	   Tx and Rx queues and the address filter list. */
	iowrite32(tp->csr0, ioaddr + CSR0);
	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);  /* flush write */
	udelay(100);

	if (tulip_debug > 1)
		netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);

	iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
	iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
	tp->cur_rx = tp->cur_tx = 0;
	tp->dirty_rx = tp->dirty_tx = 0;

	if (tp->flags & MC_HASH_ONLY) {
		u32 addr_low = get_unaligned_le32(dev->dev_addr);
		u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
		if (tp->chip_id == AX88140) {
			iowrite32(0, ioaddr + CSR13);
			iowrite32(addr_low,  ioaddr + CSR14);
			iowrite32(1, ioaddr + CSR13);
			iowrite32(addr_high, ioaddr + CSR14);
		} else if (tp->flags & COMET_MAC_ADDR) {
			iowrite32(addr_low,  ioaddr + 0xA4);
			iowrite32(addr_high, ioaddr + 0xA8);
			iowrite32(0, ioaddr + CSR27);
			iowrite32(0, ioaddr + CSR28);
		}
	} else {
		/* This is set_rx_mode(), but without starting the transmitter. */
		u16 *eaddrs = (u16 *)dev->dev_addr;
		u16 *setup_frm = &tp->setup_frame[15*6];
		dma_addr_t mapping;

		/* 21140 bug: you must add the broadcast address. */
		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
		/* Fill the final entry of the table with our physical address. */
		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];

		mapping = pci_map_single(tp->pdev, tp->setup_frame,
					 sizeof(tp->setup_frame),
					 PCI_DMA_TODEVICE);
		tp->tx_buffers[tp->cur_tx].skb = NULL;
		tp->tx_buffers[tp->cur_tx].mapping = mapping;

		/* Put the setup frame on the Tx list. */
		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);

		tp->cur_tx++;
	}

	tp->saved_if_port = dev->if_port;
	if (dev->if_port == 0)
		dev->if_port = tp->default_port;

	/* Allow selecting a default media. */
	i = 0;
	if (tp->mtable == NULL)
		goto media_picked;
	if (dev->if_port) {
		int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
			(dev->if_port == 12 ? 0 : dev->if_port);
		for (i = 0; i < tp->mtable->leafcount; i++)
			if (tp->mtable->mleaf[i].media == looking_for) {
				dev_info(&dev->dev,
					 "Using user-specified media %s\n",
					 medianame[dev->if_port]);
				goto media_picked;
			}
	}
	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
		for (i = 0; i < tp->mtable->leafcount; i++)
			if (tp->mtable->mleaf[i].media == looking_for) {
				dev_info(&dev->dev,
					 "Using EEPROM-set media %s\n",
					 medianame[looking_for]);
				goto media_picked;
			}
	}
	/* Start sensing first non-full-duplex media. */
	for (i = tp->mtable->leafcount - 1;
		 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
		;
media_picked:

	tp->csr6 = 0;
	tp->cur_index = i;
	tp->nwayset = 0;

	if (dev->if_port) {
		if (tp->chip_id == DC21143  &&
		    (tulip_media_cap[dev->if_port] & MediaIsMII)) {
			/* We must reset the media CSRs when we force-select MII mode. */
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
			iowrite32(0x0008, ioaddr + CSR15);
		}
		tulip_select_media(dev, 1);
	} else if (tp->chip_id == DC21142) {
		if (tp->mii_cnt) {
			tulip_select_media(dev, 1);
			if (tulip_debug > 1)
				dev_info(&dev->dev,
					 "Using MII transceiver %d, status %04x\n",
					 tp->phys[0],
					 tulip_mdio_read(dev, tp->phys[0], 1));
			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
			tp->csr6 = csr6_mask_hdcap;
			dev->if_port = 11;
			iowrite32(0x0000, ioaddr + CSR13);
			iowrite32(0x0000, ioaddr + CSR14);
		} else
			t21142_start_nway(dev);
	} else if (tp->chip_id == PNIC2) {
	        /* for initial startup advertise 10/100 Full and Half */
	        tp->sym_advertise = 0x01E0;
                /* enable autonegotiate end interrupt */
	        iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
	        iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
		pnic2_start_nway(dev);
	} else if (tp->chip_id == LC82C168  &&  ! tp->medialock) {
		if (tp->mii_cnt) {
			dev->if_port = 11;
			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
			iowrite32(0x0001, ioaddr + CSR15);
		} else if (ioread32(ioaddr + CSR5) & TPLnkPass)
			pnic_do_nway(dev);
		else {
			/* Start with 10mbps to do autonegotiation. */
			iowrite32(0x32, ioaddr + CSR12);
			tp->csr6 = 0x00420000;
			iowrite32(0x0001B078, ioaddr + 0xB8);
			iowrite32(0x0201B078, ioaddr + 0xB8);
			next_tick = 1*HZ;
		}
	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
		   ! tp->medialock) {
		dev->if_port = 0;
		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
		/* Provided by BOLO, Macronix - 12/10/1998. */
		dev->if_port = 0;
		tp->csr6 = 0x01a80200;
		iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
		iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
	} else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
		/* Enable automatic Tx underrun recovery. */
		iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
		dev->if_port = tp->mii_cnt ? 11 : 0;
		tp->csr6 = 0x00040000;
	} else if (tp->chip_id == AX88140) {
		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
	} else
		tulip_select_media(dev, 1);

	/* Start the chip's Tx to process setup frame. */
	tulip_stop_rxtx(tp);
	barrier();
	udelay(5);
	iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);

	/* Enable interrupts by setting the interrupt mask. */
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
	iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
	tulip_start_rxtx(tp);
	iowrite32(0, ioaddr + CSR2);		/* Rx poll demand */

	if (tulip_debug > 2) {
		netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
			   ioread32(ioaddr + CSR0),
			   ioread32(ioaddr + CSR5),
			   ioread32(ioaddr + CSR6));
	}

	/* Set the timer to check for link beat and perhaps switch to an
	   alternate media type. */
	tp->timer.expires = RUN_AT(next_tick);
	add_timer(&tp->timer);
#ifdef CONFIG_TULIP_NAPI
	init_timer(&tp->oom_timer);
        tp->oom_timer.data = (unsigned long)dev;
        tp->oom_timer.function = oom_timer;
#endif
}

static int
tulip_open(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int retval;

	tulip_init_ring (dev);

	retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
			     dev->name, dev);
	if (retval)
		goto free_ring;

	tulip_up (dev);

	netif_start_queue (dev);

	return 0;

free_ring:
	tulip_free_ring (dev);
	return retval;
}


static void tulip_tx_timeout(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	spin_lock_irqsave (&tp->lock, flags);

	if (tulip_media_cap[dev->if_port] & MediaIsMII) {
		/* Do nothing -- the media monitor should handle this. */
		if (tulip_debug > 1)
			dev_warn(&dev->dev,
				 "Transmit timeout using MII device\n");
	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
		   tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
		   tp->chip_id == DM910X) {
		dev_warn(&dev->dev,
			 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
			 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
			 ioread32(ioaddr + CSR15));
		tp->timeout_recovery = 1;
		schedule_work(&tp->media_work);
		goto out_unlock;
	} else if (tp->chip_id == PNIC2) {
		dev_warn(&dev->dev,
			 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
			 (int)ioread32(ioaddr + CSR5),
			 (int)ioread32(ioaddr + CSR6),
			 (int)ioread32(ioaddr + CSR7),
			 (int)ioread32(ioaddr + CSR12));
	} else {
		dev_warn(&dev->dev,
			 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
			 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
		dev->if_port = 0;
	}

#if defined(way_too_many_messages)
	if (tulip_debug > 3) {
		int i;
		for (i = 0; i < RX_RING_SIZE; i++) {
			u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
			int j;
			printk(KERN_DEBUG
			       "%2d: %08x %08x %08x %08x  %02x %02x %02x\n",
			       i,
			       (unsigned int)tp->rx_ring[i].status,
			       (unsigned int)tp->rx_ring[i].length,
			       (unsigned int)tp->rx_ring[i].buffer1,
			       (unsigned int)tp->rx_ring[i].buffer2,
			       buf[0], buf[1], buf[2]);
			for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
				if (j < 100)
					pr_cont(" %02x", buf[j]);
			pr_cont(" j=%d\n", j);
		}
		printk(KERN_DEBUG "  Rx ring %p: ", tp->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
		printk(KERN_DEBUG "  Tx ring %p: ", tp->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
		pr_cont("\n");
	}
#endif

	tulip_tx_timeout_complete(tp, ioaddr);

out_unlock:
	spin_unlock_irqrestore (&tp->lock, flags);
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue (dev);
}


/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void tulip_init_ring(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int i;

	tp->susp_rx = 0;
	tp->ttimer = 0;
	tp->nir = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		tp->rx_ring[i].status = 0x00000000;
		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
		tp->rx_buffers[i].skb = NULL;
		tp->rx_buffers[i].mapping = 0;
	}
	/* Mark the last entry as wrapping the ring. */
	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);

	for (i = 0; i < RX_RING_SIZE; i++) {
		dma_addr_t mapping;

		/* Note the receive buffer must be longword aligned.
		   netdev_alloc_skb() provides 16 byte alignment.  But do *not*
		   use skb_reserve() to align the IP header! */
		struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
		tp->rx_buffers[i].skb = skb;
		if (skb == NULL)
			break;
		mapping = pci_map_single(tp->pdev, skb->data,
					 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
		tp->rx_buffers[i].mapping = mapping;
		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
	}
	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		tp->tx_buffers[i].skb = NULL;
		tp->tx_buffers[i].mapping = 0;
		tp->tx_ring[i].status = 0x00000000;
		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
	}
	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
}
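
/*
 * Layout note: rx_ring and tx_ring live in one contiguous DMA allocation
 * (see tulip_init_one()), so the code above fills each descriptor's
 * buffer2 with the bus address of the following descriptor and makes the
 * final entry point back at the ring base with DESC_RING_WRAP set.
 */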

static netdev_tx_t
tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry;
	u32 flag;
	dma_addr_t mapping;
	unsigned long flags;

	spin_lock_irqsave(&tp->lock, flags);

	/* Calculate the next Tx descriptor entry. */
	entry = tp->cur_tx % TX_RING_SIZE;

	tp->tx_buffers[entry].skb = skb;
	mapping = pci_map_single(tp->pdev, skb->data,
				 skb->len, PCI_DMA_TODEVICE);
	tp->tx_buffers[entry].mapping = mapping;
	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);

	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
		flag = 0x60000000; /* No interrupt */
	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
		flag = 0xe0000000; /* Tx-done intr. */
	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
		flag = 0x60000000; /* No Tx-done intr. */
	} else {		/* Leave room for set_rx_mode() to fill entries. */
		flag = 0xe0000000; /* Tx-done intr. */
		netif_stop_queue(dev);
	}
	if (entry == TX_RING_SIZE-1)
		flag = 0xe0000000 | DESC_RING_WRAP;

	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
	/* if we were using Transmit Automatic Polling, we would need a
	 * wmb() here. */
	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
	wmb();

	tp->cur_tx++;

	/* Trigger an immediate transmit demand. */
	iowrite32(0, tp->base_addr + CSR1);

	spin_unlock_irqrestore(&tp->lock, flags);

	return NETDEV_TX_OK;
}
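
/*
 * For reference, the magic descriptor-length flags used above (in 21x4x
 * TDES1 terms): 0x60000000 sets the first- and last-segment bits (29 and
 * 30) for a single-buffer frame, and 0xe0000000 additionally sets bit 31
 * to request a Tx-done interrupt, which the ring-fullness checks ask for
 * only at the half-way mark or when the ring is nearly full.
 */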

static void tulip_clean_tx_ring(struct tulip_private *tp)
{
	unsigned int dirty_tx;

	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
		dirty_tx++) {
		int entry = dirty_tx % TX_RING_SIZE;
		int status = le32_to_cpu(tp->tx_ring[entry].status);

		if (status < 0) {
			tp->dev->stats.tx_errors++;	/* It wasn't Txed */
			tp->tx_ring[entry].status = 0;
		}

		/* Check for Tx filter setup frames. */
		if (tp->tx_buffers[entry].skb == NULL) {
			/* test because dummy frames not mapped */
			if (tp->tx_buffers[entry].mapping)
				pci_unmap_single(tp->pdev,
					tp->tx_buffers[entry].mapping,
					sizeof(tp->setup_frame),
					PCI_DMA_TODEVICE);
			continue;
		}

		pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
				tp->tx_buffers[entry].skb->len,
				PCI_DMA_TODEVICE);

		/* Free the original skb. */
		dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
		tp->tx_buffers[entry].skb = NULL;
		tp->tx_buffers[entry].mapping = 0;
	}
}

static void tulip_down (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	unsigned long flags;

	cancel_work_sync(&tp->media_work);

#ifdef CONFIG_TULIP_NAPI
	napi_disable(&tp->napi);
#endif

	del_timer_sync (&tp->timer);
#ifdef CONFIG_TULIP_NAPI
	del_timer_sync (&tp->oom_timer);
#endif
	spin_lock_irqsave (&tp->lock, flags);

	/* Disable interrupts by clearing the interrupt mask. */
	iowrite32 (0x00000000, ioaddr + CSR7);

	/* Stop the Tx and Rx processes. */
	tulip_stop_rxtx(tp);

	/* prepare receive buffers */
	tulip_refill_rx(dev);

	/* release any unconsumed transmit buffers */
	tulip_clean_tx_ring(tp);

	if (ioread32(ioaddr + CSR6) != 0xffffffff)
		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;

	spin_unlock_irqrestore (&tp->lock, flags);

	init_timer(&tp->timer);
	tp->timer.data = (unsigned long)dev;
	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;

	dev->if_port = tp->saved_if_port;

	/* Leave the driver in snooze, not sleep, mode. */
	tulip_set_power_state (tp, 0, 1);
}

static void tulip_free_ring (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = tp->rx_buffers[i].skb;
		dma_addr_t mapping = tp->rx_buffers[i].mapping;

		tp->rx_buffers[i].skb = NULL;
		tp->rx_buffers[i].mapping = 0;

		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
		tp->rx_ring[i].length = 0;
		/* An invalid address. */
		tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
		if (skb) {
			pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb (skb);
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct sk_buff *skb = tp->tx_buffers[i].skb;

		if (skb != NULL) {
			pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
					 skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb (skb);
		}
		tp->tx_buffers[i].skb = NULL;
		tp->tx_buffers[i].mapping = 0;
	}
}

static int tulip_close (struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	netif_stop_queue (dev);

	tulip_down (dev);

	if (tulip_debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
			   ioread32 (ioaddr + CSR5));

	free_irq (tp->pdev->irq, dev);

	tulip_free_ring (dev);

	return 0;
}

static struct net_device_stats *tulip_get_stats(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;

	if (netif_running(dev)) {
		unsigned long flags;

		spin_lock_irqsave (&tp->lock, flags);

		dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;

		spin_unlock_irqrestore(&tp->lock, flags);
	}

	return &dev->stats;
}


static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tulip_private *np = netdev_priv(dev);
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}


static int tulip_ethtool_set_wol(struct net_device *dev,
				 struct ethtool_wolinfo *wolinfo)
{
	struct tulip_private *tp = netdev_priv(dev);

	if (wolinfo->wolopts & (~tp->wolinfo.supported))
		   return -EOPNOTSUPP;

	tp->wolinfo.wolopts = wolinfo->wolopts;
	device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
	return 0;
}

static void tulip_ethtool_get_wol(struct net_device *dev,
				  struct ethtool_wolinfo *wolinfo)
{
	struct tulip_private *tp = netdev_priv(dev);

	wolinfo->supported = tp->wolinfo.supported;
	wolinfo->wolopts = tp->wolinfo.wolopts;
	return;
}


static const struct ethtool_ops ops = {
	.get_drvinfo = tulip_get_drvinfo,
	.set_wol     = tulip_ethtool_set_wol,
	.get_wol     = tulip_ethtool_get_wol,
};

/* Provide ioctl() calls to examine the MII xcvr state. */
static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	struct mii_ioctl_data *data = if_mii(rq);
	const unsigned int phy_idx = 0;
	int phy = tp->phys[phy_idx] & 0x1f;
	unsigned int regnum = data->reg_num;

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		if (tp->mii_cnt)
			data->phy_id = phy;
		else if (tp->flags & HAS_NWAY)
			data->phy_id = 32;
		else if (tp->chip_id == COMET)
			data->phy_id = 1;
		else
			return -ENODEV;

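		/* fall through -- read the selected MII register as well */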
	case SIOCGMIIREG:		/* Read MII PHY register. */
		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			int csr12 = ioread32 (ioaddr + CSR12);
			int csr14 = ioread32 (ioaddr + CSR14);
			switch (regnum) {
			case 0:
                                if (((csr14<<5) & 0x1000) ||
                                        (dev->if_port == 5 && tp->nwayset))
                                        data->val_out = 0x1000;
                                else
                                        data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
                                                | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
				break;
			case 1:
                                data->val_out =
					0x1848 +
					((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
					((csr12&0x06) == 6 ? 0 : 4);
                                data->val_out |= 0x6048;
				break;
			case 4:
                                /* Advertised value, bogus 10baseTx-FD value from CSR6. */
                                data->val_out =
					((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
					((csr14 >> 1) & 0x20) + 1;
                                data->val_out |= ((csr14 >> 9) & 0x03C0);
				break;
			case 5: data->val_out = tp->lpar; break;
			default: data->val_out = 0; break;
			}
		} else {
			data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
		}
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		if (regnum & ~0x1f)
			return -EINVAL;
		if (data->phy_id == phy) {
			u16 value = data->val_in;
			switch (regnum) {
			case 0:	/* Check for autonegotiation on or reset. */
				tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
				if (tp->full_duplex_lock)
					tp->full_duplex = (value & 0x0100) ? 1 : 0;
				break;
			case 4:
				tp->advertising[phy_idx] =
				tp->mii_advertise = data->val_in;
				break;
			}
		}
		if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
			u16 value = data->val_in;
			if (regnum == 0) {
			  if ((value & 0x1200) == 0x1200) {
			    if (tp->chip_id == PNIC2) {
                                   pnic2_start_nway (dev);
                            } else {
				   t21142_start_nway (dev);
                            }
			  }
			} else if (regnum == 4)
				tp->sym_advertise = value;
		} else {
			tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}

	return -EOPNOTSUPP;
}


/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling tp->setup_frame.  This is non-deterministic
   when re-entered but still correct. */

static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	__set_bit_le(255, hash_table);			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	netdev_for_each_mc_addr(ha, dev) {
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		__set_bit_le(index, hash_table);
	}
	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &tp->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	netdev_for_each_mc_addr(ha, dev) {
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &tp->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}
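
/*
 * Setup-frame layout reminder for both builders above: the frame is 192
 * bytes, which the chip treats as 16 perfect-filter entries of three
 * longwords each, with only one 16-bit half of every longword significant
 * (hence each halfword is written twice; see the endianness note in
 * set_rx_mode()).  The perfect builder pads unused entries with the
 * broadcast address and puts the station address in the final entry; the
 * hash builder instead stores a 512-bit hash table in the first 32
 * longwords and keeps the station address at entry 13 (&setup_frame[13*6]).
 */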


static void set_rx_mode(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->base_addr;
	int csr6;

	csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;

	tp->csr6 &= ~0x00D5;
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
		csr6 |= AcceptAllMulticast | AcceptAllPhys;
	} else if ((netdev_mc_count(dev) > 1000) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		tp->csr6 |= AcceptAllMulticast;
		csr6 |= AcceptAllMulticast;
	} else	if (tp->flags & MC_HASH_ONLY) {
		/* Some work-alikes have only a 64-entry hash filter table. */
		/* Should verify correctness on big-endian/__powerpc__ */
		struct netdev_hw_addr *ha;
		if (netdev_mc_count(dev) > 64) {
			/* Arbitrary non-effective limit. */
			tp->csr6 |= AcceptAllMulticast;
			csr6 |= AcceptAllMulticast;
		} else {
			u32 mc_filter[2] = {0, 0};		 /* Multicast hash filter */
			int filterbit;
			netdev_for_each_mc_addr(ha, dev) {
				if (tp->flags & COMET_MAC_ADDR)
					filterbit = ether_crc_le(ETH_ALEN,
								 ha->addr);
				else
					filterbit = ether_crc(ETH_ALEN,
							      ha->addr) >> 26;
				filterbit &= 0x3f;
				mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
				if (tulip_debug > 2)
					dev_info(&dev->dev,
						 "Added filter for %pM  %08x bit %d\n",
						 ha->addr,
						 ether_crc(ETH_ALEN, ha->addr),
						 filterbit);
			}
			if (mc_filter[0] == tp->mc_filter[0]  &&
				mc_filter[1] == tp->mc_filter[1])
				;				/* No change. */
			else if (tp->flags & IS_ASIX) {
				iowrite32(2, ioaddr + CSR13);
				iowrite32(mc_filter[0], ioaddr + CSR14);
				iowrite32(3, ioaddr + CSR13);
				iowrite32(mc_filter[1], ioaddr + CSR14);
			} else if (tp->flags & COMET_MAC_ADDR) {
				iowrite32(mc_filter[0], ioaddr + CSR27);
				iowrite32(mc_filter[1], ioaddr + CSR28);
			}
			tp->mc_filter[0] = mc_filter[0];
			tp->mc_filter[1] = mc_filter[1];
		}
	} else {
		unsigned long flags;
		u32 tx_flags = 0x08000000 | 192;

		/* Note that only the low-address shortword of setup_frame is valid!
		   The values are doubled for big-endian architectures. */
		if (netdev_mc_count(dev) > 14) {
			/* Must use a multicast hash table. */
			build_setup_frame_hash(tp->setup_frame, dev);
			tx_flags = 0x08400000 | 192;
		} else {
			build_setup_frame_perfect(tp->setup_frame, dev);
		}

		spin_lock_irqsave(&tp->lock, flags);

		if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
			/* Same setup recently queued, we need not add it. */
		} else {
			unsigned int entry;
			int dummy = -1;

			/* Now add this frame to the Tx list. */

			entry = tp->cur_tx++ % TX_RING_SIZE;

			if (entry != 0) {
				/* Avoid a chip errata by prefixing a dummy entry. */
				tp->tx_buffers[entry].skb = NULL;
				tp->tx_buffers[entry].mapping = 0;
				tp->tx_ring[entry].length =
					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
				tp->tx_ring[entry].buffer1 = 0;
				/* Must set DescOwned later to avoid race with chip */
				dummy = entry;
				entry = tp->cur_tx++ % TX_RING_SIZE;

			}

			tp->tx_buffers[entry].skb = NULL;
			tp->tx_buffers[entry].mapping =
				pci_map_single(tp->pdev, tp->setup_frame,
					       sizeof(tp->setup_frame),
					       PCI_DMA_TODEVICE);
			/* Put the setup frame on the Tx list. */
			if (entry == TX_RING_SIZE-1)
				tx_flags |= DESC_RING_WRAP;		/* Wrap ring. */
			tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
			tp->tx_ring[entry].buffer1 =
				cpu_to_le32(tp->tx_buffers[entry].mapping);
			tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
			if (dummy >= 0)
				tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
			if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
				netif_stop_queue(dev);

			/* Trigger an immediate transmit demand. */
			iowrite32(0, ioaddr + CSR1);
		}

		spin_unlock_irqrestore(&tp->lock, flags);
	}

	iowrite32(csr6, ioaddr + CSR6);
}

#ifdef CONFIG_TULIP_MWI
static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	u8 cache;
	u16 pci_command;
	u32 csr0;

	if (tulip_debug > 3)
		netdev_dbg(dev, "tulip_mwi_config()\n");

	tp->csr0 = csr0 = 0;

	/* if we have any cache line size at all, we can do MRM and MWI */
	csr0 |= MRM | MWI;

	/* Enable MWI in the standard PCI command bit.
	 * Check for the case where MWI is desired but not available
	 */
	pci_try_set_mwi(pdev);

	/* read result from hardware (in case bit refused to enable) */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
		csr0 &= ~MWI;

	/* if cache line size hardwired to zero, no MWI */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
	if ((csr0 & MWI) && (cache == 0)) {
		csr0 &= ~MWI;
		pci_clear_mwi(pdev);
	}

	/* assign per-cacheline-size cache alignment and
	 * burst length values
	 */
	switch (cache) {
	case 8:
		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
		break;
	case 16:
		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
		break;
	case 32:
		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
		break;
	default:
		cache = 0;
		break;
	}

	/* if we have a good cache line size, we by now have a good
	 * csr0, so save it and exit
	 */
	if (cache)
		goto out;

	/* we don't have a good csr0 or cache line size, disable MWI */
	if (csr0 & MWI) {
		pci_clear_mwi(pdev);
		csr0 &= ~MWI;
	}

	/* sane defaults for burst length and cache alignment
	 * originally from de4x5 driver
	 */
	csr0 |= (8 << BurstLenShift) | (1 << CALShift);

out:
	tp->csr0 = csr0;
	if (tulip_debug > 2)
		netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
			   cache, csr0);
}
#endif

/*
 *	Chips that have the MRM/reserved bit quirk and the burst quirk. That
 *	is the DM910X and the on chip ULi devices
 */

static int tulip_uli_dm_quirk(struct pci_dev *pdev)
{
	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
		return 1;
	return 0;
}

static const struct net_device_ops tulip_netdev_ops = {
	.ndo_open		= tulip_open,
	.ndo_start_xmit		= tulip_start_xmit,
	.ndo_tx_timeout		= tulip_tx_timeout,
	.ndo_stop		= tulip_close,
	.ndo_get_stats		= tulip_get_stats,
	.ndo_do_ioctl 		= private_ioctl,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	 = poll_tulip,
#endif
};

const struct pci_device_id early_486_chipsets[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
	{ },
};

static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct tulip_private *tp;
	/* See note below on the multiport cards. */
	static unsigned char last_phys_addr[ETH_ALEN] = {
		0x00, 'L', 'i', 'n', 'u', 'x'
	};
	static int last_irq;
	static int multiport_cnt;	/* For four-port boards w/one EEPROM */
	int i, irq;
	unsigned short sum;
	unsigned char *ee_data;
	struct net_device *dev;
	void __iomem *ioaddr;
	static int board_idx = -1;
	int chip_idx = ent->driver_data;
	const char *chip_name = tulip_tbl[chip_idx].chip_name;
	unsigned int eeprom_missing = 0;
	unsigned int force_csr0 = 0;

#ifndef MODULE
	if (tulip_debug > 0)
		printk_once(KERN_INFO "%s", version);
#endif

	board_idx++;

	/*
	 *	LanMedia cards wire a Tulip chip to a WAN interface and need
	 *	a very different driver (the lmc driver).
	 */

        if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
		pr_err("skipping LMC card\n");
		return -ENODEV;
	} else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
		   (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
		    pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
		pr_err("skipping SBE T3E3 port\n");
		return -ENODEV;
	}

	/*
	 *	DM910x chips should be handled by the dmfe driver, except
	 *	on-board chips on SPARC systems.  Also, early DM9100s need
	 *	software CRC which only the dmfe driver supports.
	 */

#ifdef CONFIG_TULIP_DM910X
	if (chip_idx == DM910X) {
		struct device_node *dp;

		if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
		    pdev->revision < 0x30) {
			pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
			return -ENODEV;
		}

		dp = pci_device_to_OF_node(pdev);
		if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
			pr_info("skipping DM910x expansion card (use dmfe)\n");
			return -ENODEV;
		}
	}
#endif

	/*
	 *	Look for early PCI chipsets where people report hangs
	 *	unless the workarounds are enabled.
	 */

	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
	      aligned.  Aries might need this too. The Saturn errata are not
	      pretty reading but thankfully it's an old 486 chipset.

	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
	      Saturn.
	*/

	if (pci_dev_present(early_486_chipsets)) {
		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
		force_csr0 = 1;
	}

	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
	if (chip_idx == AX88140) {
		if ((csr0 & 0x3f00) == 0)
			csr0 |= 0x2000;
	}

	/* PNIC doesn't have MWI/MRL/MRM... */
	if (chip_idx == LC82C168)
		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */

	/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
	if (tulip_uli_dm_quirk(pdev)) {
		csr0 &= ~0x01f100ff;
#if defined(CONFIG_SPARC)
                csr0 = (csr0 & ~0xff00) | 0xe000;
#endif
	}
	/*
	 *	And back to business
	 */

	i = pci_enable_device(pdev);
	if (i) {
		pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
		return i;
	}

	irq = pdev->irq;

	/* alloc_etherdev ensures aligned and zeroed private structures */
	dev = alloc_etherdev (sizeof (*tp));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
		pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
		       pci_name(pdev),
		       (unsigned long long)pci_resource_len (pdev, 0),
		       (unsigned long long)pci_resource_start (pdev, 0));
		goto err_out_free_netdev;
	}

	/* grab all resources from both PIO and MMIO regions, as we
	 * don't want anyone else messing around with our hardware */
	if (pci_request_regions (pdev, DRV_NAME))
		goto err_out_free_netdev;

	ioaddr =  pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);

	if (!ioaddr)
		goto err_out_free_res;

	/*
	 * initialize private data structure 'tp'
	 * it is zeroed and aligned in alloc_etherdev
	 */
	tp = netdev_priv(dev);
	tp->dev = dev;

	tp->rx_ring = pci_alloc_consistent(pdev,
					   sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
					   sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
					   &tp->rx_ring_dma);
	if (!tp->rx_ring)
		goto err_out_mtable;
	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;

	tp->chip_id = chip_idx;
	tp->flags = tulip_tbl[chip_idx].flags;

	tp->wolinfo.supported = 0;
	tp->wolinfo.wolopts = 0;
	/* COMET: Enable power management only for AN983B */
	if (chip_idx == COMET ) {
		u32 sig;
		pci_read_config_dword (pdev, 0x80, &sig);
		if (sig == 0x09811317) {
			tp->flags |= COMET_PM;
			tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
			pr_info("%s: Enabled WOL support for AN983B\n",
				__func__);
		}
	}
	tp->pdev = pdev;
	tp->base_addr = ioaddr;
	tp->revision = pdev->revision;
	tp->csr0 = csr0;
	spin_lock_init(&tp->lock);
	spin_lock_init(&tp->mii_lock);
	init_timer(&tp->timer);
	tp->timer.data = (unsigned long)dev;
	tp->timer.function = tulip_tbl[tp->chip_id].media_timer;

	INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);

#ifdef CONFIG_TULIP_MWI
	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
		tulip_mwi_config (pdev, dev);
#endif

	/* Stop the chip's Tx and Rx processes. */
	tulip_stop_rxtx(tp);

	pci_set_master(pdev);

#ifdef CONFIG_GSC
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
		switch (pdev->subsystem_device) {
		default:
			break;
		case 0x1061:
		case 0x1062:
		case 0x1063:
		case 0x1098:
		case 0x1099:
		case 0x10EE:
			tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
			chip_name = "GSC DS21140 Tulip";
		}
	}
#endif

	/* Clear the missed-packet counter. */
	ioread32(ioaddr + CSR8);

	/* The station address ROM is read byte serially.  The register must
	   be polled, waiting for the value to be read bit serially from the
	   EEPROM.
	   */
	ee_data = tp->eeprom;
	memset(ee_data, 0, sizeof(tp->eeprom));
	sum = 0;
	if (chip_idx == LC82C168) {
		for (i = 0; i < 3; i++) {
			int value, boguscnt = 100000;
			iowrite32(0x600 | i, ioaddr + 0x98);
			do {
				value = ioread32(ioaddr + CSR9);
			} while (value < 0  && --boguscnt > 0);
			put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
			sum += value & 0xffff;
		}
	} else if (chip_idx == COMET) {
		/* No need to read the EEPROM. */
		put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
		put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
		for (i = 0; i < 6; i ++)
			sum += dev->dev_addr[i];
	} else {
		/* A serial EEPROM interface, we read now and sort it out later. */
		int sa_offset = 0;
		int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
		int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);

		if (ee_max_addr > sizeof(tp->eeprom))
			ee_max_addr = sizeof(tp->eeprom);

		for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
			u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
			ee_data[i] = data & 0xff;
			ee_data[i + 1] = data >> 8;
		}

		/* DEC now has a specification (see Notes) but early board makers
		   just put the address in the first EEPROM locations. */
		/* This does  memcmp(ee_data, ee_data+16, 8) */
		for (i = 0; i < 8; i ++)
			if (ee_data[i] != ee_data[16+i])
				sa_offset = 20;
		if (chip_idx == CONEXANT) {
			/* Check that the tuple type and length is correct. */
			if (ee_data[0x198] == 0x04  &&  ee_data[0x199] == 6)
				sa_offset = 0x19A;
		} else if (ee_data[0] == 0xff  &&  ee_data[1] == 0xff &&
				   ee_data[2] == 0) {
			sa_offset = 2;		/* Grrr, damn Matrox boards. */
			multiport_cnt = 4;
		}
#ifdef CONFIG_MIPS_COBALT
               if ((pdev->bus->number == 0) &&
                   ((PCI_SLOT(pdev->devfn) == 7) ||
                    (PCI_SLOT(pdev->devfn) == 12))) {
                       /* Cobalt MAC address in first EEPROM locations. */
                       sa_offset = 0;
		       /* Ensure our media table fixup gets applied */
		       memcpy(ee_data + 16, ee_data, 8);
               }
#endif
#ifdef CONFIG_GSC
		/* Check to see if we have a broken srom */
		if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
			/* pci_vendor_id and subsystem_id are swapped */
			ee_data[0] = ee_data[2];
			ee_data[1] = ee_data[3];
			ee_data[2] = 0x61;
			ee_data[3] = 0x10;

			/* HSC-PCI boards need to be byte-swapped and shifted
1588 			 * up 1 word.  This shift needs to happen at the end
1589 			 * of the MAC first because of the 2 byte overlap.
1590 			 */
1591 			for (i = 4; i >= 0; i -= 2) {
1592 				ee_data[17 + i + 3] = ee_data[17 + i];
1593 				ee_data[16 + i + 5] = ee_data[16 + i];
1594 			}
1595 		}
1596 #endif
1597 
1598 		for (i = 0; i < 6; i ++) {
1599 			dev->dev_addr[i] = ee_data[i + sa_offset];
1600 			sum += ee_data[i + sa_offset];
1601 		}
1602 	}
1603 	/* Lite-On boards have the address byte-swapped. */
1604 	if ((dev->dev_addr[0] == 0xA0 ||
1605 	     dev->dev_addr[0] == 0xC0 ||
1606 	     dev->dev_addr[0] == 0x02) &&
1607 	    dev->dev_addr[1] == 0x00)
1608 		for (i = 0; i < 6; i+=2) {
1609 			char tmp = dev->dev_addr[i];
1610 			dev->dev_addr[i] = dev->dev_addr[i+1];
1611 			dev->dev_addr[i+1] = tmp;
1612 		}
1613 	/* On the Zynx 315 Etherarray and other multiport boards only the
1614 	   first Tulip has an EEPROM; the addresses of the subsequent ports
1615 	   are derived from the first.
1616 	   On Sparc systems the MAC address is instead held in the OBP
1617 	   property "local-mac-address".
1618 	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
1619 	   that here as well. */
1620 	if (sum == 0  || sum == 6*0xff) {
1621 #if defined(CONFIG_SPARC)
1622 		struct device_node *dp = pci_device_to_OF_node(pdev);
1623 		const unsigned char *addr;
1624 		int len;
1625 #endif
1626 		eeprom_missing = 1;
1627 		for (i = 0; i < 5; i++)
1628 			dev->dev_addr[i] = last_phys_addr[i];
1629 		dev->dev_addr[i] = last_phys_addr[i] + 1;
1630 #if defined(CONFIG_SPARC)
1631 		addr = of_get_property(dp, "local-mac-address", &len);
1632 		if (addr && len == ETH_ALEN)
1633 			memcpy(dev->dev_addr, addr, ETH_ALEN);
1634 #endif
1635 #if defined(__i386__) || defined(__x86_64__)	/* Patch up x86 BIOS bug. */
1636 		if (last_irq)
1637 			irq = last_irq;
1638 #endif
1639 	}
1640 
1641 	for (i = 0; i < 6; i++)
1642 		last_phys_addr[i] = dev->dev_addr[i];
1643 	last_irq = irq;
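	/* Remember this port's address and IRQ so the next EEPROM-less port
	   on a multiport board can be derived from them by the fallback
	   above. */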
1644 
1645 	/* The lower four bits are the media type. */
1646 	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
1647 		if (options[board_idx] & MEDIA_MASK)
1648 			tp->default_port = options[board_idx] & MEDIA_MASK;
1649 		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1650 			tp->full_duplex = 1;
1651 		if (mtu[board_idx] > 0)
1652 			dev->mtu = mtu[board_idx];
1653 	}
1654 	if (dev->mem_start & MEDIA_MASK)
1655 		tp->default_port = dev->mem_start & MEDIA_MASK;
1656 	if (tp->default_port) {
1657 		pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1658 			board_idx, medianame[tp->default_port & MEDIA_MASK]);
1659 		tp->medialock = 1;
1660 		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1661 			tp->full_duplex = 1;
1662 	}
1663 	if (tp->full_duplex)
1664 		tp->full_duplex_lock = 1;
1665 
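	/* For a forced MII media type, preload the MII advertisement value.
	   media2advert[] maps the MII media codes (default_port values 9..15,
	   hence the "- 9" index) to the matching advertise-register bits;
	   HAS_8023X shares bit positions with that register. */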
1666 	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1667 		static const u16 media2advert[] = {
1668 			0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1669 		};
1670 		tp->mii_advertise = media2advert[tp->default_port - 9];
1671 		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1672 	}
1673 
1674 	if (tp->flags & HAS_MEDIA_TABLE) {
1675 		sprintf(dev->name, DRV_NAME "%d", board_idx);	/* hack */
1676 		tulip_parse_eeprom(dev);
1677 		strcpy(dev->name, "eth%d");			/* un-hack */
1678 	}
1679 
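	/* Probe for MII transceivers now if the chip always wants it, if the
	   media table lists an MII leaf, or if there is no table but the chip
	   flags say MII may be present.  When an MII leaf (media type 11)
	   exists, select it first so the PHY registers are reachable. */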
1680 	if ((tp->flags & ALWAYS_CHECK_MII) ||
1681 		(tp->mtable  &&  tp->mtable->has_mii) ||
1682 		( ! tp->mtable  &&  (tp->flags & HAS_MII))) {
1683 		if (tp->mtable  &&  tp->mtable->has_mii) {
1684 			for (i = 0; i < tp->mtable->leafcount; i++)
1685 				if (tp->mtable->mleaf[i].media == 11) {
1686 					tp->cur_index = i;
1687 					tp->saved_if_port = dev->if_port;
1688 					tulip_select_media(dev, 2);
1689 					dev->if_port = tp->saved_if_port;
1690 					break;
1691 				}
1692 		}
1693 
1694 		/* Find the connected MII xcvrs.
1695 		   Doing this in open() would allow detecting external xcvrs
1696 		   later, but takes too much time. */
1697 		tulip_find_mii (dev, board_idx);
1698 	}
1699 
1700 	/* The Tulip-specific entries in the device structure. */
1701 	dev->netdev_ops = &tulip_netdev_ops;
1702 	dev->watchdog_timeo = TX_TIMEOUT;
1703 #ifdef CONFIG_TULIP_NAPI
1704 	netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1705 #endif
1706 	dev->ethtool_ops = &ops;
1707 
1708 	if (register_netdev(dev))
1709 		goto err_out_free_ring;
1710 
1711 	pci_set_drvdata(pdev, dev);
1712 
1713 	dev_info(&dev->dev,
1714 #ifdef CONFIG_TULIP_MMIO
1715 		 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1716 #else
1717 		 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1718 #endif
1719 		 chip_name, pdev->revision,
1720 		 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1721 		 eeprom_missing ? " EEPROM not present," : "",
1722 		 dev->dev_addr, irq);
1723 
1724 	if (tp->chip_id == PNIC2)
1725 		tp->link_change = pnic2_lnk_change;
1726 	else if (tp->flags & HAS_NWAY)
1727 		tp->link_change = t21142_lnk_change;
1728 	else if (tp->flags & HAS_PNICNWAY)
1729 		tp->link_change = pnic_lnk_change;
1730 
1731 	/* Reset the xcvr interface and turn on heartbeat. */
1732 	switch (chip_idx) {
1733 	case DC21140:
1734 	case DM910X:
1735 	default:
1736 		if (tp->mtable)
1737 			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1738 		break;
1739 	case DC21142:
1740 		if (tp->mii_cnt  ||  tulip_media_cap[dev->if_port] & MediaIsMII) {
1741 			iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1742 			iowrite32(0x0000, ioaddr + CSR13);
1743 			iowrite32(0x0000, ioaddr + CSR14);
1744 			iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1745 		} else
1746 			t21142_start_nway(dev);
1747 		break;
1748 	case PNIC2:
1749 		/* just do a reset for sanity's sake */
1750 		iowrite32(0x0000, ioaddr + CSR13);
1751 		iowrite32(0x0000, ioaddr + CSR14);
1752 		break;
1753 	case LC82C168:
1754 		if ( ! tp->mii_cnt) {
1755 			tp->nway = 1;
1756 			tp->nwayset = 0;
1757 			iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1758 			iowrite32(0x30, ioaddr + CSR12);
1759 			iowrite32(0x0001F078, ioaddr + CSR6);
1760 			iowrite32(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
1761 		}
1762 		break;
1763 	case MX98713:
1764 	case COMPEX9881:
1765 		iowrite32(0x00000000, ioaddr + CSR6);
1766 		iowrite32(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
1767 		iowrite32(0x00000001, ioaddr + CSR13);
1768 		break;
1769 	case MX98715:
1770 	case MX98725:
1771 		iowrite32(0x01a80000, ioaddr + CSR6);
1772 		iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1773 		iowrite32(0x00001000, ioaddr + CSR12);
1774 		break;
1775 	case COMET:
1776 		/* No initialization necessary. */
1777 		break;
1778 	}
1779 
1780 	/* put the chip in snooze mode until opened */
1781 	tulip_set_power_state (tp, 0, 1);
1782 
1783 	return 0;
1784 
1785 err_out_free_ring:
1786 	pci_free_consistent (pdev,
1787 			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1788 			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1789 			     tp->rx_ring, tp->rx_ring_dma);
1790 
1791 err_out_mtable:
1792 	kfree (tp->mtable);
1793 	pci_iounmap(pdev, ioaddr);
1794 
1795 err_out_free_res:
1796 	pci_release_regions (pdev);
1797 
1798 err_out_free_netdev:
1799 	free_netdev (dev);
1800 	return -ENODEV;
1801 }
1802 
1803 
1804 /* set the registers according to the given wolopts */
1805 static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1806 {
1807 	struct net_device *dev = pci_get_drvdata(pdev);
1808 	struct tulip_private *tp = netdev_priv(dev);
1809 	void __iomem *ioaddr = tp->base_addr;
1810 
1811 	if (tp->flags & COMET_PM) {
1812 
1813 		unsigned int tmp;
1814 
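		/* Judging by the comet_csr18_* bit names: leave APM mode,
		   clear the sticky PMES/D3a bits and select the PM wake-up
		   scheme before programming the wake-up events below. */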
1815 		tmp = ioread32(ioaddr + CSR18);
1816 		tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1817 		tmp |= comet_csr18_pm_mode;
1818 		iowrite32(tmp, ioaddr + CSR18);
1819 
1820 		/* Set the Wake-up Control/Status Register to the given WOL options */
1821 		tmp = ioread32(ioaddr + CSR13);
1822 		tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1823 		if (wolopts & WAKE_MAGIC)
1824 			tmp |= comet_csr13_mpre;
1825 		if (wolopts & WAKE_PHY)
1826 			tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1827 		/* Clear the event flags */
1828 		tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
1829 		iowrite32(tmp, ioaddr + CSR13);
1830 	}
1831 }
1832 
1833 #ifdef CONFIG_PM
1834 
1835 
1836 static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1837 {
1838 	pci_power_t pstate;
1839 	struct net_device *dev = pci_get_drvdata(pdev);
1840 	struct tulip_private *tp = netdev_priv(dev);
1841 
1842 	if (!dev)
1843 		return -EINVAL;
1844 
1845 	if (!netif_running(dev))
1846 		goto save_state;
1847 
1848 	tulip_down(dev);
1849 
1850 	netif_device_detach(dev);
1851 	/* FIXME: it needlessly adds an error path. */
1852 	free_irq(tp->pdev->irq, dev);
1853 
1854 save_state:
1855 	pci_save_state(pdev);
1856 	pci_disable_device(pdev);
1857 	pstate = pci_choose_state(pdev, state);
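	/* Only arm wake-up when this is a real suspend into a low-power
	   state: program the chip's wake-up events and have the PCI core
	   enable PME# for the chosen state. */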
1858 	if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
1859 		int rc;
1860 
1861 		tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1862 		rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1863 		if (rc)
1864 			pr_err("pci_enable_wake failed (%d)\n", rc);
1865 	}
1866 	pci_set_power_state(pdev, pstate);
1867 
1868 	return 0;
1869 }
1870 
1871 
1872 static int tulip_resume(struct pci_dev *pdev)
1873 {
1874 	struct net_device *dev = pci_get_drvdata(pdev);
1875 	struct tulip_private *tp = netdev_priv(dev);
1876 	void __iomem *ioaddr = tp->base_addr;
1877 	int retval;
1878 	unsigned int tmp;
1879 
1880 	if (!dev)
1881 		return -EINVAL;
1882 
1883 	pci_set_power_state(pdev, PCI_D0);
1884 	pci_restore_state(pdev);
1885 
1886 	if (!netif_running(dev))
1887 		return 0;
1888 
1889 	if ((retval = pci_enable_device(pdev))) {
1890 		pr_err("pci_enable_device failed in resume\n");
1891 		return retval;
1892 	}
1893 
1894 	retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1895 			     dev->name, dev);
1896 	if (retval) {
1897 		pr_err("request_irq failed in resume\n");
1898 		return retval;
1899 	}
1900 
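	/* Comet only: disarm PME#, acknowledge the pending PMES flag in
	   CSR20 and switch off every wake-up event armed at suspend time,
	   so a stale WOL configuration cannot fire after resume. */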
1901 	if (tp->flags & COMET_PM) {
1902 		pci_enable_wake(pdev, PCI_D3hot, 0);
1903 		pci_enable_wake(pdev, PCI_D3cold, 0);
1904 
1905 		/* Clear the PMES flag */
1906 		tmp = ioread32(ioaddr + CSR20);
1907 		tmp |= comet_csr20_pmes;
1908 		iowrite32(tmp, ioaddr + CSR20);
1909 
1910 		/* Disable all wake-up events */
1911 		tulip_set_wolopts(pdev, 0);
1912 	}
1913 	netif_device_attach(dev);
1914 
1915 	if (netif_running(dev))
1916 		tulip_up(dev);
1917 
1918 	return 0;
1919 }
1920 
1921 #endif /* CONFIG_PM */
1922 
1923 
1924 static void tulip_remove_one(struct pci_dev *pdev)
1925 {
1926 	struct net_device *dev = pci_get_drvdata (pdev);
1927 	struct tulip_private *tp;
1928 
1929 	if (!dev)
1930 		return;
1931 
1932 	tp = netdev_priv(dev);
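	/* Undo tulip_init_one() in reverse: unregister the interface, free
	   the descriptor rings and media table, unmap the registers and
	   release the PCI resources. */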
1933 	unregister_netdev(dev);
1934 	pci_free_consistent (pdev,
1935 			     sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1936 			     sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1937 			     tp->rx_ring, tp->rx_ring_dma);
1938 	kfree (tp->mtable);
1939 	pci_iounmap(pdev, tp->base_addr);
1940 	free_netdev (dev);
1941 	pci_release_regions (pdev);
1942 	pci_disable_device(pdev);
1943 
1944 	/* pci_power_off (pdev, -1); */
1945 }
1946 
1947 #ifdef CONFIG_NET_POLL_CONTROLLER
1948 /*
1949  * Polling 'interrupt' - used by things like netconsole to send skbs
1950  * without having to re-enable interrupts. It's not called while
1951  * the interrupt routine is executing.
1952  */
1953 
1954 static void poll_tulip (struct net_device *dev)
1955 {
1956 	struct tulip_private *tp = netdev_priv(dev);
1957 	const int irq = tp->pdev->irq;
1958 
1959 	/* disable_irq here is not very nice, but with the lockless
1960 	   interrupt handler we have no other choice. */
1961 	disable_irq(irq);
1962 	tulip_interrupt (irq, dev);
1963 	enable_irq(irq);
1964 }
1965 #endif
1966 
1967 static struct pci_driver tulip_driver = {
1968 	.name		= DRV_NAME,
1969 	.id_table	= tulip_pci_tbl,
1970 	.probe		= tulip_init_one,
1971 	.remove		= tulip_remove_one,
1972 #ifdef CONFIG_PM
1973 	.suspend	= tulip_suspend,
1974 	.resume		= tulip_resume,
1975 #endif /* CONFIG_PM */
1976 };
1977 
1978 
1979 static int __init tulip_init (void)
1980 {
1981 #ifdef MODULE
1982 	pr_info("%s", version);
1983 #endif
1984 
1985 	/* copy module parms into globals */
1986 	tulip_rx_copybreak = rx_copybreak;
1987 	tulip_max_interrupt_work = max_interrupt_work;
1988 
1989 	/* probe for and init boards */
1990 	return pci_register_driver(&tulip_driver);
1991 }
1992 
1993 
1994 static void __exit tulip_cleanup (void)
1995 {
1996 	pci_unregister_driver (&tulip_driver);
1997 }
1998 
1999 
2000 module_init(tulip_init);
2001 module_exit(tulip_cleanup);
2002