This source file includes the following definitions:
- tulip_timer
- tulip_set_power_state
- tulip_up
- tulip_open
- tulip_tx_timeout
- tulip_init_ring
- tulip_start_xmit
- tulip_clean_tx_ring
- tulip_down
- tulip_free_ring
- tulip_close
- tulip_get_stats
- tulip_get_drvinfo
- tulip_ethtool_set_wol
- tulip_ethtool_get_wol
- private_ioctl
- build_setup_frame_hash
- build_setup_frame_perfect
- set_rx_mode
- tulip_mwi_config
- tulip_uli_dm_quirk
- tulip_init_one
- tulip_set_wolopts
- tulip_suspend
- tulip_resume
- tulip_remove_one
- poll_tulip
- tulip_init
- tulip_cleanup
1 /*	tulip_core.c: A DEC 21x4x-family ethernet driver for Linux.
2
3 	Copyright 2000,2001  The Linux Kernel Team
4 	Written/copyright 1994-2001 by Donald Becker.
5
6 	This software may be used and distributed according to the terms
7 	of the GNU General Public License, incorporated herein by reference.
8
9 	Please submit bugs to http://bugzilla.kernel.org/ .
10 */
11
12 #define pr_fmt(fmt) "tulip: " fmt
13
14 #define DRV_NAME "tulip"
15 #ifdef CONFIG_TULIP_NAPI
16 #define DRV_VERSION "1.1.15-NAPI"
17 #else
18 #define DRV_VERSION "1.1.15"
19 #endif
20 #define DRV_RELDATE "Feb 27, 2007"
21
22
23 #include <linux/module.h>
24 #include <linux/pci.h>
25 #include <linux/slab.h>
26 #include "tulip.h"
27 #include <linux/init.h>
28 #include <linux/interrupt.h>
29 #include <linux/etherdevice.h>
30 #include <linux/delay.h>
31 #include <linux/mii.h>
32 #include <linux/crc32.h>
33 #include <asm/unaligned.h>
34 #include <linux/uaccess.h>
35
36 #ifdef CONFIG_SPARC
37 #include <asm/prom.h>
38 #endif
39
40 static char version[] =
41 "Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
42
43
44
45
46 static unsigned int max_interrupt_work = 25;
47
48 #define MAX_UNITS 8
49
50 static int full_duplex[MAX_UNITS];
51 static int options[MAX_UNITS];
52 static int mtu[MAX_UNITS];
53
54
55 const char * const medianame[32] = {
56 "10baseT", "10base2", "AUI", "100baseTx",
57 "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
58 "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
59 "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
60 "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
61 "","","","", "","","","", "","","","Transceiver reset",
62 };
63
64
65 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
66 defined(CONFIG_SPARC) || defined(__ia64__) || \
67 defined(__sh__) || defined(__mips__)
68 static int rx_copybreak = 1518;
69 #else
70 static int rx_copybreak = 100;
71 #endif
72
73
74
75
76
77
78
79
80
81
82
83
84
85
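/* Architecture-specific defaults for CSR0 (the bus-mode register).
 * Bits 15:14 select the cache-alignment boundary and bits 13:8 the DMA
 * burst length (see the CALShift/BurstLenShift uses below); the
 * 0x01A00000 part enables the memory-write-invalidate / read-line /
 * read-multiple PCI transactions on platforms that can use them.
 * tulip_init_one() forces a conservative value on old 486 chipsets and
 * on quirky chips. */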
86 #if defined(__alpha__) || defined(__ia64__)
87 static int csr0 = 0x01A00000 | 0xE000;
88 #elif defined(__i386__) || defined(__powerpc__) || defined(__x86_64__)
89 static int csr0 = 0x01A00000 | 0x8000;
90 #elif defined(CONFIG_SPARC) || defined(__hppa__)
91
92
93
94
95 static int csr0 = 0x01A00000 | 0x9000;
96 #elif defined(__arm__) || defined(__sh__)
97 static int csr0 = 0x01A00000 | 0x4800;
98 #elif defined(__mips__)
99 static int csr0 = 0x00200000 | 0x4000;
100 #else
101 static int csr0;
102 #endif
103
104
105
106 #define TX_TIMEOUT (4*HZ)
107
108
109 MODULE_AUTHOR("The Linux Kernel Team");
110 MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
111 MODULE_LICENSE("GPL");
112 MODULE_VERSION(DRV_VERSION);
113 module_param(tulip_debug, int, 0);
114 module_param(max_interrupt_work, int, 0);
115 module_param(rx_copybreak, int, 0);
116 module_param(csr0, int, 0);
117 module_param_array(options, int, NULL, 0);
118 module_param_array(full_duplex, int, NULL, 0);
119
120 #ifdef TULIP_DEBUG
121 int tulip_debug = TULIP_DEBUG;
122 #else
123 int tulip_debug = 1;
124 #endif
125
126 static void tulip_timer(struct timer_list *t)
127 {
128 struct tulip_private *tp = from_timer(tp, t, timer);
129 struct net_device *dev = tp->dev;
130
131 if (netif_running(dev))
132 schedule_work(&tp->media_work);
133 }
134
135
136
137
138
139
140
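/* Per-chip capability table, indexed by the chip ids used as driver_data
 * in tulip_pci_tbl below.  Each entry lists the chip name, the I/O region
 * size, the set of valid CSR5/CSR7 interrupt bits, feature flags, and the
 * timer/work handlers used for media (link) monitoring.  The two empty
 * leading entries keep the historical indices of the 21040/21041 parts,
 * which are handled by a separate driver. */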
141 const struct tulip_chip_table tulip_tbl[] = {
142 { },
143 { },
144
145
146 { "Digital DS21140 Tulip", 128, 0x0001ebef,
147 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI, tulip_timer,
148 tulip_media_task },
149
150
151 { "Digital DS21142/43 Tulip", 128, 0x0801fbff,
152 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
153 | HAS_INTR_MITIGATION | HAS_PCI_MWI, tulip_timer, t21142_media_task },
154
155
156 { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
157 HAS_MII | HAS_PNICNWAY, pnic_timer, },
158
159
160 { "Macronix 98713 PMAC", 128, 0x0001ebef,
161 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
162
163
164 { "Macronix 98715 PMAC", 256, 0x0001ebef,
165 HAS_MEDIA_TABLE, mxic_timer, },
166
167
168 { "Macronix 98725 PMAC", 256, 0x0001ebef,
169 HAS_MEDIA_TABLE, mxic_timer, },
170
171
172 { "ASIX AX88140", 128, 0x0001fbff,
173 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY
174 | IS_ASIX, tulip_timer, tulip_media_task },
175
176
177 { "Lite-On PNIC-II", 256, 0x0801fbff,
178 HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI, pnic2_timer, },
179
180
181 { "ADMtek Comet", 256, 0x0001abef,
182 HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer, },
183
184
185 { "Compex 9881 PMAC", 128, 0x0001ebef,
186 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer, },
187
188
189 { "Intel DS21145 Tulip", 128, 0x0801fbff,
190 HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
191 | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task },
192
193
194 #ifdef CONFIG_TULIP_DM910X
195 { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
196 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI,
197 tulip_timer, tulip_media_task },
198 #else
199 { NULL },
200 #endif
201
202
203 { "Conexant LANfinity", 256, 0x0001ebef,
204 HAS_MII | HAS_ACPI, tulip_timer, tulip_media_task },
205
206 };
207
208
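/* PCI vendor/device IDs claimed by this driver; driver_data is the index
 * into tulip_tbl[] above.  Many of the entries are ADMtek Comet clones
 * sold under other vendor IDs. */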
209 static const struct pci_device_id tulip_pci_tbl[] = {
210 { 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
211 { 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
212 { 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
213 { 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
214 { 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
215
216 { 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
217 { 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
218 { 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
219 { 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
220 { 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
221 { 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
222 { 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
223 { 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
224 { 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
225 { 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
226 { 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
227 { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
228 { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
229 { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
230 #ifdef CONFIG_TULIP_DM910X
231 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
232 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
233 #endif
234 { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
235 { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
236 { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
237 { 0x1186, 0x1541, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
238 { 0x1186, 0x1561, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
239 { 0x1186, 0x1591, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
240 { 0x14f1, 0x1803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CONEXANT },
241 { 0x1626, 0x8410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
242 { 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
243 { 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
244 { 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
245 { 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
246 { 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
247 { 0x1414, 0x0001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
248 { 0x1414, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
249 { }
250 };
251 MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
252
253
254
255 const char tulip_media_cap[32] =
256 {0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
257
258 static void tulip_tx_timeout(struct net_device *dev);
259 static void tulip_init_ring(struct net_device *dev);
260 static void tulip_free_ring(struct net_device *dev);
261 static netdev_tx_t tulip_start_xmit(struct sk_buff *skb,
262 struct net_device *dev);
263 static int tulip_open(struct net_device *dev);
264 static int tulip_close(struct net_device *dev);
265 static void tulip_up(struct net_device *dev);
266 static void tulip_down(struct net_device *dev);
267 static struct net_device_stats *tulip_get_stats(struct net_device *dev);
268 static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
269 static void set_rx_mode(struct net_device *dev);
270 static void tulip_set_wolopts(struct pci_dev *pdev, u32 wolopts);
271 #ifdef CONFIG_NET_POLL_CONTROLLER
272 static void poll_tulip(struct net_device *dev);
273 #endif
274
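/* For chips with HAS_ACPI, select full-power/sleep/snooze by rewriting
 * the Sleep and Snooze bits of the CFDD register in PCI config space;
 * other chips are left untouched. */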
275 static void tulip_set_power_state (struct tulip_private *tp,
276 int sleep, int snooze)
277 {
278 if (tp->flags & HAS_ACPI) {
279 u32 tmp, newtmp;
280 pci_read_config_dword (tp->pdev, CFDD, &tmp);
281 newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
282 if (sleep)
283 newtmp |= CFDD_Sleep;
284 else if (snooze)
285 newtmp |= CFDD_Snooze;
286 if (tmp != newtmp)
287 pci_write_config_dword (tp->pdev, CFDD, newtmp);
288 }
289
290 }
291
292
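/* Bring the interface up: wake the chip and clear any Wake-on-LAN state,
 * program CSR0 and the Rx/Tx ring base addresses (CSR3/CSR4), load the
 * station address (via the Comet/ASIX address registers or by queueing a
 * setup frame as the first Tx descriptor), choose an initial medium,
 * unmask interrupts (CSR5/CSR7) and start the Rx/Tx engines. */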
293 static void tulip_up(struct net_device *dev)
294 {
295 struct tulip_private *tp = netdev_priv(dev);
296 void __iomem *ioaddr = tp->base_addr;
297 int next_tick = 3*HZ;
298 u32 reg;
299 int i;
300
301 #ifdef CONFIG_TULIP_NAPI
302 napi_enable(&tp->napi);
303 #endif
304
305
306 tulip_set_power_state (tp, 0, 0);
307
308
309 pci_enable_wake(tp->pdev, PCI_D3hot, 0);
310 pci_enable_wake(tp->pdev, PCI_D3cold, 0);
311 tulip_set_wolopts(tp->pdev, 0);
312
313
314 if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
315 iowrite32(0x00040000, ioaddr + CSR6);
316
317
318 iowrite32(0x00000001, ioaddr + CSR0);
319 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);
320 udelay(100);
321
322
323
324
325 iowrite32(tp->csr0, ioaddr + CSR0);
326 	pci_read_config_dword(tp->pdev, PCI_COMMAND, &reg);
327 udelay(100);
328
329 if (tulip_debug > 1)
330 netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
331
332 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
333 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
334 tp->cur_rx = tp->cur_tx = 0;
335 tp->dirty_rx = tp->dirty_tx = 0;
336
337 if (tp->flags & MC_HASH_ONLY) {
338 u32 addr_low = get_unaligned_le32(dev->dev_addr);
339 u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
340 if (tp->chip_id == AX88140) {
341 iowrite32(0, ioaddr + CSR13);
342 iowrite32(addr_low, ioaddr + CSR14);
343 iowrite32(1, ioaddr + CSR13);
344 iowrite32(addr_high, ioaddr + CSR14);
345 } else if (tp->flags & COMET_MAC_ADDR) {
346 iowrite32(addr_low, ioaddr + 0xA4);
347 iowrite32(addr_high, ioaddr + 0xA8);
348 iowrite32(0, ioaddr + CSR27);
349 iowrite32(0, ioaddr + CSR28);
350 }
351 } else {
352
353 u16 *eaddrs = (u16 *)dev->dev_addr;
354 u16 *setup_frm = &tp->setup_frame[15*6];
355 dma_addr_t mapping;
356
357
358 memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
359
360 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
361 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
362 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
363
364 mapping = pci_map_single(tp->pdev, tp->setup_frame,
365 sizeof(tp->setup_frame),
366 PCI_DMA_TODEVICE);
367 tp->tx_buffers[tp->cur_tx].skb = NULL;
368 tp->tx_buffers[tp->cur_tx].mapping = mapping;
369
370
371 tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
372 tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
373 tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
374
375 tp->cur_tx++;
376 }
377
378 tp->saved_if_port = dev->if_port;
379 if (dev->if_port == 0)
380 dev->if_port = tp->default_port;
381
382
383 i = 0;
384 if (tp->mtable == NULL)
385 goto media_picked;
386 if (dev->if_port) {
387 int looking_for = tulip_media_cap[dev->if_port] & MediaIsMII ? 11 :
388 (dev->if_port == 12 ? 0 : dev->if_port);
389 for (i = 0; i < tp->mtable->leafcount; i++)
390 if (tp->mtable->mleaf[i].media == looking_for) {
391 dev_info(&dev->dev,
392 "Using user-specified media %s\n",
393 medianame[dev->if_port]);
394 goto media_picked;
395 }
396 }
397 if ((tp->mtable->defaultmedia & 0x0800) == 0) {
398 int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
399 for (i = 0; i < tp->mtable->leafcount; i++)
400 if (tp->mtable->mleaf[i].media == looking_for) {
401 dev_info(&dev->dev,
402 "Using EEPROM-set media %s\n",
403 medianame[looking_for]);
404 goto media_picked;
405 }
406 }
407
408 for (i = tp->mtable->leafcount - 1;
409 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
410 ;
411 media_picked:
412
413 tp->csr6 = 0;
414 tp->cur_index = i;
415 tp->nwayset = 0;
416
417 if (dev->if_port) {
418 if (tp->chip_id == DC21143 &&
419 (tulip_media_cap[dev->if_port] & MediaIsMII)) {
420
421 iowrite32(0x0000, ioaddr + CSR13);
422 iowrite32(0x0000, ioaddr + CSR14);
423 iowrite32(0x0008, ioaddr + CSR15);
424 }
425 tulip_select_media(dev, 1);
426 } else if (tp->chip_id == DC21142) {
427 if (tp->mii_cnt) {
428 tulip_select_media(dev, 1);
429 if (tulip_debug > 1)
430 dev_info(&dev->dev,
431 "Using MII transceiver %d, status %04x\n",
432 tp->phys[0],
433 tulip_mdio_read(dev, tp->phys[0], 1));
434 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
435 tp->csr6 = csr6_mask_hdcap;
436 dev->if_port = 11;
437 iowrite32(0x0000, ioaddr + CSR13);
438 iowrite32(0x0000, ioaddr + CSR14);
439 } else
440 t21142_start_nway(dev);
441 } else if (tp->chip_id == PNIC2) {
442
443 tp->sym_advertise = 0x01E0;
444
445 iowrite32(ioread32(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
446 iowrite32(ioread32(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
447 pnic2_start_nway(dev);
448 } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
449 if (tp->mii_cnt) {
450 dev->if_port = 11;
451 tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
452 iowrite32(0x0001, ioaddr + CSR15);
453 } else if (ioread32(ioaddr + CSR5) & TPLnkPass)
454 pnic_do_nway(dev);
455 else {
456
457 iowrite32(0x32, ioaddr + CSR12);
458 tp->csr6 = 0x00420000;
459 iowrite32(0x0001B078, ioaddr + 0xB8);
460 iowrite32(0x0201B078, ioaddr + 0xB8);
461 next_tick = 1*HZ;
462 }
463 } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) &&
464 ! tp->medialock) {
465 dev->if_port = 0;
466 tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
467 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
468 } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
469
470 dev->if_port = 0;
471 tp->csr6 = 0x01a80200;
472 iowrite32(0x0f370000 | ioread16(ioaddr + 0x80), ioaddr + 0x80);
473 iowrite32(0x11000 | ioread16(ioaddr + 0xa0), ioaddr + 0xa0);
474 } else if (tp->chip_id == COMET || tp->chip_id == CONEXANT) {
475
476 iowrite32(ioread32(ioaddr + 0x88) | 1, ioaddr + 0x88);
477 dev->if_port = tp->mii_cnt ? 11 : 0;
478 tp->csr6 = 0x00040000;
479 } else if (tp->chip_id == AX88140) {
480 tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
481 } else
482 tulip_select_media(dev, 1);
483
484
485 tulip_stop_rxtx(tp);
486 barrier();
487 udelay(5);
488 iowrite32(tp->csr6 | TxOn, ioaddr + CSR6);
489
490
491 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
492 iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
493 tulip_start_rxtx(tp);
494 iowrite32(0, ioaddr + CSR2);
495
496 if (tulip_debug > 2) {
497 netdev_dbg(dev, "Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
498 ioread32(ioaddr + CSR0),
499 ioread32(ioaddr + CSR5),
500 ioread32(ioaddr + CSR6));
501 }
502
503
504
505 tp->timer.expires = RUN_AT(next_tick);
506 add_timer(&tp->timer);
507 #ifdef CONFIG_TULIP_NAPI
508 timer_setup(&tp->oom_timer, oom_timer, 0);
509 #endif
510 }
511
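/* ndo_open: set up the descriptor rings, request the (shared) PCI
 * interrupt, bring the hardware up and start the transmit queue. */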
512 static int
513 tulip_open(struct net_device *dev)
514 {
515 struct tulip_private *tp = netdev_priv(dev);
516 int retval;
517
518 tulip_init_ring (dev);
519
520 retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
521 dev->name, dev);
522 if (retval)
523 goto free_ring;
524
525 tulip_up (dev);
526
527 netif_start_queue (dev);
528
529 return 0;
530
531 free_ring:
532 tulip_free_ring (dev);
533 return retval;
534 }
535
536
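/* Transmit watchdog.  Dumps the relevant CSRs, kicks the media worker for
 * chips that may simply have lost link, and otherwise lets
 * tulip_tx_timeout_complete() restart the transmitter before waking the
 * queue. */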
537 static void tulip_tx_timeout(struct net_device *dev)
538 {
539 struct tulip_private *tp = netdev_priv(dev);
540 void __iomem *ioaddr = tp->base_addr;
541 unsigned long flags;
542
543 spin_lock_irqsave (&tp->lock, flags);
544
545 if (tulip_media_cap[dev->if_port] & MediaIsMII) {
546
547 if (tulip_debug > 1)
548 dev_warn(&dev->dev,
549 "Transmit timeout using MII device\n");
550 } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
551 tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
552 tp->chip_id == DM910X) {
553 dev_warn(&dev->dev,
554 "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
555 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
556 ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
557 ioread32(ioaddr + CSR15));
558 tp->timeout_recovery = 1;
559 schedule_work(&tp->media_work);
560 goto out_unlock;
561 } else if (tp->chip_id == PNIC2) {
562 dev_warn(&dev->dev,
563 "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
564 (int)ioread32(ioaddr + CSR5),
565 (int)ioread32(ioaddr + CSR6),
566 (int)ioread32(ioaddr + CSR7),
567 (int)ioread32(ioaddr + CSR12));
568 } else {
569 dev_warn(&dev->dev,
570 "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
571 ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
572 dev->if_port = 0;
573 }
574
575 #if defined(way_too_many_messages)
576 if (tulip_debug > 3) {
577 int i;
578 for (i = 0; i < RX_RING_SIZE; i++) {
579 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
580 int j;
581 printk(KERN_DEBUG
582 "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
583 i,
584 (unsigned int)tp->rx_ring[i].status,
585 (unsigned int)tp->rx_ring[i].length,
586 (unsigned int)tp->rx_ring[i].buffer1,
587 (unsigned int)tp->rx_ring[i].buffer2,
588 buf[0], buf[1], buf[2]);
589 for (j = 0; ((j < 1600) && buf[j] != 0xee); j++)
590 if (j < 100)
591 pr_cont(" %02x", buf[j]);
592 pr_cont(" j=%d\n", j);
593 }
594 printk(KERN_DEBUG " Rx ring %p: ", tp->rx_ring);
595 for (i = 0; i < RX_RING_SIZE; i++)
596 pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
597 printk(KERN_DEBUG " Tx ring %p: ", tp->tx_ring);
598 for (i = 0; i < TX_RING_SIZE; i++)
599 pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
600 pr_cont("\n");
601 }
602 #endif
603
604 tulip_tx_timeout_complete(tp, ioaddr);
605
606 out_unlock:
607 spin_unlock_irqrestore (&tp->lock, flags);
608 netif_trans_update(dev);
609 netif_wake_queue (dev);
610 }
611
612
613
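/* Initialize the Rx and Tx rings: chain the buffer2 pointers into a ring,
 * allocate and DMA-map an skb for every Rx slot (handing it to the chip
 * with DescOwned), and clear all Tx slots. */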
614 static void tulip_init_ring(struct net_device *dev)
615 {
616 struct tulip_private *tp = netdev_priv(dev);
617 int i;
618
619 tp->susp_rx = 0;
620 tp->ttimer = 0;
621 tp->nir = 0;
622
623 for (i = 0; i < RX_RING_SIZE; i++) {
624 tp->rx_ring[i].status = 0x00000000;
625 tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
626 tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
627 tp->rx_buffers[i].skb = NULL;
628 tp->rx_buffers[i].mapping = 0;
629 }
630
631 tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
632 tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
633
634 for (i = 0; i < RX_RING_SIZE; i++) {
635 dma_addr_t mapping;
636
637
638
639
640 struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);
641 tp->rx_buffers[i].skb = skb;
642 if (skb == NULL)
643 break;
644 mapping = pci_map_single(tp->pdev, skb->data,
645 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
646 tp->rx_buffers[i].mapping = mapping;
647 tp->rx_ring[i].status = cpu_to_le32(DescOwned);
648 tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
649 }
650 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
651
652
653
654 for (i = 0; i < TX_RING_SIZE; i++) {
655 tp->tx_buffers[i].skb = NULL;
656 tp->tx_buffers[i].mapping = 0;
657 tp->tx_ring[i].status = 0x00000000;
658 tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
659 }
660 tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
661 }
662
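/* ndo_start_xmit: map the skb, fill in the next Tx descriptor, request a
 * Tx-done interrupt only when the ring is half full or nearly exhausted,
 * hand the descriptor to the chip and issue a Tx poll demand via CSR1. */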
663 static netdev_tx_t
664 tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
665 {
666 struct tulip_private *tp = netdev_priv(dev);
667 int entry;
668 u32 flag;
669 dma_addr_t mapping;
670 unsigned long flags;
671
672 spin_lock_irqsave(&tp->lock, flags);
673
674
675 entry = tp->cur_tx % TX_RING_SIZE;
676
677 tp->tx_buffers[entry].skb = skb;
678 mapping = pci_map_single(tp->pdev, skb->data,
679 skb->len, PCI_DMA_TODEVICE);
680 tp->tx_buffers[entry].mapping = mapping;
681 tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
682
683 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {
684 flag = 0x60000000;
685 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
686 flag = 0xe0000000;
687 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
688 flag = 0x60000000;
689 } else {
690 flag = 0xe0000000;
691 netif_stop_queue(dev);
692 }
693 if (entry == TX_RING_SIZE-1)
694 flag = 0xe0000000 | DESC_RING_WRAP;
695
696 tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
697
698
699 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
700 wmb();
701
702 tp->cur_tx++;
703
704
705 iowrite32(0, tp->base_addr + CSR1);
706
707 spin_unlock_irqrestore(&tp->lock, flags);
708
709 return NETDEV_TX_OK;
710 }
711
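/* Reclaim all outstanding Tx descriptors (called with the Tx engine
 * stopped): unmap and free each skb; slots without an skb are setup
 * frames or dummy descriptors and only need unmapping. */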
712 static void tulip_clean_tx_ring(struct tulip_private *tp)
713 {
714 unsigned int dirty_tx;
715
716 for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
717 dirty_tx++) {
718 int entry = dirty_tx % TX_RING_SIZE;
719 int status = le32_to_cpu(tp->tx_ring[entry].status);
720
721 if (status < 0) {
722 tp->dev->stats.tx_errors++;
723 tp->tx_ring[entry].status = 0;
724 }
725
726
727 if (tp->tx_buffers[entry].skb == NULL) {
728
729 if (tp->tx_buffers[entry].mapping)
730 pci_unmap_single(tp->pdev,
731 tp->tx_buffers[entry].mapping,
732 sizeof(tp->setup_frame),
733 PCI_DMA_TODEVICE);
734 continue;
735 }
736
737 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
738 tp->tx_buffers[entry].skb->len,
739 PCI_DMA_TODEVICE);
740
741
742 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
743 tp->tx_buffers[entry].skb = NULL;
744 tp->tx_buffers[entry].mapping = 0;
745 }
746 }
747
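/* Quiesce the device: cancel the media work and timers, mask interrupts
 * (CSR7), stop the Rx/Tx engines, reclaim pending Tx buffers, fold the
 * missed-frame counter (CSR8) into the stats, and drop into snooze mode. */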
748 static void tulip_down (struct net_device *dev)
749 {
750 struct tulip_private *tp = netdev_priv(dev);
751 void __iomem *ioaddr = tp->base_addr;
752 unsigned long flags;
753
754 cancel_work_sync(&tp->media_work);
755
756 #ifdef CONFIG_TULIP_NAPI
757 napi_disable(&tp->napi);
758 #endif
759
760 del_timer_sync (&tp->timer);
761 #ifdef CONFIG_TULIP_NAPI
762 del_timer_sync (&tp->oom_timer);
763 #endif
764 spin_lock_irqsave (&tp->lock, flags);
765
766
767 iowrite32 (0x00000000, ioaddr + CSR7);
768
769
770 tulip_stop_rxtx(tp);
771
772
773 tulip_refill_rx(dev);
774
775
776 tulip_clean_tx_ring(tp);
777
778 if (ioread32(ioaddr + CSR6) != 0xffffffff)
779 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
780
781 spin_unlock_irqrestore (&tp->lock, flags);
782
783 timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
784
785 dev->if_port = tp->saved_if_port;
786
787
788 tulip_set_power_state (tp, 0, 1);
789 }
790
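/* Free every skb still attached to the Rx/Tx rings, unmapping its DMA
 * buffer, and poison the Rx buffer pointers so stray chip accesses are
 * easy to spot. */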
791 static void tulip_free_ring (struct net_device *dev)
792 {
793 struct tulip_private *tp = netdev_priv(dev);
794 int i;
795
796
797 for (i = 0; i < RX_RING_SIZE; i++) {
798 struct sk_buff *skb = tp->rx_buffers[i].skb;
799 dma_addr_t mapping = tp->rx_buffers[i].mapping;
800
801 tp->rx_buffers[i].skb = NULL;
802 tp->rx_buffers[i].mapping = 0;
803
804 tp->rx_ring[i].status = 0;
805 tp->rx_ring[i].length = 0;
806
807 tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
808 if (skb) {
809 pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
810 PCI_DMA_FROMDEVICE);
811 dev_kfree_skb (skb);
812 }
813 }
814
815 for (i = 0; i < TX_RING_SIZE; i++) {
816 struct sk_buff *skb = tp->tx_buffers[i].skb;
817
818 if (skb != NULL) {
819 pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
820 skb->len, PCI_DMA_TODEVICE);
821 dev_kfree_skb (skb);
822 }
823 tp->tx_buffers[i].skb = NULL;
824 tp->tx_buffers[i].mapping = 0;
825 }
826 }
827
828 static int tulip_close (struct net_device *dev)
829 {
830 struct tulip_private *tp = netdev_priv(dev);
831 void __iomem *ioaddr = tp->base_addr;
832
833 netif_stop_queue (dev);
834
835 tulip_down (dev);
836
837 if (tulip_debug > 1)
838 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
839 ioread32 (ioaddr + CSR5));
840
841 free_irq (tp->pdev->irq, dev);
842
843 tulip_free_ring (dev);
844
845 return 0;
846 }
847
848 static struct net_device_stats *tulip_get_stats(struct net_device *dev)
849 {
850 struct tulip_private *tp = netdev_priv(dev);
851 void __iomem *ioaddr = tp->base_addr;
852
853 if (netif_running(dev)) {
854 unsigned long flags;
855
856 spin_lock_irqsave (&tp->lock, flags);
857
858 dev->stats.rx_missed_errors += ioread32(ioaddr + CSR8) & 0xffff;
859
860 spin_unlock_irqrestore(&tp->lock, flags);
861 }
862
863 return &dev->stats;
864 }
865
866
867 static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
868 {
869 struct tulip_private *np = netdev_priv(dev);
870 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
871 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
872 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
873 }
874
875
876 static int tulip_ethtool_set_wol(struct net_device *dev,
877 struct ethtool_wolinfo *wolinfo)
878 {
879 struct tulip_private *tp = netdev_priv(dev);
880
881 if (wolinfo->wolopts & (~tp->wolinfo.supported))
882 return -EOPNOTSUPP;
883
884 tp->wolinfo.wolopts = wolinfo->wolopts;
885 device_set_wakeup_enable(&tp->pdev->dev, tp->wolinfo.wolopts);
886 return 0;
887 }
888
889 static void tulip_ethtool_get_wol(struct net_device *dev,
890 struct ethtool_wolinfo *wolinfo)
891 {
892 struct tulip_private *tp = netdev_priv(dev);
893
894 wolinfo->supported = tp->wolinfo.supported;
895 wolinfo->wolopts = tp->wolinfo.wolopts;
896 return;
897 }
898
899
900 static const struct ethtool_ops ops = {
901 .get_drvinfo = tulip_get_drvinfo,
902 .set_wol = tulip_ethtool_set_wol,
903 .get_wol = tulip_ethtool_get_wol,
904 };
905
906
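/* Legacy MII ioctls.  phy_id 32 is a pseudo-PHY for HAS_NWAY chips: its
 * "registers" are synthesized from the on-chip SIA/NWay state in
 * CSR6/CSR12/CSR14 rather than read from a real transceiver. */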
907 static int private_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
908 {
909 struct tulip_private *tp = netdev_priv(dev);
910 void __iomem *ioaddr = tp->base_addr;
911 struct mii_ioctl_data *data = if_mii(rq);
912 const unsigned int phy_idx = 0;
913 int phy = tp->phys[phy_idx] & 0x1f;
914 unsigned int regnum = data->reg_num;
915
916 switch (cmd) {
917 case SIOCGMIIPHY:
918 if (tp->mii_cnt)
919 data->phy_id = phy;
920 else if (tp->flags & HAS_NWAY)
921 data->phy_id = 32;
922 else if (tp->chip_id == COMET)
923 data->phy_id = 1;
924 else
925 return -ENODEV;
926
927
928 case SIOCGMIIREG:
929 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
930 int csr12 = ioread32 (ioaddr + CSR12);
931 int csr14 = ioread32 (ioaddr + CSR14);
932 switch (regnum) {
933 case 0:
934 if (((csr14<<5) & 0x1000) ||
935 (dev->if_port == 5 && tp->nwayset))
936 data->val_out = 0x1000;
937 else
938 data->val_out = (tulip_media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
939 | (tulip_media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
940 break;
941 case 1:
942 data->val_out =
943 0x1848 +
944 ((csr12&0x7000) == 0x5000 ? 0x20 : 0) +
945 ((csr12&0x06) == 6 ? 0 : 4);
946 data->val_out |= 0x6048;
947 break;
948 case 4:
949
950 data->val_out =
951 ((ioread32(ioaddr + CSR6) >> 3) & 0x0040) +
952 ((csr14 >> 1) & 0x20) + 1;
953 data->val_out |= ((csr14 >> 9) & 0x03C0);
954 break;
955 case 5: data->val_out = tp->lpar; break;
956 default: data->val_out = 0; break;
957 }
958 } else {
959 data->val_out = tulip_mdio_read (dev, data->phy_id & 0x1f, regnum);
960 }
961 return 0;
962
963 case SIOCSMIIREG:
964 if (regnum & ~0x1f)
965 return -EINVAL;
966 if (data->phy_id == phy) {
967 u16 value = data->val_in;
968 switch (regnum) {
969 case 0:
970 tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
971 if (tp->full_duplex_lock)
972 tp->full_duplex = (value & 0x0100) ? 1 : 0;
973 break;
974 case 4:
975 tp->advertising[phy_idx] =
976 tp->mii_advertise = data->val_in;
977 break;
978 }
979 }
980 if (data->phy_id == 32 && (tp->flags & HAS_NWAY)) {
981 u16 value = data->val_in;
982 if (regnum == 0) {
983 if ((value & 0x1200) == 0x1200) {
984 if (tp->chip_id == PNIC2) {
985 pnic2_start_nway (dev);
986 } else {
987 t21142_start_nway (dev);
988 }
989 }
990 } else if (regnum == 4)
991 tp->sym_advertise = value;
992 } else {
993 tulip_mdio_write (dev, data->phy_id & 0x1f, regnum, data->val_in);
994 }
995 return 0;
996 default:
997 return -EOPNOTSUPP;
998 }
999
1000 return -EOPNOTSUPP;
1001 }
1002
1003
1004
1005
1006
1007
1008
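/* Build a hash-filter setup frame: a 512-bit multicast hash table indexed
 * by the low 9 bits of the little-endian CRC of each address (bit 255 is
 * the broadcast entry), followed by the station address in the last
 * perfect-filter slot.  Each 16-bit word is written twice to fill the
 * 32-bit setup-frame slots. */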
1009 static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1010 {
1011 struct tulip_private *tp = netdev_priv(dev);
1012 u16 hash_table[32];
1013 struct netdev_hw_addr *ha;
1014 int i;
1015 u16 *eaddrs;
1016
1017 memset(hash_table, 0, sizeof(hash_table));
1018 __set_bit_le(255, hash_table);
1019
1020 netdev_for_each_mc_addr(ha, dev) {
1021 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1022
1023 __set_bit_le(index, hash_table);
1024 }
1025 for (i = 0; i < 32; i++) {
1026 *setup_frm++ = hash_table[i];
1027 *setup_frm++ = hash_table[i];
1028 }
1029 setup_frm = &tp->setup_frame[13*6];
1030
1031
1032 eaddrs = (u16 *)dev->dev_addr;
1033 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1034 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1035 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1036 }
1037
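/* Build a perfect-filter setup frame: up to 15 multicast addresses plus
 * the station address in the 16-entry filter table, with unused entries
 * padded with 0xff.  Each 16-bit address word is written twice to fill
 * the 32-bit setup-frame slots. */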
1038 static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1039 {
1040 struct tulip_private *tp = netdev_priv(dev);
1041 struct netdev_hw_addr *ha;
1042 u16 *eaddrs;
1043
1044
1045
1046 netdev_for_each_mc_addr(ha, dev) {
1047 eaddrs = (u16 *) ha->addr;
1048 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1049 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1050 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1051 }
1052
1053 memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
1054 setup_frm = &tp->setup_frame[15*6];
1055
1056
1057 eaddrs = (u16 *)dev->dev_addr;
1058 *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
1059 *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
1060 *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
1061 }
1062
1063
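/* ndo_set_rx_mode: pick promiscuous or all-multicast mode (CSR6 bits),
 * program the Comet/ASIX on-chip hash registers for MC_HASH_ONLY chips,
 * or queue a fresh setup frame on the Tx ring (perfect filter for up to
 * 14 addresses, hash filter beyond that). */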
1064 static void set_rx_mode(struct net_device *dev)
1065 {
1066 struct tulip_private *tp = netdev_priv(dev);
1067 void __iomem *ioaddr = tp->base_addr;
1068 int csr6;
1069
1070 csr6 = ioread32(ioaddr + CSR6) & ~0x00D5;
1071
1072 tp->csr6 &= ~0x00D5;
1073 if (dev->flags & IFF_PROMISC) {
1074 tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
1075 csr6 |= AcceptAllMulticast | AcceptAllPhys;
1076 } else if ((netdev_mc_count(dev) > 1000) ||
1077 (dev->flags & IFF_ALLMULTI)) {
1078
1079 tp->csr6 |= AcceptAllMulticast;
1080 csr6 |= AcceptAllMulticast;
1081 } else if (tp->flags & MC_HASH_ONLY) {
1082
1083
1084 struct netdev_hw_addr *ha;
1085 if (netdev_mc_count(dev) > 64) {
1086
1087 tp->csr6 |= AcceptAllMulticast;
1088 csr6 |= AcceptAllMulticast;
1089 } else {
1090 u32 mc_filter[2] = {0, 0};
1091 int filterbit;
1092 netdev_for_each_mc_addr(ha, dev) {
1093 if (tp->flags & COMET_MAC_ADDR)
1094 filterbit = ether_crc_le(ETH_ALEN,
1095 ha->addr);
1096 else
1097 filterbit = ether_crc(ETH_ALEN,
1098 ha->addr) >> 26;
1099 filterbit &= 0x3f;
1100 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1101 if (tulip_debug > 2)
1102 dev_info(&dev->dev,
1103 "Added filter for %pM %08x bit %d\n",
1104 ha->addr,
1105 ether_crc(ETH_ALEN, ha->addr),
1106 filterbit);
1107 }
1108 if (mc_filter[0] == tp->mc_filter[0] &&
1109 mc_filter[1] == tp->mc_filter[1])
1110 ;
1111 else if (tp->flags & IS_ASIX) {
1112 iowrite32(2, ioaddr + CSR13);
1113 iowrite32(mc_filter[0], ioaddr + CSR14);
1114 iowrite32(3, ioaddr + CSR13);
1115 iowrite32(mc_filter[1], ioaddr + CSR14);
1116 } else if (tp->flags & COMET_MAC_ADDR) {
1117 iowrite32(mc_filter[0], ioaddr + CSR27);
1118 iowrite32(mc_filter[1], ioaddr + CSR28);
1119 }
1120 tp->mc_filter[0] = mc_filter[0];
1121 tp->mc_filter[1] = mc_filter[1];
1122 }
1123 } else {
1124 unsigned long flags;
1125 u32 tx_flags = 0x08000000 | 192;
1126
1127
1128
1129 if (netdev_mc_count(dev) > 14) {
1130
1131 build_setup_frame_hash(tp->setup_frame, dev);
1132 tx_flags = 0x08400000 | 192;
1133 } else {
1134 build_setup_frame_perfect(tp->setup_frame, dev);
1135 }
1136
1137 spin_lock_irqsave(&tp->lock, flags);
1138
1139 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1140
1141 } else {
1142 unsigned int entry;
1143 int dummy = -1;
1144
1145
1146
1147 entry = tp->cur_tx++ % TX_RING_SIZE;
1148
1149 if (entry != 0) {
1150
1151 tp->tx_buffers[entry].skb = NULL;
1152 tp->tx_buffers[entry].mapping = 0;
1153 tp->tx_ring[entry].length =
1154 (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
1155 tp->tx_ring[entry].buffer1 = 0;
1156
1157 dummy = entry;
1158 entry = tp->cur_tx++ % TX_RING_SIZE;
1159
1160 }
1161
1162 tp->tx_buffers[entry].skb = NULL;
1163 tp->tx_buffers[entry].mapping =
1164 pci_map_single(tp->pdev, tp->setup_frame,
1165 sizeof(tp->setup_frame),
1166 PCI_DMA_TODEVICE);
1167
1168 if (entry == TX_RING_SIZE-1)
1169 tx_flags |= DESC_RING_WRAP;
1170 tp->tx_ring[entry].length = cpu_to_le32(tx_flags);
1171 tp->tx_ring[entry].buffer1 =
1172 cpu_to_le32(tp->tx_buffers[entry].mapping);
1173 tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
1174 if (dummy >= 0)
1175 tp->tx_ring[dummy].status = cpu_to_le32(DescOwned);
1176 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2)
1177 netif_stop_queue(dev);
1178
1179
1180 iowrite32(0, ioaddr + CSR1);
1181 }
1182
1183 spin_unlock_irqrestore(&tp->lock, flags);
1184 }
1185
1186 iowrite32(csr6, ioaddr + CSR6);
1187 }
1188
1189 #ifdef CONFIG_TULIP_MWI
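/* Tune CSR0 for Memory-Write-Invalidate: keep MWI only if the PCI command
 * register accepted it and a cache-line size is programmed, then choose
 * cache-alignment and burst-length values matching that line size,
 * falling back to a conservative 8-longword setting otherwise. */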
1190 static void tulip_mwi_config(struct pci_dev *pdev, struct net_device *dev)
1191 {
1192 struct tulip_private *tp = netdev_priv(dev);
1193 u8 cache;
1194 u16 pci_command;
1195 u32 csr0;
1196
1197 if (tulip_debug > 3)
1198 netdev_dbg(dev, "tulip_mwi_config()\n");
1199
1200 tp->csr0 = csr0 = 0;
1201
1202
1203 csr0 |= MRM | MWI;
1204
1205
1206
1207
1208 pci_try_set_mwi(pdev);
1209
1210
1211 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
1212 if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
1213 csr0 &= ~MWI;
1214
1215
1216 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
1217 if ((csr0 & MWI) && (cache == 0)) {
1218 csr0 &= ~MWI;
1219 pci_clear_mwi(pdev);
1220 }
1221
1222
1223
1224
1225 switch (cache) {
1226 case 8:
1227 csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
1228 break;
1229 case 16:
1230 csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
1231 break;
1232 case 32:
1233 csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
1234 break;
1235 default:
1236 cache = 0;
1237 break;
1238 }
1239
1240
1241
1242
1243 if (cache)
1244 goto out;
1245
1246
1247 if (csr0 & MWI) {
1248 pci_clear_mwi(pdev);
1249 csr0 &= ~MWI;
1250 }
1251
1252
1253
1254
1255 csr0 |= (8 << BurstLenShift) | (1 << CALShift);
1256
1257 out:
1258 tp->csr0 = csr0;
1259 if (tulip_debug > 2)
1260 netdev_dbg(dev, "MWI config cacheline=%d, csr0=%08x\n",
1261 cache, csr0);
1262 }
1263 #endif
1264
1265
1266
1267
1268
1269
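/* Identify Davicom DM9102/ULi parts whose bus interface cannot cope with
 * the default CSR0 settings; tulip_init_one() masks the offending bits
 * when this returns true. */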
1270 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
1271 {
1272 if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
1273 return 1;
1274 return 0;
1275 }
1276
1277 static const struct net_device_ops tulip_netdev_ops = {
1278 .ndo_open = tulip_open,
1279 .ndo_start_xmit = tulip_start_xmit,
1280 .ndo_tx_timeout = tulip_tx_timeout,
1281 .ndo_stop = tulip_close,
1282 .ndo_get_stats = tulip_get_stats,
1283 .ndo_do_ioctl = private_ioctl,
1284 .ndo_set_rx_mode = set_rx_mode,
1285 .ndo_set_mac_address = eth_mac_addr,
1286 .ndo_validate_addr = eth_validate_addr,
1287 #ifdef CONFIG_NET_POLL_CONTROLLER
1288 .ndo_poll_controller = poll_tulip,
1289 #endif
1290 };
1291
1292 const struct pci_device_id early_486_chipsets[] = {
1293 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1294 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1295 { },
1296 };
1297
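/* PCI probe.  Rejects boards owned by other drivers (LMC WAN cards, SBE
 * T3E3 ports, early DM9100s and DM910x expansion cards), maps the chip,
 * allocates the descriptor rings, reads the station address from the
 * serial EEPROM (with several board-specific quirks), parses the SROM
 * media table, scans for an MII transceiver, registers the net_device
 * and finally puts the chip into a low-power state until it is opened. */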
1298 static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1299 {
1300 struct tulip_private *tp;
1301
1302 static unsigned char last_phys_addr[ETH_ALEN] = {
1303 0x00, 'L', 'i', 'n', 'u', 'x'
1304 };
1305 static int last_irq;
1306 int i, irq;
1307 unsigned short sum;
1308 unsigned char *ee_data;
1309 struct net_device *dev;
1310 void __iomem *ioaddr;
1311 static int board_idx = -1;
1312 int chip_idx = ent->driver_data;
1313 const char *chip_name = tulip_tbl[chip_idx].chip_name;
1314 unsigned int eeprom_missing = 0;
1315 unsigned int force_csr0 = 0;
1316
1317 #ifndef MODULE
1318 if (tulip_debug > 0)
1319 printk_once(KERN_INFO "%s", version);
1320 #endif
1321
1322 board_idx++;
1323
1324
1325
1326
1327
1328
1329 if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
1330 pr_err("skipping LMC card\n");
1331 return -ENODEV;
1332 } else if (pdev->subsystem_vendor == PCI_VENDOR_ID_SBE &&
1333 (pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_T3E3 ||
1334 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P0 ||
1335 pdev->subsystem_device == PCI_SUBDEVICE_ID_SBE_2T3E3_P1)) {
1336 pr_err("skipping SBE T3E3 port\n");
1337 return -ENODEV;
1338 }
1339
1340
1341
1342
1343
1344
1345
1346 #ifdef CONFIG_TULIP_DM910X
1347 if (chip_idx == DM910X) {
1348 struct device_node *dp;
1349
1350 if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
1351 pdev->revision < 0x30) {
1352 pr_info("skipping early DM9100 with Crc bug (use dmfe)\n");
1353 return -ENODEV;
1354 }
1355
1356 dp = pci_device_to_OF_node(pdev);
1357 if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
1358 pr_info("skipping DM910x expansion card (use dmfe)\n");
1359 return -ENODEV;
1360 }
1361 }
1362 #endif
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377 if (pci_dev_present(early_486_chipsets)) {
1378 csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
1379 force_csr0 = 1;
1380 }
1381
1382
1383 if (chip_idx == AX88140) {
1384 if ((csr0 & 0x3f00) == 0)
1385 csr0 |= 0x2000;
1386 }
1387
1388
1389 if (chip_idx == LC82C168)
1390 csr0 &= ~0xfff10000;
1391
1392
1393 if (tulip_uli_dm_quirk(pdev)) {
1394 csr0 &= ~0x01f100ff;
1395 #if defined(CONFIG_SPARC)
1396 csr0 = (csr0 & ~0xff00) | 0xe000;
1397 #endif
1398 }
1399
1400
1401
1402
1403 i = pci_enable_device(pdev);
1404 if (i) {
1405 pr_err("Cannot enable tulip board #%d, aborting\n", board_idx);
1406 return i;
1407 }
1408
1409 irq = pdev->irq;
1410
1411
1412 dev = alloc_etherdev (sizeof (*tp));
1413 if (!dev)
1414 return -ENOMEM;
1415
1416 SET_NETDEV_DEV(dev, &pdev->dev);
1417 if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
1418 pr_err("%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
1419 pci_name(pdev),
1420 (unsigned long long)pci_resource_len (pdev, 0),
1421 (unsigned long long)pci_resource_start (pdev, 0));
1422 goto err_out_free_netdev;
1423 }
1424
1425
1426
1427 if (pci_request_regions (pdev, DRV_NAME))
1428 goto err_out_free_netdev;
1429
1430 ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
1431
1432 if (!ioaddr)
1433 goto err_out_free_res;
1434
1435
1436
1437
1438
1439 tp = netdev_priv(dev);
1440 tp->dev = dev;
1441
1442 tp->rx_ring = pci_alloc_consistent(pdev,
1443 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
1444 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
1445 &tp->rx_ring_dma);
1446 if (!tp->rx_ring)
1447 goto err_out_mtable;
1448 tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
1449 tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
1450
1451 tp->chip_id = chip_idx;
1452 tp->flags = tulip_tbl[chip_idx].flags;
1453
1454 tp->wolinfo.supported = 0;
1455 tp->wolinfo.wolopts = 0;
1456
1457 if (chip_idx == COMET ) {
1458 u32 sig;
1459 pci_read_config_dword (pdev, 0x80, &sig);
1460 if (sig == 0x09811317) {
1461 tp->flags |= COMET_PM;
1462 tp->wolinfo.supported = WAKE_PHY | WAKE_MAGIC;
1463 pr_info("%s: Enabled WOL support for AN983B\n",
1464 __func__);
1465 }
1466 }
1467 tp->pdev = pdev;
1468 tp->base_addr = ioaddr;
1469 tp->revision = pdev->revision;
1470 tp->csr0 = csr0;
1471 spin_lock_init(&tp->lock);
1472 spin_lock_init(&tp->mii_lock);
1473 timer_setup(&tp->timer, tulip_tbl[tp->chip_id].media_timer, 0);
1474
1475 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1476
1477 #ifdef CONFIG_TULIP_MWI
1478 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1479 tulip_mwi_config (pdev, dev);
1480 #endif
1481
1482
1483 tulip_stop_rxtx(tp);
1484
1485 pci_set_master(pdev);
1486
1487 #ifdef CONFIG_GSC
1488 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) {
1489 switch (pdev->subsystem_device) {
1490 default:
1491 break;
1492 case 0x1061:
1493 case 0x1062:
1494 case 0x1063:
1495 case 0x1098:
1496 case 0x1099:
1497 case 0x10EE:
1498 tp->flags |= HAS_SWAPPED_SEEPROM | NEEDS_FAKE_MEDIA_TABLE;
1499 chip_name = "GSC DS21140 Tulip";
1500 }
1501 }
1502 #endif
1503
1504
1505 ioread32(ioaddr + CSR8);
1506
1507
1508
1509
1510
1511 ee_data = tp->eeprom;
1512 memset(ee_data, 0, sizeof(tp->eeprom));
1513 sum = 0;
1514 if (chip_idx == LC82C168) {
1515 for (i = 0; i < 3; i++) {
1516 int value, boguscnt = 100000;
1517 iowrite32(0x600 | i, ioaddr + 0x98);
1518 do {
1519 value = ioread32(ioaddr + CSR9);
1520 } while (value < 0 && --boguscnt > 0);
1521 put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
1522 sum += value & 0xffff;
1523 }
1524 } else if (chip_idx == COMET) {
1525
1526 put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
1527 put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
1528 for (i = 0; i < 6; i ++)
1529 sum += dev->dev_addr[i];
1530 } else {
1531
1532 int sa_offset = 0;
1533 int ee_addr_size = tulip_read_eeprom(dev, 0xff, 8) & 0x40000 ? 8 : 6;
1534 int ee_max_addr = ((1 << ee_addr_size) - 1) * sizeof(u16);
1535
1536 if (ee_max_addr > sizeof(tp->eeprom))
1537 ee_max_addr = sizeof(tp->eeprom);
1538
1539 for (i = 0; i < ee_max_addr ; i += sizeof(u16)) {
1540 u16 data = tulip_read_eeprom(dev, i/2, ee_addr_size);
1541 ee_data[i] = data & 0xff;
1542 ee_data[i + 1] = data >> 8;
1543 }
1544
1545
1546
1547
1548 for (i = 0; i < 8; i ++)
1549 if (ee_data[i] != ee_data[16+i])
1550 sa_offset = 20;
1551 if (chip_idx == CONEXANT) {
1552
1553 if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
1554 sa_offset = 0x19A;
1555 } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
1556 ee_data[2] == 0) {
1557 sa_offset = 2;
1558 }
1559 #ifdef CONFIG_MIPS_COBALT
1560 if ((pdev->bus->number == 0) &&
1561 ((PCI_SLOT(pdev->devfn) == 7) ||
1562 (PCI_SLOT(pdev->devfn) == 12))) {
1563
1564 sa_offset = 0;
1565
1566 memcpy(ee_data + 16, ee_data, 8);
1567 }
1568 #endif
1569 #ifdef CONFIG_GSC
1570
1571 if (ee_data[0] == 0x61 && ee_data[1] == 0x10) {
1572
1573 ee_data[0] = ee_data[2];
1574 ee_data[1] = ee_data[3];
1575 ee_data[2] = 0x61;
1576 ee_data[3] = 0x10;
1577
1578
1579
1580
1581
1582 for (i = 4; i >= 0; i -= 2) {
1583 ee_data[17 + i + 3] = ee_data[17 + i];
1584 ee_data[16 + i + 5] = ee_data[16 + i];
1585 }
1586 }
1587 #endif
1588
1589 for (i = 0; i < 6; i ++) {
1590 dev->dev_addr[i] = ee_data[i + sa_offset];
1591 sum += ee_data[i + sa_offset];
1592 }
1593 }
1594
1595 if ((dev->dev_addr[0] == 0xA0 ||
1596 dev->dev_addr[0] == 0xC0 ||
1597 dev->dev_addr[0] == 0x02) &&
1598 dev->dev_addr[1] == 0x00)
1599 for (i = 0; i < 6; i+=2) {
1600 char tmp = dev->dev_addr[i];
1601 dev->dev_addr[i] = dev->dev_addr[i+1];
1602 dev->dev_addr[i+1] = tmp;
1603 }
1604
1605
1606
1607
1608
1609
1610
1611 if (sum == 0 || sum == 6*0xff) {
1612 #if defined(CONFIG_SPARC)
1613 struct device_node *dp = pci_device_to_OF_node(pdev);
1614 const unsigned char *addr;
1615 int len;
1616 #endif
1617 eeprom_missing = 1;
1618 for (i = 0; i < 5; i++)
1619 dev->dev_addr[i] = last_phys_addr[i];
1620 dev->dev_addr[i] = last_phys_addr[i] + 1;
1621 #if defined(CONFIG_SPARC)
1622 addr = of_get_property(dp, "local-mac-address", &len);
1623 if (addr && len == ETH_ALEN)
1624 memcpy(dev->dev_addr, addr, ETH_ALEN);
1625 #endif
1626 #if defined(__i386__) || defined(__x86_64__)
1627 if (last_irq)
1628 irq = last_irq;
1629 #endif
1630 }
1631
1632 for (i = 0; i < 6; i++)
1633 last_phys_addr[i] = dev->dev_addr[i];
1634 last_irq = irq;
1635
1636
1637 if (board_idx >= 0 && board_idx < MAX_UNITS) {
1638 if (options[board_idx] & MEDIA_MASK)
1639 tp->default_port = options[board_idx] & MEDIA_MASK;
1640 if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
1641 tp->full_duplex = 1;
1642 if (mtu[board_idx] > 0)
1643 dev->mtu = mtu[board_idx];
1644 }
1645 if (dev->mem_start & MEDIA_MASK)
1646 tp->default_port = dev->mem_start & MEDIA_MASK;
1647 if (tp->default_port) {
1648 pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
1649 board_idx, medianame[tp->default_port & MEDIA_MASK]);
1650 tp->medialock = 1;
1651 if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
1652 tp->full_duplex = 1;
1653 }
1654 if (tp->full_duplex)
1655 tp->full_duplex_lock = 1;
1656
1657 if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1658 static const u16 media2advert[] = {
1659 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1660 };
1661 tp->mii_advertise = media2advert[tp->default_port - 9];
1662 tp->mii_advertise |= (tp->flags & HAS_8023X);
1663 }
1664
1665 if (tp->flags & HAS_MEDIA_TABLE) {
1666 sprintf(dev->name, DRV_NAME "%d", board_idx);
1667 tulip_parse_eeprom(dev);
1668 strcpy(dev->name, "eth%d");
1669 }
1670
1671 if ((tp->flags & ALWAYS_CHECK_MII) ||
1672 (tp->mtable && tp->mtable->has_mii) ||
1673 ( ! tp->mtable && (tp->flags & HAS_MII))) {
1674 if (tp->mtable && tp->mtable->has_mii) {
1675 for (i = 0; i < tp->mtable->leafcount; i++)
1676 if (tp->mtable->mleaf[i].media == 11) {
1677 tp->cur_index = i;
1678 tp->saved_if_port = dev->if_port;
1679 tulip_select_media(dev, 2);
1680 dev->if_port = tp->saved_if_port;
1681 break;
1682 }
1683 }
1684
1685
1686
1687
1688 tulip_find_mii (dev, board_idx);
1689 }
1690
1691
1692 dev->netdev_ops = &tulip_netdev_ops;
1693 dev->watchdog_timeo = TX_TIMEOUT;
1694 #ifdef CONFIG_TULIP_NAPI
1695 netif_napi_add(dev, &tp->napi, tulip_poll, 16);
1696 #endif
1697 dev->ethtool_ops = &ops;
1698
1699 if (register_netdev(dev))
1700 goto err_out_free_ring;
1701
1702 pci_set_drvdata(pdev, dev);
1703
1704 dev_info(&dev->dev,
1705 #ifdef CONFIG_TULIP_MMIO
1706 "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
1707 #else
1708 "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
1709 #endif
1710 chip_name, pdev->revision,
1711 (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
1712 eeprom_missing ? " EEPROM not present," : "",
1713 dev->dev_addr, irq);
1714
1715 if (tp->chip_id == PNIC2)
1716 tp->link_change = pnic2_lnk_change;
1717 else if (tp->flags & HAS_NWAY)
1718 tp->link_change = t21142_lnk_change;
1719 else if (tp->flags & HAS_PNICNWAY)
1720 tp->link_change = pnic_lnk_change;
1721
1722
1723 switch (chip_idx) {
1724 case DC21140:
1725 case DM910X:
1726 default:
1727 if (tp->mtable)
1728 iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
1729 break;
1730 case DC21142:
1731 if (tp->mii_cnt || tulip_media_cap[dev->if_port] & MediaIsMII) {
1732 iowrite32(csr6_mask_defstate, ioaddr + CSR6);
1733 iowrite32(0x0000, ioaddr + CSR13);
1734 iowrite32(0x0000, ioaddr + CSR14);
1735 iowrite32(csr6_mask_hdcap, ioaddr + CSR6);
1736 } else
1737 t21142_start_nway(dev);
1738 break;
1739 case PNIC2:
1740
1741 iowrite32(0x0000, ioaddr + CSR13);
1742 iowrite32(0x0000, ioaddr + CSR14);
1743 break;
1744 case LC82C168:
1745 if ( ! tp->mii_cnt) {
1746 tp->nway = 1;
1747 tp->nwayset = 0;
1748 iowrite32(csr6_ttm | csr6_ca, ioaddr + CSR6);
1749 iowrite32(0x30, ioaddr + CSR12);
1750 iowrite32(0x0001F078, ioaddr + CSR6);
1751 iowrite32(0x0201F078, ioaddr + CSR6);
1752 }
1753 break;
1754 case MX98713:
1755 case COMPEX9881:
1756 iowrite32(0x00000000, ioaddr + CSR6);
1757 iowrite32(0x000711C0, ioaddr + CSR14);
1758 iowrite32(0x00000001, ioaddr + CSR13);
1759 break;
1760 case MX98715:
1761 case MX98725:
1762 iowrite32(0x01a80000, ioaddr + CSR6);
1763 iowrite32(0xFFFFFFFF, ioaddr + CSR14);
1764 iowrite32(0x00001000, ioaddr + CSR12);
1765 break;
1766 case COMET:
1767
1768 break;
1769 }
1770
1771
1772 tulip_set_power_state (tp, 0, 1);
1773
1774 return 0;
1775
1776 err_out_free_ring:
1777 pci_free_consistent (pdev,
1778 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1779 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1780 tp->rx_ring, tp->rx_ring_dma);
1781
1782 err_out_mtable:
1783 kfree (tp->mtable);
1784 pci_iounmap(pdev, ioaddr);
1785
1786 err_out_free_res:
1787 pci_release_regions (pdev);
1788
1789 err_out_free_netdev:
1790 free_netdev (dev);
1791 return -ENODEV;
1792 }
1793
1794
1795
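/* Program Wake-on-LAN for ADMtek Comet (AN983B) parts: select PM mode in
 * CSR18 and enable magic-packet and/or link-change wakeup events in
 * CSR13.  Chips without COMET_PM ignore the request. */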
1796 static void tulip_set_wolopts (struct pci_dev *pdev, u32 wolopts)
1797 {
1798 struct net_device *dev = pci_get_drvdata(pdev);
1799 struct tulip_private *tp = netdev_priv(dev);
1800 void __iomem *ioaddr = tp->base_addr;
1801
1802 if (tp->flags & COMET_PM) {
1803
1804 unsigned int tmp;
1805
1806 tmp = ioread32(ioaddr + CSR18);
1807 tmp &= ~(comet_csr18_pmes_sticky | comet_csr18_apm_mode | comet_csr18_d3a);
1808 tmp |= comet_csr18_pm_mode;
1809 iowrite32(tmp, ioaddr + CSR18);
1810
1811
1812 tmp = ioread32(ioaddr + CSR13);
1813 tmp &= ~(comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_wfre | comet_csr13_lsce | comet_csr13_mpre);
1814 if (wolopts & WAKE_MAGIC)
1815 tmp |= comet_csr13_mpre;
1816 if (wolopts & WAKE_PHY)
1817 tmp |= comet_csr13_linkoffe | comet_csr13_linkone | comet_csr13_lsce;
1818
1819 tmp |= comet_csr13_wfr | comet_csr13_mpr | comet_csr13_lsc;
1820 iowrite32(tmp, ioaddr + CSR13);
1821 }
1822 }
1823
1824 #ifdef CONFIG_PM
1825
1826
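/* Legacy PCI suspend/resume.  Suspend tears the interface down, arms
 * Wake-on-LAN if configured and enters the target D-state; resume
 * restores config space, re-requests the IRQ, clears the wakeup state on
 * Comet parts and brings the interface back up. */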
1827 static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1828 {
1829 pci_power_t pstate;
1830 struct net_device *dev = pci_get_drvdata(pdev);
1831 struct tulip_private *tp = netdev_priv(dev);
1832
1833 if (!dev)
1834 return -EINVAL;
1835
1836 if (!netif_running(dev))
1837 goto save_state;
1838
1839 tulip_down(dev);
1840
1841 netif_device_detach(dev);
1842
1843 free_irq(tp->pdev->irq, dev);
1844
1845 save_state:
1846 pci_save_state(pdev);
1847 pci_disable_device(pdev);
1848 pstate = pci_choose_state(pdev, state);
1849 if (state.event == PM_EVENT_SUSPEND && pstate != PCI_D0) {
1850 int rc;
1851
1852 tulip_set_wolopts(pdev, tp->wolinfo.wolopts);
1853 rc = pci_enable_wake(pdev, pstate, tp->wolinfo.wolopts);
1854 if (rc)
1855 pr_err("pci_enable_wake failed (%d)\n", rc);
1856 }
1857 pci_set_power_state(pdev, pstate);
1858
1859 return 0;
1860 }
1861
1862
1863 static int tulip_resume(struct pci_dev *pdev)
1864 {
1865 struct net_device *dev = pci_get_drvdata(pdev);
1866 struct tulip_private *tp = netdev_priv(dev);
1867 void __iomem *ioaddr = tp->base_addr;
1868 int retval;
1869 unsigned int tmp;
1870
1871 if (!dev)
1872 return -EINVAL;
1873
1874 pci_set_power_state(pdev, PCI_D0);
1875 pci_restore_state(pdev);
1876
1877 if (!netif_running(dev))
1878 return 0;
1879
1880 if ((retval = pci_enable_device(pdev))) {
1881 pr_err("pci_enable_device failed in resume\n");
1882 return retval;
1883 }
1884
1885 retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1886 dev->name, dev);
1887 if (retval) {
1888 pr_err("request_irq failed in resume\n");
1889 return retval;
1890 }
1891
1892 if (tp->flags & COMET_PM) {
1893 pci_enable_wake(pdev, PCI_D3hot, 0);
1894 pci_enable_wake(pdev, PCI_D3cold, 0);
1895
1896
1897 tmp = ioread32(ioaddr + CSR20);
1898 tmp |= comet_csr20_pmes;
1899 iowrite32(tmp, ioaddr + CSR20);
1900
1901
1902 tulip_set_wolopts(pdev, 0);
1903 }
1904 netif_device_attach(dev);
1905
1906 if (netif_running(dev))
1907 tulip_up(dev);
1908
1909 return 0;
1910 }
1911
1912 #endif
1913
1914
1915 static void tulip_remove_one(struct pci_dev *pdev)
1916 {
1917 struct net_device *dev = pci_get_drvdata (pdev);
1918 struct tulip_private *tp;
1919
1920 if (!dev)
1921 return;
1922
1923 tp = netdev_priv(dev);
1924 unregister_netdev(dev);
1925 pci_free_consistent (pdev,
1926 sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
1927 sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
1928 tp->rx_ring, tp->rx_ring_dma);
1929 kfree (tp->mtable);
1930 pci_iounmap(pdev, tp->base_addr);
1931 free_netdev (dev);
1932 pci_release_regions (pdev);
1933 pci_disable_device(pdev);
1934
1935
1936 }
1937
1938 #ifdef CONFIG_NET_POLL_CONTROLLER
1939
1940
1941
1942
1943
1944
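/* netpoll hook: with the device IRQ disabled, call the interrupt handler
 * directly so netconsole and friends can drain the rings without real
 * interrupts. */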
1945 static void poll_tulip (struct net_device *dev)
1946 {
1947 struct tulip_private *tp = netdev_priv(dev);
1948 const int irq = tp->pdev->irq;
1949
1950
1951
1952 disable_irq(irq);
1953 tulip_interrupt (irq, dev);
1954 enable_irq(irq);
1955 }
1956 #endif
1957
1958 static struct pci_driver tulip_driver = {
1959 .name = DRV_NAME,
1960 .id_table = tulip_pci_tbl,
1961 .probe = tulip_init_one,
1962 .remove = tulip_remove_one,
1963 #ifdef CONFIG_PM
1964 .suspend = tulip_suspend,
1965 .resume = tulip_resume,
1966 #endif
1967 };
1968
1969
1970 static int __init tulip_init (void)
1971 {
1972 #ifdef MODULE
1973 pr_info("%s", version);
1974 #endif
1975
1976 if (!csr0) {
1977 pr_warn("tulip: unknown CPU architecture, using default csr0\n");
1978
1979 csr0 = 0x00A00000 | 0x4800;
1980 }
1981
1982
1983 tulip_rx_copybreak = rx_copybreak;
1984 tulip_max_interrupt_work = max_interrupt_work;
1985
1986
1987 return pci_register_driver(&tulip_driver);
1988 }
1989
1990
1991 static void __exit tulip_cleanup (void)
1992 {
1993 pci_unregister_driver (&tulip_driver);
1994 }
1995
1996
1997 module_init(tulip_init);
1998 module_exit(tulip_cleanup);