This source file includes the following definitions:
- w840_probe1
- eeprom_read
- mdio_sync
- mdio_read
- mdio_write
- netdev_open
- update_link
- update_csr6
- netdev_timer
- init_rxtx_rings
- free_rxtx_rings
- init_registers
- tx_timeout
- alloc_ringdesc
- free_ringdesc
- start_tx
- netdev_tx_done
- intr_handler
- netdev_rx
- netdev_error
- get_stats
- __set_rx_mode
- set_rx_mode
- netdev_get_drvinfo
- netdev_get_link_ksettings
- netdev_set_link_ksettings
- netdev_nway_reset
- netdev_get_link
- netdev_get_msglevel
- netdev_set_msglevel
- netdev_ioctl
- netdev_close
- w840_remove1
- w840_suspend
- w840_resume
- w840_init
- w840_exit

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"winbond-840"
#define DRV_VERSION	"1.01-e"
#define DRV_RELDATE	"Sep-11-2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;			/* debug level: 0 quiet .. 6 verbose */
static int max_interrupt_work = 20;	/* Maximum events to handle at each interrupt. */

/* The maximum number of multicast addresses filtered exactly; above this
   count the interface accepts all multicast frames. */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Used to pass the media type and duplex overrides.
   Both 'options[]' and 'full_duplex[]' exist for driver interoperability. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Operational parameters that are set at compile time. */

/* Limit the number of Tx ring entries actually used. */
#define TX_QUEUE_LEN		10
#define TX_QUEUE_LEN_RESTART	5

#define TX_BUFLIMIT	(1024-128)

/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
   Frames queued beyond this limit could overflow the transmit FIFO on
   chips with the HasBrokenTx erratum. */
#define TX_FIFO_SIZE (2048)
#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>

#include "tulip.h"

#undef PKT_BUF_SZ		/* tulip.h also defines this */
#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */

/* These identify the driver base version and may not be removed. */
static const char version[] __initconst =
	"v" DRV_VERSION " (2.4 port) "
	DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
	" http://www.scyld.com/network/drivers.html\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(multicast_filter_limit, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(max_interrupt_work, "winbond-840 maximum events handled per interrupt");
MODULE_PARM_DESC(debug, "winbond-840 debug level (0-6)");
MODULE_PARM_DESC(rx_copybreak, "winbond-840 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(multicast_filter_limit, "winbond-840 maximum number of filtered multicast addresses");
MODULE_PARM_DESC(options, "winbond-840: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "winbond-840 full duplex setting(s) (1)");

enum chip_capability_flags {
	CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};

static const struct pci_device_id w840_pci_tbl[] = {
	{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153,     0, 0, 0 },
	{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ }
};
MODULE_DEVICE_TABLE(pci, w840_pci_tbl);

enum {
	netdev_res_size = 128,	/* size of non-csr registers */
};

struct pci_id_info {
	const char *name;
	int drv_flags;		/* Driver use, intended as capability flags. */
};

static const struct pci_id_info pci_id_tbl[] = {
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx | FDXOnNoMII },
	{ "Winbond W89c840",	CanHaveMII | HasBrokenTx },
	{ "Compex RL100-ATX",	CanHaveMII | HasBrokenTx },
	{ }	/* terminate list */
};

/* Offsets to the Command and Status Registers ("CSRs"), relative to the
   mapped base address. */
enum w840_offsets {
	PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
	RxRingPtr=0x0C, TxRingPtr=0x10,
	IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
	RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
	CurRxDescAddr=0x30, CurRxBufAddr=0x34,
	MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
	CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
};

/* Bits in the NetworkConfig register that select the receive mode. */
enum rx_mode_bits {
	AcceptErr=0x80,
	RxAcceptBroadcast=0x20, AcceptMulticast=0x10,
	RxAcceptAllPhys=0x08, AcceptMyPhys=0x02,
};

enum mii_reg_bits {
	MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
	MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
};

/* The Winbond Rx and Tx buffer descriptors. */
struct w840_rx_desc {
	s32 status;
	s32 length;
	u32 buffer1;
	u32 buffer2;
};

struct w840_tx_desc {
	s32 status;
	s32 length;
	u32 buffer1, buffer2;
};

#define MII_CNT		1	/* winbond only supports one MII */
struct netdev_private {
	struct w840_rx_desc *rx_ring;
	dma_addr_t	rx_addr[RX_RING_SIZE];
	struct w840_tx_desc *tx_ring;
	dma_addr_t	tx_addr[TX_RING_SIZE];
	dma_addr_t ring_dma_addr;
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	/* The saved addresses of sent-in-place packets, for later free(). */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	struct net_device_stats stats;
	struct timer_list timer;	/* Media monitoring timer. */
	/* Frequently used values: keep some adjacent for cache effect. */
	spinlock_t lock;
	int chip_id, drv_flags;
	struct pci_dev *pci_dev;
	int csr6;
	struct w840_rx_desc *rx_head_desc;
	unsigned int cur_rx, dirty_rx;	/* Producer/consumer ring indices */
	unsigned int rx_buf_sz;		/* Based on MTU+slack. */
	unsigned int cur_tx, dirty_tx;
	unsigned int tx_q_bytes;
	unsigned int tx_full;		/* The Tx queue is full. */
	/* MII transceiver section. */
	int mii_cnt;			/* Number of MII PHYs found. */
	unsigned char phys[MII_CNT];	/* MII device addresses, only the first used. */
	u32 mii;
	struct mii_if_info mii_if;
	void __iomem *base_addr;
};

static int  eeprom_read(void __iomem *ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static int  update_link(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void init_rxtx_rings(struct net_device *dev);
static void free_rxtx_rings(struct netdev_private *np);
static void init_registers(struct net_device *dev);
static void tx_timeout(struct net_device *dev);
static int  alloc_ringdesc(struct net_device *dev);
static void free_ringdesc(struct netdev_private *np);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static u32 __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int  netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int  netdev_close(struct net_device *dev);

static const struct net_device_ops netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

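/* Probe one board: enable the PCI device, map its registers, read the
   station address from the EEPROM, initialize the private data, scan for
   an MII PHY if the chip can have one, and register the netdev. */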
static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int find_cnt;
	int chip_idx = ent->driver_data;
	int irq;
	int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	void __iomem *ioaddr;

	i = pci_enable_device(pdev);
	if (i)
		return i;

	pci_set_master(pdev);

	irq = pdev->irq;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pr_warn("Device %s disabled due to DMA limitations\n",
			pci_name(pdev));
		return -EIO;
	}
	dev = alloc_etherdev(sizeof(*np));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, DRV_NAME))
		goto err_out_netdev;

	ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
	if (!ioaddr)
		goto err_out_free_res;

	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));

	/* Reset the chip to erase previous misconfiguration.
	   No hold time required! */
	iowrite32(0x00000001, ioaddr + PCIBusCfg);

	np = netdev_priv(dev);
	np->pci_dev = pdev;
	np->chip_id = chip_idx;
	np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
	spin_lock_init(&np->lock);
	np->mii_if.dev = dev;
	np->mii_if.mdio_read = mdio_read;
	np->mii_if.mdio_write = mdio_write;
	np->base_addr = ioaddr;

	pci_set_drvdata(pdev, dev);

	if (dev->mem_start)
		option = dev->mem_start;

	/* The lower four bits are the media type. */
	if (option > 0) {
		if (option & 0x200)
			np->mii_if.full_duplex = 1;
		if (option & 15)
			dev_info(&dev->dev,
				 "ignoring user supplied media type %d",
				 option & 15);
	}
	if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
		np->mii_if.full_duplex = 1;

	if (np->mii_if.full_duplex)
		np->mii_if.force_media = 1;

	/* The chip-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	i = register_netdev(dev);
	if (i)
		goto err_out_cleardev;

	dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
		 pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);

	if (np->drv_flags & CanHaveMII) {
		int phy, phy_idx = 0;

		for (phy = 1; phy < 32 && phy_idx < MII_CNT; phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);

			if (mii_status != 0xffff && mii_status != 0x0000) {
				np->phys[phy_idx++] = phy;
				np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
				np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16) +
					mdio_read(dev, phy, MII_PHYSID2);
				dev_info(&dev->dev,
					 "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
					 np->mii, phy, mii_status,
					 np->mii_if.advertising);
			}
		}
		np->mii_cnt = phy_idx;
		np->mii_if.phy_id = np->phys[0];
		if (phy_idx == 0) {
			dev_warn(&dev->dev,
				 "MII PHY not found -- this device may not operate correctly\n");
		}
	}

	find_cnt++;
	return 0;

err_out_cleardev:
	pci_iounmap(pdev, ioaddr);
err_out_free_res:
	pci_release_regions(pdev);
err_out_netdev:
	free_netdev(dev);
	return -ENODEV;
}

/* Serial EEPROM section. */
/* Delay between EEPROM control line changes; reading the register back
   also flushes the preceding posted PCI write. */
#define eeprom_delay(ee_addr)	ioread32(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
	EE_ChipSelect=0x801, EE_DataIn=0x08,
};

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
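
/* Read a 16-bit word from the serial EEPROM at the given word offset:
   bit-bang the read command out through EECtrl, then clock the 16 data
   bits back in, MSB first. */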
static int eeprom_read(void __iomem *addr, int location)
{
	int i;
	int retval = 0;
	void __iomem *ee_addr = addr + EECtrl;
	int read_cmd = location | EE_ReadCmd;

	iowrite32(EE_ChipSelect, ee_addr);

	/* Shift the read command bits out. */
	for (i = 10; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;

		iowrite32(dataval, ee_addr);
		eeprom_delay(ee_addr);
		iowrite32(dataval | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
	}
	iowrite32(EE_ChipSelect, ee_addr);
	eeprom_delay(ee_addr);

	for (i = 16; i > 0; i--) {
		iowrite32(EE_ChipSelect | EE_ShiftClk, ee_addr);
		eeprom_delay(ee_addr);
		retval = (retval << 1) | ((ioread32(ee_addr) & EE_DataIn) ? 1 : 0);
		iowrite32(EE_ChipSelect, ee_addr);
		eeprom_delay(ee_addr);
	}

	/* Terminate the EEPROM access. */
	iowrite32(0, ee_addr);
	return retval;
}

/* MII transceiver control section.
   Read and write the MII registers using software-generated serial MDIO
   protocol. See the MII specifications or DP83840A data sheet for details. */

#define mdio_delay(mdio_addr)	ioread32(mdio_addr)

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This is only needed by older transceivers, so the extra code size of
   a per-interface flag is not worthwhile. */
static char mii_preamble_required = 1;

#define MDIO_WRITE0	(MDIO_EnbOutput)
#define MDIO_WRITE1	(MDIO_DataOut | MDIO_EnbOutput)

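/* Generate the MII preamble: 32 consecutive logic ones on MDIO, used to
   (re)synchronize transceivers that require it before each command. */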
static void mdio_sync(void __iomem *mdio_addr)
{
	int bits = 32;

	/* Establish sync by sending at least 32 logic ones. */
	while (--bits >= 0) {
		iowrite32(MDIO_WRITE1, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

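/* Read an MII PHY register: shift the 16-bit read command out MSB first,
   then clock in the turnaround and 16 data bits. */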
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	int i, retval = 0;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the read command bits out. */
	for (i = 15; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 20; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		retval = (retval << 1) | ((ioread32(mdio_addr) & MDIO_DataIn) ? 1 : 0);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	return (retval >> 1) & 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *mdio_addr = np->base_addr + MIICtrl;
	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
	int i;

	if (location == 4 && phy_id == np->phys[0])
		np->mii_if.advertising = value;

	if (mii_preamble_required)
		mdio_sync(mdio_addr);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;

		iowrite32(dataval, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(dataval | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
	/* Clear out extra bits. */
	for (i = 2; i > 0; i--) {
		iowrite32(MDIO_EnbIn, mdio_addr);
		mdio_delay(mdio_addr);
		iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
		mdio_delay(mdio_addr);
	}
}

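/* netdev_open() is called when the interface is brought up. It requests
   the (shared) IRQ, allocates and initializes the descriptor rings,
   programs the chip registers, and starts the media-check timer. */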
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;
	int i;

	iowrite32(0x00000001, ioaddr + PCIBusCfg);	/* Reset */

	netif_device_detach(dev);
	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
	if (i)
		goto out_err;

	if (debug > 1)
		netdev_dbg(dev, "w89c840_open() irq %d\n", irq);

	i = alloc_ringdesc(dev);
	if (i)
		goto out_err;

	spin_lock_irq(&np->lock);
	netif_device_attach(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);

	netif_start_queue(dev);
	if (debug > 2)
		netdev_dbg(dev, "Done netdev_open()\n");

	/* Set the timer to check for link beat. */
	timer_setup(&np->timer, netdev_timer, 0);
	np->timer.expires = jiffies + 1*HZ;
	add_timer(&np->timer);
	return 0;
out_err:
	netif_device_attach(dev);
	return i;
}

#define MII_DAVICOM_DM9101	0x0181b800

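/* Poll the PHY over MII and compute the csr6 value matching the current
   link state (10/100 speed, half/full duplex). Returns the new csr6; the
   caller applies it with update_csr6(). Also updates the net device's
   carrier state. */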
static int update_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int duplex, fasteth, result, mii_reg;

	/* If the device is physically absent the read returns all ones. */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);

	if (mii_reg == 0xffff)
		return np->csr6;

	/* The link status bit is latched; reread to get the current state. */
	mii_reg = mdio_read(dev, np->phys[0], MII_BMSR);
	if (!(mii_reg & 0x4)) {
		if (netif_carrier_ok(dev)) {
			if (debug)
				dev_info(&dev->dev,
					 "MII #%d reports no link. Disabling watchdog\n",
					 np->phys[0]);
			netif_carrier_off(dev);
		}
		return np->csr6;
	}
	if (!netif_carrier_ok(dev)) {
		if (debug)
			dev_info(&dev->dev,
				 "MII #%d link is back. Enabling watchdog\n",
				 np->phys[0]);
		netif_carrier_on(dev);
	}

	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
		/* The Davicom PHY (at least the 0181b800) does not reliably
		 * update the LPA register after parallel detection, so read
		 * speed and duplex directly from the BMCR instead.
		 */
		mii_reg = mdio_read(dev, np->phys[0], MII_BMCR);
		duplex = mii_reg & BMCR_FULLDPLX;
		fasteth = mii_reg & BMCR_SPEED100;
	} else {
		int negotiated;

		mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
		negotiated = mii_reg & np->mii_if.advertising;

		duplex = (negotiated & LPA_100FULL) || ((negotiated & 0x02C0) == LPA_10FULL);
		fasteth = negotiated & 0x380;
	}
	duplex |= np->mii_if.force_media;
	/* Clear the speed and duplex bits, then set them from the MII result. */
	result = np->csr6 & ~0x20000200;
	if (duplex)
		result |= 0x200;
	if (fasteth)
		result |= 0x20000000;
	if (result != np->csr6 && debug)
		dev_info(&dev->dev,
			 "Setting %dMBit-%s-duplex based on MII#%d\n",
			 fasteth ? 100 : 10, duplex ? "full" : "half",
			 np->phys[0]);
	return result;
}

#define RXTX_TIMEOUT	2000
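
/* Write a new csr6 (NetworkConfig) value. The Rx and Tx processes must be
   idle before the register may be changed, so first clear the start bits
   and busy-wait (up to RXTX_TIMEOUT microseconds) until both state
   machines report stopped or suspended, then write the new value. */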
static inline void update_csr6(struct net_device *dev, int new)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int limit = RXTX_TIMEOUT;

	if (!netif_device_present(dev))
		new = 0;
	if (new == np->csr6)
		return;
	/* stop both Rx and Tx processes */
	iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
	/* wait until they have really stopped */
	for (;;) {
		int csr5 = ioread32(ioaddr + IntrStatus);
		int t;

		t = (csr5 >> 17) & 0x07;
		if (t == 0 || t == 1) {
			/* rx stopped */
			t = (csr5 >> 20) & 0x07;
			if (t == 0 || t == 1)
				break;	/* tx stopped */
		}

		limit--;
		if (!limit) {
			dev_info(&dev->dev,
				 "couldn't stop rxtx, IntrStatus %xh\n", csr5);
			break;
		}
		udelay(1);
	}
	np->csr6 = new;
	/* and restart them with the new configuration */
	iowrite32(np->csr6, ioaddr + NetworkConfig);
	if (new & 0x200)
		np->mii_if.full_duplex = 1;
}

static void netdev_timer(struct timer_list *t)
{
	struct netdev_private *np = from_timer(np, t, timer);
	struct net_device *dev = pci_get_drvdata(np->pci_dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		netdev_dbg(dev, "Media selection timer tick, status %08x config %08x\n",
			   ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
	spin_lock_irq(&np->lock);
	update_csr6(dev, update_link(dev));
	spin_unlock_irq(&np->lock);
	np->timer.expires = jiffies + 10*HZ;
	add_timer(&np->timer);
}

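/* Initialize the Rx and Tx descriptor rings. The Tx ring lives directly
   behind the Rx ring inside the single DMA allocation made by
   alloc_ringdesc(); Rx buffers are allocated and handed to the chip here. */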
static void init_rxtx_rings(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int i;

	np->rx_head_desc = &np->rx_ring[0];
	np->tx_ring = (struct w840_tx_desc *)&np->rx_ring[RX_RING_SIZE];

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].length = np->rx_buf_sz;
		np->rx_ring[i].status = 0;
		np->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	np->rx_ring[i-1].length |= DescEndRing;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);

		np->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		np->rx_addr[i] = pci_map_single(np->pci_dev, skb->data,
						np->rx_buf_sz, PCI_DMA_FROMDEVICE);

		np->rx_ring[i].buffer1 = np->rx_addr[i];
		np->rx_ring[i].status = DescOwned;
	}

	np->cur_rx = 0;
	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* Initialize the Tx descriptors. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		np->tx_skbuff[i] = NULL;
		np->tx_ring[i].status = 0;
	}
	np->tx_full = 0;
	np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0;

	iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
	iowrite32(np->ring_dma_addr + sizeof(struct w840_rx_desc)*RX_RING_SIZE,
		  np->base_addr + TxRingPtr);
}

static void free_rxtx_rings(struct netdev_private *np)
{
	int i;

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].status = 0;
		if (np->rx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->rx_addr[i],
					 np->rx_skbuff[i]->len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb(np->rx_skbuff[i]);
		}
		np->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i]) {
			pci_unmap_single(np->pci_dev,
					 np->tx_addr[i],
					 np->tx_skbuff[i]->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb(np->tx_skbuff[i]);
		}
		np->tx_skbuff[i] = NULL;
	}
}

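/* Program the chip registers for operation: the station address, the bus
   configuration in csr0 (cache alignment and burst length, chosen per
   architecture), csr6 via update_csr6(), and the interrupt mask. */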
static void init_registers(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int i;

	for (i = 0; i < 6; i++)
		iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
#ifdef __BIG_ENDIAN
	i = (1<<20);	/* Big-endian descriptors */
#else
	i = 0;
#endif
	i |= (0x04<<2);		/* skip length 4 u32 */
	i |= 0x02;		/* give Rx priority */

	/* Configure the PCI bus bursts and FIFO thresholds.
	   486: Set 8 longword cache alignment, 8 longword burst.
	   586: Set 16 longword cache alignment, no burst limit. */
#if defined(__i386__) && !defined(MODULE)
	/* When not a module we can work around broken '486 PCI boards. */
	if (boot_cpu_data.x86 <= 4) {
		i |= 0x4800;
		dev_info(&dev->dev,
			 "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
	} else {
		i |= 0xE000;
	}
#elif defined(__powerpc__) || defined(__i386__) || defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
	i |= 0xE000;
#elif defined(CONFIG_SPARC) || defined(CONFIG_PARISC) || defined(CONFIG_ARM)
	i |= 0x4800;
#else
	dev_warn(&dev->dev, "unknown CPU architecture, using default csr0 setting\n");
	i |= 0x4800;
#endif
	iowrite32(i, ioaddr + PCIBusCfg);

	np->csr6 = 0;
	/* 128 byte Tx threshold;
	   Transmit on; Receive on; */
	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));

	/* Clear and Enable interrupts by setting the interrupt mask. */
	iowrite32(0x1A0F5, ioaddr + IntrStatus);
	iowrite32(0x1A0F5, ioaddr + IntrEnable);

	iowrite32(0, ioaddr + RxStartDemand);
}

static void tx_timeout(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	const int irq = np->pci_dev->irq;

	dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
		 ioread32(ioaddr + IntrStatus));

	{
		int i;

		printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
		printk(KERN_CONT "\n");
		printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_CONT " %08x", np->tx_ring[i].status);
		printk(KERN_CONT "\n");
	}
	printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
	       np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
	printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr + 0x4C));

	disable_irq(irq);
	spin_lock_irq(&np->lock);
	/*
	 * Under high load dirty_tx and the internal tx descriptor pointer
	 * come out of sync, thus perform a software reset and reinitialize
	 * everything.
	 */
	iowrite32(1, np->base_addr + PCIBusCfg);
	udelay(1);

	free_rxtx_rings(np);
	init_rxtx_rings(dev);
	init_registers(dev);
	spin_unlock_irq(&np->lock);
	enable_irq(irq);

	netif_wake_queue(dev);
	netif_trans_update(dev);
	np->stats.tx_errors++;
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static int alloc_ringdesc(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	np->rx_ring = pci_alloc_consistent(np->pci_dev,
					   sizeof(struct w840_rx_desc)*RX_RING_SIZE +
					   sizeof(struct w840_tx_desc)*TX_RING_SIZE,
					   &np->ring_dma_addr);
	if (!np->rx_ring)
		return -ENOMEM;
	init_rxtx_rings(dev);
	return 0;
}

static void free_ringdesc(struct netdev_private *np)
{
	pci_free_consistent(np->pci_dev,
			    sizeof(struct w840_rx_desc)*RX_RING_SIZE +
			    sizeof(struct w840_tx_desc)*TX_RING_SIZE,
			    np->rx_ring, np->ring_dma_addr);
}

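/* Queue one packet for transmission. Frames longer than TX_BUFLIMIT are
   split across the descriptor's two buffer pointers since one buffer
   field cannot describe the whole frame. np->lock serializes the ring
   indices against the interrupt handler. */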
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	unsigned entry;

	/* Caution: the write order is important here, set the field
	   with the "ownership" bits last. */

	/* Calculate the next Tx descriptor entry. */
	entry = np->cur_tx % TX_RING_SIZE;

	np->tx_addr[entry] = pci_map_single(np->pci_dev,
					    skb->data, skb->len, PCI_DMA_TODEVICE);
	np->tx_skbuff[entry] = skb;

	np->tx_ring[entry].buffer1 = np->tx_addr[entry];
	if (skb->len < TX_BUFLIMIT) {
		np->tx_ring[entry].length = DescWholePkt | skb->len;
	} else {
		int len = skb->len - TX_BUFLIMIT;

		np->tx_ring[entry].buffer2 = np->tx_addr[entry] + TX_BUFLIMIT;
		np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT;
	}
	if (entry == TX_RING_SIZE - 1)
		np->tx_ring[entry].length |= DescEndRing;

	/* Now acquire the irq spinlock.
	 * The difficult race is between increasing np->cur_tx and setting
	 * DescOwned: whichever is done first, the interrupt handler could
	 * either touch a descriptor too early or miss a completion, so
	 * both updates happen under the lock.
	 */
	spin_lock_irq(&np->lock);
	np->cur_tx++;

	wmb();			/* flush length, buffer1, buffer2 */
	np->tx_ring[entry].status = DescOwned;
	wmb();			/* flush status */
	iowrite32(0, np->base_addr + TxStartDemand);
	np->tx_q_bytes += skb->len;

	/* Work around the horrible chip bug by marking the queue as full
	   when we do not have FIFO room for a maximum sized packet. */
	if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
	    ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
		netif_stop_queue(dev);
		wmb();
		np->tx_full = 1;
	}
	spin_unlock_irq(&np->lock);

	if (debug > 4) {
		netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
			   np->cur_tx, entry);
	}
	return NETDEV_TX_OK;
}

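/* Reclaim completed Tx descriptors: account errors and statistics, unmap
   and free the skbs, and wake the queue once it has drained below the
   restart thresholds. Called with np->lock held from interrupt context. */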
static void netdev_tx_done(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
		int entry = np->dirty_tx % TX_RING_SIZE;
		int tx_status = np->tx_ring[entry].status;

		if (tx_status < 0)
			break;
		if (tx_status & 0x8000) {	/* There was an error, log it. */
#ifndef final_version
			if (debug > 1)
				netdev_dbg(dev, "Transmit error, Tx status %08x\n",
					   tx_status);
#endif
			np->stats.tx_errors++;
			if (tx_status & 0x0104)
				np->stats.tx_aborted_errors++;
			if (tx_status & 0x0C80)
				np->stats.tx_carrier_errors++;
			if (tx_status & 0x0200)
				np->stats.tx_window_errors++;
			if (tx_status & 0x0002)
				np->stats.tx_fifo_errors++;
			if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0)
				np->stats.tx_heartbeat_errors++;
		} else {
#ifndef final_version
			if (debug > 3)
				netdev_dbg(dev, "Transmit slot %d ok, Tx status %08x\n",
					   entry, tx_status);
#endif
			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
			np->stats.collisions += (tx_status >> 3) & 15;
			np->stats.tx_packets++;
		}
		/* Free the original skb. */
		pci_unmap_single(np->pci_dev, np->tx_addr[entry],
				 np->tx_skbuff[entry]->len,
				 PCI_DMA_TODEVICE);
		np->tx_q_bytes -= np->tx_skbuff[entry]->len;
		dev_kfree_skb_irq(np->tx_skbuff[entry]);
		np->tx_skbuff[entry] = NULL;
	}
	if (np->tx_full &&
	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
	    np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
		/* The ring is no longer full. */
		np->tx_full = 0;
		wmb();
		netif_wake_queue(dev);
	}
}

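/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */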
static irqreturn_t intr_handler(int irq, void *dev_instance)
{
	struct net_device *dev = (struct net_device *)dev_instance;
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	int work_limit = max_interrupt_work;
	int handled = 0;

	if (!netif_device_present(dev))
		return IRQ_NONE;
	do {
		u32 intr_status = ioread32(ioaddr + IntrStatus);

		/* Acknowledge all of the current interrupt sources ASAP. */
		iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);

		if (debug > 4)
			netdev_dbg(dev, "Interrupt, status %04x\n", intr_status);

		if ((intr_status & (NormalIntr | AbnormalIntr)) == 0)
			break;

		handled = 1;

		if (intr_status & (RxIntr | RxNoBuf))
			netdev_rx(dev);
		if (intr_status & RxNoBuf)
			iowrite32(0, ioaddr + RxStartDemand);

		if (intr_status & (TxNoBuf | TxIntr) &&
		    np->cur_tx != np->dirty_tx) {
			spin_lock(&np->lock);
			netdev_tx_done(dev);
			spin_unlock(&np->lock);
		}

		/* Abnormal error summary/uncommon events handlers. */
		if (intr_status & (AbnormalIntr | TxFIFOUnderflow | SystemError |
				   TimerInt | TxDied))
			netdev_error(dev, intr_status);

		if (--work_limit < 0) {
			dev_warn(&dev->dev,
				 "Too much work at interrupt, status=0x%04x\n",
				 intr_status);
			/* Mask everything except abnormal/timer events and arm
			   the GP timer; netdev_error() restores the full mask
			   when the timer interrupt fires. */
			spin_lock(&np->lock);
			if (netif_device_present(dev)) {
				iowrite32(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
				iowrite32(10, ioaddr + GPTimer);
			}
			spin_unlock(&np->lock);
			break;
		}
	} while (1);

	if (debug > 3)
		netdev_dbg(dev, "exiting interrupt, status=%#4.4x\n",
			   ioread32(ioaddr + IntrStatus));
	return IRQ_RETVAL(handled);
}

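/* This routine is logically part of the interrupt handler, but separated
   for clarity. It drains received packets off the Rx ring, copying small
   frames (below rx_copybreak) into freshly allocated skbs, and then
   refills the ring with new buffers. */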
static int netdev_rx(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

	if (debug > 4) {
		netdev_dbg(dev, " In netdev_rx(), entry %d status %04x\n",
			   entry, np->rx_ring[entry].status);
	}

	/* If EOP is set on the next entry, it's a new packet. Send it up. */
	while (--work_limit >= 0) {
		struct w840_rx_desc *desc = np->rx_head_desc;
		s32 status = desc->status;

		if (debug > 4)
			netdev_dbg(dev, " netdev_rx() status was %08x\n",
				   status);
		if (status < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					dev_warn(&dev->dev,
						 "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
						 np->cur_rx, status);
					np->stats.rx_length_errors++;
				}
			} else if (status & 0x8000) {
				/* There was a fatal error. */
				if (debug > 2)
					netdev_dbg(dev, "Receive error, Rx status %08x\n",
						   status);
				np->stats.rx_errors++;
				if (status & 0x0890)
					np->stats.rx_length_errors++;
				if (status & 0x004C)
					np->stats.rx_frame_errors++;
				if (status & 0x0002)
					np->stats.rx_crc_errors++;
			}
		} else {
			struct sk_buff *skb;
			/* Omit the four octet CRC from the length. */
			int pkt_len = ((status >> 16) & 0x7ff) - 4;

#ifndef final_version
			if (debug > 4)
				netdev_dbg(dev, " netdev_rx() normal Rx pkt length %d status %x\n",
					   pkt_len, status);
#endif
			/* Check if the packet is long enough to accept without
			   copying to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(np->pci_dev, np->rx_addr[entry],
							    np->rx_skbuff[entry]->len,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				pci_dma_sync_single_for_device(np->pci_dev, np->rx_addr[entry],
							       np->rx_skbuff[entry]->len,
							       PCI_DMA_FROMDEVICE);
			} else {
				pci_unmap_single(np->pci_dev, np->rx_addr[entry],
						 np->rx_skbuff[entry]->len,
						 PCI_DMA_FROMDEVICE);
				skb_put(skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			}
#ifndef final_version
			/* You will want this info for the initial debug. */
			if (debug > 5)
				netdev_dbg(dev, " Rx data %pM %pM %02x%02x %pI4\n",
					   &skb->data[0], &skb->data[6],
					   skb->data[12], skb->data[13],
					   &skb->data[14]);
#endif
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
			np->stats.rx_packets++;
			np->stats.rx_bytes += pkt_len;
		}
		entry = (++np->cur_rx) % RX_RING_SIZE;
		np->rx_head_desc = &np->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
		struct sk_buff *skb;

		entry = np->dirty_rx % RX_RING_SIZE;
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb(dev, np->rx_buf_sz);
			np->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			np->rx_addr[entry] = pci_map_single(np->pci_dev,
							    skb->data,
							    np->rx_buf_sz, PCI_DMA_FROMDEVICE);
			np->rx_ring[entry].buffer1 = np->rx_addr[entry];
		}
		wmb();
		np->rx_ring[entry].status = DescOwned;
	}

	return 0;
}

static void netdev_error(struct net_device *dev, int intr_status)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	if (debug > 2)
		netdev_dbg(dev, "Abnormal event, %08x\n", intr_status);
	if (intr_status == 0xffffffff)
		return;
	spin_lock(&np->lock);
	if (intr_status & TxFIFOUnderflow) {
		int new;
		/* Bump up the Tx threshold. */
#if 0
		/* Straight increment of the threshold field (disabled). */
		new = np->csr6 + 0x4000;
#else
		new = (np->csr6 >> 14) & 0x7f;
		if (new < 64)
			new *= 2;
		else
			new = 127;
		new = (np->csr6 & ~(0x7F << 14)) | (new << 14);
#endif
		netdev_dbg(dev, "Tx underflow, new csr6 %08x\n", new);
		update_csr6(dev, new);
	}
	if (intr_status & RxDied) {	/* Missed a Rx frame. */
		np->stats.rx_errors++;
	}
	if (intr_status & TimerInt) {
		/* The GP timer armed by the interrupt handler fired:
		   re-enable the normal interrupt mask. */
		if (netif_device_present(dev))
			iowrite32(0x1A0F5, ioaddr + IntrEnable);
	}
	np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	iowrite32(0, ioaddr + RxStartDemand);
	spin_unlock(&np->lock);
}

static struct net_device_stats *get_stats(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	/* The chip only needs to report silently dropped frames. */
	spin_lock_irq(&np->lock);
	if (netif_running(dev) && netif_device_present(dev))
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
	spin_unlock_irq(&np->lock);

	return &np->stats;
}

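/* Compute the receive-mode bits for csr6 and program the 64-bit multicast
   hash filter. Promiscuous mode accepts everything; more addresses than
   multicast_filter_limit (or IFF_ALLMULTI) opens the filter to all
   multicast frames; otherwise each address sets one hash bit derived from
   the top bits of its CRC. */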
static u32 __set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;
	u32 mc_filter[2];	/* Multicast hash filter */
	u32 rx_mode;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
			| AcceptMyPhys;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to match, or accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	} else {
		struct netdev_hw_addr *ha;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int filbit;

			/* The chip uses the upper 6 bits of the CRC, inverted. */
			filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
			filbit &= 0x3f;
			mc_filter[filbit >> 5] |= 1 << (filbit & 31);
		}
		rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
	}
	iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
	iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
	return rx_mode;
}

static void set_rx_mode(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 rx_mode = __set_rx_mode(dev);

	spin_lock_irq(&np->lock);
	update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode);
	spin_unlock_irq(&np->lock);
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct netdev_private *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return 0;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct netdev_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return mii_nway_restart(&np->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return mii_link_ok(&np->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct mii_ioctl_data *data = if_mii(rq);
	struct netdev_private *np = netdev_priv(dev);

	switch (cmd) {
	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
		data->phy_id = np->phys[0] & 0x1f;
		/* fall through */

	case SIOCGMIIREG:		/* Read MII PHY register. */
		spin_lock_irq(&np->lock);
		data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
		spin_unlock_irq(&np->lock);
		return 0;

	case SIOCSMIIREG:		/* Write MII PHY register. */
		spin_lock_irq(&np->lock);
		mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
		spin_unlock_irq(&np->lock);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int netdev_close(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	netif_stop_queue(dev);

	if (debug > 1) {
		netdev_dbg(dev, "Shutting down ethercard, status was %08x Config %08x\n",
			   ioread32(ioaddr + IntrStatus),
			   ioread32(ioaddr + NetworkConfig));
		netdev_dbg(dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
			   np->cur_tx, np->dirty_tx,
			   np->cur_rx, np->dirty_rx);
	}

	/* Stop the chip's Tx and Rx processes. */
	spin_lock_irq(&np->lock);
	netif_device_detach(dev);
	update_csr6(dev, 0);
	iowrite32(0x0000, ioaddr + IntrEnable);
	spin_unlock_irq(&np->lock);

	free_irq(np->pci_dev->irq, dev);
	wmb();
	netif_device_attach(dev);

	if (ioread32(ioaddr + NetworkConfig) != 0xffffffff)
		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

#ifdef __i386__
	if (debug > 2) {
		int i;

		printk(KERN_DEBUG " Tx ring at %p:\n", np->tx_ring);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->tx_ring[i].length,
			       np->tx_ring[i].status, np->tx_ring[i].buffer1);
		printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring);
		for (i = 0; i < RX_RING_SIZE; i++) {
			printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
			       i, np->rx_ring[i].length,
			       np->rx_ring[i].status, np->rx_ring[i].buffer1);
		}
	}
#endif /* __i386__ debugging only */

	del_timer_sync(&np->timer);

	free_rxtx_rings(np);
	free_ringdesc(np);

	return 0;
}

static void w840_remove1(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct netdev_private *np = netdev_priv(dev);

		unregister_netdev(dev);
		pci_release_regions(pdev);
		pci_iounmap(pdev, np->base_addr);
		free_netdev(dev);
	}
}

#ifdef CONFIG_PM
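
/*
 * Suspend/resume synchronization: every path that touches the hardware
 * either checks netif_device_present() under np->lock or runs under the
 * RTNL lock. Suspend detaches the device with the lock held (so a
 * detached chip can no longer raise an interrupt storm), clears csr6 and
 * IntrEnable, waits out in-flight handlers with synchronize_irq(), and
 * frees the Rx/Tx buffers; resume resets the chip and rebuilds the rings.
 */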
static int w840_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->base_addr;

	rtnl_lock();
	if (netif_running(dev)) {
		del_timer_sync(&np->timer);

		spin_lock_irq(&np->lock);
		netif_device_detach(dev);
		update_csr6(dev, 0);
		iowrite32(0, ioaddr + IntrEnable);
		spin_unlock_irq(&np->lock);

		synchronize_irq(np->pci_dev->irq);
		netif_tx_disable(dev);

		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

		/* no more hardware accesses behind this line. */
		BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));

		free_rxtx_rings(np);
	} else {
		netif_device_detach(dev);
	}
	rtnl_unlock();
	return 0;
}

static int w840_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct netdev_private *np = netdev_priv(dev);
	int retval = 0;

	rtnl_lock();
	if (netif_device_present(dev))
		goto out;	/* device not suspended */
	if (netif_running(dev)) {
		retval = pci_enable_device(pdev);
		if (retval) {
			dev_err(&dev->dev,
				"pci_enable_device failed in resume\n");
			goto out;
		}
		spin_lock_irq(&np->lock);
		iowrite32(1, np->base_addr + PCIBusCfg);
		ioread32(np->base_addr + PCIBusCfg);
		udelay(1);
		netif_device_attach(dev);
		init_rxtx_rings(dev);
		init_registers(dev);
		spin_unlock_irq(&np->lock);

		netif_wake_queue(dev);

		mod_timer(&np->timer, jiffies + 1*HZ);
	} else {
		netif_device_attach(dev);
	}
out:
	rtnl_unlock();
	return retval;
}
#endif

static struct pci_driver w840_driver = {
	.name		= DRV_NAME,
	.id_table	= w840_pci_tbl,
	.probe		= w840_probe1,
	.remove		= w840_remove1,
#ifdef CONFIG_PM
	.suspend	= w840_suspend,
	.resume		= w840_resume,
#endif
};

static int __init w840_init(void)
{
	printk(version);
	return pci_register_driver(&w840_driver);
}

static void __exit w840_exit(void)
{
	pci_unregister_driver(&w840_driver);
}

module_init(w840_init);
module_exit(w840_exit);