This source file includes the following definitions:
- rhine_wait_bit
- rhine_wait_bit_high
- rhine_wait_bit_low
- rhine_get_events
- rhine_ack_events
- rhine_power_init
- rhine_chip_reset
- enable_mmio
- verify_mmio
- rhine_reload_eeprom
- rhine_poll
- rhine_kick_tx_threshold
- rhine_tx_err
- rhine_update_rx_crc_and_missed_errord
- rhine_napipoll
- rhine_hw_init
- rhine_init_one_common
- rhine_init_one_pci
- rhine_init_one_platform
- alloc_ring
- free_ring
- rhine_skb_dma_init
- rhine_reset_rbufs
- rhine_skb_dma_nic_store
- alloc_rbufs
- free_rbufs
- alloc_tbufs
- free_tbufs
- rhine_check_media
- rhine_set_carrier
- rhine_set_cam
- rhine_set_vlan_cam
- rhine_set_cam_mask
- rhine_set_vlan_cam_mask
- rhine_init_cam_filter
- rhine_update_vcam
- rhine_vlan_rx_add_vid
- rhine_vlan_rx_kill_vid
- init_registers
- rhine_enable_linkmon
- rhine_disable_linkmon
- mdio_read
- mdio_write
- rhine_task_disable
- rhine_task_enable
- rhine_open
- rhine_reset_task
- rhine_tx_timeout
- rhine_tx_queue_full
- rhine_start_tx
- rhine_irq_disable
- rhine_interrupt
- rhine_tx
- rhine_get_vlan_tci
- rhine_rx_vlan_tag
- rhine_rx
- rhine_restart_tx
- rhine_slow_event_task
- rhine_get_stats64
- rhine_set_rx_mode
- netdev_get_drvinfo
- netdev_get_link_ksettings
- netdev_set_link_ksettings
- netdev_nway_reset
- netdev_get_link
- netdev_get_msglevel
- netdev_set_msglevel
- rhine_get_wol
- rhine_set_wol
- netdev_ioctl
- rhine_close
- rhine_remove_one_pci
- rhine_remove_one_platform
- rhine_shutdown_pci
- rhine_suspend
- rhine_resume
- rhine_init
- rhine_cleanup
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #define DRV_NAME "via-rhine"
35 #define DRV_VERSION "1.5.1"
36 #define DRV_RELDATE "2010-10-09"
37
38 #include <linux/types.h>
39
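/* 'debug' selects netif_msg_* flags via netif_msg_init(); the
RHINE_MSG_DEFAULT bitmap below applies when the parameter is out of range. */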
42 static int debug = 0;
43 #define RHINE_MSG_DEFAULT \
44 (0x0000)
45
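/* Frames shorter than rx_copybreak are copied into a freshly allocated
skb on receive instead of handing over the DMA buffer; the non-zero
default below targets architectures where unaligned DMA buffers are
costly. */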
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 defined(CONFIG_SPARC) || defined(__ia64__) || \
50 defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
52 #else
53 static int rx_copybreak;
54 #endif
55
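/* Module parameter: skip the D3 power state at shutdown, as a
work-around for broken BIOSes (see MODULE_PARM_DESC below). */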
58 static bool avoid_D3;
59
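/* Maximum number of multicast addresses that are filtered individually
before rhine_set_rx_mode() falls back to accepting all multicast frames. */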
67 static const int multicast_filter_limit = 32;
68
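/* Ring sizes are powers of two so that '% RX_RING_SIZE' and
'% TX_RING_SIZE' compile down to bit masks; TX_QUEUE_LEN limits the
entries actually used so the Tx ring never fills completely. */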
79 #define TX_RING_SIZE 64
80 #define TX_QUEUE_LEN (TX_RING_SIZE - 6)
81 #define RX_RING_SIZE 64
82
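/* Time in jiffies before concluding that the transmitter is hung. */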
86 #define TX_TIMEOUT (2*HZ)
87
88 #define PKT_BUF_SZ 1536
89
90 #include <linux/module.h>
91 #include <linux/moduleparam.h>
92 #include <linux/kernel.h>
93 #include <linux/string.h>
94 #include <linux/timer.h>
95 #include <linux/errno.h>
96 #include <linux/ioport.h>
97 #include <linux/interrupt.h>
98 #include <linux/pci.h>
99 #include <linux/of_device.h>
100 #include <linux/of_irq.h>
101 #include <linux/platform_device.h>
102 #include <linux/dma-mapping.h>
103 #include <linux/netdevice.h>
104 #include <linux/etherdevice.h>
105 #include <linux/skbuff.h>
106 #include <linux/init.h>
107 #include <linux/delay.h>
108 #include <linux/mii.h>
109 #include <linux/ethtool.h>
110 #include <linux/crc32.h>
111 #include <linux/if_vlan.h>
112 #include <linux/bitops.h>
113 #include <linux/workqueue.h>
114 #include <asm/processor.h>
115 #include <asm/io.h>
116 #include <asm/irq.h>
117 #include <linux/uaccess.h>
118 #include <linux/dmi.h>
119
120
121 static const char version[] =
122 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
123
124 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
125 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
126 MODULE_LICENSE("GPL");
127
128 module_param(debug, int, 0);
129 module_param(rx_copybreak, int, 0);
130 module_param(avoid_D3, bool, 0);
131 MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
132 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
133 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
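/* Example invocation (hypothetical values):
modprobe via-rhine debug=16 rx_copybreak=200 avoid_D3=1 */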
134
135 #define MCAM_SIZE 32
136 #define VCAM_SIZE 32
137
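/* Chip revision codes, matched against the PCI revision ID during probe. */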
238 enum rhine_revs {
239 VT86C100A = 0x00,
240 VTunknown0 = 0x20,
241 VT6102 = 0x40,
242 VT8231 = 0x50,
243 VT8233 = 0x60,
244 VT8235 = 0x74,
245 VT8237 = 0x78,
246 VTunknown1 = 0x7C,
247 VT6105 = 0x80,
248 VT6105_B0 = 0x83,
249 VT6105L = 0x8A,
250 VT6107 = 0x8C,
251 VTunknown2 = 0x8E,
252 VT6105M = 0x90,
253 };
254
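/* Quirk flags describing per-revision chip features and bugs,
OR-ed together into rp->quirks. */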
255 enum rhine_quirks {
256 rqWOL = 0x0001,
257 rqForceReset = 0x0002,
258 rq6patterns = 0x0040,
259 rqStatusWBRace = 0x0080,
260 rqRhineI = 0x0100,
261 rqIntPHY = 0x0200,
262 rqMgmt = 0x0400,
263 rqNeedEnMMIO = 0x0800,
267 };
268
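/* Force a dummy register read to flush PCI posted writes. */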
275 #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
276
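/* Vendor 0x1106 is VIA; devices 0x3043, 0x3065, 0x3106 and 0x3053
are the VT86C100A, VT6102, VT6105 and VT6105M families. */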
277 static const struct pci_device_id rhine_pci_tbl[] = {
278 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, },
279 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, },
280 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, },
281 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, },
282 { }
283 };
284 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
285
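/* OF match table for the Rhine core integrated into VT8500-family SoCs. */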
289 static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
290 static const struct of_device_id rhine_of_tbl[] = {
291 { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
292 { }
293 };
294 MODULE_DEVICE_TABLE(of, rhine_of_tbl);
295
296
297 enum register_offsets {
298 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
299 ChipCmd1=0x09, TQWake=0x0A,
300 IntrStatus=0x0C, IntrEnable=0x0E,
301 MulticastFilter0=0x10, MulticastFilter1=0x14,
302 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
303 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
304 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
305 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
306 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
307 StickyHW=0x83, IntrStatus2=0x84,
308 CamMask=0x88, CamCon=0x92, CamAddr=0x93,
309 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
310 WOLcrClr1=0xA6, WOLcgClr=0xA7,
311 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
312 };
313
314
315 enum backoff_bits {
316 BackOptional=0x01, BackModify=0x02,
317 BackCaptureEffect=0x04, BackRandom=0x08
318 };
319
320
321 enum tcr_bits {
322 TCR_PQEN=0x01,
323 TCR_LB0=0x02,
324 TCR_LB1=0x04,
325 TCR_OFSET=0x08,
326 TCR_RTGOPT=0x10,
327 TCR_RTFT0=0x20,
328 TCR_RTFT1=0x40,
329 TCR_RTSF=0x80,
330 };
331
332
333 enum camcon_bits {
334 CAMC_CAMEN=0x01,
335 CAMC_VCAMSL=0x02,
336 CAMC_CAMWR=0x04,
337 CAMC_CAMRD=0x08,
338 };
339
340
341 enum bcr1_bits {
342 BCR1_POT0=0x01,
343 BCR1_POT1=0x02,
344 BCR1_POT2=0x04,
345 BCR1_CTFT0=0x08,
346 BCR1_CTFT1=0x10,
347 BCR1_CTSF=0x20,
348 BCR1_TXQNOBK=0x40,
349 BCR1_VIDFR=0x80,
350 BCR1_MED0=0x40,
351 BCR1_MED1=0x80,
352 };
353
354
355 static const int mmio_verify_registers[] = {
356 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
357 0
358 };
359
360
361 enum intr_status_bits {
362 IntrRxDone = 0x0001,
363 IntrTxDone = 0x0002,
364 IntrRxErr = 0x0004,
365 IntrTxError = 0x0008,
366 IntrRxEmpty = 0x0020,
367 IntrPCIErr = 0x0040,
368 IntrStatsMax = 0x0080,
369 IntrRxEarly = 0x0100,
370 IntrTxUnderrun = 0x0210,
371 IntrRxOverflow = 0x0400,
372 IntrRxDropped = 0x0800,
373 IntrRxNoBuf = 0x1000,
374 IntrTxAborted = 0x2000,
375 IntrLinkChange = 0x4000,
376 IntrRxWakeUp = 0x8000,
377 IntrTxDescRace = 0x080000,
378 IntrNormalSummary = IntrRxDone | IntrTxDone,
379 IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
380 IntrTxUnderrun,
381 };
382
383
384 enum wol_bits {
385 WOLucast = 0x10,
386 WOLmagic = 0x20,
387 WOLbmcast = 0x30,
388 WOLlnkon = 0x40,
389 WOLlnkoff = 0x80,
390 };
391
392
393 struct rx_desc {
394 __le32 rx_status;
395 __le32 desc_length;
396 __le32 addr;
397 __le32 next_desc;
398 };
399 struct tx_desc {
400 __le32 tx_status;
401 __le32 desc_length;
402 __le32 addr;
403 __le32 next_desc;
404 };
405
406
407 #define TXDESC 0x00e08000
408
409 enum rx_status_bits {
410 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
411 };
412
413
414 enum desc_status_bits {
415 DescOwn=0x80000000
416 };
417
418
419 enum desc_length_bits {
420 DescTag=0x00010000
421 };
422
423
424 enum chip_cmd_bits {
425 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
426 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
427 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
428 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
429 };
430
431 struct rhine_stats {
432 u64 packets;
433 u64 bytes;
434 struct u64_stats_sync syncp;
435 };
436
437 struct rhine_private {
438
439 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
440
441
442 struct rx_desc *rx_ring;
443 struct tx_desc *tx_ring;
444 dma_addr_t rx_ring_dma;
445 dma_addr_t tx_ring_dma;
446
447
448 struct sk_buff *rx_skbuff[RX_RING_SIZE];
449 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
450
451
452 struct sk_buff *tx_skbuff[TX_RING_SIZE];
453 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
454
455
456 unsigned char *tx_buf[TX_RING_SIZE];
457 unsigned char *tx_bufs;
458 dma_addr_t tx_bufs_dma;
459
460 int irq;
461 long pioaddr;
462 struct net_device *dev;
463 struct napi_struct napi;
464 spinlock_t lock;
465 struct mutex task_lock;
466 bool task_enable;
467 struct work_struct slow_event_task;
468 struct work_struct reset_task;
469
470 u32 msg_enable;
471
472
473 u32 quirks;
474 unsigned int cur_rx;
475 unsigned int cur_tx, dirty_tx;
476 unsigned int rx_buf_sz;
477 struct rhine_stats rx_stats;
478 struct rhine_stats tx_stats;
479 u8 wolopts;
480
481 u8 tx_thresh, rx_thresh;
482
483 struct mii_if_info mii_if;
484 void __iomem *base;
485 };
486
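/* Read-modify-write helpers for 8/16/32-bit registers: set bits,
test bits, clear bits, or update a masked field. */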
487 #define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
488 #define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
489 #define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
490
491 #define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
492 #define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
493 #define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
494
495 #define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
496 #define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
497 #define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
498
499 #define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
500 #define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
501 #define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
502
503
504 static int mdio_read(struct net_device *dev, int phy_id, int location);
505 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
506 static int rhine_open(struct net_device *dev);
507 static void rhine_reset_task(struct work_struct *work);
508 static void rhine_slow_event_task(struct work_struct *work);
509 static void rhine_tx_timeout(struct net_device *dev);
510 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
511 struct net_device *dev);
512 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
513 static void rhine_tx(struct net_device *dev);
514 static int rhine_rx(struct net_device *dev, int limit);
515 static void rhine_set_rx_mode(struct net_device *dev);
516 static void rhine_get_stats64(struct net_device *dev,
517 struct rtnl_link_stats64 *stats);
518 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
519 static const struct ethtool_ops netdev_ethtool_ops;
520 static int rhine_close(struct net_device *dev);
521 static int rhine_vlan_rx_add_vid(struct net_device *dev,
522 __be16 proto, u16 vid);
523 static int rhine_vlan_rx_kill_vid(struct net_device *dev,
524 __be16 proto, u16 vid);
525 static void rhine_restart_tx(struct net_device *dev);
526
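/* Poll up to 1024 * 10us for 'mask' bits in register 'reg' to go high
(low == false) or low (low == true); waits of more than 64 iterations
are reported via netif_dbg(). */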
527 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool low)
528 {
529 void __iomem *ioaddr = rp->base;
530 int i;
531
532 for (i = 0; i < 1024; i++) {
533 bool has_mask_bits = !!(ioread8(ioaddr + reg) & mask);
534
535 if (low ^ has_mask_bits)
536 break;
537 udelay(10);
538 }
539 if (i > 64) {
540 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
541 "count: %04d\n", low ? "low" : "high", reg, mask, i);
542 }
543 }
544
545 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
546 {
547 rhine_wait_bit(rp, reg, mask, false);
548 }
549
550 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
551 {
552 rhine_wait_bit(rp, reg, mask, true);
553 }
554
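/* Gather pending interrupt events; chips with rqStatusWBRace keep the
high status bits (IntrTxDescRace) in a separate IntrStatus2 register. */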
555 static u32 rhine_get_events(struct rhine_private *rp)
556 {
557 void __iomem *ioaddr = rp->base;
558 u32 intr_status;
559
560 intr_status = ioread16(ioaddr + IntrStatus);
561
562 if (rp->quirks & rqStatusWBRace)
563 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
564 return intr_status;
565 }
566
567 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
568 {
569 void __iomem *ioaddr = rp->base;
570
571 if (rp->quirks & rqStatusWBRace)
572 iowrite8(mask >> 16, ioaddr + IntrStatus2);
573 iowrite16(mask, ioaddr + IntrStatus);
574 }
575
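/* On chips with WOL support, clear wake-up state left over from a
previous boot and report why the system was woken up. */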
580 static void rhine_power_init(struct net_device *dev)
581 {
582 struct rhine_private *rp = netdev_priv(dev);
583 void __iomem *ioaddr = rp->base;
584 u16 wolstat;
585
586 if (rp->quirks & rqWOL) {
587
588 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
589
590
591 iowrite8(0x80, ioaddr + WOLcgClr);
592
593
594 iowrite8(0xFF, ioaddr + WOLcrClr);
595
596 if (rp->quirks & rq6patterns)
597 iowrite8(0x03, ioaddr + WOLcrClr1);
598
599
600 wolstat = ioread8(ioaddr + PwrcsrSet);
601 if (rp->quirks & rq6patterns)
602 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
603
604
605 iowrite8(0xFF, ioaddr + PwrcsrClr);
606 if (rp->quirks & rq6patterns)
607 iowrite8(0x03, ioaddr + PwrcsrClr1);
608
609 if (wolstat) {
610 char *reason;
611 switch (wolstat) {
612 case WOLmagic:
613 reason = "Magic packet";
614 break;
615 case WOLlnkon:
616 reason = "Link went up";
617 break;
618 case WOLlnkoff:
619 reason = "Link went down";
620 break;
621 case WOLucast:
622 reason = "Unicast packet";
623 break;
624 case WOLbmcast:
625 reason = "Multicast/broadcast packet";
626 break;
627 default:
628 reason = "Unknown";
629 }
630 netdev_info(dev, "Woke system up. Reason: %s\n",
631 reason);
632 }
633 }
634 }
635
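/* Soft-reset the chip; if the first attempt does not complete, force
the reset through MiscCmd on rqForceReset chips and wait for Cmd1Reset
to clear. */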
636 static void rhine_chip_reset(struct net_device *dev)
637 {
638 struct rhine_private *rp = netdev_priv(dev);
639 void __iomem *ioaddr = rp->base;
640 u8 cmd1;
641
642 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
643 IOSYNC;
644
645 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
646 netdev_info(dev, "Reset not complete yet. Trying harder.\n");
647
648
649 if (rp->quirks & rqForceReset)
650 iowrite8(0x40, ioaddr + MiscCmd);
651
652
653 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
654 }
655
656 cmd1 = ioread8(ioaddr + ChipCmd1);
657 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
658 "failed" : "succeeded");
659 }
660
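/* Enable memory-mapped I/O where it must be turned on explicitly:
ConfigA bit 5 on Rhine-I, ConfigD bit 7 on later chips. */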
661 static void enable_mmio(long pioaddr, u32 quirks)
662 {
663 int n;
664
665 if (quirks & rqNeedEnMMIO) {
666 if (quirks & rqRhineI) {
667
668 n = inb(pioaddr + ConfigA) | 0x20;
669 outb(n, pioaddr + ConfigA);
670 } else {
671 n = inb(pioaddr + ConfigD) | 0x80;
672 outb(n, pioaddr + ConfigD);
673 }
674 }
675 }
676
677 static inline int verify_mmio(struct device *hwdev,
678 long pioaddr,
679 void __iomem *ioaddr,
680 u32 quirks)
681 {
682 if (quirks & rqNeedEnMMIO) {
683 int i = 0;
684
685
686 while (mmio_verify_registers[i]) {
687 int reg = mmio_verify_registers[i++];
688 unsigned char a = inb(pioaddr+reg);
689 unsigned char b = readb(ioaddr+reg);
690
691 if (a != b) {
692 dev_err(hwdev,
693 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
694 reg, a, b);
695 return -EIO;
696 }
697 }
698 }
699 return 0;
700 }
701
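/* Ask the chip to reload its station address and configuration from
the EEPROM; port I/O is used because the reload clears the MMIO
enable, which is restored below. */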
706 static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
707 {
708 struct rhine_private *rp = netdev_priv(dev);
709 void __iomem *ioaddr = rp->base;
710 int i;
711
712 outb(0x20, pioaddr + MACRegEEcsr);
713 for (i = 0; i < 1024; i++) {
714 if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
715 break;
716 }
717 if (i > 512)
718 pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
719
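/* The EEPROM reload overwrites the Config registers, so MMIO has to
be re-enabled before 'ioaddr' is usable again. */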
725 enable_mmio(pioaddr, rp->quirks);
726
727
728 if (rp->quirks & rqWOL)
729 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
730
731 }
732
733 #ifdef CONFIG_NET_POLL_CONTROLLER
734 static void rhine_poll(struct net_device *dev)
735 {
736 struct rhine_private *rp = netdev_priv(dev);
737 const int irq = rp->irq;
738
739 disable_irq(irq);
740 rhine_interrupt(irq, dev);
741 enable_irq(irq);
742 }
743 #endif
744
745 static void rhine_kick_tx_threshold(struct rhine_private *rp)
746 {
747 if (rp->tx_thresh < 0xe0) {
748 void __iomem *ioaddr = rp->base;
749
750 rp->tx_thresh += 0x20;
751 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
752 }
753 }
754
755 static void rhine_tx_err(struct rhine_private *rp, u32 status)
756 {
757 struct net_device *dev = rp->dev;
758
759 if (status & IntrTxAborted) {
760 netif_info(rp, tx_err, dev,
761 "Abort %08x, frame dropped\n", status);
762 }
763
764 if (status & IntrTxUnderrun) {
765 rhine_kick_tx_threshold(rp);
766 netif_info(rp, tx_err, dev, "Transmitter underrun, "
767 "Tx threshold now %02x\n", rp->tx_thresh);
768 }
769
770 if (status & IntrTxDescRace)
771 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
772
773 if ((status & IntrTxError) &&
774 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
775 rhine_kick_tx_threshold(rp);
776 netif_info(rp, tx_err, dev, "Unspecified error. "
777 "Tx threshold now %02x\n", rp->tx_thresh);
778 }
779
780 rhine_restart_tx(dev);
781 }
782
783 static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
784 {
785 void __iomem *ioaddr = rp->base;
786 struct net_device_stats *stats = &rp->dev->stats;
787
788 stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
789 stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
790
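/* Reset the tally counters: some chip revisions apparently clear them
on read, others on a write of zero, so the driver does both. */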
797 iowrite32(0, ioaddr + RxMissed);
798 ioread16(ioaddr + RxCRCErrs);
799 ioread16(ioaddr + RxMissed);
800 }
801
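/* Interrupt events handled inline by NAPI polling versus those
deferred to the slow_event_task work item. */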
802 #define RHINE_EVENT_NAPI_RX (IntrRxDone | \
803 IntrRxErr | \
804 IntrRxEmpty | \
805 IntrRxOverflow | \
806 IntrRxDropped | \
807 IntrRxNoBuf | \
808 IntrRxWakeUp)
809
810 #define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
811 IntrTxAborted | \
812 IntrTxUnderrun | \
813 IntrTxDescRace)
814 #define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
815
816 #define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \
817 RHINE_EVENT_NAPI_TX | \
818 IntrStatsMax)
819 #define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange)
820 #define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
821
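/* NAPI poll handler: acknowledge and service Rx/Tx events, punt
RHINE_EVENT_SLOW to slow_event_task, and re-enable interrupts once
less than the full budget was consumed. */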
822 static int rhine_napipoll(struct napi_struct *napi, int budget)
823 {
824 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
825 struct net_device *dev = rp->dev;
826 void __iomem *ioaddr = rp->base;
827 u16 enable_mask = RHINE_EVENT & 0xffff;
828 int work_done = 0;
829 u32 status;
830
831 status = rhine_get_events(rp);
832 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
833
834 if (status & RHINE_EVENT_NAPI_RX)
835 work_done += rhine_rx(dev, budget);
836
837 if (status & RHINE_EVENT_NAPI_TX) {
838 if (status & RHINE_EVENT_NAPI_TX_ERR) {
839
840 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
841 if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
842 netif_warn(rp, tx_err, dev, "Tx still on\n");
843 }
844
845 rhine_tx(dev);
846
847 if (status & RHINE_EVENT_NAPI_TX_ERR)
848 rhine_tx_err(rp, status);
849 }
850
851 if (status & IntrStatsMax) {
852 spin_lock(&rp->lock);
853 rhine_update_rx_crc_and_missed_errord(rp);
854 spin_unlock(&rp->lock);
855 }
856
857 if (status & RHINE_EVENT_SLOW) {
858 enable_mask &= ~RHINE_EVENT_SLOW;
859 schedule_work(&rp->slow_event_task);
860 }
861
862 if (work_done < budget) {
863 napi_complete_done(napi, work_done);
864 iowrite16(enable_mask, ioaddr + IntrEnable);
865 }
866 return work_done;
867 }
868
869 static void rhine_hw_init(struct net_device *dev, long pioaddr)
870 {
871 struct rhine_private *rp = netdev_priv(dev);
872
873
874 rhine_chip_reset(dev);
875
876
877 if (rp->quirks & rqRhineI)
878 msleep(5);
879
880
881 if (dev_is_pci(dev->dev.parent))
882 rhine_reload_eeprom(pioaddr, dev);
883 }
884
885 static const struct net_device_ops rhine_netdev_ops = {
886 .ndo_open = rhine_open,
887 .ndo_stop = rhine_close,
888 .ndo_start_xmit = rhine_start_tx,
889 .ndo_get_stats64 = rhine_get_stats64,
890 .ndo_set_rx_mode = rhine_set_rx_mode,
891 .ndo_validate_addr = eth_validate_addr,
892 .ndo_set_mac_address = eth_mac_addr,
893 .ndo_do_ioctl = netdev_ioctl,
894 .ndo_tx_timeout = rhine_tx_timeout,
895 .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
896 .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
897 #ifdef CONFIG_NET_POLL_CONTROLLER
898 .ndo_poll_controller = rhine_poll,
899 #endif
900 };
901
902 static int rhine_init_one_common(struct device *hwdev, u32 quirks,
903 long pioaddr, void __iomem *ioaddr, int irq)
904 {
905 struct net_device *dev;
906 struct rhine_private *rp;
907 int i, rc, phy_id;
908 const char *name;
909
910
911 rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
912 if (rc) {
913 dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
914 goto err_out;
915 }
916
917 dev = alloc_etherdev(sizeof(struct rhine_private));
918 if (!dev) {
919 rc = -ENOMEM;
920 goto err_out;
921 }
922 SET_NETDEV_DEV(dev, hwdev);
923
924 rp = netdev_priv(dev);
925 rp->dev = dev;
926 rp->quirks = quirks;
927 rp->pioaddr = pioaddr;
928 rp->base = ioaddr;
929 rp->irq = irq;
930 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
931
932 phy_id = rp->quirks & rqIntPHY ? 1 : 0;
933
934 u64_stats_init(&rp->tx_stats.syncp);
935 u64_stats_init(&rp->rx_stats.syncp);
936
937
938 rhine_power_init(dev);
939 rhine_hw_init(dev, pioaddr);
940
941 for (i = 0; i < 6; i++)
942 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
943
944 if (!is_valid_ether_addr(dev->dev_addr)) {
945
946 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
947 eth_hw_addr_random(dev);
948 netdev_info(dev, "Using random MAC address: %pM\n",
949 dev->dev_addr);
950 }
951
952
953 if (!phy_id)
954 phy_id = ioread8(ioaddr + 0x6C);
955
956 spin_lock_init(&rp->lock);
957 mutex_init(&rp->task_lock);
958 INIT_WORK(&rp->reset_task, rhine_reset_task);
959 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
960
961 rp->mii_if.dev = dev;
962 rp->mii_if.mdio_read = mdio_read;
963 rp->mii_if.mdio_write = mdio_write;
964 rp->mii_if.phy_id_mask = 0x1f;
965 rp->mii_if.reg_num_mask = 0x1f;
966
967
968 dev->netdev_ops = &rhine_netdev_ops;
969 dev->ethtool_ops = &netdev_ethtool_ops;
970 dev->watchdog_timeo = TX_TIMEOUT;
971
972 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
973
974 if (rp->quirks & rqRhineI)
975 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
976
977 if (rp->quirks & rqMgmt)
978 dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
979 NETIF_F_HW_VLAN_CTAG_RX |
980 NETIF_F_HW_VLAN_CTAG_FILTER;
981
982
983 rc = register_netdev(dev);
984 if (rc)
985 goto err_out_free_netdev;
986
987 if (rp->quirks & rqRhineI)
988 name = "Rhine";
989 else if (rp->quirks & rqStatusWBRace)
990 name = "Rhine II";
991 else if (rp->quirks & rqMgmt)
992 name = "Rhine III (Management Adapter)";
993 else
994 name = "Rhine III";
995
996 netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
997 name, ioaddr, dev->dev_addr, rp->irq);
998
999 dev_set_drvdata(hwdev, dev);
1000
1001 {
1002 u16 mii_cmd;
1003 int mii_status = mdio_read(dev, phy_id, 1);
1004 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1005 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1006 if (mii_status != 0xffff && mii_status != 0x0000) {
1007 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1008 netdev_info(dev,
1009 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1010 phy_id,
1011 mii_status, rp->mii_if.advertising,
1012 mdio_read(dev, phy_id, 5));
1013
1014
1015 if (mii_status & BMSR_LSTATUS)
1016 netif_carrier_on(dev);
1017 else
1018 netif_carrier_off(dev);
1019
1020 }
1021 }
1022 rp->mii_if.phy_id = phy_id;
1023 if (avoid_D3)
1024 netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1025
1026 return 0;
1027
1028 err_out_free_netdev:
1029 free_netdev(dev);
1030 err_out:
1031 return rc;
1032 }
1033
1034 static int rhine_init_one_pci(struct pci_dev *pdev,
1035 const struct pci_device_id *ent)
1036 {
1037 struct device *hwdev = &pdev->dev;
1038 int rc;
1039 long pioaddr, memaddr;
1040 void __iomem *ioaddr;
1041 int io_size = pdev->revision < VTunknown0 ? 128 : 256;
1042
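/* CONFIG_VIA_RHINE_MMIO selects memory-mapped register access; the
default is port I/O, which is presumably safer on the oldest chips. */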
1048 #ifdef CONFIG_VIA_RHINE_MMIO
1049 u32 quirks = rqNeedEnMMIO;
1050 #else
1051 u32 quirks = 0;
1052 #endif
1053
1054
1055 #ifndef MODULE
1056 pr_info_once("%s\n", version);
1057 #endif
1058
1059 rc = pci_enable_device(pdev);
1060 if (rc)
1061 goto err_out;
1062
1063 if (pdev->revision < VTunknown0) {
1064 quirks |= rqRhineI;
1065 } else if (pdev->revision >= VT6102) {
1066 quirks |= rqWOL | rqForceReset;
1067 if (pdev->revision < VT6105) {
1068 quirks |= rqStatusWBRace;
1069 } else {
1070 quirks |= rqIntPHY;
1071 if (pdev->revision >= VT6105_B0)
1072 quirks |= rq6patterns;
1073 if (pdev->revision >= VT6105M)
1074 quirks |= rqMgmt;
1075 }
1076 }
1077
1078
1079 if ((pci_resource_len(pdev, 0) < io_size) ||
1080 (pci_resource_len(pdev, 1) < io_size)) {
1081 rc = -EIO;
1082 dev_err(hwdev, "Insufficient PCI resources, aborting\n");
1083 goto err_out_pci_disable;
1084 }
1085
1086 pioaddr = pci_resource_start(pdev, 0);
1087 memaddr = pci_resource_start(pdev, 1);
1088
1089 pci_set_master(pdev);
1090
1091 rc = pci_request_regions(pdev, DRV_NAME);
1092 if (rc)
1093 goto err_out_pci_disable;
1094
1095 ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
1096 if (!ioaddr) {
1097 rc = -EIO;
1098 dev_err(hwdev,
1099 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
1100 dev_name(hwdev), io_size, memaddr);
1101 goto err_out_free_res;
1102 }
1103
1104 enable_mmio(pioaddr, quirks);
1105
1106 rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
1107 if (rc)
1108 goto err_out_unmap;
1109
1110 rc = rhine_init_one_common(&pdev->dev, quirks,
1111 pioaddr, ioaddr, pdev->irq);
1112 if (!rc)
1113 return 0;
1114
1115 err_out_unmap:
1116 pci_iounmap(pdev, ioaddr);
1117 err_out_free_res:
1118 pci_release_regions(pdev);
1119 err_out_pci_disable:
1120 pci_disable_device(pdev);
1121 err_out:
1122 return rc;
1123 }
1124
1125 static int rhine_init_one_platform(struct platform_device *pdev)
1126 {
1127 const struct of_device_id *match;
1128 const u32 *quirks;
1129 int irq;
1130 void __iomem *ioaddr;
1131
1132 match = of_match_device(rhine_of_tbl, &pdev->dev);
1133 if (!match)
1134 return -EINVAL;
1135
1136 ioaddr = devm_platform_ioremap_resource(pdev, 0);
1137 if (IS_ERR(ioaddr))
1138 return PTR_ERR(ioaddr);
1139
1140 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
1141 if (!irq)
1142 return -EINVAL;
1143
1144 quirks = match->data;
1145 if (!quirks)
1146 return -EINVAL;
1147
1148 return rhine_init_one_common(&pdev->dev, *quirks,
1149 (long)ioaddr, ioaddr, irq);
1150 }
1151
1152 static int alloc_ring(struct net_device* dev)
1153 {
1154 struct rhine_private *rp = netdev_priv(dev);
1155 struct device *hwdev = dev->dev.parent;
1156 void *ring;
1157 dma_addr_t ring_dma;
1158
1159 ring = dma_alloc_coherent(hwdev,
1160 RX_RING_SIZE * sizeof(struct rx_desc) +
1161 TX_RING_SIZE * sizeof(struct tx_desc),
1162 &ring_dma,
1163 GFP_ATOMIC);
1164 if (!ring) {
1165 netdev_err(dev, "Could not allocate DMA memory\n");
1166 return -ENOMEM;
1167 }
1168 if (rp->quirks & rqRhineI) {
1169 rp->tx_bufs = dma_alloc_coherent(hwdev,
1170 PKT_BUF_SZ * TX_RING_SIZE,
1171 &rp->tx_bufs_dma,
1172 GFP_ATOMIC);
1173 if (rp->tx_bufs == NULL) {
1174 dma_free_coherent(hwdev,
1175 RX_RING_SIZE * sizeof(struct rx_desc) +
1176 TX_RING_SIZE * sizeof(struct tx_desc),
1177 ring, ring_dma);
1178 return -ENOMEM;
1179 }
1180 }
1181
1182 rp->rx_ring = ring;
1183 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1184 rp->rx_ring_dma = ring_dma;
1185 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1186
1187 return 0;
1188 }
1189
1190 static void free_ring(struct net_device* dev)
1191 {
1192 struct rhine_private *rp = netdev_priv(dev);
1193 struct device *hwdev = dev->dev.parent;
1194
1195 dma_free_coherent(hwdev,
1196 RX_RING_SIZE * sizeof(struct rx_desc) +
1197 TX_RING_SIZE * sizeof(struct tx_desc),
1198 rp->rx_ring, rp->rx_ring_dma);
1199 rp->tx_ring = NULL;
1200
1201 if (rp->tx_bufs)
1202 dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
1203 rp->tx_bufs, rp->tx_bufs_dma);
1204
1205 rp->tx_bufs = NULL;
1206
1207 }
1208
1209 struct rhine_skb_dma {
1210 struct sk_buff *skb;
1211 dma_addr_t dma;
1212 };
1213
1214 static inline int rhine_skb_dma_init(struct net_device *dev,
1215 struct rhine_skb_dma *sd)
1216 {
1217 struct rhine_private *rp = netdev_priv(dev);
1218 struct device *hwdev = dev->dev.parent;
1219 const int size = rp->rx_buf_sz;
1220
1221 sd->skb = netdev_alloc_skb(dev, size);
1222 if (!sd->skb)
1223 return -ENOMEM;
1224
1225 sd->dma = dma_map_single(hwdev, sd->skb->data, size, DMA_FROM_DEVICE);
1226 if (unlikely(dma_mapping_error(hwdev, sd->dma))) {
1227 netif_err(rp, drv, dev, "Rx DMA mapping failure\n");
1228 dev_kfree_skb_any(sd->skb);
1229 return -EIO;
1230 }
1231
1232 return 0;
1233 }
1234
1235 static void rhine_reset_rbufs(struct rhine_private *rp)
1236 {
1237 int i;
1238
1239 rp->cur_rx = 0;
1240
1241 for (i = 0; i < RX_RING_SIZE; i++)
1242 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1243 }
1244
1245 static inline void rhine_skb_dma_nic_store(struct rhine_private *rp,
1246 struct rhine_skb_dma *sd, int entry)
1247 {
1248 rp->rx_skbuff_dma[entry] = sd->dma;
1249 rp->rx_skbuff[entry] = sd->skb;
1250
1251 rp->rx_ring[entry].addr = cpu_to_le32(sd->dma);
1252 dma_wmb();
1253 }
1254
1255 static void free_rbufs(struct net_device* dev);
1256
1257 static int alloc_rbufs(struct net_device *dev)
1258 {
1259 struct rhine_private *rp = netdev_priv(dev);
1260 dma_addr_t next;
1261 int rc, i;
1262
1263 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1264 next = rp->rx_ring_dma;
1265
1266
1267 for (i = 0; i < RX_RING_SIZE; i++) {
1268 rp->rx_ring[i].rx_status = 0;
1269 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1270 next += sizeof(struct rx_desc);
1271 rp->rx_ring[i].next_desc = cpu_to_le32(next);
1272 rp->rx_skbuff[i] = NULL;
1273 }
1274
1275 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1276
1277
1278 for (i = 0; i < RX_RING_SIZE; i++) {
1279 struct rhine_skb_dma sd;
1280
1281 rc = rhine_skb_dma_init(dev, &sd);
1282 if (rc < 0) {
1283 free_rbufs(dev);
1284 goto out;
1285 }
1286
1287 rhine_skb_dma_nic_store(rp, &sd, i);
1288 }
1289
1290 rhine_reset_rbufs(rp);
1291 out:
1292 return rc;
1293 }
1294
1295 static void free_rbufs(struct net_device* dev)
1296 {
1297 struct rhine_private *rp = netdev_priv(dev);
1298 struct device *hwdev = dev->dev.parent;
1299 int i;
1300
1301
1302 for (i = 0; i < RX_RING_SIZE; i++) {
1303 rp->rx_ring[i].rx_status = 0;
1304 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0);
1305 if (rp->rx_skbuff[i]) {
1306 dma_unmap_single(hwdev,
1307 rp->rx_skbuff_dma[i],
1308 rp->rx_buf_sz, DMA_FROM_DEVICE);
1309 dev_kfree_skb(rp->rx_skbuff[i]);
1310 }
1311 rp->rx_skbuff[i] = NULL;
1312 }
1313 }
1314
1315 static void alloc_tbufs(struct net_device* dev)
1316 {
1317 struct rhine_private *rp = netdev_priv(dev);
1318 dma_addr_t next;
1319 int i;
1320
1321 rp->dirty_tx = rp->cur_tx = 0;
1322 next = rp->tx_ring_dma;
1323 for (i = 0; i < TX_RING_SIZE; i++) {
1324 rp->tx_skbuff[i] = NULL;
1325 rp->tx_ring[i].tx_status = 0;
1326 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1327 next += sizeof(struct tx_desc);
1328 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1329 if (rp->quirks & rqRhineI)
1330 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1331 }
1332 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1333
1334 netdev_reset_queue(dev);
1335 }
1336
1337 static void free_tbufs(struct net_device* dev)
1338 {
1339 struct rhine_private *rp = netdev_priv(dev);
1340 struct device *hwdev = dev->dev.parent;
1341 int i;
1342
1343 for (i = 0; i < TX_RING_SIZE; i++) {
1344 rp->tx_ring[i].tx_status = 0;
1345 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1346 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0);
1347 if (rp->tx_skbuff[i]) {
1348 if (rp->tx_skbuff_dma[i]) {
1349 dma_unmap_single(hwdev,
1350 rp->tx_skbuff_dma[i],
1351 rp->tx_skbuff[i]->len,
1352 DMA_TO_DEVICE);
1353 }
1354 dev_kfree_skb(rp->tx_skbuff[i]);
1355 }
1356 rp->tx_skbuff[i] = NULL;
1357 rp->tx_buf[i] = NULL;
1358 }
1359 }
1360
1361 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1362 {
1363 struct rhine_private *rp = netdev_priv(dev);
1364 void __iomem *ioaddr = rp->base;
1365
1366 if (!rp->mii_if.force_media)
1367 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1368
1369 if (rp->mii_if.full_duplex)
1370 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1371 ioaddr + ChipCmd1);
1372 else
1373 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1374 ioaddr + ChipCmd1);
1375
1376 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1377 rp->mii_if.force_media, netif_carrier_ok(dev));
1378 }
1379
1380
1381 static void rhine_set_carrier(struct mii_if_info *mii)
1382 {
1383 struct net_device *dev = mii->dev;
1384 struct rhine_private *rp = netdev_priv(dev);
1385
1386 if (mii->force_media) {
1387
1388 if (!netif_carrier_ok(dev))
1389 netif_carrier_on(dev);
1390 }
1391
1392 rhine_check_media(dev, 0);
1393
1394 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1395 mii->force_media, netif_carrier_ok(dev));
1396 }
1397
1398
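/* Load the 6-byte multicast address 'addr' into multicast CAM entry
'idx' (0 .. MCAM_SIZE - 1). */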
1406 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1407 {
1408 int i;
1409
1410 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1411 wmb();
1412
1413
1414 idx &= (MCAM_SIZE - 1);
1415
1416 iowrite8((u8) idx, ioaddr + CamAddr);
1417
1418 for (i = 0; i < 6; i++, addr++)
1419 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1420 udelay(10);
1421 wmb();
1422
1423 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1424 udelay(10);
1425
1426 iowrite8(0, ioaddr + CamCon);
1427 }
1428
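/* Load a 16-bit VLAN id into VLAN CAM entry 'idx' (0 .. VCAM_SIZE - 1). */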
1437 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1438 {
1439 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1440 wmb();
1441
1442
1443 idx &= (VCAM_SIZE - 1);
1444
1445 iowrite8((u8) idx, ioaddr + CamAddr);
1446
1447 iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1448 udelay(10);
1449 wmb();
1450
1451 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1452 udelay(10);
1453
1454 iowrite8(0, ioaddr + CamCon);
1455 }
1456
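/* Set the bitmask of valid multicast CAM entries. */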
1464 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1465 {
1466 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1467 wmb();
1468
1469
1470 iowrite32(mask, ioaddr + CamMask);
1471
1472
1473 iowrite8(0, ioaddr + CamCon);
1474 }
1475
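/* Set the bitmask of valid VLAN CAM entries. */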
1483 static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1484 {
1485 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1486 wmb();
1487
1488
1489 iowrite32(mask, ioaddr + CamMask);
1490
1491
1492 iowrite8(0, ioaddr + CamCon);
1493 }
1494
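/* Initialize the CAM filters by masking out every entry; used on
rqMgmt (VT6105M) chips, see init_registers(). */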
1502 static void rhine_init_cam_filter(struct net_device *dev)
1503 {
1504 struct rhine_private *rp = netdev_priv(dev);
1505 void __iomem *ioaddr = rp->base;
1506
1507
1508 rhine_set_vlan_cam_mask(ioaddr, 0);
1509 rhine_set_cam_mask(ioaddr, 0);
1510
1511
1512 BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1513 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1514 }
1515
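/* Rebuild the VLAN CAM from the active_vlans bitmap; ids beyond
VCAM_SIZE entries are dropped. Called with rp->lock held. */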
1522 static void rhine_update_vcam(struct net_device *dev)
1523 {
1524 struct rhine_private *rp = netdev_priv(dev);
1525 void __iomem *ioaddr = rp->base;
1526 u16 vid;
1527 u32 vCAMmask = 0;
1528 unsigned int i = 0;
1529
1530 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1531 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1532 vCAMmask |= 1 << i;
1533 if (++i >= VCAM_SIZE)
1534 break;
1535 }
1536 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1537 }
1538
1539 static int rhine_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1540 {
1541 struct rhine_private *rp = netdev_priv(dev);
1542
1543 spin_lock_bh(&rp->lock);
1544 set_bit(vid, rp->active_vlans);
1545 rhine_update_vcam(dev);
1546 spin_unlock_bh(&rp->lock);
1547 return 0;
1548 }
1549
1550 static int rhine_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1551 {
1552 struct rhine_private *rp = netdev_priv(dev);
1553
1554 spin_lock_bh(&rp->lock);
1555 clear_bit(vid, rp->active_vlans);
1556 rhine_update_vcam(dev);
1557 spin_unlock_bh(&rp->lock);
1558 return 0;
1559 }
1560
1561 static void init_registers(struct net_device *dev)
1562 {
1563 struct rhine_private *rp = netdev_priv(dev);
1564 void __iomem *ioaddr = rp->base;
1565 int i;
1566
1567 for (i = 0; i < 6; i++)
1568 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1569
1570
1571 iowrite16(0x0006, ioaddr + PCIBusConfig);
1572
1573 iowrite8(0x20, ioaddr + TxConfig);
1574 rp->tx_thresh = 0x20;
1575 rp->rx_thresh = 0x60;
1576
1577 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1578 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1579
1580 rhine_set_rx_mode(dev);
1581
1582 if (rp->quirks & rqMgmt)
1583 rhine_init_cam_filter(dev);
1584
1585 napi_enable(&rp->napi);
1586
1587 iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1588
1589 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1590 ioaddr + ChipCmd);
1591 rhine_check_media(dev, 1);
1592 }
1593
1594
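/* Restart automatic polling of the PHY's BMSR register, which the
chip uses to detect link changes (IntrLinkChange). */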
1595 static void rhine_enable_linkmon(struct rhine_private *rp)
1596 {
1597 void __iomem *ioaddr = rp->base;
1598
1599 iowrite8(0, ioaddr + MIICmd);
1600 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1601 iowrite8(0x80, ioaddr + MIICmd);
1602
1603 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1604
1605 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1606 }
1607
1608
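/* Stop the automatic BMSR polling so the MII interface is free for
mdio_read()/mdio_write(); Rhine-I needs extra steps to park the
poller at a safe point. */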
1609 static void rhine_disable_linkmon(struct rhine_private *rp)
1610 {
1611 void __iomem *ioaddr = rp->base;
1612
1613 iowrite8(0, ioaddr + MIICmd);
1614
1615 if (rp->quirks & rqRhineI) {
1616 iowrite8(0x01, ioaddr + MIIRegAddr);
1617
1618
1619 mdelay(1);
1620
1621
1622 iowrite8(0x80, ioaddr + MIICmd);
1623
1624 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1625
1626
1627 iowrite8(0, ioaddr + MIICmd);
1628 }
1629 else
1630 rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1631 }
1632
1633
1634
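/* Read one MII register; the link monitor is paused around the access
and re-enabled afterwards. */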
1635 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1636 {
1637 struct rhine_private *rp = netdev_priv(dev);
1638 void __iomem *ioaddr = rp->base;
1639 int result;
1640
1641 rhine_disable_linkmon(rp);
1642
1643
1644 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1645 iowrite8(regnum, ioaddr + MIIRegAddr);
1646 iowrite8(0x40, ioaddr + MIICmd);
1647 rhine_wait_bit_low(rp, MIICmd, 0x40);
1648 result = ioread16(ioaddr + MIIData);
1649
1650 rhine_enable_linkmon(rp);
1651 return result;
1652 }
1653
1654 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1655 {
1656 struct rhine_private *rp = netdev_priv(dev);
1657 void __iomem *ioaddr = rp->base;
1658
1659 rhine_disable_linkmon(rp);
1660
1661
1662 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1663 iowrite8(regnum, ioaddr + MIIRegAddr);
1664 iowrite16(value, ioaddr + MIIData);
1665 iowrite8(0x20, ioaddr + MIICmd);
1666 rhine_wait_bit_low(rp, MIICmd, 0x20);
1667
1668 rhine_enable_linkmon(rp);
1669 }
1670
1671 static void rhine_task_disable(struct rhine_private *rp)
1672 {
1673 mutex_lock(&rp->task_lock);
1674 rp->task_enable = false;
1675 mutex_unlock(&rp->task_lock);
1676
1677 cancel_work_sync(&rp->slow_event_task);
1678 cancel_work_sync(&rp->reset_task);
1679 }
1680
1681 static void rhine_task_enable(struct rhine_private *rp)
1682 {
1683 mutex_lock(&rp->task_lock);
1684 rp->task_enable = true;
1685 mutex_unlock(&rp->task_lock);
1686 }
1687
1688 static int rhine_open(struct net_device *dev)
1689 {
1690 struct rhine_private *rp = netdev_priv(dev);
1691 void __iomem *ioaddr = rp->base;
1692 int rc;
1693
1694 rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
1695 if (rc)
1696 goto out;
1697
1698 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
1699
1700 rc = alloc_ring(dev);
1701 if (rc < 0)
1702 goto out_free_irq;
1703
1704 rc = alloc_rbufs(dev);
1705 if (rc < 0)
1706 goto out_free_ring;
1707
1708 alloc_tbufs(dev);
1709 rhine_chip_reset(dev);
1710 rhine_task_enable(rp);
1711 init_registers(dev);
1712
1713 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1714 __func__, ioread16(ioaddr + ChipCmd),
1715 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1716
1717 netif_start_queue(dev);
1718
1719 out:
1720 return rc;
1721
1722 out_free_ring:
1723 free_ring(dev);
1724 out_free_irq:
1725 free_irq(rp->irq, dev);
1726 goto out;
1727 }
1728
1729 static void rhine_reset_task(struct work_struct *work)
1730 {
1731 struct rhine_private *rp = container_of(work, struct rhine_private,
1732 reset_task);
1733 struct net_device *dev = rp->dev;
1734
1735 mutex_lock(&rp->task_lock);
1736
1737 if (!rp->task_enable)
1738 goto out_unlock;
1739
1740 napi_disable(&rp->napi);
1741 netif_tx_disable(dev);
1742 spin_lock_bh(&rp->lock);
1743
1744
1745 free_tbufs(dev);
1746 alloc_tbufs(dev);
1747
1748 rhine_reset_rbufs(rp);
1749
1750
1751 rhine_chip_reset(dev);
1752 init_registers(dev);
1753
1754 spin_unlock_bh(&rp->lock);
1755
1756 netif_trans_update(dev);
1757 dev->stats.tx_errors++;
1758 netif_wake_queue(dev);
1759
1760 out_unlock:
1761 mutex_unlock(&rp->task_lock);
1762 }
1763
1764 static void rhine_tx_timeout(struct net_device *dev)
1765 {
1766 struct rhine_private *rp = netdev_priv(dev);
1767 void __iomem *ioaddr = rp->base;
1768
1769 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1770 ioread16(ioaddr + IntrStatus),
1771 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1772
1773 schedule_work(&rp->reset_task);
1774 }
1775
1776 static inline bool rhine_tx_queue_full(struct rhine_private *rp)
1777 {
1778 return (rp->cur_tx - rp->dirty_tx) >= TX_QUEUE_LEN;
1779 }
1780
1781 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1782 struct net_device *dev)
1783 {
1784 struct rhine_private *rp = netdev_priv(dev);
1785 struct device *hwdev = dev->dev.parent;
1786 void __iomem *ioaddr = rp->base;
1787 unsigned entry;
1788
1789
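/* Calculate the next Tx descriptor entry. */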
1793 entry = rp->cur_tx % TX_RING_SIZE;
1794
1795 if (skb_padto(skb, ETH_ZLEN))
1796 return NETDEV_TX_OK;
1797
1798 rp->tx_skbuff[entry] = skb;
1799
1800 if ((rp->quirks & rqRhineI) &&
1801 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1802
1803 if (skb->len > PKT_BUF_SZ) {
1804
1805 dev_kfree_skb_any(skb);
1806 rp->tx_skbuff[entry] = NULL;
1807 dev->stats.tx_dropped++;
1808 return NETDEV_TX_OK;
1809 }
1810
1811
1812 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1813 if (skb->len < ETH_ZLEN)
1814 memset(rp->tx_buf[entry] + skb->len, 0,
1815 ETH_ZLEN - skb->len);
1816 rp->tx_skbuff_dma[entry] = 0;
1817 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1818 (rp->tx_buf[entry] -
1819 rp->tx_bufs));
1820 } else {
1821 rp->tx_skbuff_dma[entry] =
1822 dma_map_single(hwdev, skb->data, skb->len,
1823 DMA_TO_DEVICE);
1824 if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
1825 dev_kfree_skb_any(skb);
1826 rp->tx_skbuff_dma[entry] = 0;
1827 dev->stats.tx_dropped++;
1828 return NETDEV_TX_OK;
1829 }
1830 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1831 }
1832
1833 rp->tx_ring[entry].desc_length =
1834 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1835
1836 if (unlikely(skb_vlan_tag_present(skb))) {
1837 u16 vid_pcp = skb_vlan_tag_get(skb);
1838
1839
1840 vid_pcp = (vid_pcp & VLAN_VID_MASK) |
1841 ((vid_pcp & VLAN_PRIO_MASK) >> 1);
1842 rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16);
1843
1844 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1845 }
1846 else
1847 rp->tx_ring[entry].tx_status = 0;
1848
1849 netdev_sent_queue(dev, skb->len);
1850
1851 dma_wmb();
1852 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1853 wmb();
1854
1855 rp->cur_tx++;
1856
1857
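/* Make the cur_tx update visible before the doorbell writes below;
pairs with the smp_rmb() in rhine_tx(). */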
1861 smp_wmb();
1862
1863
1864
1865 if (skb_vlan_tag_present(skb))
1866
1867 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1868
1869
1870 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1871 ioaddr + ChipCmd1);
1872 IOSYNC;
1873
1874
1875 if (rhine_tx_queue_full(rp)) {
1876 netif_stop_queue(dev);
1877 smp_rmb();
1878
1879 if (!rhine_tx_queue_full(rp))
1880 netif_wake_queue(dev);
1881 }
1882
1883 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1884 rp->cur_tx - 1, entry);
1885
1886 return NETDEV_TX_OK;
1887 }
1888
1889 static void rhine_irq_disable(struct rhine_private *rp)
1890 {
1891 iowrite16(0x0000, rp->base + IntrEnable);
1892 }
1893
1894
1895
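/* Top-half interrupt handler: on any event of interest, mask the
chip's interrupts and let rhine_napipoll() do the processing and the
acknowledging. */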
1896 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1897 {
1898 struct net_device *dev = dev_instance;
1899 struct rhine_private *rp = netdev_priv(dev);
1900 u32 status;
1901 int handled = 0;
1902
1903 status = rhine_get_events(rp);
1904
1905 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1906
1907 if (status & RHINE_EVENT) {
1908 handled = 1;
1909
1910 rhine_irq_disable(rp);
1911 napi_schedule(&rp->napi);
1912 }
1913
1914 if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1915 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1916 status);
1917 }
1918
1919 return IRQ_RETVAL(handled);
1920 }
1921
1922
1923
1924 static void rhine_tx(struct net_device *dev)
1925 {
1926 struct rhine_private *rp = netdev_priv(dev);
1927 struct device *hwdev = dev->dev.parent;
1928 unsigned int pkts_compl = 0, bytes_compl = 0;
1929 unsigned int dirty_tx = rp->dirty_tx;
1930 unsigned int cur_tx;
1931 struct sk_buff *skb;
1932
1933
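/* Read a cur_tx value consistent with the descriptor contents
published by rhine_start_tx(); balances the smp_wmb() there. */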
1939 smp_rmb();
1940 cur_tx = rp->cur_tx;
1941
1942 while (dirty_tx != cur_tx) {
1943 unsigned int entry = dirty_tx % TX_RING_SIZE;
1944 u32 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1945
1946 netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1947 entry, txstatus);
1948 if (txstatus & DescOwn)
1949 break;
1950 skb = rp->tx_skbuff[entry];
1951 if (txstatus & 0x8000) {
1952 netif_dbg(rp, tx_done, dev,
1953 "Transmit error, Tx status %08x\n", txstatus);
1954 dev->stats.tx_errors++;
1955 if (txstatus & 0x0400)
1956 dev->stats.tx_carrier_errors++;
1957 if (txstatus & 0x0200)
1958 dev->stats.tx_window_errors++;
1959 if (txstatus & 0x0100)
1960 dev->stats.tx_aborted_errors++;
1961 if (txstatus & 0x0080)
1962 dev->stats.tx_heartbeat_errors++;
1963 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1964 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1965 dev->stats.tx_fifo_errors++;
1966 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1967 break;
1968 }
1969
1970 } else {
1971 if (rp->quirks & rqRhineI)
1972 dev->stats.collisions += (txstatus >> 3) & 0x0F;
1973 else
1974 dev->stats.collisions += txstatus & 0x0F;
1975 netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1976 (txstatus >> 3) & 0xF, txstatus & 0xF);
1977
1978 u64_stats_update_begin(&rp->tx_stats.syncp);
1979 rp->tx_stats.bytes += skb->len;
1980 rp->tx_stats.packets++;
1981 u64_stats_update_end(&rp->tx_stats.syncp);
1982 }
1983
1984 if (rp->tx_skbuff_dma[entry]) {
1985 dma_unmap_single(hwdev,
1986 rp->tx_skbuff_dma[entry],
1987 skb->len,
1988 DMA_TO_DEVICE);
1989 }
1990 bytes_compl += skb->len;
1991 pkts_compl++;
1992 dev_consume_skb_any(skb);
1993 rp->tx_skbuff[entry] = NULL;
1994 dirty_tx++;
1995 }
1996
1997 rp->dirty_tx = dirty_tx;
1998
1999 smp_wmb();
2000
2001 netdev_completed_queue(dev, pkts_compl, bytes_compl);
2002
2003
2004 if (!rhine_tx_queue_full(rp) && netif_queue_stopped(dev)) {
2005 netif_wake_queue(dev);
2006 smp_rmb();
2007
2008 if (rhine_tx_queue_full(rp))
2009 netif_stop_queue(dev);
2010 }
2011 }
2012
2013
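/* Extract the VLAN TCI that the chip stores behind the received
frame: it follows the data area rounded up to a 4-byte boundary, plus
two bytes of padding. */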
2022 static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
2023 {
2024 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
2025 return be16_to_cpup((__be16 *)trailer);
2026 }
2027
2028 static inline void rhine_rx_vlan_tag(struct sk_buff *skb, struct rx_desc *desc,
2029 int data_size)
2030 {
2031 dma_rmb();
2032 if (unlikely(desc->desc_length & cpu_to_le32(DescTag))) {
2033 u16 vlan_tci;
2034
2035 vlan_tci = rhine_get_vlan_tci(skb, data_size);
2036 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
2037 }
2038 }
2039
2040
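/* Process up to 'limit' frames from the receive ring; runs in NAPI
context. */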
2041 static int rhine_rx(struct net_device *dev, int limit)
2042 {
2043 struct rhine_private *rp = netdev_priv(dev);
2044 struct device *hwdev = dev->dev.parent;
2045 int entry = rp->cur_rx % RX_RING_SIZE;
2046 int count;
2047
2048 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
2049 entry, le32_to_cpu(rp->rx_ring[entry].rx_status));
2050
2051
2052 for (count = 0; count < limit; ++count) {
2053 struct rx_desc *desc = rp->rx_ring + entry;
2054 u32 desc_status = le32_to_cpu(desc->rx_status);
2055 int data_size = desc_status >> 16;
2056
2057 if (desc_status & DescOwn)
2058 break;
2059
2060 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
2061 desc_status);
2062
2063 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
2064 if ((desc_status & RxWholePkt) != RxWholePkt) {
2065 netdev_warn(dev,
2066 "Oversized Ethernet frame spanned multiple buffers, "
2067 "entry %#x length %d status %08x!\n",
2068 entry, data_size,
2069 desc_status);
2070 dev->stats.rx_length_errors++;
2071 } else if (desc_status & RxErr) {
2072
2073 netif_dbg(rp, rx_err, dev,
2074 "%s() Rx error %08x\n", __func__,
2075 desc_status);
2076 dev->stats.rx_errors++;
2077 if (desc_status & 0x0030)
2078 dev->stats.rx_length_errors++;
2079 if (desc_status & 0x0048)
2080 dev->stats.rx_fifo_errors++;
2081 if (desc_status & 0x0004)
2082 dev->stats.rx_frame_errors++;
2083 if (desc_status & 0x0002) {
2084
2085 spin_lock(&rp->lock);
2086 dev->stats.rx_crc_errors++;
2087 spin_unlock(&rp->lock);
2088 }
2089 }
2090 } else {
2091
2092 int pkt_len = data_size - 4;
2093 struct sk_buff *skb;
2094
2095
2096
2097 if (pkt_len < rx_copybreak) {
2098 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
2099 if (unlikely(!skb))
2100 goto drop;
2101
2102 dma_sync_single_for_cpu(hwdev,
2103 rp->rx_skbuff_dma[entry],
2104 rp->rx_buf_sz,
2105 DMA_FROM_DEVICE);
2106
2107 skb_copy_to_linear_data(skb,
2108 rp->rx_skbuff[entry]->data,
2109 pkt_len);
2110
2111 dma_sync_single_for_device(hwdev,
2112 rp->rx_skbuff_dma[entry],
2113 rp->rx_buf_sz,
2114 DMA_FROM_DEVICE);
2115 } else {
2116 struct rhine_skb_dma sd;
2117
2118 if (unlikely(rhine_skb_dma_init(dev, &sd) < 0))
2119 goto drop;
2120
2121 skb = rp->rx_skbuff[entry];
2122
2123 dma_unmap_single(hwdev,
2124 rp->rx_skbuff_dma[entry],
2125 rp->rx_buf_sz,
2126 DMA_FROM_DEVICE);
2127 rhine_skb_dma_nic_store(rp, &sd, entry);
2128 }
2129
2130 skb_put(skb, pkt_len);
2131
2132 rhine_rx_vlan_tag(skb, desc, data_size);
2133
2134 skb->protocol = eth_type_trans(skb, dev);
2135
2136 netif_receive_skb(skb);
2137
2138 u64_stats_update_begin(&rp->rx_stats.syncp);
2139 rp->rx_stats.bytes += pkt_len;
2140 rp->rx_stats.packets++;
2141 u64_stats_update_end(&rp->rx_stats.syncp);
2142 }
2143 give_descriptor_to_nic:
2144 desc->rx_status = cpu_to_le32(DescOwn);
2145 entry = (++rp->cur_rx) % RX_RING_SIZE;
2146 }
2147
2148 return count;
2149
2150 drop:
2151 dev->stats.rx_dropped++;
2152 goto give_descriptor_to_nic;
2153 }
2154
2155 static void rhine_restart_tx(struct net_device *dev) {
2156 struct rhine_private *rp = netdev_priv(dev);
2157 void __iomem *ioaddr = rp->base;
2158 int entry = rp->dirty_tx % TX_RING_SIZE;
2159 u32 intr_status;
2160
2161
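/* Only restart Tx if no further Tx error is pending; otherwise the
error path will bring us back here once it has been sorted out. */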
2165 intr_status = rhine_get_events(rp);
2166
2167 if ((intr_status & IntrTxErrSummary) == 0) {
2168
2169
2170 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
2171 ioaddr + TxRingPtr);
2172
2173 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
2174 ioaddr + ChipCmd);
2175
2176 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
2177
2178 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
2179
2180 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
2181 ioaddr + ChipCmd1);
2182 IOSYNC;
2183 }
2184 else {
2185
2186 netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
2187 intr_status);
2188 }
2189
2190 }
2191
2192 static void rhine_slow_event_task(struct work_struct *work)
2193 {
2194 struct rhine_private *rp =
2195 container_of(work, struct rhine_private, slow_event_task);
2196 struct net_device *dev = rp->dev;
2197 u32 intr_status;
2198
2199 mutex_lock(&rp->task_lock);
2200
2201 if (!rp->task_enable)
2202 goto out_unlock;
2203
2204 intr_status = rhine_get_events(rp);
2205 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2206
2207 if (intr_status & IntrLinkChange)
2208 rhine_check_media(dev, 0);
2209
2210 if (intr_status & IntrPCIErr)
2211 netif_warn(rp, hw, dev, "PCI error\n");
2212
2213 iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
2214
2215 out_unlock:
2216 mutex_unlock(&rp->task_lock);
2217 }
2218
2219 static void
2220 rhine_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
2221 {
2222 struct rhine_private *rp = netdev_priv(dev);
2223 unsigned int start;
2224
2225 spin_lock_bh(&rp->lock);
2226 rhine_update_rx_crc_and_missed_errord(rp);
2227 spin_unlock_bh(&rp->lock);
2228
2229 netdev_stats_to_stats64(stats, &dev->stats);
2230
2231 do {
2232 start = u64_stats_fetch_begin_irq(&rp->rx_stats.syncp);
2233 stats->rx_packets = rp->rx_stats.packets;
2234 stats->rx_bytes = rp->rx_stats.bytes;
2235 } while (u64_stats_fetch_retry_irq(&rp->rx_stats.syncp, start));
2236
2237 do {
2238 start = u64_stats_fetch_begin_irq(&rp->tx_stats.syncp);
2239 stats->tx_packets = rp->tx_stats.packets;
2240 stats->tx_bytes = rp->tx_stats.bytes;
2241 } while (u64_stats_fetch_retry_irq(&rp->tx_stats.syncp, start));
2242 }
2243
2244 static void rhine_set_rx_mode(struct net_device *dev)
2245 {
2246 struct rhine_private *rp = netdev_priv(dev);
2247 void __iomem *ioaddr = rp->base;
2248 u32 mc_filter[2];
2249 u8 rx_mode = 0x0C;
2250 struct netdev_hw_addr *ha;
2251
2252 if (dev->flags & IFF_PROMISC) {
2253 rx_mode = 0x1C;
2254 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2255 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2256 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2257 (dev->flags & IFF_ALLMULTI)) {
2258
2259 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2260 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2261 } else if (rp->quirks & rqMgmt) {
2262 int i = 0;
2263 u32 mCAMmask = 0;
2264 netdev_for_each_mc_addr(ha, dev) {
2265 if (i == MCAM_SIZE)
2266 break;
2267 rhine_set_cam(ioaddr, i, ha->addr);
2268 mCAMmask |= 1 << i;
2269 i++;
2270 }
2271 rhine_set_cam_mask(ioaddr, mCAMmask);
2272 } else {
2273 memset(mc_filter, 0, sizeof(mc_filter));
2274 netdev_for_each_mc_addr(ha, dev) {
2275 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2276
2277 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2278 }
2279 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2280 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2281 }
2282
2283 if (rp->quirks & rqMgmt) {
2284 if (dev->flags & IFF_PROMISC)
2285 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2286 else
2287 BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2288 }
2289 BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
2290 }
2291
2292 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2293 {
2294 struct device *hwdev = dev->dev.parent;
2295
2296 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2297 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2298 strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
2299 }
2300
static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);

	mutex_lock(&rp->task_lock);
	mii_ethtool_get_link_ksettings(&rp->mii_if, cmd);
	mutex_unlock(&rp->task_lock);

	return 0;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	mutex_lock(&rp->task_lock);
	rc = mii_ethtool_set_link_ksettings(&rp->mii_if, cmd);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_nway_restart(&rp->mii_if);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return mii_link_ok(&rp->mii_if);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);

	return rp->msg_enable;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	struct rhine_private *rp = netdev_priv(dev);

	rp->msg_enable = value;
}

static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);

	if (!(rp->quirks & rqWOL))
		return;

	spin_lock_irq(&rp->lock);
	wol->supported = WAKE_PHY | WAKE_MAGIC |
			 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;
	wol->wolopts = rp->wolopts;
	spin_unlock_irq(&rp->lock);
}

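/*
 * Note: rhine_set_wol() only records the requested wake modes; the WOL
 * registers themselves are programmed at shutdown time by
 * rhine_shutdown_pci().
 */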
static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct rhine_private *rp = netdev_priv(dev);
	u32 support = WAKE_PHY | WAKE_MAGIC |
		      WAKE_UCAST | WAKE_MCAST | WAKE_BCAST;

	if (!(rp->quirks & rqWOL))
		return -EINVAL;

	if (wol->wolopts & ~support)
		return -EINVAL;

	spin_lock_irq(&rp->lock);
	rp->wolopts = wol->wolopts;
	spin_unlock_irq(&rp->lock);

	return 0;
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.get_wol		= rhine_get_wol,
	.set_wol		= rhine_set_wol,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};

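/*
 * MII ioctls (SIOCGMIIPHY and friends) are handled by the generic MII
 * layer and are only honoured while the interface is running.
 */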
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rhine_private *rp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EINVAL;

	mutex_lock(&rp->task_lock);
	rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
	rhine_set_carrier(&rp->mii_if);
	mutex_unlock(&rp->task_lock);

	return rc;
}

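/*
 * Tear-down order matters: stop the deferred work and NAPI polling
 * first, then quiesce the chip, and only then release the IRQ and
 * free the DMA rings.
 */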
static int rhine_close(struct net_device *dev)
{
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	rhine_task_disable(rp);
	napi_disable(&rp->napi);
	netif_stop_queue(dev);

	netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
		  ioread16(ioaddr + ChipCmd));

	/* Switch to loopback mode to avoid hardware races. */
	iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);

	rhine_irq_disable(rp);

	/* Stop the chip's Tx and Rx processes. */
	iowrite16(CmdStop, ioaddr + ChipCmd);

	free_irq(rp->irq, dev);
	free_rbufs(dev);
	free_tbufs(dev);
	free_ring(dev);

	return 0;
}

static void rhine_remove_one_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	pci_iounmap(pdev, rp->base);
	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
}

static int rhine_remove_one_platform(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);

	unregister_netdev(dev);

	iounmap(rp->base);

	free_netdev(dev);

	return 0;
}

static void rhine_shutdown_pci(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct rhine_private *rp = netdev_priv(dev);
	void __iomem *ioaddr = rp->base;

	if (!(rp->quirks & rqWOL))
		return; /* Nothing to do for non-WOL adapters */

	rhine_power_init(dev);

	/* Make sure we use pattern 0, 1 and not 4, 5 */
	if (rp->quirks & rq6patterns)
		iowrite8(0x04, ioaddr + WOLcgClr);

	spin_lock(&rp->lock);

	if (rp->wolopts & WAKE_MAGIC) {
		iowrite8(WOLmagic, ioaddr + WOLcrSet);
		/*
		 * Turn EEPROM-controlled wake-up back on -- some hardware
		 * may not cooperate otherwise.
		 */
		iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
	}

	if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
		iowrite8(WOLbmcast, ioaddr + WOLcgSet);

	if (rp->wolopts & WAKE_PHY)
		iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);

	if (rp->wolopts & WAKE_UCAST)
		iowrite8(WOLucast, ioaddr + WOLcrSet);

	if (rp->wolopts) {
		/* Enable legacy WOL (for old motherboards) */
		iowrite8(0x01, ioaddr + PwcfgSet);
		iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
	}

	spin_unlock(&rp->lock);

	/* On power-off, drop into D3hot with wake-up armed, unless a broken
	 * BIOS forces avoid_D3. */
	if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
		iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);

		pci_wake_from_d3(pdev, true);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PM_SLEEP
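/*
 * The PM callbacks are shared by the PCI and platform variants; only a
 * PCI device additionally arms Wake-on-LAN on suspend, via
 * rhine_shutdown_pci().
 */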
static int rhine_suspend(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	rhine_task_disable(rp);
	rhine_irq_disable(rp);
	napi_disable(&rp->napi);

	netif_device_detach(dev);

	if (dev_is_pci(device))
		rhine_shutdown_pci(to_pci_dev(device));

	return 0;
}

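/*
 * Resume reprograms the chip from scratch: the Tx ring is rebuilt, the
 * Rx ring is reset and the registers are reinitialized, since hardware
 * state may have been lost while suspended.
 */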
static int rhine_resume(struct device *device)
{
	struct net_device *dev = dev_get_drvdata(device);
	struct rhine_private *rp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	enable_mmio(rp->pioaddr, rp->quirks);
	rhine_power_init(dev);
	free_tbufs(dev);
	alloc_tbufs(dev);
	rhine_reset_rbufs(rp);
	rhine_task_enable(rp);
	spin_lock_bh(&rp->lock);
	init_registers(dev);
	spin_unlock_bh(&rp->lock);

	netif_device_attach(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#define RHINE_PM_OPS	(&rhine_pm_ops)

#else

#define RHINE_PM_OPS	NULL

#endif /* !CONFIG_PM_SLEEP */

static struct pci_driver rhine_driver_pci = {
	.name		= DRV_NAME,
	.id_table	= rhine_pci_tbl,
	.probe		= rhine_init_one_pci,
	.remove		= rhine_remove_one_pci,
	.shutdown	= rhine_shutdown_pci,
	.driver.pm	= RHINE_PM_OPS,
};

static struct platform_driver rhine_driver_platform = {
	.probe		= rhine_init_one_platform,
	.remove		= rhine_remove_one_platform,
	.driver = {
		.name		= DRV_NAME,
		.of_match_table	= rhine_of_tbl,
		.pm		= RHINE_PM_OPS,
	}
};

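/*
 * Boards whose BIOS is known to misbehave if the chip is left in D3
 * across power-off; matching systems force avoid_D3 in rhine_init().
 */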
static const struct dmi_system_id rhine_dmi_table[] __initconst = {
	{
		.ident = "EPIA-M",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{
		.ident = "KV7",
		.matches = {
			DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
			DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
		},
	},
	{ NULL }
};

static int __init rhine_init(void)
{
	int ret_pci, ret_platform;

/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	pr_info("%s\n", version);
#endif
	if (dmi_check_system(rhine_dmi_table)) {
		/* these BIOSes fail at PXE boot if chip is in D3 */
		avoid_D3 = true;
		pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
	} else if (avoid_D3)
		pr_info("avoid_D3 set\n");

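	/*
	 * Register both bus drivers; only fail the module load if
	 * neither registration succeeded.
	 */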
	ret_pci = pci_register_driver(&rhine_driver_pci);
	ret_platform = platform_driver_register(&rhine_driver_platform);
	if ((ret_pci < 0) && (ret_platform < 0))
		return ret_pci;

	return 0;
}

static void __exit rhine_cleanup(void)
{
	platform_driver_unregister(&rhine_driver_platform);
	pci_unregister_driver(&rhine_driver_pci);
}

module_init(rhine_init);
module_exit(rhine_cleanup);