This source file includes the following definitions.
- cp_set_rxbufsize
- cp_rx_skb
- cp_rx_err_acct
- cp_rx_csum_ok
- cp_rx_poll
- cp_interrupt
- cp_poll_controller
- cp_tx
- cp_tx_vlan_tag
- unwind_tx_frag_mapping
- cp_start_xmit
- __cp_set_rx_mode
- cp_set_rx_mode
- __cp_get_stats
- cp_get_stats
- cp_stop_hw
- cp_reset_hw
- cp_start_hw
- cp_enable_irq
- cp_init_hw
- cp_refill_rx
- cp_init_rings_index
- cp_init_rings
- cp_alloc_rings
- cp_clean_rings
- cp_free_rings
- cp_open
- cp_close
- cp_tx_timeout
- cp_change_mtu
- mdio_read
- mdio_write
- netdev_set_wol
- netdev_get_wol
- cp_get_drvinfo
- cp_get_ringparam
- cp_get_regs_len
- cp_get_sset_count
- cp_get_link_ksettings
- cp_set_link_ksettings
- cp_nway_reset
- cp_get_msglevel
- cp_set_msglevel
- cp_set_features
- cp_get_regs
- cp_get_wol
- cp_set_wol
- cp_get_strings
- cp_get_ethtool_stats
- cp_ioctl
- cp_set_mac_address
- eeprom_cmd_start
- eeprom_cmd
- eeprom_cmd_end
- eeprom_extend_cmd
- read_eeprom
- write_eeprom
- cp_get_eeprom_len
- cp_get_eeprom
- cp_set_eeprom
- cp_set_d3_state
- cp_features_check
- cp_init_one
- cp_remove_one
- cp_suspend
- cp_resume
/* Lines 1-48 (the original leading header comment block) omitted. */
49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
50
51 #define DRV_NAME "8139cp"
52 #define DRV_VERSION "1.3"
53 #define DRV_RELDATE "Mar 22, 2004"
54
55
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/kernel.h>
59 #include <linux/compiler.h>
60 #include <linux/netdevice.h>
61 #include <linux/etherdevice.h>
62 #include <linux/init.h>
63 #include <linux/interrupt.h>
64 #include <linux/pci.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/delay.h>
67 #include <linux/ethtool.h>
68 #include <linux/gfp.h>
69 #include <linux/mii.h>
70 #include <linux/if_vlan.h>
71 #include <linux/crc32.h>
72 #include <linux/in.h>
73 #include <linux/ip.h>
74 #include <linux/tcp.h>
75 #include <linux/udp.h>
76 #include <linux/cache.h>
77 #include <asm/io.h>
78 #include <asm/irq.h>
79 #include <linux/uaccess.h>
80
81
82 static char version[] =
83 DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
84
85 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
86 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
87 MODULE_VERSION(DRV_VERSION);
88 MODULE_LICENSE("GPL");
89
90 static int debug = -1;
91 module_param(debug, int, 0);
92 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
93
94
95
96 static int multicast_filter_limit = 32;
97 module_param(multicast_filter_limit, int, 0);
98 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
99
100 #define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
101 NETIF_MSG_PROBE | \
102 NETIF_MSG_LINK)
103 #define CP_NUM_STATS 14
104 #define CP_STATS_SIZE 64
105 #define CP_REGS_SIZE (0xff + 1)
106 #define CP_REGS_VER 1
107 #define CP_RX_RING_SIZE 64
108 #define CP_TX_RING_SIZE 64
109 #define CP_RING_BYTES \
110 ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
111 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
112 CP_STATS_SIZE)
113 #define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
114 #define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
115 #define TX_BUFFS_AVAIL(CP) \
116 (((CP)->tx_tail <= (CP)->tx_head) ? \
117 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
118 (CP)->tx_tail - (CP)->tx_head - 1)
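/*
 * TX_BUFFS_AVAIL() returns the number of free TX descriptors between the
 * producer (tx_head) and consumer (tx_tail) indices.  One slot is always
 * left unused so a full ring can be distinguished from an empty one.
 */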
119
120 #define PKT_BUF_SZ 1536
121 #define CP_INTERNAL_PHY 32
122
123
124 #define RX_FIFO_THRESH 5
125 #define RX_DMA_BURST 4
126 #define TX_DMA_BURST 6
127 #define TX_EARLY_THRESH 256
128
129
130 #define TX_TIMEOUT (6*HZ)
131
132
133 #define CP_MIN_MTU 60
134 #define CP_MAX_MTU 4096
135
136 enum {
137
138 MAC0 = 0x00,
139 MAR0 = 0x08,
140 StatsAddr = 0x10,
141 TxRingAddr = 0x20,
142 HiTxRingAddr = 0x28,
143 Cmd = 0x37,
144 IntrMask = 0x3C,
145 IntrStatus = 0x3E,
146 TxConfig = 0x40,
147 ChipVersion = 0x43,
148 RxConfig = 0x44,
149 RxMissed = 0x4C,
150 Cfg9346 = 0x50,
151 Config1 = 0x52,
152 Config3 = 0x59,
153 Config4 = 0x5A,
154 MultiIntr = 0x5C,
155 BasicModeCtrl = 0x62,
156 BasicModeStatus = 0x64,
157 NWayAdvert = 0x66,
158 NWayLPAR = 0x68,
159 NWayExpansion = 0x6A,
160 TxDmaOkLowDesc = 0x82,
161 Config5 = 0xD8,
162 TxPoll = 0xD9,
163 RxMaxSize = 0xDA,
164 CpCmd = 0xE0,
165 IntrMitigate = 0xE2,
166 RxRingAddr = 0xE4,
167 TxThresh = 0xEC,
168 OldRxBufAddr = 0x30,
169 OldTSD0 = 0x10,
170
171
172 DescOwn = (1 << 31),
173 RingEnd = (1 << 30),
174 FirstFrag = (1 << 29),
175 LastFrag = (1 << 28),
176 LargeSend = (1 << 27),
177 MSSShift = 16,
178 MSSMask = 0x7ff,
179 TxError = (1 << 23),
180 RxError = (1 << 20),
181 IPCS = (1 << 18),
182 UDPCS = (1 << 17),
183 TCPCS = (1 << 16),
184 TxVlanTag = (1 << 17),
185 RxVlanTagged = (1 << 16),
186 IPFail = (1 << 15),
187 UDPFail = (1 << 14),
188 TCPFail = (1 << 13),
189 NormalTxPoll = (1 << 6),
190 PID1 = (1 << 17),
191 PID0 = (1 << 16),
192 RxProtoTCP = 1,
193 RxProtoUDP = 2,
194 RxProtoIP = 3,
195 TxFIFOUnder = (1 << 25),
196 TxOWC = (1 << 22),
197 TxLinkFail = (1 << 21),
198 TxMaxCol = (1 << 20),
199 TxColCntShift = 16,
200 TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08,
201 RxErrFrame = (1 << 27),
202 RxMcast = (1 << 26),
203 RxErrCRC = (1 << 18),
204 RxErrRunt = (1 << 19),
205 RxErrLong = (1 << 21),
206 RxErrFIFO = (1 << 22),
207
208
209 DumpStats = (1 << 3),
210
211
212 RxCfgFIFOShift = 13,
213 RxCfgDMAShift = 8,
214 AcceptErr = 0x20,
215 AcceptRunt = 0x10,
216 AcceptBroadcast = 0x08,
217 AcceptMulticast = 0x04,
218 AcceptMyPhys = 0x02,
219 AcceptAllPhys = 0x01,
220
221
222 PciErr = (1 << 15),
223 TimerIntr = (1 << 14),
224 LenChg = (1 << 13),
225 SWInt = (1 << 8),
226 TxEmpty = (1 << 7),
227 RxFIFOOvr = (1 << 6),
228 LinkChg = (1 << 5),
229 RxEmpty = (1 << 4),
230 TxErr = (1 << 3),
231 TxOK = (1 << 2),
232 RxErr = (1 << 1),
233 RxOK = (1 << 0),
234 IntrResvd = (1 << 10),
235
236
237 IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
238 RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
239 RxErr | RxOK | IntrResvd,
240
241
242 CmdReset = (1 << 4),
243 RxOn = (1 << 3),
244 TxOn = (1 << 2),
245
246
247 RxVlanOn = (1 << 6),
248 RxChkSum = (1 << 5),
249 PCIDAC = (1 << 4),
250 PCIMulRW = (1 << 3),
251 CpRxOn = (1 << 1),
252 CpTxOn = (1 << 0),
253
254
255 Cfg9346_Lock = 0x00,
256 Cfg9346_Unlock = 0xC0,
257
258
259 IFG = (1 << 25) | (1 << 24),
260 TxDMAShift = 8,
261
262
263 TxThreshMask = 0x3f,
264 TxThreshMax = 2048,
265
266
267 DriverLoaded = (1 << 5),
268 LWACT = (1 << 4),
269 PMEnable = (1 << 0),
270
271
272 PARMEnable = (1 << 6),
273 MagicPacket = (1 << 5),
274 LinkUp = (1 << 4),
275
276
277 LWPTN = (1 << 1),
278 LWPME = (1 << 4),
279
280
281 BWF = (1 << 6),
282 MWF = (1 << 5),
283 UWF = (1 << 4),
284 LANWake = (1 << 1),
285 PMEStatus = (1 << 0),
286
287 cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
288 cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
289 cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
290 };
291
292 static const unsigned int cp_rx_config =
293 (RX_FIFO_THRESH << RxCfgFIFOShift) |
294 (RX_DMA_BURST << RxCfgDMAShift);
295
296 struct cp_desc {
297 __le32 opts1;
298 __le32 opts2;
299 __le64 addr;
300 };
301
302 struct cp_dma_stats {
303 __le64 tx_ok;
304 __le64 rx_ok;
305 __le64 tx_err;
306 __le32 rx_err;
307 __le16 rx_fifo;
308 __le16 frame_align;
309 __le32 tx_ok_1col;
310 __le32 tx_ok_mcol;
311 __le64 rx_ok_phys;
312 __le64 rx_ok_bcast;
313 __le32 rx_ok_mcast;
314 __le16 tx_abort;
315 __le16 tx_underrun;
316 } __packed;
317
318 struct cp_extra_stats {
319 unsigned long rx_frags;
320 };
321
322 struct cp_private {
323 void __iomem *regs;
324 struct net_device *dev;
325 spinlock_t lock;
326 u32 msg_enable;
327
328 struct napi_struct napi;
329
330 struct pci_dev *pdev;
331 u32 rx_config;
332 u16 cpcmd;
333
334 struct cp_extra_stats cp_stats;
335
336 unsigned rx_head ____cacheline_aligned;
337 unsigned rx_tail;
338 struct cp_desc *rx_ring;
339 struct sk_buff *rx_skb[CP_RX_RING_SIZE];
340
341 unsigned tx_head ____cacheline_aligned;
342 unsigned tx_tail;
343 struct cp_desc *tx_ring;
344 struct sk_buff *tx_skb[CP_TX_RING_SIZE];
345 u32 tx_opts[CP_TX_RING_SIZE];
346
347 unsigned rx_buf_sz;
348 unsigned wol_enabled : 1;
349
350 dma_addr_t ring_dma;
351
352 struct mii_if_info mii_if;
353 };
354
355 #define cpr8(reg) readb(cp->regs + (reg))
356 #define cpr16(reg) readw(cp->regs + (reg))
357 #define cpr32(reg) readl(cp->regs + (reg))
358 #define cpw8(reg,val) writeb((val), cp->regs + (reg))
359 #define cpw16(reg,val) writew((val), cp->regs + (reg))
360 #define cpw32(reg,val) writel((val), cp->regs + (reg))
361 #define cpw8_f(reg,val) do { \
362 writeb((val), cp->regs + (reg)); \
363 readb(cp->regs + (reg)); \
364 } while (0)
365 #define cpw16_f(reg,val) do { \
366 writew((val), cp->regs + (reg)); \
367 readw(cp->regs + (reg)); \
368 } while (0)
369 #define cpw32_f(reg,val) do { \
370 writel((val), cp->regs + (reg)); \
371 readl(cp->regs + (reg)); \
372 } while (0)
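/*
 * The *_f ("flush") variants follow each MMIO write with a read of the same
 * register, forcing the posted write out to the chip before continuing.
 */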
373
374
375 static void __cp_set_rx_mode (struct net_device *dev);
376 static void cp_tx (struct cp_private *cp);
377 static void cp_clean_rings (struct cp_private *cp);
378 #ifdef CONFIG_NET_POLL_CONTROLLER
379 static void cp_poll_controller(struct net_device *dev);
380 #endif
381 static int cp_get_eeprom_len(struct net_device *dev);
382 static int cp_get_eeprom(struct net_device *dev,
383 struct ethtool_eeprom *eeprom, u8 *data);
384 static int cp_set_eeprom(struct net_device *dev,
385 struct ethtool_eeprom *eeprom, u8 *data);
386
387 static struct {
388 const char str[ETH_GSTRING_LEN];
389 } ethtool_stats_keys[] = {
390 { "tx_ok" },
391 { "rx_ok" },
392 { "tx_err" },
393 { "rx_err" },
394 { "rx_fifo" },
395 { "frame_align" },
396 { "tx_ok_1col" },
397 { "tx_ok_mcol" },
398 { "rx_ok_phys" },
399 { "rx_ok_bcast" },
400 { "rx_ok_mcast" },
401 { "tx_abort" },
402 { "tx_underrun" },
403 { "rx_frags" },
404 };
405
406
407 static inline void cp_set_rxbufsize (struct cp_private *cp)
408 {
409 unsigned int mtu = cp->dev->mtu;
410
411 if (mtu > ETH_DATA_LEN)
412
413 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
414 else
415 cp->rx_buf_sz = PKT_BUF_SZ;
416 }
417
418 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
419 struct cp_desc *desc)
420 {
421 u32 opts2 = le32_to_cpu(desc->opts2);
422
423 skb->protocol = eth_type_trans (skb, cp->dev);
424
425 cp->dev->stats.rx_packets++;
426 cp->dev->stats.rx_bytes += skb->len;
427
428 if (opts2 & RxVlanTagged)
429 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff));
430
431 napi_gro_receive(&cp->napi, skb);
432 }
433
434 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
435 u32 status, u32 len)
436 {
437 netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
438 rx_tail, status, len);
439 cp->dev->stats.rx_errors++;
440 if (status & RxErrFrame)
441 cp->dev->stats.rx_frame_errors++;
442 if (status & RxErrCRC)
443 cp->dev->stats.rx_crc_errors++;
444 if ((status & RxErrRunt) || (status & RxErrLong))
445 cp->dev->stats.rx_length_errors++;
446 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
447 cp->dev->stats.rx_length_errors++;
448 if (status & RxErrFIFO)
449 cp->dev->stats.rx_fifo_errors++;
450 }
451
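/*
 * The RX descriptor status carries a 2-bit protocol ID (PID1:PID0).  The
 * hardware checksum is trusted only when the frame is TCP or UDP and the
 * corresponding TCPFail/UDPFail bit is clear.
 */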
452 static inline unsigned int cp_rx_csum_ok (u32 status)
453 {
454 unsigned int protocol = (status >> 16) & 0x3;
455
456 if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
457 ((protocol == RxProtoUDP) && !(status & UDPFail)))
458 return 1;
459 else
460 return 0;
461 }
462
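/*
 * NAPI poll: acknowledge the RX interrupt sources, then walk the RX ring
 * until a descriptor still owned by the NIC is found or the budget is
 * reached.  Each consumed buffer is replaced with a freshly mapped skb and
 * the descriptor is re-armed with DescOwn (plus RingEnd on the last slot).
 * When the ring drains below budget, RX interrupts are re-enabled under
 * cp->lock.
 */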
463 static int cp_rx_poll(struct napi_struct *napi, int budget)
464 {
465 struct cp_private *cp = container_of(napi, struct cp_private, napi);
466 struct net_device *dev = cp->dev;
467 unsigned int rx_tail = cp->rx_tail;
468 int rx = 0;
469
470 cpw16(IntrStatus, cp_rx_intr_mask);
471
472 while (rx < budget) {
473 u32 status, len;
474 dma_addr_t mapping, new_mapping;
475 struct sk_buff *skb, *new_skb;
476 struct cp_desc *desc;
477 const unsigned buflen = cp->rx_buf_sz;
478
479 skb = cp->rx_skb[rx_tail];
480 BUG_ON(!skb);
481
482 desc = &cp->rx_ring[rx_tail];
483 status = le32_to_cpu(desc->opts1);
484 if (status & DescOwn)
485 break;
486
487 len = (status & 0x1fff) - 4;
488 mapping = le64_to_cpu(desc->addr);
489
490 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
491
492
493
494
495
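/*
 * Frames spanning more than one descriptor are not supported on receive;
 * account for them and drop.
 */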
496 cp_rx_err_acct(cp, rx_tail, status, len);
497 dev->stats.rx_dropped++;
498 cp->cp_stats.rx_frags++;
499 goto rx_next;
500 }
501
502 if (status & (RxError | RxErrFIFO)) {
503 cp_rx_err_acct(cp, rx_tail, status, len);
504 goto rx_next;
505 }
506
507 netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
508 rx_tail, status, len);
509
510 new_skb = napi_alloc_skb(napi, buflen);
511 if (!new_skb) {
512 dev->stats.rx_dropped++;
513 goto rx_next;
514 }
515
516 new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
517 PCI_DMA_FROMDEVICE);
518 if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
519 dev->stats.rx_dropped++;
520 kfree_skb(new_skb);
521 goto rx_next;
522 }
523
524 dma_unmap_single(&cp->pdev->dev, mapping,
525 buflen, PCI_DMA_FROMDEVICE);
526
527
528 if (cp_rx_csum_ok(status))
529 skb->ip_summed = CHECKSUM_UNNECESSARY;
530 else
531 skb_checksum_none_assert(skb);
532
533 skb_put(skb, len);
534
535 cp->rx_skb[rx_tail] = new_skb;
536
537 cp_rx_skb(cp, skb, desc);
538 rx++;
539 mapping = new_mapping;
540
541 rx_next:
542 cp->rx_ring[rx_tail].opts2 = 0;
543 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
544 if (rx_tail == (CP_RX_RING_SIZE - 1))
545 desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
546 cp->rx_buf_sz);
547 else
548 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
549 rx_tail = NEXT_RX(rx_tail);
550 }
551
552 cp->rx_tail = rx_tail;
553
554
555
556
557 if (rx < budget && napi_complete_done(napi, rx)) {
558 unsigned long flags;
559
560 spin_lock_irqsave(&cp->lock, flags);
561 cpw16_f(IntrMask, cp_intr_mask);
562 spin_unlock_irqrestore(&cp->lock, flags);
563 }
564
565 return rx;
566 }
567
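/*
 * Interrupt handler: non-RX sources are acknowledged and handled inline
 * under cp->lock (TX completion, link change, PCI error), while RX sources
 * are left pending, masked off, and handed to the NAPI poller.  A status of
 * 0 or 0xFFFF (all ones, typically a vanished device) is treated as not
 * ours.
 */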
568 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
569 {
570 struct net_device *dev = dev_instance;
571 struct cp_private *cp;
572 int handled = 0;
573 u16 status;
574 u16 mask;
575
576 if (unlikely(dev == NULL))
577 return IRQ_NONE;
578 cp = netdev_priv(dev);
579
580 spin_lock(&cp->lock);
581
582 mask = cpr16(IntrMask);
583 if (!mask)
584 goto out_unlock;
585
586 status = cpr16(IntrStatus);
587 if (!status || (status == 0xFFFF))
588 goto out_unlock;
589
590 handled = 1;
591
592 netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
593 status, cpr8(Cmd), cpr16(CpCmd));
594
595 cpw16(IntrStatus, status & ~cp_rx_intr_mask);
596
597
598 if (unlikely(!netif_running(dev))) {
599 cpw16(IntrMask, 0);
600 goto out_unlock;
601 }
602
603 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
604 if (napi_schedule_prep(&cp->napi)) {
605 cpw16_f(IntrMask, cp_norx_intr_mask);
606 __napi_schedule(&cp->napi);
607 }
608
609 if (status & (TxOK | TxErr | TxEmpty | SWInt))
610 cp_tx(cp);
611 if (status & LinkChg)
612 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
613
614
615 if (status & PciErr) {
616 u16 pci_status;
617
618 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
619 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
620 netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
621 status, pci_status);
622
623
624 }
625
626 out_unlock:
627 spin_unlock(&cp->lock);
628
629 return IRQ_RETVAL(handled);
630 }
631
632 #ifdef CONFIG_NET_POLL_CONTROLLER
633
634
635
636
637 static void cp_poll_controller(struct net_device *dev)
638 {
639 struct cp_private *cp = netdev_priv(dev);
640 const int irq = cp->pdev->irq;
641
642 disable_irq(irq);
643 cp_interrupt(irq, dev);
644 enable_irq(irq);
645 }
646 #endif
647
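/*
 * Reclaim completed TX descriptors: walk from tx_tail towards tx_head,
 * stopping at the first descriptor the NIC still owns.  Buffers are
 * unmapped using the length saved in tx_opts[], error and collision
 * statistics are updated on the last fragment of each packet, and the
 * queue is woken once enough slots are free for a maximally fragmented skb.
 */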
648 static void cp_tx (struct cp_private *cp)
649 {
650 unsigned tx_head = cp->tx_head;
651 unsigned tx_tail = cp->tx_tail;
652 unsigned bytes_compl = 0, pkts_compl = 0;
653
654 while (tx_tail != tx_head) {
655 struct cp_desc *txd = cp->tx_ring + tx_tail;
656 struct sk_buff *skb;
657 u32 status;
658
659 rmb();
660 status = le32_to_cpu(txd->opts1);
661 if (status & DescOwn)
662 break;
663
664 skb = cp->tx_skb[tx_tail];
665 BUG_ON(!skb);
666
667 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
668 cp->tx_opts[tx_tail] & 0xffff,
669 PCI_DMA_TODEVICE);
670
671 if (status & LastFrag) {
672 if (status & (TxError | TxFIFOUnder)) {
673 netif_dbg(cp, tx_err, cp->dev,
674 "tx err, status 0x%x\n", status);
675 cp->dev->stats.tx_errors++;
676 if (status & TxOWC)
677 cp->dev->stats.tx_window_errors++;
678 if (status & TxMaxCol)
679 cp->dev->stats.tx_aborted_errors++;
680 if (status & TxLinkFail)
681 cp->dev->stats.tx_carrier_errors++;
682 if (status & TxFIFOUnder)
683 cp->dev->stats.tx_fifo_errors++;
684 } else {
685 cp->dev->stats.collisions +=
686 ((status >> TxColCntShift) & TxColCntMask);
687 cp->dev->stats.tx_packets++;
688 cp->dev->stats.tx_bytes += skb->len;
689 netif_dbg(cp, tx_done, cp->dev,
690 "tx done, slot %d\n", tx_tail);
691 }
692 bytes_compl += skb->len;
693 pkts_compl++;
694 dev_consume_skb_irq(skb);
695 }
696
697 cp->tx_skb[tx_tail] = NULL;
698
699 tx_tail = NEXT_TX(tx_tail);
700 }
701
702 cp->tx_tail = tx_tail;
703
704 netdev_completed_queue(cp->dev, pkts_compl, bytes_compl);
705 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
706 netif_wake_queue(cp->dev);
707 }
708
709 static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
710 {
711 return skb_vlan_tag_present(skb) ?
712 TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00;
713 }
714
715 static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
716 int first, int entry_last)
717 {
718 int frag, index;
719 struct cp_desc *txd;
720 skb_frag_t *this_frag;
721 for (frag = 0; frag+first < entry_last; frag++) {
722 index = first+frag;
723 cp->tx_skb[index] = NULL;
724 txd = &cp->tx_ring[index];
725 this_frag = &skb_shinfo(skb)->frags[frag];
726 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
727 skb_frag_size(this_frag), PCI_DMA_TODEVICE);
728 }
729 }
730
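/*
 * Hard-start transmit.  opts1 carries DescOwn plus either the TSO MSS
 * (LargeSend) or per-protocol checksum-offload flags; opts2 carries an
 * optional 802.1Q tag.  Linear skbs use a single FirstFrag|LastFrag
 * descriptor; fragmented skbs use one descriptor per fragment.  The queue
 * is stopped when TX_BUFFS_AVAIL() drops to MAX_SKB_FRAGS + 1 or below.
 */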
731 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
732 struct net_device *dev)
733 {
734 struct cp_private *cp = netdev_priv(dev);
735 unsigned entry;
736 u32 eor, opts1;
737 unsigned long intr_flags;
738 __le32 opts2;
739 int mss = 0;
740
741 spin_lock_irqsave(&cp->lock, intr_flags);
742
743
744 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
745 netif_stop_queue(dev);
746 spin_unlock_irqrestore(&cp->lock, intr_flags);
747 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
748 return NETDEV_TX_BUSY;
749 }
750
751 entry = cp->tx_head;
752 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
753 mss = skb_shinfo(skb)->gso_size;
754
755 if (mss > MSSMask) {
756 netdev_WARN_ONCE(dev, "Net bug: GSO size %d too large for 8139CP\n",
757 mss);
758 goto out_dma_error;
759 }
760
761 opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
762 opts1 = DescOwn;
763 if (mss)
764 opts1 |= LargeSend | (mss << MSSShift);
765 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
766 const struct iphdr *ip = ip_hdr(skb);
767 if (ip->protocol == IPPROTO_TCP)
768 opts1 |= IPCS | TCPCS;
769 else if (ip->protocol == IPPROTO_UDP)
770 opts1 |= IPCS | UDPCS;
771 else {
772 WARN_ONCE(1,
773 "Net bug: asked to checksum invalid Legacy IP packet\n");
774 goto out_dma_error;
775 }
776 }
777
778 if (skb_shinfo(skb)->nr_frags == 0) {
779 struct cp_desc *txd = &cp->tx_ring[entry];
780 u32 len;
781 dma_addr_t mapping;
782
783 len = skb->len;
784 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
785 if (dma_mapping_error(&cp->pdev->dev, mapping))
786 goto out_dma_error;
787
788 txd->opts2 = opts2;
789 txd->addr = cpu_to_le64(mapping);
790 wmb();
791
792 opts1 |= eor | len | FirstFrag | LastFrag;
793
794 txd->opts1 = cpu_to_le32(opts1);
795 wmb();
796
797 cp->tx_skb[entry] = skb;
798 cp->tx_opts[entry] = opts1;
799 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
800 entry, skb->len);
801 } else {
802 struct cp_desc *txd;
803 u32 first_len, first_eor, ctrl;
804 dma_addr_t first_mapping;
805 int frag, first_entry = entry;
806
807
808
809
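/*
 * For a fragmented skb the head descriptor is filled in last, after the
 * fragment loop below: handing DescOwn on the first descriptor to the NIC
 * before the rest of the chain is set up could race with the hardware.
 */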
810 first_eor = eor;
811 first_len = skb_headlen(skb);
812 first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
813 first_len, PCI_DMA_TODEVICE);
814 if (dma_mapping_error(&cp->pdev->dev, first_mapping))
815 goto out_dma_error;
816
817 cp->tx_skb[entry] = skb;
818
819 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
820 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
821 u32 len;
822 dma_addr_t mapping;
823
824 entry = NEXT_TX(entry);
825
826 len = skb_frag_size(this_frag);
827 mapping = dma_map_single(&cp->pdev->dev,
828 skb_frag_address(this_frag),
829 len, PCI_DMA_TODEVICE);
830 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
831 unwind_tx_frag_mapping(cp, skb, first_entry, entry);
832 goto out_dma_error;
833 }
834
835 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
836
837 ctrl = opts1 | eor | len;
838
839 if (frag == skb_shinfo(skb)->nr_frags - 1)
840 ctrl |= LastFrag;
841
842 txd = &cp->tx_ring[entry];
843 txd->opts2 = opts2;
844 txd->addr = cpu_to_le64(mapping);
845 wmb();
846
847 txd->opts1 = cpu_to_le32(ctrl);
848 wmb();
849
850 cp->tx_opts[entry] = ctrl;
851 cp->tx_skb[entry] = skb;
852 }
853
854 txd = &cp->tx_ring[first_entry];
855 txd->opts2 = opts2;
856 txd->addr = cpu_to_le64(first_mapping);
857 wmb();
858
859 ctrl = opts1 | first_eor | first_len | FirstFrag;
860 txd->opts1 = cpu_to_le32(ctrl);
861 wmb();
862
863 cp->tx_opts[first_entry] = ctrl;
864 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
865 first_entry, entry, skb->len);
866 }
867 cp->tx_head = NEXT_TX(entry);
868
869 netdev_sent_queue(dev, skb->len);
870 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
871 netif_stop_queue(dev);
872
873 out_unlock:
874 spin_unlock_irqrestore(&cp->lock, intr_flags);
875
876 cpw8(TxPoll, NormalTxPoll);
877
878 return NETDEV_TX_OK;
879 out_dma_error:
880 dev_kfree_skb_any(skb);
881 cp->dev->stats.tx_dropped++;
882 goto out_unlock;
883 }
884
885
886
887
888 static void __cp_set_rx_mode (struct net_device *dev)
889 {
890 struct cp_private *cp = netdev_priv(dev);
891 u32 mc_filter[2];
892 int rx_mode;
893
894
895 if (dev->flags & IFF_PROMISC) {
896
897 rx_mode =
898 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
899 AcceptAllPhys;
900 mc_filter[1] = mc_filter[0] = 0xffffffff;
901 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
902 (dev->flags & IFF_ALLMULTI)) {
903
904 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
905 mc_filter[1] = mc_filter[0] = 0xffffffff;
906 } else {
907 struct netdev_hw_addr *ha;
908 rx_mode = AcceptBroadcast | AcceptMyPhys;
909 mc_filter[1] = mc_filter[0] = 0;
910 netdev_for_each_mc_addr(ha, dev) {
911 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
912
913 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
914 rx_mode |= AcceptMulticast;
915 }
916 }
917
918
919 cp->rx_config = cp_rx_config | rx_mode;
920 cpw32_f(RxConfig, cp->rx_config);
921
922 cpw32_f (MAR0 + 0, mc_filter[0]);
923 cpw32_f (MAR0 + 4, mc_filter[1]);
924 }
925
926 static void cp_set_rx_mode (struct net_device *dev)
927 {
928 unsigned long flags;
929 struct cp_private *cp = netdev_priv(dev);
930
931 spin_lock_irqsave (&cp->lock, flags);
932 __cp_set_rx_mode(dev);
933 spin_unlock_irqrestore (&cp->lock, flags);
934 }
935
936 static void __cp_get_stats(struct cp_private *cp)
937 {
938
939 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
940 cpw32 (RxMissed, 0);
941 }
942
943 static struct net_device_stats *cp_get_stats(struct net_device *dev)
944 {
945 struct cp_private *cp = netdev_priv(dev);
946 unsigned long flags;
947
948
949 spin_lock_irqsave(&cp->lock, flags);
950 if (netif_running(dev) && netif_device_present(dev))
951 __cp_get_stats(cp);
952 spin_unlock_irqrestore(&cp->lock, flags);
953
954 return &dev->stats;
955 }
956
957 static void cp_stop_hw (struct cp_private *cp)
958 {
959 cpw16(IntrStatus, ~(cpr16(IntrStatus)));
960 cpw16_f(IntrMask, 0);
961 cpw8(Cmd, 0);
962 cpw16_f(CpCmd, 0);
963 cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
964
965 cp->rx_tail = 0;
966 cp->tx_head = cp->tx_tail = 0;
967
968 netdev_reset_queue(cp->dev);
969 }
970
971 static void cp_reset_hw (struct cp_private *cp)
972 {
973 unsigned work = 1000;
974
975 cpw8(Cmd, CmdReset);
976
977 while (work--) {
978 if (!(cpr8(Cmd) & CmdReset))
979 return;
980
981 schedule_timeout_uninterruptible(10);
982 }
983
984 netdev_err(cp->dev, "hardware reset timeout\n");
985 }
986
987 static inline void cp_start_hw (struct cp_private *cp)
988 {
989 dma_addr_t ring_dma;
990
991 cpw16(CpCmd, cp->cpcmd);
992
993
994
995
996
997
998
999
1000
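/*
 * Program the descriptor ring base addresses.  Each 64-bit address is
 * written as two 32-bit halves; HiTxRingAddr is cleared since the
 * high-priority TX ring is left unused.  The TX ring sits immediately
 * after the RX ring in the single coherent allocation (see cp_alloc_rings()).
 */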
1001 cpw32_f(HiTxRingAddr, 0);
1002 cpw32_f(HiTxRingAddr + 4, 0);
1003
1004 ring_dma = cp->ring_dma;
1005 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1006 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1007
1008 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1009 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1010 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1011
1012
1013
1014
1015
1016
1017
1018 cpw8(Cmd, RxOn | TxOn);
1019
1020 netdev_reset_queue(cp->dev);
1021 }
1022
1023 static void cp_enable_irq(struct cp_private *cp)
1024 {
1025 cpw16_f(IntrMask, cp_intr_mask);
1026 }
1027
1028 static void cp_init_hw (struct cp_private *cp)
1029 {
1030 struct net_device *dev = cp->dev;
1031
1032 cp_reset_hw(cp);
1033
1034 cpw8_f (Cfg9346, Cfg9346_Unlock);
1035
1036
1037 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1038 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1039
1040 cp_start_hw(cp);
1041 cpw8(TxThresh, 0x06);
1042
1043 __cp_set_rx_mode(dev);
1044 cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
1045
1046 cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
1047
1048 cpw8(Config3, PARMEnable);
1049 cp->wol_enabled = 0;
1050
1051 cpw8(Config5, cpr8(Config5) & PMEStatus);
1052
1053 cpw16(MultiIntr, 0);
1054
1055 cpw8_f(Cfg9346, Cfg9346_Lock);
1056 }
1057
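/*
 * Allocate and map one skb per RX descriptor and hand every slot to the
 * NIC (DescOwn), marking the final slot with RingEnd.  On any allocation
 * or mapping failure the rings are cleaned up and -ENOMEM is returned.
 */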
1058 static int cp_refill_rx(struct cp_private *cp)
1059 {
1060 struct net_device *dev = cp->dev;
1061 unsigned i;
1062
1063 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1064 struct sk_buff *skb;
1065 dma_addr_t mapping;
1066
1067 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1068 if (!skb)
1069 goto err_out;
1070
1071 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1072 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1073 if (dma_mapping_error(&cp->pdev->dev, mapping)) {
1074 kfree_skb(skb);
1075 goto err_out;
1076 }
1077 cp->rx_skb[i] = skb;
1078
1079 cp->rx_ring[i].opts2 = 0;
1080 cp->rx_ring[i].addr = cpu_to_le64(mapping);
1081 if (i == (CP_RX_RING_SIZE - 1))
1082 cp->rx_ring[i].opts1 =
1083 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1084 else
1085 cp->rx_ring[i].opts1 =
1086 cpu_to_le32(DescOwn | cp->rx_buf_sz);
1087 }
1088
1089 return 0;
1090
1091 err_out:
1092 cp_clean_rings(cp);
1093 return -ENOMEM;
1094 }
1095
1096 static void cp_init_rings_index (struct cp_private *cp)
1097 {
1098 cp->rx_tail = 0;
1099 cp->tx_head = cp->tx_tail = 0;
1100 }
1101
1102 static int cp_init_rings (struct cp_private *cp)
1103 {
1104 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1105 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1106 memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1107
1108 cp_init_rings_index(cp);
1109
1110 return cp_refill_rx (cp);
1111 }
1112
1113 static int cp_alloc_rings (struct cp_private *cp)
1114 {
1115 struct device *d = &cp->pdev->dev;
1116 void *mem;
1117 int rc;
1118
1119 mem = dma_alloc_coherent(d, CP_RING_BYTES, &cp->ring_dma, GFP_KERNEL);
1120 if (!mem)
1121 return -ENOMEM;
1122
1123 cp->rx_ring = mem;
1124 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1125
1126 rc = cp_init_rings(cp);
1127 if (rc < 0)
1128 dma_free_coherent(d, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
1129
1130 return rc;
1131 }
1132
1133 static void cp_clean_rings (struct cp_private *cp)
1134 {
1135 struct cp_desc *desc;
1136 unsigned i;
1137
1138 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1139 if (cp->rx_skb[i]) {
1140 desc = cp->rx_ring + i;
1141 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1142 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1143 dev_kfree_skb_any(cp->rx_skb[i]);
1144 }
1145 }
1146
1147 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1148 if (cp->tx_skb[i]) {
1149 struct sk_buff *skb = cp->tx_skb[i];
1150
1151 desc = cp->tx_ring + i;
1152 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1153 le32_to_cpu(desc->opts1) & 0xffff,
1154 PCI_DMA_TODEVICE);
1155 if (le32_to_cpu(desc->opts1) & LastFrag)
1156 dev_kfree_skb_any(skb);
1157 cp->dev->stats.tx_dropped++;
1158 }
1159 }
1160 netdev_reset_queue(cp->dev);
1161
1162 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1163 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1164 memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
1165
1166 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1167 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1168 }
1169
1170 static void cp_free_rings (struct cp_private *cp)
1171 {
1172 cp_clean_rings(cp);
1173 dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1174 cp->ring_dma);
1175 cp->rx_ring = NULL;
1176 cp->tx_ring = NULL;
1177 }
1178
1179 static int cp_open (struct net_device *dev)
1180 {
1181 struct cp_private *cp = netdev_priv(dev);
1182 const int irq = cp->pdev->irq;
1183 int rc;
1184
1185 netif_dbg(cp, ifup, dev, "enabling interface\n");
1186
1187 rc = cp_alloc_rings(cp);
1188 if (rc)
1189 return rc;
1190
1191 napi_enable(&cp->napi);
1192
1193 cp_init_hw(cp);
1194
1195 rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1196 if (rc)
1197 goto err_out_hw;
1198
1199 cp_enable_irq(cp);
1200
1201 netif_carrier_off(dev);
1202 mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1203 netif_start_queue(dev);
1204
1205 return 0;
1206
1207 err_out_hw:
1208 napi_disable(&cp->napi);
1209 cp_stop_hw(cp);
1210 cp_free_rings(cp);
1211 return rc;
1212 }
1213
1214 static int cp_close (struct net_device *dev)
1215 {
1216 struct cp_private *cp = netdev_priv(dev);
1217 unsigned long flags;
1218
1219 napi_disable(&cp->napi);
1220
1221 netif_dbg(cp, ifdown, dev, "disabling interface\n");
1222
1223 spin_lock_irqsave(&cp->lock, flags);
1224
1225 netif_stop_queue(dev);
1226 netif_carrier_off(dev);
1227
1228 cp_stop_hw(cp);
1229
1230 spin_unlock_irqrestore(&cp->lock, flags);
1231
1232 free_irq(cp->pdev->irq, dev);
1233
1234 cp_free_rings(cp);
1235 return 0;
1236 }
1237
1238 static void cp_tx_timeout(struct net_device *dev)
1239 {
1240 struct cp_private *cp = netdev_priv(dev);
1241 unsigned long flags;
1242 int rc, i;
1243
1244 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1245 cpr8(Cmd), cpr16(CpCmd),
1246 cpr16(IntrStatus), cpr16(IntrMask));
1247
1248 spin_lock_irqsave(&cp->lock, flags);
1249
1250 netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
1251 cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
1252 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1253 netif_dbg(cp, tx_err, cp->dev,
1254 "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
1255 i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
1256 cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
1257 le64_to_cpu(cp->tx_ring[i].addr),
1258 cp->tx_skb[i]);
1259 }
1260
1261 cp_stop_hw(cp);
1262 cp_clean_rings(cp);
1263 rc = cp_init_rings(cp);
1264 cp_start_hw(cp);
1265 __cp_set_rx_mode(dev);
1266 cpw16_f(IntrMask, cp_norx_intr_mask);
1267
1268 netif_wake_queue(dev);
1269 napi_schedule_irqoff(&cp->napi);
1270
1271 spin_unlock_irqrestore(&cp->lock, flags);
1272 }
1273
1274 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1275 {
1276 struct cp_private *cp = netdev_priv(dev);
1277
1278
1279 if (!netif_running(dev)) {
1280 dev->mtu = new_mtu;
1281 cp_set_rxbufsize(cp);
1282 return 0;
1283 }
1284
1285
1286 cp_close(dev);
1287 dev->mtu = new_mtu;
1288 cp_set_rxbufsize(cp);
1289 return cp_open(dev);
1290 }
1291
1292 static const char mii_2_8139_map[8] = {
1293 BasicModeCtrl,
1294 BasicModeStatus,
1295 0,
1296 0,
1297 NWayAdvert,
1298 NWayLPAR,
1299 NWayExpansion,
1300 0
1301 };
1302
1303 static int mdio_read(struct net_device *dev, int phy_id, int location)
1304 {
1305 struct cp_private *cp = netdev_priv(dev);
1306
1307 return location < 8 && mii_2_8139_map[location] ?
1308 readw(cp->regs + mii_2_8139_map[location]) : 0;
1309 }
1310
1311
1312 static void mdio_write(struct net_device *dev, int phy_id, int location,
1313 int value)
1314 {
1315 struct cp_private *cp = netdev_priv(dev);
1316
1317 if (location == 0) {
1318 cpw8(Cfg9346, Cfg9346_Unlock);
1319 cpw16(BasicModeCtrl, value);
1320 cpw8(Cfg9346, Cfg9346_Lock);
1321 } else if (location < 8 && mii_2_8139_map[location])
1322 cpw16(mii_2_8139_map[location], value);
1323 }
1324
1325
1326 static int netdev_set_wol (struct cp_private *cp,
1327 const struct ethtool_wolinfo *wol)
1328 {
1329 u8 options;
1330
1331 options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1332
1333 if (wol->wolopts) {
1334 if (wol->wolopts & WAKE_PHY) options |= LinkUp;
1335 if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
1336 }
1337
1338 cpw8 (Cfg9346, Cfg9346_Unlock);
1339 cpw8 (Config3, options);
1340 cpw8 (Cfg9346, Cfg9346_Lock);
1341
1342 options = 0;
1343 options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1344
1345 if (wol->wolopts) {
1346 if (wol->wolopts & WAKE_UCAST) options |= UWF;
1347 if (wol->wolopts & WAKE_BCAST) options |= BWF;
1348 if (wol->wolopts & WAKE_MCAST) options |= MWF;
1349 }
1350
1351 cpw8 (Config5, options);
1352
1353 cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1354
1355 return 0;
1356 }
1357
1358
1359 static void netdev_get_wol (struct cp_private *cp,
1360 struct ethtool_wolinfo *wol)
1361 {
1362 u8 options;
1363
1364 wol->wolopts = 0;
1365 wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
1366 WAKE_MCAST | WAKE_UCAST;
1367
1368 if (!cp->wol_enabled) return;
1369
1370 options = cpr8 (Config3);
1371 if (options & LinkUp) wol->wolopts |= WAKE_PHY;
1372 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
1373
1374 options = 0;
1375 options = cpr8 (Config5);
1376 if (options & UWF) wol->wolopts |= WAKE_UCAST;
1377 if (options & BWF) wol->wolopts |= WAKE_BCAST;
1378 if (options & MWF) wol->wolopts |= WAKE_MCAST;
1379 }
1380
1381 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1382 {
1383 struct cp_private *cp = netdev_priv(dev);
1384
1385 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1386 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1387 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
1388 }
1389
1390 static void cp_get_ringparam(struct net_device *dev,
1391 struct ethtool_ringparam *ring)
1392 {
1393 ring->rx_max_pending = CP_RX_RING_SIZE;
1394 ring->tx_max_pending = CP_TX_RING_SIZE;
1395 ring->rx_pending = CP_RX_RING_SIZE;
1396 ring->tx_pending = CP_TX_RING_SIZE;
1397 }
1398
1399 static int cp_get_regs_len(struct net_device *dev)
1400 {
1401 return CP_REGS_SIZE;
1402 }
1403
1404 static int cp_get_sset_count (struct net_device *dev, int sset)
1405 {
1406 switch (sset) {
1407 case ETH_SS_STATS:
1408 return CP_NUM_STATS;
1409 default:
1410 return -EOPNOTSUPP;
1411 }
1412 }
1413
1414 static int cp_get_link_ksettings(struct net_device *dev,
1415 struct ethtool_link_ksettings *cmd)
1416 {
1417 struct cp_private *cp = netdev_priv(dev);
1418 unsigned long flags;
1419
1420 spin_lock_irqsave(&cp->lock, flags);
1421 mii_ethtool_get_link_ksettings(&cp->mii_if, cmd);
1422 spin_unlock_irqrestore(&cp->lock, flags);
1423
1424 return 0;
1425 }
1426
1427 static int cp_set_link_ksettings(struct net_device *dev,
1428 const struct ethtool_link_ksettings *cmd)
1429 {
1430 struct cp_private *cp = netdev_priv(dev);
1431 int rc;
1432 unsigned long flags;
1433
1434 spin_lock_irqsave(&cp->lock, flags);
1435 rc = mii_ethtool_set_link_ksettings(&cp->mii_if, cmd);
1436 spin_unlock_irqrestore(&cp->lock, flags);
1437
1438 return rc;
1439 }
1440
1441 static int cp_nway_reset(struct net_device *dev)
1442 {
1443 struct cp_private *cp = netdev_priv(dev);
1444 return mii_nway_restart(&cp->mii_if);
1445 }
1446
1447 static u32 cp_get_msglevel(struct net_device *dev)
1448 {
1449 struct cp_private *cp = netdev_priv(dev);
1450 return cp->msg_enable;
1451 }
1452
1453 static void cp_set_msglevel(struct net_device *dev, u32 value)
1454 {
1455 struct cp_private *cp = netdev_priv(dev);
1456 cp->msg_enable = value;
1457 }
1458
1459 static int cp_set_features(struct net_device *dev, netdev_features_t features)
1460 {
1461 struct cp_private *cp = netdev_priv(dev);
1462 unsigned long flags;
1463
1464 if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1465 return 0;
1466
1467 spin_lock_irqsave(&cp->lock, flags);
1468
1469 if (features & NETIF_F_RXCSUM)
1470 cp->cpcmd |= RxChkSum;
1471 else
1472 cp->cpcmd &= ~RxChkSum;
1473
1474 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1475 cp->cpcmd |= RxVlanOn;
1476 else
1477 cp->cpcmd &= ~RxVlanOn;
1478
1479 cpw16_f(CpCmd, cp->cpcmd);
1480 spin_unlock_irqrestore(&cp->lock, flags);
1481
1482 return 0;
1483 }
1484
1485 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1486 void *p)
1487 {
1488 struct cp_private *cp = netdev_priv(dev);
1489 unsigned long flags;
1490
1491 if (regs->len < CP_REGS_SIZE)
1492 return;
1493
1494 regs->version = CP_REGS_VER;
1495
1496 spin_lock_irqsave(&cp->lock, flags);
1497 memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1498 spin_unlock_irqrestore(&cp->lock, flags);
1499 }
1500
1501 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1502 {
1503 struct cp_private *cp = netdev_priv(dev);
1504 unsigned long flags;
1505
1506 spin_lock_irqsave (&cp->lock, flags);
1507 netdev_get_wol (cp, wol);
1508 spin_unlock_irqrestore (&cp->lock, flags);
1509 }
1510
1511 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1512 {
1513 struct cp_private *cp = netdev_priv(dev);
1514 unsigned long flags;
1515 int rc;
1516
1517 spin_lock_irqsave (&cp->lock, flags);
1518 rc = netdev_set_wol (cp, wol);
1519 spin_unlock_irqrestore (&cp->lock, flags);
1520
1521 return rc;
1522 }
1523
1524 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1525 {
1526 switch (stringset) {
1527 case ETH_SS_STATS:
1528 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1529 break;
1530 default:
1531 BUG();
1532 break;
1533 }
1534 }
1535
1536 static void cp_get_ethtool_stats (struct net_device *dev,
1537 struct ethtool_stats *estats, u64 *tmp_stats)
1538 {
1539 struct cp_private *cp = netdev_priv(dev);
1540 struct cp_dma_stats *nic_stats;
1541 dma_addr_t dma;
1542 int i;
1543
1544 nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1545 &dma, GFP_KERNEL);
1546 if (!nic_stats)
1547 return;
1548
1549
1550 cpw32(StatsAddr + 4, (u64)dma >> 32);
1551 cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1552 cpr32(StatsAddr);
1553
1554 for (i = 0; i < 1000; i++) {
1555 if ((cpr32(StatsAddr) & DumpStats) == 0)
1556 break;
1557 udelay(10);
1558 }
1559 cpw32(StatsAddr, 0);
1560 cpw32(StatsAddr + 4, 0);
1561 cpr32(StatsAddr);
1562
1563 i = 0;
1564 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1565 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1566 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1567 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1568 tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1569 tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1570 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1571 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1572 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1573 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1574 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1575 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1576 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1577 tmp_stats[i++] = cp->cp_stats.rx_frags;
1578 BUG_ON(i != CP_NUM_STATS);
1579
1580 dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1581 }
1582
1583 static const struct ethtool_ops cp_ethtool_ops = {
1584 .get_drvinfo = cp_get_drvinfo,
1585 .get_regs_len = cp_get_regs_len,
1586 .get_sset_count = cp_get_sset_count,
1587 .nway_reset = cp_nway_reset,
1588 .get_link = ethtool_op_get_link,
1589 .get_msglevel = cp_get_msglevel,
1590 .set_msglevel = cp_set_msglevel,
1591 .get_regs = cp_get_regs,
1592 .get_wol = cp_get_wol,
1593 .set_wol = cp_set_wol,
1594 .get_strings = cp_get_strings,
1595 .get_ethtool_stats = cp_get_ethtool_stats,
1596 .get_eeprom_len = cp_get_eeprom_len,
1597 .get_eeprom = cp_get_eeprom,
1598 .set_eeprom = cp_set_eeprom,
1599 .get_ringparam = cp_get_ringparam,
1600 .get_link_ksettings = cp_get_link_ksettings,
1601 .set_link_ksettings = cp_set_link_ksettings,
1602 };
1603
1604 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1605 {
1606 struct cp_private *cp = netdev_priv(dev);
1607 int rc;
1608 unsigned long flags;
1609
1610 if (!netif_running(dev))
1611 return -EINVAL;
1612
1613 spin_lock_irqsave(&cp->lock, flags);
1614 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1615 spin_unlock_irqrestore(&cp->lock, flags);
1616 return rc;
1617 }
1618
1619 static int cp_set_mac_address(struct net_device *dev, void *p)
1620 {
1621 struct cp_private *cp = netdev_priv(dev);
1622 struct sockaddr *addr = p;
1623
1624 if (!is_valid_ether_addr(addr->sa_data))
1625 return -EADDRNOTAVAIL;
1626
1627 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1628
1629 spin_lock_irq(&cp->lock);
1630
1631 cpw8_f(Cfg9346, Cfg9346_Unlock);
1632 cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1633 cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1634 cpw8_f(Cfg9346, Cfg9346_Lock);
1635
1636 spin_unlock_irq(&cp->lock);
1637
1638 return 0;
1639 }
1640
1641
1642
1643
1644 #define EE_SHIFT_CLK 0x04
1645 #define EE_CS 0x08
1646 #define EE_DATA_WRITE 0x02
1647 #define EE_WRITE_0 0x00
1648 #define EE_WRITE_1 0x02
1649 #define EE_DATA_READ 0x01
1650 #define EE_ENB (0x80 | EE_CS)
1651
1652
1653
1654
1655
1656 #define eeprom_delay() readb(ee_addr)
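/*
 * Serial EEPROM access is bit-banged through the Cfg9346 register: EE_CS,
 * EE_SHIFT_CLK and EE_DATA_WRITE/EE_DATA_READ map to the chip-select,
 * clock and data pins of the 93C46/93C56-style part.  eeprom_delay() is a
 * dummy read that paces the bit-banging by flushing the previous write.
 */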
1657
1658
1659 #define EE_EXTEND_CMD (4)
1660 #define EE_WRITE_CMD (5)
1661 #define EE_READ_CMD (6)
1662 #define EE_ERASE_CMD (7)
1663
1664 #define EE_EWDS_ADDR (0)
1665 #define EE_WRAL_ADDR (1)
1666 #define EE_ERAL_ADDR (2)
1667 #define EE_EWEN_ADDR (3)
1668
1669 #define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1670
1671 static void eeprom_cmd_start(void __iomem *ee_addr)
1672 {
1673 writeb (EE_ENB & ~EE_CS, ee_addr);
1674 writeb (EE_ENB, ee_addr);
1675 eeprom_delay ();
1676 }
1677
1678 static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1679 {
1680 int i;
1681
1682
1683 for (i = cmd_len - 1; i >= 0; i--) {
1684 int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1685 writeb (EE_ENB | dataval, ee_addr);
1686 eeprom_delay ();
1687 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1688 eeprom_delay ();
1689 }
1690 writeb (EE_ENB, ee_addr);
1691 eeprom_delay ();
1692 }
1693
1694 static void eeprom_cmd_end(void __iomem *ee_addr)
1695 {
1696 writeb(0, ee_addr);
1697 eeprom_delay ();
1698 }
1699
1700 static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1701 int addr_len)
1702 {
1703 int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1704
1705 eeprom_cmd_start(ee_addr);
1706 eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1707 eeprom_cmd_end(ee_addr);
1708 }
1709
1710 static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1711 {
1712 int i;
1713 u16 retval = 0;
1714 void __iomem *ee_addr = ioaddr + Cfg9346;
1715 int read_cmd = location | (EE_READ_CMD << addr_len);
1716
1717 eeprom_cmd_start(ee_addr);
1718 eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1719
1720 for (i = 16; i > 0; i--) {
1721 writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1722 eeprom_delay ();
1723 retval =
1724 (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1725 0);
1726 writeb (EE_ENB, ee_addr);
1727 eeprom_delay ();
1728 }
1729
1730 eeprom_cmd_end(ee_addr);
1731
1732 return retval;
1733 }
1734
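/*
 * An EEPROM write is bracketed by the EWEN (write enable) and EWDS (write
 * disable) extended commands; after clocking out the address and data, the
 * data-out pin is polled until the part signals completion or the
 * 20000-iteration limit expires.
 */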
1735 static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1736 int addr_len)
1737 {
1738 int i;
1739 void __iomem *ee_addr = ioaddr + Cfg9346;
1740 int write_cmd = location | (EE_WRITE_CMD << addr_len);
1741
1742 eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1743
1744 eeprom_cmd_start(ee_addr);
1745 eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1746 eeprom_cmd(ee_addr, val, 16);
1747 eeprom_cmd_end(ee_addr);
1748
1749 eeprom_cmd_start(ee_addr);
1750 for (i = 0; i < 20000; i++)
1751 if (readb(ee_addr) & EE_DATA_READ)
1752 break;
1753 eeprom_cmd_end(ee_addr);
1754
1755 eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1756 }
1757
1758 static int cp_get_eeprom_len(struct net_device *dev)
1759 {
1760 struct cp_private *cp = netdev_priv(dev);
1761 int size;
1762
1763 spin_lock_irq(&cp->lock);
1764 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1765 spin_unlock_irq(&cp->lock);
1766
1767 return size;
1768 }
1769
1770 static int cp_get_eeprom(struct net_device *dev,
1771 struct ethtool_eeprom *eeprom, u8 *data)
1772 {
1773 struct cp_private *cp = netdev_priv(dev);
1774 unsigned int addr_len;
1775 u16 val;
1776 u32 offset = eeprom->offset >> 1;
1777 u32 len = eeprom->len;
1778 u32 i = 0;
1779
1780 eeprom->magic = CP_EEPROM_MAGIC;
1781
1782 spin_lock_irq(&cp->lock);
1783
1784 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1785
1786 if (eeprom->offset & 1) {
1787 val = read_eeprom(cp->regs, offset, addr_len);
1788 data[i++] = (u8)(val >> 8);
1789 offset++;
1790 }
1791
1792 while (i < len - 1) {
1793 val = read_eeprom(cp->regs, offset, addr_len);
1794 data[i++] = (u8)val;
1795 data[i++] = (u8)(val >> 8);
1796 offset++;
1797 }
1798
1799 if (i < len) {
1800 val = read_eeprom(cp->regs, offset, addr_len);
1801 data[i] = (u8)val;
1802 }
1803
1804 spin_unlock_irq(&cp->lock);
1805 return 0;
1806 }
1807
1808 static int cp_set_eeprom(struct net_device *dev,
1809 struct ethtool_eeprom *eeprom, u8 *data)
1810 {
1811 struct cp_private *cp = netdev_priv(dev);
1812 unsigned int addr_len;
1813 u16 val;
1814 u32 offset = eeprom->offset >> 1;
1815 u32 len = eeprom->len;
1816 u32 i = 0;
1817
1818 if (eeprom->magic != CP_EEPROM_MAGIC)
1819 return -EINVAL;
1820
1821 spin_lock_irq(&cp->lock);
1822
1823 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1824
1825 if (eeprom->offset & 1) {
1826 val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1827 val |= (u16)data[i++] << 8;
1828 write_eeprom(cp->regs, offset, val, addr_len);
1829 offset++;
1830 }
1831
1832 while (i < len - 1) {
1833 val = (u16)data[i++];
1834 val |= (u16)data[i++] << 8;
1835 write_eeprom(cp->regs, offset, val, addr_len);
1836 offset++;
1837 }
1838
1839 if (i < len) {
1840 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1841 val |= (u16)data[i];
1842 write_eeprom(cp->regs, offset, val, addr_len);
1843 }
1844
1845 spin_unlock_irq(&cp->lock);
1846 return 0;
1847 }
1848
1849
1850 static void cp_set_d3_state (struct cp_private *cp)
1851 {
1852 pci_enable_wake(cp->pdev, PCI_D0, 1);
1853 pci_set_power_state (cp->pdev, PCI_D3hot);
1854 }
1855
1856 static netdev_features_t cp_features_check(struct sk_buff *skb,
1857 struct net_device *dev,
1858 netdev_features_t features)
1859 {
1860 if (skb_shinfo(skb)->gso_size > MSSMask)
1861 features &= ~NETIF_F_TSO;
1862
1863 return vlan_features_check(skb, features);
1864 }
1865 static const struct net_device_ops cp_netdev_ops = {
1866 .ndo_open = cp_open,
1867 .ndo_stop = cp_close,
1868 .ndo_validate_addr = eth_validate_addr,
1869 .ndo_set_mac_address = cp_set_mac_address,
1870 .ndo_set_rx_mode = cp_set_rx_mode,
1871 .ndo_get_stats = cp_get_stats,
1872 .ndo_do_ioctl = cp_ioctl,
1873 .ndo_start_xmit = cp_start_xmit,
1874 .ndo_tx_timeout = cp_tx_timeout,
1875 .ndo_set_features = cp_set_features,
1876 .ndo_change_mtu = cp_change_mtu,
1877 .ndo_features_check = cp_features_check,
1878
1879 #ifdef CONFIG_NET_POLL_CONTROLLER
1880 .ndo_poll_controller = cp_poll_controller,
1881 #endif
1882 };
1883
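/*
 * PCI probe: reject pre-C+ RTL8139 revisions, allocate the netdev, enable
 * the device, MWI and the BAR 1 MMIO region, pick a 64- or 32-bit DMA mask,
 * map the registers, read the MAC address from EEPROM words 7-9, register
 * the netdev and finally enable bus mastering.
 */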
1884 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1885 {
1886 struct net_device *dev;
1887 struct cp_private *cp;
1888 int rc;
1889 void __iomem *regs;
1890 resource_size_t pciaddr;
1891 unsigned int addr_len, i, pci_using_dac;
1892
1893 pr_info_once("%s", version);
1894
1895 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1896 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1897 dev_info(&pdev->dev,
1898 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1899 pdev->vendor, pdev->device, pdev->revision);
1900 return -ENODEV;
1901 }
1902
1903 dev = alloc_etherdev(sizeof(struct cp_private));
1904 if (!dev)
1905 return -ENOMEM;
1906 SET_NETDEV_DEV(dev, &pdev->dev);
1907
1908 cp = netdev_priv(dev);
1909 cp->pdev = pdev;
1910 cp->dev = dev;
1911 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1912 spin_lock_init (&cp->lock);
1913 cp->mii_if.dev = dev;
1914 cp->mii_if.mdio_read = mdio_read;
1915 cp->mii_if.mdio_write = mdio_write;
1916 cp->mii_if.phy_id = CP_INTERNAL_PHY;
1917 cp->mii_if.phy_id_mask = 0x1f;
1918 cp->mii_if.reg_num_mask = 0x1f;
1919 cp_set_rxbufsize(cp);
1920
1921 rc = pci_enable_device(pdev);
1922 if (rc)
1923 goto err_out_free;
1924
1925 rc = pci_set_mwi(pdev);
1926 if (rc)
1927 goto err_out_disable;
1928
1929 rc = pci_request_regions(pdev, DRV_NAME);
1930 if (rc)
1931 goto err_out_mwi;
1932
1933 pciaddr = pci_resource_start(pdev, 1);
1934 if (!pciaddr) {
1935 rc = -EIO;
1936 dev_err(&pdev->dev, "no MMIO resource\n");
1937 goto err_out_res;
1938 }
1939 if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1940 rc = -EIO;
1941 dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1942 (unsigned long long)pci_resource_len(pdev, 1));
1943 goto err_out_res;
1944 }
1945
1946
1947 if ((sizeof(dma_addr_t) > 4) &&
1948 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1949 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1950 pci_using_dac = 1;
1951 } else {
1952 pci_using_dac = 0;
1953
1954 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1955 if (rc) {
1956 dev_err(&pdev->dev,
1957 "No usable DMA configuration, aborting\n");
1958 goto err_out_res;
1959 }
1960 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1961 if (rc) {
1962 dev_err(&pdev->dev,
1963 "No usable consistent DMA configuration, aborting\n");
1964 goto err_out_res;
1965 }
1966 }
1967
1968 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1969 PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1970
1971 dev->features |= NETIF_F_RXCSUM;
1972 dev->hw_features |= NETIF_F_RXCSUM;
1973
1974 regs = ioremap(pciaddr, CP_REGS_SIZE);
1975 if (!regs) {
1976 rc = -EIO;
1977 dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1978 (unsigned long long)pci_resource_len(pdev, 1),
1979 (unsigned long long)pciaddr);
1980 goto err_out_res;
1981 }
1982 cp->regs = regs;
1983
1984 cp_stop_hw(cp);
1985
1986
1987 addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1988 for (i = 0; i < 3; i++)
1989 ((__le16 *) (dev->dev_addr))[i] =
1990 cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1991
1992 dev->netdev_ops = &cp_netdev_ops;
1993 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1994 dev->ethtool_ops = &cp_ethtool_ops;
1995 dev->watchdog_timeo = TX_TIMEOUT;
1996
1997 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1998 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
1999
2000 if (pci_using_dac)
2001 dev->features |= NETIF_F_HIGHDMA;
2002
2003 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2004 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2005 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
2006 NETIF_F_HIGHDMA;
2007
2008
2009 dev->min_mtu = CP_MIN_MTU;
2010 dev->max_mtu = CP_MAX_MTU;
2011
2012 rc = register_netdev(dev);
2013 if (rc)
2014 goto err_out_iomap;
2015
2016 netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
2017 regs, dev->dev_addr, pdev->irq);
2018
2019 pci_set_drvdata(pdev, dev);
2020
2021
2022 pci_set_master(pdev);
2023
2024 if (cp->wol_enabled)
2025 cp_set_d3_state (cp);
2026
2027 return 0;
2028
2029 err_out_iomap:
2030 iounmap(regs);
2031 err_out_res:
2032 pci_release_regions(pdev);
2033 err_out_mwi:
2034 pci_clear_mwi(pdev);
2035 err_out_disable:
2036 pci_disable_device(pdev);
2037 err_out_free:
2038 free_netdev(dev);
2039 return rc;
2040 }
2041
2042 static void cp_remove_one (struct pci_dev *pdev)
2043 {
2044 struct net_device *dev = pci_get_drvdata(pdev);
2045 struct cp_private *cp = netdev_priv(dev);
2046
2047 unregister_netdev(dev);
2048 iounmap(cp->regs);
2049 if (cp->wol_enabled)
2050 pci_set_power_state (pdev, PCI_D0);
2051 pci_release_regions(pdev);
2052 pci_clear_mwi(pdev);
2053 pci_disable_device(pdev);
2054 free_netdev(dev);
2055 }
2056
2057 #ifdef CONFIG_PM
2058 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
2059 {
2060 struct net_device *dev = pci_get_drvdata(pdev);
2061 struct cp_private *cp = netdev_priv(dev);
2062 unsigned long flags;
2063
2064 if (!netif_running(dev))
2065 return 0;
2066
2067 netif_device_detach (dev);
2068 netif_stop_queue (dev);
2069
2070 spin_lock_irqsave (&cp->lock, flags);
2071
2072
2073 cpw16 (IntrMask, 0);
2074 cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn)); /* disable Rx and Tx */
2075
2076 spin_unlock_irqrestore (&cp->lock, flags);
2077
2078 pci_save_state(pdev);
2079 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2080 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2081
2082 return 0;
2083 }
2084
2085 static int cp_resume (struct pci_dev *pdev)
2086 {
2087 struct net_device *dev = pci_get_drvdata (pdev);
2088 struct cp_private *cp = netdev_priv(dev);
2089 unsigned long flags;
2090
2091 if (!netif_running(dev))
2092 return 0;
2093
2094 netif_device_attach (dev);
2095
2096 pci_set_power_state(pdev, PCI_D0);
2097 pci_restore_state(pdev);
2098 pci_enable_wake(pdev, PCI_D0, 0);
2099
2100
2101 cp_init_rings_index (cp);
2102 cp_init_hw (cp);
2103 cp_enable_irq(cp);
2104 netif_start_queue (dev);
2105
2106 spin_lock_irqsave (&cp->lock, flags);
2107
2108 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2109
2110 spin_unlock_irqrestore (&cp->lock, flags);
2111
2112 return 0;
2113 }
2114 #endif
2115
2116 static const struct pci_device_id cp_pci_tbl[] = {
2117 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
2118 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
2119 { },
2120 };
2121 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
2122
2123 static struct pci_driver cp_driver = {
2124 .name = DRV_NAME,
2125 .id_table = cp_pci_tbl,
2126 .probe = cp_init_one,
2127 .remove = cp_remove_one,
2128 #ifdef CONFIG_PM
2129 .resume = cp_resume,
2130 .suspend = cp_suspend,
2131 #endif
2132 };
2133
2134 module_pci_driver(cp_driver);