This source file includes the following definitions:
- dnet_readw_mac
- dnet_writew_mac
- __dnet_set_hwaddr
- dnet_get_hwaddr
- dnet_mdio_read
- dnet_mdio_write
- dnet_handle_link_change
- dnet_mii_probe
- dnet_mii_init
- dnet_phy_marvell_fixup
- dnet_update_stats
- dnet_poll
- dnet_interrupt
- dnet_print_skb
- dnet_start_xmit
- dnet_reset_hw
- dnet_init_hw
- dnet_open
- dnet_close
- dnet_print_pretty_hwstats
- dnet_get_stats
- dnet_ioctl
- dnet_get_drvinfo
- dnet_probe
- dnet_remove
/*
 * Dave DNET Ethernet Controller driver
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/phy.h>

#include "dnet.h"

#undef DEBUG

/* function for reading the internal MAC register */
static u16 dnet_readw_mac(struct dnet *bp, u16 reg)
{
	u16 data_read;

	/* issue a read */
	dnet_writel(bp, reg, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before reading the data */
	ndelay(500);

	/* read the data from the MAC data register */
	data_read = dnet_readl(bp, MACREG_DATA);

	/* all done */
	return data_read;
}

/* function for writing to the internal MAC register */
static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val)
{
	/* load the data to write */
	dnet_writel(bp, val, MACREG_DATA);

	/* issue the write */
	dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR);

	/* since a read/write op to the MAC is very slow,
	 * we must wait before exiting */
	ndelay(500);
}

static void __dnet_set_hwaddr(struct dnet *bp)
{
	u16 tmp;

	/* write the MAC address, one 16-bit word per internal register */
	tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
	tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
	dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
}

static void dnet_get_hwaddr(struct dnet *bp)
{
	u16 tmp;
	u8 addr[6];

	/*
	 * The MAC address is stored as three 16-bit words in the internal
	 * MAC address registers; read them back and rebuild the address,
	 * then use it only if it is a valid Ethernet address.
	 */
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG);
	*((__be16 *)addr) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG);
	*((__be16 *)(addr + 2)) = cpu_to_be16(tmp);
	tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG);
	*((__be16 *)(addr + 4)) = cpu_to_be16(tmp);

	if (is_valid_ether_addr(addr))
		memcpy(bp->dev->dev_addr, addr, sizeof(addr));
}

static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct dnet *bp = bus->priv;
	u16 value;

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* prepare the control word for a read */
	value = (mii_id << 8);
	value |= regnum;

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value);

	/* wait for end of transfer */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG);

	pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value);

	return value;
}

static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct dnet *bp = bus->priv;
	u16 tmp;

	pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value);

	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	/* prepare for a write operation */
	tmp = (1 << 13);

	/* only 5 bits allowed for phy-addr and reg_offset */
	mii_id &= 0x1f;
	regnum &= 0x1f;

	/* only 16 bits of data */
	value &= 0xffff;

	/* prepare the control word */
	tmp |= (mii_id << 8);
	tmp |= regnum;

	/* write data */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value);

	/* write control word */
	dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp);

	/* wait for end of transfer */
	while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG)
				& DNET_INTERNAL_GMII_MNG_CMD_FIN))
		cpu_relax();

	return 0;
}

static void dnet_handle_link_change(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	u32 mode_reg, ctl_reg;

	int status_change = 0;

	spin_lock_irqsave(&bp->lock, flags);

	mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG);
	ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (phydev->link) {
		if (bp->duplex != phydev->duplex) {
			if (phydev->duplex)
				ctl_reg &=
				    ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP);
			else
				ctl_reg |=
				    DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP;

			bp->duplex = phydev->duplex;
			status_change = 1;
		}

		if (bp->speed != phydev->speed) {
			status_change = 1;
			switch (phydev->speed) {
			case 1000:
				mode_reg |= DNET_INTERNAL_MODE_GBITEN;
				break;
			case 100:
			case 10:
				mode_reg &= ~DNET_INTERNAL_MODE_GBITEN;
				break;
			default:
				printk(KERN_WARNING
				       "%s: Ack! Speed (%d) is not "
				       "10/100/1000!\n", dev->name,
				       phydev->speed);
				break;
			}
			bp->speed = phydev->speed;
		}
	}

	if (phydev->link != bp->link) {
		if (phydev->link) {
			mode_reg |=
			    (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN);
		} else {
			mode_reg &=
			    ~(DNET_INTERNAL_MODE_RXEN |
			      DNET_INTERNAL_MODE_TXEN);
			bp->speed = 0;
			bp->duplex = -1;
		}
		bp->link = phydev->link;

		status_change = 1;
	}

	if (status_change) {
		dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg);
		dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg);
	}

	spin_unlock_irqrestore(&bp->lock, flags);

	if (status_change) {
		if (phydev->link)
			printk(KERN_INFO "%s: link up (%d/%s)\n",
			       dev->name, phydev->speed,
			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			printk(KERN_INFO "%s: link down\n", dev->name);
	}
}

static int dnet_mii_probe(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct phy_device *phydev = NULL;

	/* find the first phy */
	phydev = phy_find_first(bp->mii_bus);

	if (!phydev) {
		printk(KERN_ERR "%s: no PHY found\n", dev->name);
		return -ENODEV;
	}

	/* attach the mac to the phy */
	if (bp->capabilities & DNET_HAS_RMII) {
		phydev = phy_connect(dev, phydev_name(phydev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_RMII);
	} else {
		phydev = phy_connect(dev, phydev_name(phydev),
				     &dnet_handle_link_change,
				     PHY_INTERFACE_MODE_MII);
	}

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	if (bp->capabilities & DNET_HAS_GIGABIT)
		phy_set_max_speed(phydev, SPEED_1000);
	else
		phy_set_max_speed(phydev, SPEED_100);

	phy_support_asym_pause(phydev);

	bp->link = 0;
	bp->speed = 0;
	bp->duplex = -1;

	return 0;
}

static int dnet_mii_init(struct dnet *bp)
{
	int err;

	bp->mii_bus = mdiobus_alloc();
	if (bp->mii_bus == NULL)
		return -ENOMEM;

	bp->mii_bus->name = "dnet_mii_bus";
	bp->mii_bus->read = &dnet_mdio_read;
	bp->mii_bus->write = &dnet_mdio_write;

	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);

	bp->mii_bus->priv = bp;

	if (mdiobus_register(bp->mii_bus)) {
		err = -ENXIO;
		goto err_out;
	}

	if (dnet_mii_probe(bp->dev) != 0) {
		err = -ENXIO;
		goto err_out_unregister_bus;
	}

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out:
	mdiobus_free(bp->mii_bus);
	return err;
}

/* PHY fixup for the Marvell 88E1111 (registered via
 * phy_register_fixup_for_uid() in dnet_probe) */
static int dnet_phy_marvell_fixup(struct phy_device *phydev)
{
	return phy_write(phydev, 0x18, 0x4148);
}

static void dnet_update_stats(struct dnet *bp)
{
	u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT;
	u32 *p = &bp->hw_stats.rx_pkt_ignr;
	u32 *end = &bp->hw_stats.rx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);

	reg = bp->regs + DNET_TX_UNICAST_CNT;
	p = &bp->hw_stats.tx_unicast;
	end = &bp->hw_stats.tx_byte + 1;

	WARN_ON((unsigned long)(end - p - 1) !=
		(DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4);

	for (; p < end; p++, reg++)
		*p += readl(reg);
}

static int dnet_poll(struct napi_struct *napi, int budget)
{
	struct dnet *bp = container_of(napi, struct dnet, napi);
	struct net_device *dev = bp->dev;
	int npackets = 0;
	unsigned int pkt_len;
	struct sk_buff *skb;
	unsigned int *data_ptr;
	u32 int_enable;
	u32 cmd_word;
	int i;

	while (npackets < budget) {
		/*
		 * break out of the loop if there are no more
		 * packets waiting in the RX command FIFO
		 */
		if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
			break;

		cmd_word = dnet_readl(bp, RX_LEN_FIFO);
		pkt_len = cmd_word & 0xFFFF;

		if (cmd_word & 0xDF180000)
			printk(KERN_ERR "%s packet receive error %x\n",
			       __func__, cmd_word);

		skb = netdev_alloc_skb(dev, pkt_len + 5);
		if (skb != NULL) {
			/* Align IP header on 16 byte boundaries */
			skb_reserve(skb, 2);
			/*
			 * copy the frame out of the RX data FIFO,
			 * one 32-bit word at a time
			 */
			data_ptr = skb_put(skb, pkt_len);
			for (i = 0; i < (pkt_len + 3) >> 2; i++)
				*data_ptr++ = dnet_readl(bp, RX_DATA_FIFO);
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			npackets++;
		} else
			printk(KERN_NOTICE
			       "%s: No memory to allocate a sk_buff of "
			       "size %u.\n", dev->name, pkt_len);
	}

	if (npackets < budget) {
		/*
		 * All available packets processed: tell NAPI to stop
		 * polling and re-enable the RX interrupt.
		 */
		napi_complete_done(napi, npackets);
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
		dnet_writel(bp, int_enable, INTR_ENB);
	}

	return npackets;
}

static irqreturn_t dnet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct dnet *bp = netdev_priv(dev);
	u32 int_src, int_enable, int_current;
	unsigned long flags;
	unsigned int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	/* read the interrupt sources and mask with the enabled ones */
	int_src = dnet_readl(bp, INTR_SRC);
	int_enable = dnet_readl(bp, INTR_ENB);
	int_current = int_src & int_enable;

	/* restart the queue if we had stopped it for TX FIFO almost full */
	if (int_current & DNET_INTR_SRC_TX_FIFOAE) {
		int_enable = dnet_readl(bp, INTR_ENB);
		int_enable &= ~DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, int_enable, INTR_ENB);
		netif_wake_queue(dev);
		handled = 1;
	}

	/* RX FIFO error handling */
	if (int_current &
	    (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) {
		printk(KERN_ERR "%s: RX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, RX_STATUS), int_current);
		/* flush the RX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	/* TX FIFO error handling */
	if (int_current &
	    (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) {
		printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__,
		       dnet_readl(bp, TX_STATUS), int_current);
		/* flush the TX FIFOs */
		dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL);
		ndelay(500);
		dnet_writel(bp, 0, SYS_CTL);
		handled = 1;
	}

	if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) {
		if (napi_schedule_prep(&bp->napi)) {
			/*
			 * Disable the RX command FIFO almost-full
			 * interrupt while polling; dnet_poll()
			 * re-enables it once the FIFO has been
			 * drained.
			 */
			int_enable = dnet_readl(bp, INTR_ENB);
			int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF;
			dnet_writel(bp, int_enable, INTR_ENB);
			__napi_schedule(&bp->napi);
		}
		handled = 1;
	}

	if (!handled)
		pr_debug("%s: irq %x remains\n", __func__, int_current);

	spin_unlock_irqrestore(&bp->lock, flags);

	return IRQ_RETVAL(handled);
}

#ifdef DEBUG
static inline void dnet_print_skb(struct sk_buff *skb)
{
	int k;
	printk(KERN_DEBUG PFX "data:");
	for (k = 0; k < skb->len; k++)
		printk(" %02x", (unsigned int)skb->data[k]);
	printk("\n");
}
#else
#define dnet_print_skb(skb)	do {} while (0)
#endif

static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	u32 tx_status, irq_enable;
	unsigned int len, i, tx_cmd, wrsz;
	unsigned long flags;
	unsigned int *bufp;

	tx_status = dnet_readl(bp, TX_STATUS);

	pr_debug("start_xmit: len %u head %p data %p\n",
		 skb->len, skb->head, skb->data);
	dnet_print_skb(skb);

	/* frame size (words) */
	len = (skb->len + 3) >> 2;

	spin_lock_irqsave(&bp->lock, flags);

	tx_status = dnet_readl(bp, TX_STATUS);

	bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
	wrsz = (u32) skb->len + 3;
	wrsz += ((unsigned long) skb->data) & 0x3;
	wrsz >>= 2;
	tx_cmd = ((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len;

	/* copy the frame into the TX data FIFO if there is room for it */
	if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) {
		for (i = 0; i < wrsz; i++)
			dnet_writel(bp, *bufp++, TX_DATA_FIFO);

		/*
		 * inform the MAC that a packet has been written and is
		 * ready to be shipped out
		 */
		dnet_writel(bp, tx_cmd, TX_LEN_FIFO);
	}

	if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
		netif_stop_queue(dev);
		tx_status = dnet_readl(bp, INTR_SRC);
		irq_enable = dnet_readl(bp, INTR_ENB);
		irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
		dnet_writel(bp, irq_enable, INTR_ENB);
	}

	skb_tx_timestamp(skb);

	/* free the buffer */
	dev_kfree_skb(skb);

	spin_unlock_irqrestore(&bp->lock, flags);

	return NETDEV_TX_OK;
}

static void dnet_reset_hw(struct dnet *bp)
{
	/* disable RX/TX, keep only flow control enabled */
	dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN);

	/*
	 * RX FIFO almost full threshold: only the command FIFO almost
	 * full condition is implemented on the RX side
	 */
	dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH);

	/*
	 * TX FIFO almost empty threshold: only the data FIFO almost
	 * empty condition is implemented on the TX side
	 */
	dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH);

	/* flush rx/tx fifos */
	dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH,
		    SYS_CTL);
	msleep(1);
	dnet_writel(bp, 0, SYS_CTL);
}

static void dnet_init_hw(struct dnet *bp)
{
	u32 config;

	dnet_reset_hw(bp);
	__dnet_set_hwaddr(bp);

	config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG);

	if (bp->dev->flags & IFF_PROMISC)
		/* receive all frames */
		config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC;
	if (!(bp->dev->flags & IFF_BROADCAST))
		config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST;

	config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE |
	    DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST |
	    DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL |
	    DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS;

	dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config);

	/* clear pending interrupts before enabling them */
	config = dnet_readl(bp, INTR_SRC);

	/* enable RX/TX interrupts and the receive packet ready interrupt */
	dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY |
		    DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR |
		    DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL |
		    DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM |
		    DNET_INTR_ENB_RX_PKTRDY, INTR_ENB);
}

static int dnet_open(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	/* if the phy is not yet registered, retry later */
	if (!dev->phydev)
		return -EAGAIN;

	napi_enable(&bp->napi);
	dnet_init_hw(bp);

	phy_start_aneg(dev->phydev);

	/* schedule a link state check */
	phy_start(dev->phydev);

	netif_start_queue(dev);

	return 0;
}

static int dnet_close(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&bp->napi);

	if (dev->phydev)
		phy_stop(dev->phydev);

	dnet_reset_hw(bp);
	netif_carrier_off(dev);

	return 0;
}

static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat)
{
	pr_debug("%s\n", __func__);
	pr_debug("----------------------------- RX statistics "
		 "-------------------------------\n");
	pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr);
	pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err);
	pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm);
	pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm);
	pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol);
	pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err);
	pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt);
	pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm);
	pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm);
	pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast);
	pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast);
	pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag);
	pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink);
	pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib);
	pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd);
	pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte);
	pr_debug("----------------------------- TX statistics "
		 "-------------------------------\n");
	pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast);
	pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm);
	pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast);
	pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast);
	pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag);
	pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs);
	pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo);
	pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte);
}

static struct net_device_stats *dnet_get_stats(struct net_device *dev)
{
	struct dnet *bp = netdev_priv(dev);
	struct net_device_stats *nstat = &dev->stats;
	struct dnet_stats *hwstat = &bp->hw_stats;

	/* read stats from hardware */
	dnet_update_stats(bp);

	/* Convert HW stats into netdevice stats */
	nstat->rx_errors = (hwstat->rx_len_chk_err +
			    hwstat->rx_lng_frm + hwstat->rx_shrt_frm +
			    /* IPG violations (rx_ipg_viol) are not
			     * counted as errors */
			    hwstat->rx_crc_err +
			    hwstat->rx_pre_shrink +
			    hwstat->rx_drib_nib + hwstat->rx_unsup_opcd);
	nstat->tx_errors = hwstat->tx_bad_fcs;
	nstat->rx_length_errors = (hwstat->rx_len_chk_err +
				   hwstat->rx_lng_frm +
				   hwstat->rx_shrt_frm + hwstat->rx_pre_shrink);
	nstat->rx_crc_errors = hwstat->rx_crc_err;
	nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib;
	nstat->rx_packets = hwstat->rx_ok_pkt;
	nstat->tx_packets = (hwstat->tx_unicast +
			     hwstat->tx_multicast + hwstat->tx_brdcast);
	nstat->rx_bytes = hwstat->rx_byte;
	nstat->tx_bytes = hwstat->tx_byte;
	nstat->multicast = hwstat->rx_multicast;
	nstat->rx_missed_errors = hwstat->rx_pkt_ignr;

	dnet_print_pretty_hwstats(hwstat);

	return nstat;
}

static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct phy_device *phydev = dev->phydev;

	if (!netif_running(dev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, rq, cmd);
}

static void dnet_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, "0", sizeof(info->bus_info));
}

static const struct ethtool_ops dnet_ethtool_ops = {
	.get_drvinfo		= dnet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops dnet_netdev_ops = {
	.ndo_open		= dnet_open,
	.ndo_stop		= dnet_close,
	.ndo_get_stats		= dnet_get_stats,
	.ndo_start_xmit		= dnet_start_xmit,
	.ndo_do_ioctl		= dnet_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int dnet_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct net_device *dev;
	struct dnet *bp;
	struct phy_device *phydev;
	int err;
	unsigned int irq;

	irq = platform_get_irq(pdev, 0);

	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	/* no special hardware features are advertised */
	dev->features |= 0;

	bp = netdev_priv(dev);
	bp->dev = dev;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	spin_lock_init(&bp->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bp->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(bp->regs)) {
		err = PTR_ERR(bp->regs);
		goto err_out_free_dev;
	}

	dev->irq = irq;
	err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev);
	if (err) {
		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
			irq, err);
		goto err_out_free_dev;
	}

	dev->netdev_ops = &dnet_netdev_ops;
	netif_napi_add(dev, &bp->napi, dnet_poll, 64);
	dev->ethtool_ops = &dnet_ethtool_ops;

	dev->base_addr = (unsigned long)bp->regs;

	bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK;

	dnet_get_hwaddr(bp);

	if (!is_valid_ether_addr(dev->dev_addr)) {
		/* choose a random ethernet address */
		eth_hw_addr_random(dev);
		__dnet_set_hwaddr(bp);
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_free_irq;
	}

	/* register the PHY board fixup (for the Marvell 88E1111) */
	err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0,
					 dnet_phy_marvell_fixup);
	/* we can live without it, so just issue a warning */
	if (err)
		dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n");

	err = dnet_mii_init(bp);
	if (err)
		goto err_out_unregister_netdev;

	dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
		 bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr);
	dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
		 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
		 (bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
	phydev = dev->phydev;
	phy_attached_info(phydev);

	return 0;

err_out_unregister_netdev:
	unregister_netdev(dev);
err_out_free_irq:
	free_irq(dev->irq, dev);
err_out_free_dev:
	free_netdev(dev);
	return err;
}

static int dnet_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct dnet *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		if (dev->phydev)
			phy_disconnect(dev->phydev);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);
		unregister_netdev(dev);
		free_irq(dev->irq, dev);
		free_netdev(dev);
	}

	return 0;
}

static struct platform_driver dnet_driver = {
	.probe		= dnet_probe,
	.remove		= dnet_remove,
	.driver		= {
		.name		= "dnet",
	},
};

module_platform_driver(dnet_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, "
	      "Matteo Vit <matteo.vit@dave.eu>");