This source file includes the following definitions:
- au1000_enable_mac
- au1000_mdio_read
- au1000_mdio_write
- au1000_mdiobus_read
- au1000_mdiobus_write
- au1000_mdiobus_reset
- au1000_hard_stop
- au1000_enable_rx_tx
- au1000_adjust_link
- au1000_mii_probe
- au1000_GetFreeDB
- au1000_ReleaseDB
- au1000_reset_mac_unlocked
- au1000_reset_mac
- au1000_setup_hw_rings
- au1000_get_drvinfo
- au1000_set_msglevel
- au1000_get_msglevel
- au1000_init
- au1000_update_rx_stats
- au1000_rx
- au1000_update_tx_stats
- au1000_tx_ack
- au1000_interrupt
- au1000_open
- au1000_close
- au1000_tx
- au1000_tx_timeout
- au1000_multicast_list
- au1000_ioctl
- au1000_probe
- au1000_remove
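/* au1000_eth.c: Au1xxx on-chip Ethernet driver */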
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/ioport.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/cpu.h>
#include <linux/io.h>

#include <asm/mipsregs.h>
#include <asm/irq.h>
#include <asm/processor.h>

#include <au1000.h>
#include <au1xxx_eth.h>
#include <prom.h>

#include "au1000_eth.h"

#ifdef AU1000_ETH_DEBUG
static int au1000_debug = 5;
#else
static int au1000_debug = 3;
#endif

#define AU1000_DEF_MSG_ENABLE	(NETIF_MSG_DRV | \
				 NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK)

#define DRV_NAME	"au1000_eth"
#define DRV_VERSION	"1.7"
#define DRV_AUTHOR	"Pete Popov <ppopov@embeddedalley.com>"
#define DRV_DESC	"Au1xxx on-chip Ethernet driver"

MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

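/* MAC control/status register offsets and bit definitions */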
#define MAC_CONTROL		0x0
# define MAC_RX_ENABLE		(1 << 2)
# define MAC_TX_ENABLE		(1 << 3)
# define MAC_DEF_CHECK		(1 << 5)
# define MAC_SET_BL(X)		(((X) & 0x3) << 6)
# define MAC_AUTO_PAD		(1 << 8)
# define MAC_DISABLE_RETRY	(1 << 10)
# define MAC_DISABLE_BCAST	(1 << 11)
# define MAC_LATE_COL		(1 << 12)
# define MAC_HASH_MODE		(1 << 13)
# define MAC_HASH_ONLY		(1 << 15)
# define MAC_PASS_ALL		(1 << 16)
# define MAC_INVERSE_FILTER	(1 << 17)
# define MAC_PROMISCUOUS	(1 << 18)
# define MAC_PASS_ALL_MULTI	(1 << 19)
# define MAC_FULL_DUPLEX	(1 << 20)
# define MAC_NORMAL_MODE	0
# define MAC_INT_LOOPBACK	(1 << 21)
# define MAC_EXT_LOOPBACK	(1 << 22)
# define MAC_DISABLE_RX_OWN	(1 << 23)
# define MAC_BIG_ENDIAN		(1 << 30)
# define MAC_RX_ALL		(1 << 31)
#define MAC_ADDRESS_HIGH	0x4
#define MAC_ADDRESS_LOW		0x8
#define MAC_MCAST_HIGH		0xC
#define MAC_MCAST_LOW		0x10
#define MAC_MII_CNTRL		0x14
# define MAC_MII_BUSY		(1 << 0)
# define MAC_MII_READ		0
# define MAC_MII_WRITE		(1 << 1)
# define MAC_SET_MII_SELECT_REG(X)	(((X) & 0x1f) << 6)
# define MAC_SET_MII_SELECT_PHY(X)	(((X) & 0x1f) << 11)
#define MAC_MII_DATA		0x18
#define MAC_FLOW_CNTRL		0x1C
# define MAC_FLOW_CNTRL_BUSY	(1 << 0)
# define MAC_FLOW_CNTRL_ENABLE	(1 << 1)
# define MAC_PASS_CONTROL	(1 << 2)
# define MAC_SET_PAUSE(X)	(((X) & 0xffff) << 16)
#define MAC_VLAN1_TAG		0x20
#define MAC_VLAN2_TAG		0x24

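/* Ethernet MAC enable register (aup->enable) bits */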
# define MAC_EN_CLOCK_ENABLE	(1 << 0)
# define MAC_EN_RESET0		(1 << 1)
# define MAC_EN_TOSS		(0 << 2)
# define MAC_EN_CACHEABLE	(1 << 3)
# define MAC_EN_RESET1		(1 << 4)
# define MAC_EN_RESET2		(1 << 5)
# define MAC_DMA_RESET		(1 << 6)

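/* Transmit buffer DMA registers (relative to the MACDMA base) and status bits */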
#define MAC_TX_BUFF0_STATUS	0x0
# define TX_FRAME_ABORTED	(1 << 0)
# define TX_JAB_TIMEOUT		(1 << 1)
# define TX_NO_CARRIER		(1 << 2)
# define TX_LOSS_CARRIER	(1 << 3)
# define TX_EXC_DEF		(1 << 4)
# define TX_LATE_COLL_ABORT	(1 << 5)
# define TX_EXC_COLL		(1 << 6)
# define TX_UNDERRUN		(1 << 7)
# define TX_DEFERRED		(1 << 8)
# define TX_LATE_COLL		(1 << 9)
# define TX_COLL_CNT_MASK	(0xF << 10)
# define TX_PKT_RETRY		(1 << 31)
#define MAC_TX_BUFF0_ADDR	0x4
# define TX_DMA_ENABLE		(1 << 0)
# define TX_T_DONE		(1 << 1)
# define TX_GET_DMA_BUFFER(X)	(((X) >> 2) & 0x3)
#define MAC_TX_BUFF0_LEN	0x8
#define MAC_TX_BUFF1_STATUS	0x10
#define MAC_TX_BUFF1_ADDR	0x14
#define MAC_TX_BUFF1_LEN	0x18
#define MAC_TX_BUFF2_STATUS	0x20
#define MAC_TX_BUFF2_ADDR	0x24
#define MAC_TX_BUFF2_LEN	0x28
#define MAC_TX_BUFF3_STATUS	0x30
#define MAC_TX_BUFF3_ADDR	0x34
#define MAC_TX_BUFF3_LEN	0x38

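/* Receive buffer DMA registers (relative to the MACDMA base + 0x100) and status bits */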
#define MAC_RX_BUFF0_STATUS	0x0
# define RX_FRAME_LEN_MASK	0x3fff
# define RX_WDOG_TIMER		(1 << 14)
# define RX_RUNT		(1 << 15)
# define RX_OVERLEN		(1 << 16)
# define RX_COLL		(1 << 17)
# define RX_ETHER		(1 << 18)
# define RX_MII_ERROR		(1 << 19)
# define RX_DRIBBLING		(1 << 20)
# define RX_CRC_ERROR		(1 << 21)
# define RX_VLAN1		(1 << 22)
# define RX_VLAN2		(1 << 23)
# define RX_LEN_ERROR		(1 << 24)
# define RX_CNTRL_FRAME		(1 << 25)
# define RX_U_CNTRL_FRAME	(1 << 26)
# define RX_MCAST_FRAME		(1 << 27)
# define RX_BCAST_FRAME		(1 << 28)
# define RX_FILTER_FAIL		(1 << 29)
# define RX_PACKET_FILTER	(1 << 30)
# define RX_MISSED_FRAME	(1 << 31)

# define RX_ERROR (RX_WDOG_TIMER | RX_RUNT | RX_OVERLEN |  \
		   RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \
		   RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME)
#define MAC_RX_BUFF0_ADDR	0x4
# define RX_DMA_ENABLE		(1 << 0)
# define RX_T_DONE		(1 << 1)
# define RX_GET_DMA_BUFFER(X)	(((X) >> 2) & 0x3)
# define RX_SET_BUFF_ADDR(X)	((X) & 0xffffffc0)
#define MAC_RX_BUFF1_STATUS	0x10
#define MAC_RX_BUFF1_ADDR	0x14
#define MAC_RX_BUFF2_STATUS	0x20
#define MAC_RX_BUFF2_ADDR	0x24
#define MAC_RX_BUFF3_STATUS	0x30
#define MAC_RX_BUFF3_ADDR	0x34

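/*
 * Driver overview:
 *
 * The MAC has four transmit and four receive hardware DMA descriptors
 * (NUM_TX_DMA and NUM_RX_DMA ring entries). Outgoing skbs are copied into
 * pre-allocated DMA buffers (struct db_dest) before transmission, and
 * received frames are copied out of those buffers into freshly allocated
 * skbs before being handed to the stack.
 */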
static void au1000_enable_mac(struct net_device *dev, int force_reset)
{
	unsigned long flags;
	struct au1000_private *aup = netdev_priv(dev);

	spin_lock_irqsave(&aup->lock, flags);

	if (force_reset || (!aup->mac_enabled)) {
		writel(MAC_EN_CLOCK_ENABLE, aup->enable);
		wmb();
		mdelay(2);
		writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
				| MAC_EN_CLOCK_ENABLE), aup->enable);
		wmb();
		mdelay(2);

		aup->mac_enabled = 1;
	}

	spin_unlock_irqrestore(&aup->lock, flags);
}

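/*
 * Read a PHY register over the MAC's MDIO interface.
 * Returns the register value, or -1 if the MII interface stays busy.
 */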
static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 *const mii_control_reg = &aup->mac->mii_control;
	u32 *const mii_data_reg = &aup->mac->mii_data;
	u32 timedout = 20;
	u32 mii_control;

	while (readl(mii_control_reg) & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			netdev_err(dev, "read_MII busy timeout!!\n");
			return -1;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_READ;

	writel(mii_control, mii_control_reg);

	timedout = 20;
	while (readl(mii_control_reg) & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			netdev_err(dev, "mdio_read busy timeout!!\n");
			return -1;
		}
	}
	return readl(mii_data_reg);
}

static void au1000_mdio_write(struct net_device *dev, int phy_addr,
			      int reg, u16 value)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 *const mii_control_reg = &aup->mac->mii_control;
	u32 *const mii_data_reg = &aup->mac->mii_data;
	u32 timedout = 20;
	u32 mii_control;

	while (readl(mii_control_reg) & MAC_MII_BUSY) {
		mdelay(1);
		if (--timedout == 0) {
			netdev_err(dev, "mdio_write busy timeout!!\n");
			return;
		}
	}

	mii_control = MAC_SET_MII_SELECT_REG(reg) |
		MAC_SET_MII_SELECT_PHY(phy_addr) | MAC_MII_WRITE;

	writel(value, mii_data_reg);
	writel(mii_control, mii_control_reg);
}

static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct net_device *const dev = bus->priv;

	/* make sure the MAC associated with this mii_bus is enabled */
	au1000_enable_mac(dev, 0);

	return au1000_mdio_read(dev, phy_addr, regnum);
}

static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
				u16 value)
{
	struct net_device *const dev = bus->priv;

	/* make sure the MAC associated with this mii_bus is enabled */
	au1000_enable_mac(dev, 0);

	au1000_mdio_write(dev, phy_addr, regnum, value);
	return 0;
}

static int au1000_mdiobus_reset(struct mii_bus *bus)
{
	struct net_device *const dev = bus->priv;

	/* make sure the MAC associated with this mii_bus is enabled */
	au1000_enable_mac(dev, 0);

	return 0;
}

static void au1000_hard_stop(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 reg;

	netif_dbg(aup, drv, dev, "hard stop\n");

	reg = readl(&aup->mac->control);
	reg &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
	writel(reg, &aup->mac->control);
	wmb();
	mdelay(10);
}

static void au1000_enable_rx_tx(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 reg;

	netif_dbg(aup, hw, dev, "enable_rx_tx\n");

	reg = readl(&aup->mac->control);
	reg |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
	writel(reg, &aup->mac->control);
	wmb();
	mdelay(10);
}

static void
au1000_adjust_link(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;
	unsigned long flags;
	u32 reg;

	int status_change = 0;

	BUG_ON(!phydev);

	spin_lock_irqsave(&aup->lock, flags);

	if (phydev->link && (aup->old_speed != phydev->speed)) {
		/* speed changed */

		switch (phydev->speed) {
		case SPEED_10:
		case SPEED_100:
			break;
		default:
			netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
				    phydev->speed);
			break;
		}

		aup->old_speed = phydev->speed;

		status_change = 1;
	}

	if (phydev->link && (aup->old_duplex != phydev->duplex)) {
		/* duplex mode changed; the receiver and transmitter
		 * must be stopped before switching duplex
		 */
		au1000_hard_stop(dev);

		reg = readl(&aup->mac->control);
		if (DUPLEX_FULL == phydev->duplex) {
			reg |= MAC_FULL_DUPLEX;
			reg &= ~MAC_DISABLE_RX_OWN;
		} else {
			reg &= ~MAC_FULL_DUPLEX;
			reg |= MAC_DISABLE_RX_OWN;
		}
		writel(reg, &aup->mac->control);
		wmb();
		mdelay(1);

		au1000_enable_rx_tx(dev);
		aup->old_duplex = phydev->duplex;

		status_change = 1;
	}

	if (phydev->link != aup->old_link) {
		/* link state changed */
		if (!phydev->link) {
			/* link went down */
			aup->old_speed = 0;
			aup->old_duplex = -1;
		}

		aup->old_link = phydev->link;
		status_change = 1;
	}

	spin_unlock_irqrestore(&aup->lock, flags);

	if (status_change) {
		if (phydev->link)
			netdev_info(dev, "link up (%d/%s)\n",
				    phydev->speed,
				    DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
		else
			netdev_info(dev, "link down\n");
	}
}

static int au1000_mii_probe(struct net_device *dev)
{
	struct au1000_private *const aup = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	int phy_addr;

	if (aup->phy_static_config) {
		BUG_ON(aup->mac_id < 0 || aup->mac_id > 1);

		if (aup->phy_addr)
			phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
		else
			netdev_info(dev, "using PHY-less setup\n");
		return 0;
	}

	/* find the first (lowest address) PHY on the current MAC's MII bus */
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
		if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
			phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
			if (!aup->phy_search_highest_addr)
				/* break out with first one found */
				break;
		}

	if (aup->phy1_search_mac0) {
		/* try harder to find a PHY */
		if (!phydev && (aup->mac_id == 1)) {
			/* no PHY found, maybe we have a dual PHY? */
			dev_info(&dev->dev, ": no PHY found on MAC1, "
				 "let's see if it's attached to MAC0...\n");

			/* find the first (lowest address) non-attached
			 * PHY on the MAC0 MII bus
			 */
			for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
				struct phy_device *const tmp_phydev =
					mdiobus_get_phy(aup->mii_bus,
							phy_addr);

				if (aup->mac_id == 1)
					break;

				/* no PHY at this address */
				if (!tmp_phydev)
					continue;

				/* already claimed by MAC0 */
				if (tmp_phydev->attached_dev)
					continue;

				phydev = tmp_phydev;
				break;
			}
		}
	}

	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -1;
	}

	/* now we are supposed to have a proper phydev, to attach to... */
	BUG_ON(phydev->attached_dev);

	phydev = phy_connect(dev, phydev_name(phydev),
			     &au1000_adjust_link, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		netdev_err(dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phy_set_max_speed(phydev, SPEED_100);

	aup->old_link = 0;
	aup->old_speed = 0;
	aup->old_duplex = -1;

	phy_attached_info(phydev);

	return 0;
}

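/*
 * DMA buffer block allocation: the driver keeps a simple free list of
 * struct db_dest entries (aup->pDBfree), each describing one pre-allocated
 * MAX_BUF_SIZE buffer used for TX/RX data.
 */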
static struct db_dest *au1000_GetFreeDB(struct au1000_private *aup)
{
	struct db_dest *pDB;
	pDB = aup->pDBfree;

	if (pDB)
		aup->pDBfree = pDB->pnext;

	return pDB;
}

void au1000_ReleaseDB(struct au1000_private *aup, struct db_dest *pDB)
{
	struct db_dest *pDBfree = aup->pDBfree;
	if (pDBfree)
		pDBfree->pnext = pDB;
	aup->pDBfree = pDB;
}

static void au1000_reset_mac_unlocked(struct net_device *dev)
{
	struct au1000_private *const aup = netdev_priv(dev);
	int i;

	au1000_hard_stop(dev);

	writel(MAC_EN_CLOCK_ENABLE, aup->enable);
	wmb();
	mdelay(2);
	writel(0, aup->enable);
	wmb();
	mdelay(2);

	aup->tx_full = 0;
	for (i = 0; i < NUM_RX_DMA; i++) {
		aup->rx_dma_ring[i]->buff_stat &= ~0xf;
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		aup->tx_dma_ring[i]->buff_stat &= ~0xf;
	}

	aup->mac_enabled = 0;
}

static void au1000_reset_mac(struct net_device *dev)
{
	struct au1000_private *const aup = netdev_priv(dev);
	unsigned long flags;

	netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
		  (unsigned)aup);

	spin_lock_irqsave(&aup->lock, flags);

	au1000_reset_mac_unlocked(dev);

	spin_unlock_irqrestore(&aup->lock, flags);
}

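/*
 * Set up the hardware descriptor rings: the four TX descriptors live at the
 * start of the MACDMA region and the four RX descriptors at offset 0x100.
 */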
static void
au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
{
	int i;

	for (i = 0; i < NUM_RX_DMA; i++) {
		aup->rx_dma_ring[i] = (struct rx_dma *)
			(tx_base + 0x100 + sizeof(struct rx_dma) * i);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		aup->tx_dma_ring[i] = (struct tx_dma *)
			(tx_base + sizeof(struct tx_dma) * i);
	}
}

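/* ethtool operations: driver info, link state and message-level control */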
static void
au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct au1000_private *aup = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
		 aup->mac_id);
}

static void au1000_set_msglevel(struct net_device *dev, u32 value)
{
	struct au1000_private *aup = netdev_priv(dev);
	aup->msg_enable = value;
}

static u32 au1000_get_msglevel(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	return aup->msg_enable;
}

static const struct ethtool_ops au1000_ethtool_ops = {
	.get_drvinfo		= au1000_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= au1000_get_msglevel,
	.set_msglevel		= au1000_set_msglevel,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};

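/*
 * au1000_init(): bring the MAC out of reset, program the station address,
 * re-enable the RX descriptors and write the control register according to
 * the current PHY duplex setting.
 */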
static int au1000_init(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	unsigned long flags;
	int i;
	u32 control;

	netif_dbg(aup, hw, dev, "au1000_init\n");

	/* bring the device out of reset */
	au1000_enable_mac(dev, 1);

	spin_lock_irqsave(&aup->lock, flags);

	writel(0, &aup->mac->control);
	aup->tx_head = (aup->tx_dma_ring[0]->buff_stat & 0xC) >> 2;
	aup->tx_tail = aup->tx_head;
	aup->rx_head = (aup->rx_dma_ring[0]->buff_stat & 0xC) >> 2;

	writel(dev->dev_addr[5]<<8 | dev->dev_addr[4],
	       &aup->mac->mac_addr_high);
	writel(dev->dev_addr[3]<<24 | dev->dev_addr[2]<<16 |
	       dev->dev_addr[1]<<8 | dev->dev_addr[0],
	       &aup->mac->mac_addr_low);

	for (i = 0; i < NUM_RX_DMA; i++)
		aup->rx_dma_ring[i]->buff_stat |= RX_DMA_ENABLE;

	wmb();

	control = MAC_RX_ENABLE | MAC_TX_ENABLE;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
	control |= MAC_BIG_ENDIAN;
#endif
	if (dev->phydev) {
		if (dev->phydev->link && (DUPLEX_FULL == dev->phydev->duplex))
			control |= MAC_FULL_DUPLEX;
		else
			control |= MAC_DISABLE_RX_OWN;
	} else { /* PHY-less operation, assume full duplex */
		control |= MAC_FULL_DUPLEX;
	}

	writel(control, &aup->mac->control);
	writel(0x8100, &aup->mac->vlan1_tag); /* 802.1Q VLAN TPID */
	wmb();

	spin_unlock_irqrestore(&aup->lock, flags);
	return 0;
}

static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
{
	struct net_device_stats *ps = &dev->stats;

	ps->rx_packets++;
	if (status & RX_MCAST_FRAME)
		ps->multicast++;

	if (status & RX_ERROR) {
		ps->rx_errors++;
		if (status & RX_MISSED_FRAME)
			ps->rx_missed_errors++;
		if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR))
			ps->rx_length_errors++;
		if (status & RX_CRC_ERROR)
			ps->rx_crc_errors++;
		if (status & RX_COLL)
			ps->collisions++;
	} else
		ps->rx_bytes += status & RX_FRAME_LEN_MASK;
}

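/*
 * au1000_rx(): walk the RX descriptor ring, copy each completed good frame
 * into a new skb and pass it up the stack, then hand the buffer back to
 * the MAC.
 */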
static int au1000_rx(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct sk_buff *skb;
	struct rx_dma *prxd;
	u32 buff_stat, status;
	struct db_dest *pDB;
	u32 frmlen;

	netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);

	prxd = aup->rx_dma_ring[aup->rx_head];
	buff_stat = prxd->buff_stat;
	while (buff_stat & RX_T_DONE) {
		status = prxd->status;
		pDB = aup->rx_db_inuse[aup->rx_head];
		au1000_update_rx_stats(dev, status);
		if (!(status & RX_ERROR)) {
			/* good frame */
			frmlen = (status & RX_FRAME_LEN_MASK);
			frmlen -= 4; /* Remove FCS */
			skb = netdev_alloc_skb(dev, frmlen + 2);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
				continue;
			}
			skb_reserve(skb, 2);	/* 16 byte IP header align */
			skb_copy_to_linear_data(skb,
				(unsigned char *)pDB->vaddr, frmlen);
			skb_put(skb, frmlen);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);	/* pass the packet to upper layers */
		} else {
			if (au1000_debug > 4) {
				pr_err("rx_error(s):");
				if (status & RX_MISSED_FRAME)
					pr_cont(" miss");
				if (status & RX_WDOG_TIMER)
					pr_cont(" wdog");
				if (status & RX_RUNT)
					pr_cont(" runt");
				if (status & RX_OVERLEN)
					pr_cont(" overlen");
				if (status & RX_COLL)
					pr_cont(" coll");
				if (status & RX_MII_ERROR)
					pr_cont(" mii error");
				if (status & RX_CRC_ERROR)
					pr_cont(" crc error");
				if (status & RX_LEN_ERROR)
					pr_cont(" len error");
				if (status & RX_U_CNTRL_FRAME)
					pr_cont(" u control frame");
				pr_cont("\n");
			}
		}
		prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE);
		aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1);
		wmb();

		/* next descriptor */
		prxd = aup->rx_dma_ring[aup->rx_head];
		buff_stat = prxd->buff_stat;
	}
	return 0;
}

static void au1000_update_tx_stats(struct net_device *dev, u32 status)
{
	struct net_device_stats *ps = &dev->stats;

	if (status & TX_FRAME_ABORTED) {
		if (!dev->phydev || (DUPLEX_FULL == dev->phydev->duplex)) {
			if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
				/* any other tx errors are only valid
				 * in half duplex mode
				 */
				ps->tx_errors++;
				ps->tx_aborted_errors++;
			}
		} else {
			ps->tx_errors++;
			ps->tx_aborted_errors++;
			if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
				ps->tx_carrier_errors++;
		}
	}
}

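/*
 * au1000_tx_ack(): acknowledge descriptors the MAC has finished transmitting,
 * update statistics and wake the queue if it had been stopped.
 */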
static void au1000_tx_ack(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct tx_dma *ptxd;

	ptxd = aup->tx_dma_ring[aup->tx_tail];

	while (ptxd->buff_stat & TX_T_DONE) {
		au1000_update_tx_stats(dev, ptxd->status);
		ptxd->buff_stat &= ~TX_T_DONE;
		ptxd->len = 0;
		wmb();

		aup->tx_tail = (aup->tx_tail + 1) & (NUM_TX_DMA - 1);
		ptxd = aup->tx_dma_ring[aup->tx_tail];

		if (aup->tx_full) {
			aup->tx_full = 0;
			netif_wake_queue(dev);
		}
	}
}

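/*
 * The MAC interrupt: service both the receive and the transmit side.
 */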
static irqreturn_t au1000_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;

	/* Handle RX interrupts first to minimize chance of overrun */
	au1000_rx(dev);
	au1000_tx_ack(dev);
	return IRQ_RETVAL(1);
}

static int au1000_open(struct net_device *dev)
{
	int retval;
	struct au1000_private *aup = netdev_priv(dev);

	netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);

	retval = request_irq(dev->irq, au1000_interrupt, 0,
			     dev->name, dev);
	if (retval) {
		netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
		return retval;
	}

	retval = au1000_init(dev);
	if (retval) {
		netdev_err(dev, "error in au1000_init\n");
		free_irq(dev->irq, dev);
		return retval;
	}

	if (dev->phydev)
		phy_start(dev->phydev);

	netif_start_queue(dev);

	netif_dbg(aup, drv, dev, "open: Initialization done.\n");

	return 0;
}

static int au1000_close(struct net_device *dev)
{
	unsigned long flags;
	struct au1000_private *const aup = netdev_priv(dev);

	netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);

	if (dev->phydev)
		phy_stop(dev->phydev);

	spin_lock_irqsave(&aup->lock, flags);

	au1000_reset_mac_unlocked(dev);

	/* stop the device */
	netif_stop_queue(dev);

	/* disable the interrupt */
	free_irq(dev->irq, dev);
	spin_unlock_irqrestore(&aup->lock, flags);

	return 0;
}

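/*
 * au1000_tx(): copy the skb into the next free DMA buffer, pad short frames
 * to the minimum Ethernet length, and hand the descriptor to the MAC.
 */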
static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	struct net_device_stats *ps = &dev->stats;
	struct tx_dma *ptxd;
	u32 buff_stat;
	struct db_dest *pDB;
	int i;

	netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
		  (unsigned)aup, skb->len,
		  skb->data, aup->tx_head);

	ptxd = aup->tx_dma_ring[aup->tx_head];
	buff_stat = ptxd->buff_stat;
	if (buff_stat & TX_DMA_ENABLE) {
		/* buffer is still being transmitted */
		netif_stop_queue(dev);
		aup->tx_full = 1;
		return NETDEV_TX_BUSY;
	} else if (buff_stat & TX_T_DONE) {
		au1000_update_tx_stats(dev, ptxd->status);
		ptxd->len = 0;
	}

	if (aup->tx_full) {
		aup->tx_full = 0;
		netif_wake_queue(dev);
	}

	pDB = aup->tx_db_inuse[aup->tx_head];
	skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
	if (skb->len < ETH_ZLEN) {
		for (i = skb->len; i < ETH_ZLEN; i++)
			((char *)pDB->vaddr)[i] = 0;

		ptxd->len = ETH_ZLEN;
	} else
		ptxd->len = skb->len;

	ps->tx_packets++;
	ps->tx_bytes += ptxd->len;

	ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE;
	wmb();
	dev_kfree_skb(skb);
	aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
	return NETDEV_TX_OK;
}

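/*
 * TX watchdog timeout handler: the queue has been stuck for longer than the
 * watchdog interval, so reset and reinitialize the MAC and restart the queue.
 */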
static void au1000_tx_timeout(struct net_device *dev)
{
	netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
	au1000_reset_mac(dev);
	au1000_init(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}

static void au1000_multicast_list(struct net_device *dev)
{
	struct au1000_private *aup = netdev_priv(dev);
	u32 reg;

	netif_dbg(aup, drv, dev, "%s: flags=%x\n", __func__, dev->flags);
	reg = readl(&aup->mac->control);
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		reg |= MAC_PROMISCUOUS;
	} else if ((dev->flags & IFF_ALLMULTI)  ||
			   netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
		reg |= MAC_PASS_ALL_MULTI;
		reg &= ~MAC_PROMISCUOUS;
		netdev_info(dev, "Pass all multicast\n");
	} else {
		struct netdev_hw_addr *ha;
		u32 mc_filter[2];	/* Multicast hash filter */

		mc_filter[1] = mc_filter[0] = 0;
		netdev_for_each_mc_addr(ha, dev)
			set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
					(long *)mc_filter);
		writel(mc_filter[1], &aup->mac->multi_hash_high);
		writel(mc_filter[0], &aup->mac->multi_hash_low);
		reg &= ~MAC_PROMISCUOUS;
		reg |= MAC_HASH_MODE;
	}
	writel(reg, &aup->mac->control);
}

static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -EINVAL; /* PHY not controllable */

	return phy_mii_ioctl(dev->phydev, rq, cmd);
}

static const struct net_device_ops au1000_netdev_ops = {
	.ndo_open		= au1000_open,
	.ndo_stop		= au1000_close,
	.ndo_start_xmit		= au1000_tx,
	.ndo_set_rx_mode	= au1000_multicast_list,
	.ndo_do_ioctl		= au1000_ioctl,
	.ndo_tx_timeout		= au1000_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int au1000_probe(struct platform_device *pdev)
{
	struct au1000_private *aup = NULL;
	struct au1000_eth_platform_data *pd;
	struct net_device *dev = NULL;
	struct db_dest *pDB, *pDBfree;
	int irq, i, err = 0;
	struct resource *base, *macen, *macdma;

	base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!base) {
		dev_err(&pdev->dev, "failed to retrieve base register\n");
		err = -ENODEV;
		goto out;
	}

	macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!macen) {
		dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
		err = -ENODEV;
		goto out;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto out;
	}

	macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!macdma) {
		dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n");
		err = -ENODEV;
		goto out;
	}

	if (!request_mem_region(base->start, resource_size(base),
				pdev->name)) {
		dev_err(&pdev->dev, "failed to request memory region for base registers\n");
		err = -ENXIO;
		goto out;
	}

	if (!request_mem_region(macen->start, resource_size(macen),
				pdev->name)) {
		dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
		err = -ENXIO;
		goto err_request;
	}

	if (!request_mem_region(macdma->start, resource_size(macdma),
				pdev->name)) {
		dev_err(&pdev->dev, "failed to request MACDMA memory region\n");
		err = -ENXIO;
		goto err_macdma;
	}

	dev = alloc_etherdev(sizeof(struct au1000_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	platform_set_drvdata(pdev, dev);
	aup = netdev_priv(dev);

	spin_lock_init(&aup->lock);
	aup->msg_enable = (au1000_debug < 4 ?
				AU1000_DEF_MSG_ENABLE : au1000_debug);

	/* Allocate the TX/RX data buffers */
	aup->vaddr = (u32)dma_alloc_attrs(&pdev->dev, MAX_BUF_SIZE *
					  (NUM_TX_BUFFS + NUM_RX_BUFFS),
					  &aup->dma_addr, 0,
					  DMA_ATTR_NON_CONSISTENT);
	if (!aup->vaddr) {
		dev_err(&pdev->dev, "failed to allocate data buffers\n");
		err = -ENOMEM;
		goto err_vaddr;
	}

	/* aup->mac is the base address of the MAC's registers */
	aup->mac = (struct mac_reg *)
			ioremap_nocache(base->start, resource_size(base));
	if (!aup->mac) {
		dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
		err = -ENXIO;
		goto err_remap1;
	}

	/* aup->enable is the MAC enable/reset register */
	aup->enable = (u32 *)ioremap_nocache(macen->start,
						resource_size(macen));
	if (!aup->enable) {
		dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
		err = -ENXIO;
		goto err_remap2;
	}
	aup->mac_id = pdev->id;

	aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma));
	if (!aup->macdma) {
		dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n");
		err = -ENXIO;
		goto err_remap3;
	}

	au1000_setup_hw_rings(aup, aup->macdma);

	writel(0, aup->enable);
	aup->mac_enabled = 0;

	pd = dev_get_platdata(&pdev->dev);
	if (!pd) {
		dev_info(&pdev->dev, "no platform_data passed,"
					" PHY search on MAC0\n");
		aup->phy1_search_mac0 = 1;
	} else {
		if (is_valid_ether_addr(pd->mac)) {
			memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
		} else {
			/* platform MAC address is invalid, use a random one */
			eth_hw_addr_random(dev);
		}

		aup->phy_static_config = pd->phy_static_config;
		aup->phy_search_highest_addr = pd->phy_search_highest_addr;
		aup->phy1_search_mac0 = pd->phy1_search_mac0;
		aup->phy_addr = pd->phy_addr;
		aup->phy_busid = pd->phy_busid;
		aup->phy_irq = pd->phy_irq;
	}

	if (aup->phy_busid > 0) {
		dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n");
		err = -ENODEV;
		goto err_mdiobus_alloc;
	}

	aup->mii_bus = mdiobus_alloc();
	if (aup->mii_bus == NULL) {
		dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
		err = -ENOMEM;
		goto err_mdiobus_alloc;
	}

	aup->mii_bus->priv = dev;
	aup->mii_bus->read = au1000_mdiobus_read;
	aup->mii_bus->write = au1000_mdiobus_write;
	aup->mii_bus->reset = au1000_mdiobus_reset;
	aup->mii_bus->name = "au1000_eth_mii";
	snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 pdev->name, aup->mac_id);

	/* if known, set the corresponding PHY IRQ */
	if (aup->phy_static_config)
		if (aup->phy_irq && aup->phy_busid == aup->mac_id)
			aup->mii_bus->irq[aup->phy_addr] = aup->phy_irq;

	err = mdiobus_register(aup->mii_bus);
	if (err) {
		dev_err(&pdev->dev, "failed to register MDIO bus\n");
		goto err_mdiobus_reg;
	}

	err = au1000_mii_probe(dev);
	if (err != 0)
		goto err_out;

	pDBfree = NULL;
	/* setup the data buffer descriptors and attach a buffer to each one */
	pDB = aup->db;
	for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) {
		pDB->pnext = pDBfree;
		pDBfree = pDB;
		pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i);
		pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
		pDB++;
	}
	aup->pDBfree = pDBfree;

	err = -ENODEV;
	for (i = 0; i < NUM_RX_DMA; i++) {
		pDB = au1000_GetFreeDB(aup);
		if (!pDB)
			goto err_out;

		aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->rx_db_inuse[i] = pDB;
	}

	err = -ENODEV;
	for (i = 0; i < NUM_TX_DMA; i++) {
		pDB = au1000_GetFreeDB(aup);
		if (!pDB)
			goto err_out;

		aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr;
		aup->tx_dma_ring[i]->len = 0;
		aup->tx_db_inuse[i] = pDB;
	}

	dev->base_addr = base->start;
	dev->irq = irq;
	dev->netdev_ops = &au1000_netdev_ops;
	dev->ethtool_ops = &au1000_ethtool_ops;
	dev->watchdog_timeo = ETH_TX_TIMEOUT;

	/*
	 * The boot code may have used the ethernet controller, so reset it
	 * to start fresh; au1000_init() expects the device in reset state.
	 */
	au1000_reset_mac(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_err(dev, "Cannot register net device, aborting.\n");
		goto err_out;
	}

	netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
		    (unsigned long)base->start, irq);

	pr_info_once("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);

	return 0;

err_out:
	if (aup->mii_bus != NULL)
		mdiobus_unregister(aup->mii_bus);

	/* here we should have a valid dev plus aup-> register addresses
	 * so we can reset the mac properly
	 */
	au1000_reset_mac(dev);

	for (i = 0; i < NUM_RX_DMA; i++) {
		if (aup->rx_db_inuse[i])
			au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
	}
	for (i = 0; i < NUM_TX_DMA; i++) {
		if (aup->tx_db_inuse[i])
			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
	}
err_mdiobus_reg:
	mdiobus_free(aup->mii_bus);
err_mdiobus_alloc:
	iounmap(aup->macdma);
err_remap3:
	iounmap(aup->enable);
err_remap2:
	iounmap(aup->mac);
err_remap1:
	dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
		       (void *)aup->vaddr, aup->dma_addr,
		       DMA_ATTR_NON_CONSISTENT);
err_vaddr:
	free_netdev(dev);
err_alloc:
	release_mem_region(macdma->start, resource_size(macdma));
err_macdma:
	release_mem_region(macen->start, resource_size(macen));
err_request:
	release_mem_region(base->start, resource_size(base));
out:
	return err;
}

static int au1000_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct au1000_private *aup = netdev_priv(dev);
	int i;
	struct resource *base, *macen;

	unregister_netdev(dev);
	mdiobus_unregister(aup->mii_bus);
	mdiobus_free(aup->mii_bus);

	for (i = 0; i < NUM_RX_DMA; i++)
		if (aup->rx_db_inuse[i])
			au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);

	for (i = 0; i < NUM_TX_DMA; i++)
		if (aup->tx_db_inuse[i])
			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);

	dma_free_attrs(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
		       (void *)aup->vaddr, aup->dma_addr,
		       DMA_ATTR_NON_CONSISTENT);

	iounmap(aup->macdma);
	iounmap(aup->mac);
	iounmap(aup->enable);

	base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	release_mem_region(base->start, resource_size(base));

	base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(base->start, resource_size(base));

	macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	release_mem_region(macen->start, resource_size(macen));

	free_netdev(dev);

	return 0;
}

static struct platform_driver au1000_eth_driver = {
	.probe  = au1000_probe,
	.remove = au1000_remove,
	.driver = {
		.name   = "au1000-eth",
	},
};

module_platform_driver(au1000_eth_driver);

MODULE_ALIAS("platform:au1000-eth");