This source file includes the following definitions:
- eeprom_wait_ready
- eeprom_write
- eeprom_read
- et131x_init_eeprom
- et131x_rx_dma_enable
- et131x_rx_dma_disable
- et131x_tx_dma_enable
- add_10bit
- add_12bit
- et1310_config_mac_regs1
- et1310_config_mac_regs2
- et1310_in_phy_coma
- et1310_setup_device_for_multicast
- et1310_setup_device_for_unicast
- et1310_config_rxmac_regs
- et1310_config_txmac_regs
- et1310_config_macstat_regs
- et131x_phy_mii_read
- et131x_mii_read
- et131x_mii_write
- et1310_phy_read_mii_bit
- et1310_config_flow_control
- et1310_update_macstat_host_counters
- et1310_handle_macstat_interrupt
- et131x_mdio_read
- et131x_mdio_write
- et1310_phy_power_switch
- et131x_xcvr_init
- et131x_configure_global_regs
- et131x_config_rx_dma_regs
- et131x_config_tx_dma_regs
- et131x_adapter_setup
- et131x_soft_reset
- et131x_enable_interrupts
- et131x_disable_interrupts
- et131x_tx_dma_disable
- et131x_enable_txrx
- et131x_disable_txrx
- et131x_init_send
- et1310_enable_phy_coma
- et1310_disable_phy_coma
- bump_free_buff_ring
- et131x_rx_dma_memory_alloc
- et131x_rx_dma_memory_free
- et131x_init_recv
- et131x_set_rx_dma_timer
- nic_return_rfd
- nic_rx_pkts
- et131x_handle_recv_pkts
- et131x_tx_dma_memory_alloc
- et131x_tx_dma_memory_free
- nic_send_packet
- send_packet
- free_send_packet
- et131x_free_busy_send_packets
- et131x_handle_send_pkts
- et131x_get_regs_len
- et131x_get_regs
- et131x_get_drvinfo
- et131x_hwaddr_init
- et131x_pci_init
- et131x_error_timer_handler
- et131x_adapter_memory_free
- et131x_adapter_memory_alloc
- et131x_adjust_link
- et131x_mii_probe
- et131x_adapter_init
- et131x_pci_remove
- et131x_up
- et131x_down
- et131x_suspend
- et131x_resume
- et131x_isr
- et131x_poll
- et131x_stats
- et131x_open
- et131x_close
- et131x_ioctl
- et131x_set_packet_filter
- et131x_multicast
- et131x_tx
- et131x_tx_timeout
- et131x_change_mtu
- et131x_pci_setup

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/io.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/phy.h>

#include "et131x.h"

MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>");
MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver for the ET1310 by Agere Systems");

#define MAX_NUM_REGISTER_POLLS 1000
#define MAX_NUM_WRITE_RETRIES 2

#define COUNTER_WRAP_16_BIT 0x10000
#define COUNTER_WRAP_12_BIT 0x1000

#define INTERNAL_MEM_SIZE 0x400
#define INTERNAL_MEM_RX_OFFSET 0x1FF

#define INT_MASK_DISABLE 0xffffffff

#define INT_MASK_ENABLE 0xfffebf17
#define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7

#define NIC_MIN_PACKET_SIZE 60

#define NIC_MAX_MCAST_LIST 128

#define ET131X_PACKET_TYPE_DIRECTED 0x0001
#define ET131X_PACKET_TYPE_MULTICAST 0x0002
#define ET131X_PACKET_TYPE_BROADCAST 0x0004
#define ET131X_PACKET_TYPE_PROMISCUOUS 0x0008
#define ET131X_PACKET_TYPE_ALL_MULTICAST 0x0010

#define ET131X_TX_TIMEOUT (1 * HZ)
#define NIC_SEND_HANG_THRESHOLD 0

#define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008

#define FMP_ADAPTER_LOWER_POWER 0x00200000

#define FMP_ADAPTER_NON_RECOVER_ERROR 0x00800000
#define FMP_ADAPTER_HARDWARE_ERROR 0x04000000

#define FMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000

#define ET1310_PCI_MAC_ADDRESS 0xA4
#define ET1310_PCI_EEPROM_STATUS 0xB2
#define ET1310_PCI_ACK_NACK 0xC0
#define ET1310_PCI_REPLAY 0xC2
#define ET1310_PCI_L0L1LATENCY 0xCF

#define ET131X_PCI_DEVICE_ID_GIG 0xED00
#define ET131X_PCI_DEVICE_ID_FAST 0xED01

#define NANO_IN_A_MICRO 1000

#define PARM_RX_NUM_BUFS_DEF 4
#define PARM_RX_TIME_INT_DEF 10
#define PARM_RX_MEM_END_DEF 0x2bc
#define PARM_TX_TIME_INT_DEF 40
#define PARM_TX_NUM_BUFS_DEF 4
#define PARM_DMA_CACHE_DEF 0

#define FBR_CHUNKS 32
#define MAX_DESC_PER_RING_RX 1024

#define RFD_LOW_WATER_MARK 40
#define NIC_DEFAULT_NUM_RFD 1024
#define NUM_FBRS 2

#define MAX_PACKETS_HANDLED 256
#define ET131X_MIN_MTU 64
#define ET131X_MAX_MTU 9216

#define ALCATEL_MULTICAST_PKT 0x01000000
#define ALCATEL_BROADCAST_PKT 0x02000000

struct fbr_desc {
	u32 addr_lo;
	u32 addr_hi;
	u32 word2;
};
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
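
/* Packet Status Ring descriptor, written back by the hardware for each
 * received frame.  word0 holds the receive status flags (including the
 * ALCATEL_MULTICAST_PKT/ALCATEL_BROADCAST_PKT bits tested in
 * nic_rx_pkts()); word1 packs the frame length (bits 15:0), the free
 * buffer index (bits 25:16) and the free buffer ring index (bits 27:26).
 */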
struct pkt_stat_desc {
	u32 word0;
	u32 word1;
};
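
/* Receive status write-back block.  The upper half of word1 holds the
 * hardware's current packet status ring offset; nic_rx_pkts() compares
 * it with the driver's local_psr_full copy to detect new completions.
 */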
struct rx_status_block {
	u32 word0;
	u32 word1;
};
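
/* Book-keeping for one free buffer ring (FBR): per-entry virtual
 * addresses and split 64-bit bus addresses of the receive buffers, the
 * descriptor ring itself, and the coherent chunks the buffers were
 * carved from.  local_full mirrors the hardware's full-offset register
 * in its 10-bit index plus wrap-bit format.
 */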
struct fbr_lookup {
	void *virt[MAX_DESC_PER_RING_RX];
	u32 bus_high[MAX_DESC_PER_RING_RX];
	u32 bus_low[MAX_DESC_PER_RING_RX];
	void *ring_virtaddr;
	dma_addr_t ring_physaddr;
	void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
	u32 local_full;
	u32 num_entries;
	dma_addr_t buffsize;
};
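
/* Aggregate receive state: both free buffer rings, the packet status
 * ring, the status write-back block, and the list of ready receive
 * frame descriptors (RFDs).
 */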
struct rx_ring {
	struct fbr_lookup *fbr[NUM_FBRS];
	void *ps_ring_virtaddr;
	dma_addr_t ps_ring_physaddr;
	u32 local_psr_full;
	u32 psr_entries;

	struct rx_status_block *rx_status_block;
	dma_addr_t rx_status_bus;

	struct list_head recv_list;
	u32 num_ready_recv;

	u32 num_rfd;

	bool unfinished_receives;
};

#define TXDESC_FLAG_LASTPKT 0x0001
#define TXDESC_FLAG_FIRSTPKT 0x0002
#define TXDESC_FLAG_INTPROC 0x0004

struct tx_desc {
	u32 addr_hi;
	u32 addr_lo;
	u32 len_vlan;
	u32 flags;
};
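
/* Transmit Control Block, one per in-flight skb.  index and index_start
 * are 10-bit (index plus wrap bit) positions of the packet's last and
 * first descriptors in the TX descriptor ring.
 */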
struct tcb {
	struct tcb *next;
	u32 count;
	u32 stale;
	struct sk_buff *skb;
	u32 index;
	u32 index_start;
};

struct tx_ring {
	struct tcb *tcb_ring;

	struct tcb *tcb_qhead;
	struct tcb *tcb_qtail;

	struct tcb *send_head;
	struct tcb *send_tail;
	int used;

	struct tx_desc *tx_desc_ring;
	dma_addr_t tx_desc_ring_pa;

	u32 send_idx;

	u32 *tx_status;
	dma_addr_t tx_status_pa;

	int since_irq;
};

#define NUM_DESC_PER_RING_TX 512
#define NUM_TCB 64

#define TX_ERROR_PERIOD 1000

#define LO_MARK_PERCENT_FOR_PSR 15
#define LO_MARK_PERCENT_FOR_RX 15

struct rfd {
	struct list_head list_node;
	struct sk_buff *skb;
	u32 len;
	u16 bufferindex;
	u8 ringindex;
};

#define FLOW_BOTH 0
#define FLOW_TXONLY 1
#define FLOW_RXONLY 2
#define FLOW_NONE 3

struct ce_stats {
	u32 multicast_pkts_rcvd;
	u32 rcvd_pkts_dropped;

	u32 tx_underflows;
	u32 tx_collisions;
	u32 tx_excessive_collisions;
	u32 tx_first_collisions;
	u32 tx_late_collisions;
	u32 tx_max_pkt_errs;
	u32 tx_deferred;

	u32 rx_overflows;
	u32 rx_length_errs;
	u32 rx_align_errs;
	u32 rx_crc_errs;
	u32 rx_code_violations;
	u32 rx_other_errs;

	u32 interrupt_status;
};

struct et131x_adapter {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct mii_bus *mii_bus;
	struct napi_struct napi;

	u32 flags;

	int link;

	u8 rom_addr[ETH_ALEN];
	u8 addr[ETH_ALEN];
	bool has_eeprom;
	u8 eeprom_data[2];

	spinlock_t tcb_send_qlock;
	spinlock_t tcb_ready_qlock;
	spinlock_t rcv_lock;

	u32 packet_filter;

	u32 multicast_addr_count;
	u8 multicast_list[NIC_MAX_MCAST_LIST][ETH_ALEN];

	struct address_map __iomem *regs;

	u8 wanted_flow;
	u32 registry_jumbo_packet;

	u8 flow;

	struct timer_list error_timer;

	u8 boot_coma;

	struct tx_ring tx_ring;

	struct rx_ring rx_ring;

	struct ce_stats stats;
};
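
/* Poll the LBCIF status dword in PCI config space until the EEPROM
 * state machine reports idle.  On success the LBCIF data byte (>= 0) is
 * returned and the raw status is stored in *status when requested; a
 * negative errno is returned on read failure or timeout.
 */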
static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
	u32 reg;
	int i;

	for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) {
		if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg))
			return -EIO;

		if ((reg & 0x3000) == 0x3000) {
			if (status)
				*status = reg;
			return reg & 0xFF;
		}
	}
	return -ETIMEDOUT;
}

static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data)
{
	struct pci_dev *pdev = adapter->pdev;
	int index = 0;
	int retries;
	int err = 0;
	int writeok = 0;
	u32 status;
	u32 val = 0;

	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;

	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE |
				  LBCIF_CONTROL_I2C_WRITE))
		return -EIO;

	for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) {
		if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
			break;

		if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data))
			break;

		err = eeprom_wait_ready(pdev, &status);
		if (err < 0)
			return 0;

		if ((status & LBCIF_STATUS_GENERAL_ERROR) &&
		    adapter->pdev->revision == 0)
			break;

		if (status & LBCIF_STATUS_ACK_ERROR) {
			udelay(10);
			continue;
		}

		writeok = 1;
		break;
	}

	udelay(10);

	while (1) {
		if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
					  LBCIF_CONTROL_LBCIF_ENABLE))
			writeok = 0;

		do {
			pci_write_config_dword(pdev,
					       LBCIF_ADDRESS_REGISTER,
					       addr);
			do {
				pci_read_config_dword(pdev,
						      LBCIF_DATA_REGISTER,
						      &val);
			} while ((val & 0x00010000) == 0);
		} while (val & 0x00040000);

		if ((val & 0xFF00) != 0xC000 || index == 10000)
			break;
		index++;
	}
	return writeok ? 0 : -EIO;
}

static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;
	u32 status;

	err = eeprom_wait_ready(pdev, NULL);
	if (err < 0)
		return err;

	if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER,
				  LBCIF_CONTROL_LBCIF_ENABLE))
		return -EIO;

	if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr))
		return -EIO;

	err = eeprom_wait_ready(pdev, &status);
	if (err < 0)
		return err;

	*pdata = err;

	return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}

static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u8 eestatus;

	pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus);

	/* The status byte is deliberately read a second time; a single
	 * read has been seen to return stale data on some systems.
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for EEPROM Status\n");
		return -EIO;
	}

	/* Any of these error bits set means the EEPROM contents could
	 * not be read back correctly.
	 */
	if (eestatus & 0x4C) {
		int write_failed = 0;

		if (pdev->revision == 0x01) {
			int i;
			static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF };

			/* Rev 1 silicon: attempt recovery by rewriting
			 * the first bytes of the EEPROM.
			 */
			for (i = 0; i < 3; i++)
				if (eeprom_write(adapter, i, eedata[i]) < 0)
					write_failed = 1;
		}
		if (pdev->revision != 0x01 || write_failed) {
			dev_err(&pdev->dev,
				"Fatal EEPROM Status Error - 0x%04x\n",
				eestatus);

			adapter->has_eeprom = 0;
			return -EIO;
		}
	}
	adapter->has_eeprom = 1;

	eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]);
	eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]);

	if (adapter->eeprom_data[0] != 0xcd)
		adapter->eeprom_data[1] = 0x00;

	return 0;
}

static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
	u32 csr = ET_RXDMA_CSR_FBR1_ENABLE;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	if (rx_ring->fbr[1]->buffsize == 4096)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO;
	else if (rx_ring->fbr[1]->buffsize == 8192)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_HI;
	else if (rx_ring->fbr[1]->buffsize == 16384)
		csr |= ET_RXDMA_CSR_FBR1_SIZE_LO | ET_RXDMA_CSR_FBR1_SIZE_HI;

	csr |= ET_RXDMA_CSR_FBR0_ENABLE;
	if (rx_ring->fbr[0]->buffsize == 256)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO;
	else if (rx_ring->fbr[0]->buffsize == 512)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_HI;
	else if (rx_ring->fbr[0]->buffsize == 1024)
		csr |= ET_RXDMA_CSR_FBR0_SIZE_LO | ET_RXDMA_CSR_FBR0_SIZE_HI;
	writel(csr, &adapter->regs->rxdma.csr);

	csr = readl(&adapter->regs->rxdma.csr);
	if (csr & ET_RXDMA_CSR_HALT_STATUS) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (csr & ET_RXDMA_CSR_HALT_STATUS) {
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to exit halt state. CSR 0x%08x\n",
				csr);
		}
	}
}

static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;

	writel(ET_RXDMA_CSR_HALT | ET_RXDMA_CSR_FBR1_ENABLE,
	       &adapter->regs->rxdma.csr);
	csr = readl(&adapter->regs->rxdma.csr);
	if (!(csr & ET_RXDMA_CSR_HALT_STATUS)) {
		udelay(5);
		csr = readl(&adapter->regs->rxdma.csr);
		if (!(csr & ET_RXDMA_CSR_HALT_STATUS))
			dev_err(&adapter->pdev->dev,
				"RX Dma failed to enter halt state. CSR 0x%08x\n",
				csr);
	}
}

static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	writel(ET_TXDMA_SNGL_EPKT | (PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &adapter->regs->txdma.csr);
}
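
/* Ring offsets are kept as an index plus a wrap bit (ET_DMA10_WRAP /
 * ET_DMA12_WRAP) so that a full ring can be distinguished from an empty
 * one when head and tail indices are equal.  These helpers advance the
 * index modulo 2^10 (or 2^12) while preserving the current wrap bit;
 * toggling the wrap bit on rollover is left to the callers (see
 * bump_free_buff_ring() and nic_send_packet()).
 */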
static inline void add_10bit(u32 *v, int n)
{
	*v = INDEX10(*v + n) | (*v & ET_DMA10_WRAP);
}

static inline void add_12bit(u32 *v, int n)
{
	*v = INDEX12(*v + n) | (*v & ET_DMA12_WRAP);
}

static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
	struct mac_regs __iomem *macregs = &adapter->regs->mac;
	u32 station1;
	u32 station2;
	u32 ipg;

	writel(ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	       ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	       ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC,
	       &macregs->cfg1);

	ipg = 0x38005860;
	ipg |= 0x50 << 8;
	writel(ipg, &macregs->ipg);

	writel(0x00A1F037, &macregs->hfdp);

	writel(0, &macregs->if_ctrl);

	writel(ET_MAC_MIIMGMT_CLK_RST, &macregs->mii_mgmt_cfg);

	station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) |
		   (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT);
	station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) |
		   (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) |
		   (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) |
		   adapter->addr[2];
	writel(station1, &macregs->station_addr_1);
	writel(station2, &macregs->station_addr_2);

	writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len);

	writel(0, &macregs->cfg1);
}

static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
	int32_t delay = 0;
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->netdev->phydev;
	u32 cfg1;
	u32 cfg2;
	u32 ifctrl;
	u32 ctl;

	ctl = readl(&adapter->regs->txmac.ctl);
	cfg1 = readl(&mac->cfg1);
	cfg2 = readl(&mac->cfg2);
	ifctrl = readl(&mac->if_ctrl);

	cfg2 &= ~ET_MAC_CFG2_IFMODE_MASK;
	if (phydev->speed == SPEED_1000) {
		cfg2 |= ET_MAC_CFG2_IFMODE_1000;
		ifctrl &= ~ET_MAC_IFCTRL_PHYMODE;
	} else {
		cfg2 |= ET_MAC_CFG2_IFMODE_100;
		ifctrl |= ET_MAC_IFCTRL_PHYMODE;
	}

	cfg1 |= ET_MAC_CFG1_RX_ENABLE | ET_MAC_CFG1_TX_ENABLE |
		ET_MAC_CFG1_TX_FLOW;

	cfg1 &= ~(ET_MAC_CFG1_LOOPBACK | ET_MAC_CFG1_RX_FLOW);
	if (adapter->flow == FLOW_RXONLY || adapter->flow == FLOW_BOTH)
		cfg1 |= ET_MAC_CFG1_RX_FLOW;
	writel(cfg1, &mac->cfg1);

	cfg2 |= 0x7 << ET_MAC_CFG2_PREAMBLE_SHIFT;
	cfg2 |= ET_MAC_CFG2_IFMODE_LEN_CHECK;
	cfg2 |= ET_MAC_CFG2_IFMODE_PAD_CRC;
	cfg2 |= ET_MAC_CFG2_IFMODE_CRC_ENABLE;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_HUGE_FRAME;
	cfg2 &= ~ET_MAC_CFG2_IFMODE_FULL_DPLX;

	if (phydev->duplex == DUPLEX_FULL)
		cfg2 |= ET_MAC_CFG2_IFMODE_FULL_DPLX;

	ifctrl &= ~ET_MAC_IFCTRL_GHDMODE;
	if (phydev->duplex == DUPLEX_HALF)
		ifctrl |= ET_MAC_IFCTRL_GHDMODE;

	writel(ifctrl, &mac->if_ctrl);
	writel(cfg2, &mac->cfg2);

	do {
		udelay(10);
		delay++;
		cfg1 = readl(&mac->cfg1);
	} while ((cfg1 & ET_MAC_CFG1_WAIT) != ET_MAC_CFG1_WAIT && delay < 100);

	if (delay == 100) {
		dev_warn(&adapter->pdev->dev,
			 "Syncd bits did not respond correctly cfg1 word 0x%08x\n",
			 cfg1);
	}

	ctl |= ET_TX_CTRL_TXMAC_ENABLE | ET_TX_CTRL_FC_DISABLE;
	writel(ctl, &adapter->regs->txmac.ctl);

	if (adapter->flags & FMP_ADAPTER_LOWER_POWER) {
		et131x_rx_dma_enable(adapter);
		et131x_tx_dma_enable(adapter);
	}
}

static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr = readl(&adapter->regs->global.pm_csr);

	return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}

static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 hash1 = 0;
	u32 hash2 = 0;
	u32 hash3 = 0;
	u32 hash4 = 0;
	u32 pm_csr;

	if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
		int i;

		for (i = 0; i < adapter->multicast_addr_count; i++) {
			u32 result;

			result = ether_crc(6, adapter->multicast_list[i]);

			result = (result & 0x3F800000) >> 23;

			if (result < 32) {
				hash1 |= (1 << result);
			} else if ((31 < result) && (result < 64)) {
				result -= 32;
				hash2 |= (1 << result);
			} else if ((63 < result) && (result < 96)) {
				result -= 64;
				hash3 |= (1 << result);
			} else {
				result -= 96;
				hash4 |= (1 << result);
			}
		}
	}

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(hash1, &rxmac->multi_hash1);
		writel(hash2, &rxmac->multi_hash2);
		writel(hash3, &rxmac->multi_hash3);
		writel(hash4, &rxmac->multi_hash4);
	}
}

static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	u32 uni_pf1;
	u32 uni_pf2;
	u32 uni_pf3;
	u32 pm_csr;

	uni_pf3 = (adapter->addr[0] << ET_RX_UNI_PF_ADDR2_1_SHIFT) |
		  (adapter->addr[1] << ET_RX_UNI_PF_ADDR2_2_SHIFT) |
		  (adapter->addr[0] << ET_RX_UNI_PF_ADDR1_1_SHIFT) |
		  adapter->addr[1];

	uni_pf2 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR2_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR2_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR2_5_SHIFT) |
		  adapter->addr[5];

	uni_pf1 = (adapter->addr[2] << ET_RX_UNI_PF_ADDR1_3_SHIFT) |
		  (adapter->addr[3] << ET_RX_UNI_PF_ADDR1_4_SHIFT) |
		  (adapter->addr[4] << ET_RX_UNI_PF_ADDR1_5_SHIFT) |
		  adapter->addr[5];

	pm_csr = readl(&adapter->regs->global.pm_csr);
	if (!et1310_in_phy_coma(adapter)) {
		writel(uni_pf1, &rxmac->uni_pf_addr1);
		writel(uni_pf2, &rxmac->uni_pf_addr2);
		writel(uni_pf3, &rxmac->uni_pf_addr3);
	}
}

static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
	struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
	struct phy_device *phydev = adapter->netdev->phydev;
	u32 sa_lo;
	u32 sa_hi = 0;
	u32 pf_ctrl = 0;
	u32 __iomem *wolw;

	writel(0x8, &rxmac->ctrl);

	writel(0, &rxmac->crc0);
	writel(0, &rxmac->crc12);
	writel(0, &rxmac->crc34);

	for (wolw = &rxmac->mask0_word0; wolw <= &rxmac->mask4_word3; wolw++)
		writel(0, wolw);

	sa_lo = (adapter->addr[2] << ET_RX_WOL_LO_SA3_SHIFT) |
		(adapter->addr[3] << ET_RX_WOL_LO_SA4_SHIFT) |
		(adapter->addr[4] << ET_RX_WOL_LO_SA5_SHIFT) |
		adapter->addr[5];
	writel(sa_lo, &rxmac->sa_lo);

	sa_hi = (u32)(adapter->addr[0] << ET_RX_WOL_HI_SA1_SHIFT) |
		adapter->addr[1];
	writel(sa_hi, &rxmac->sa_hi);

	writel(0, &rxmac->pf_ctrl);

	if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) {
		et1310_setup_device_for_unicast(adapter);
		pf_ctrl |= ET_RX_PFCTRL_UNICST_FILTER_ENABLE;
	} else {
		writel(0, &rxmac->uni_pf_addr1);
		writel(0, &rxmac->uni_pf_addr2);
		writel(0, &rxmac->uni_pf_addr3);
	}

	if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) {
		pf_ctrl |= ET_RX_PFCTRL_MLTCST_FILTER_ENABLE;
		et1310_setup_device_for_multicast(adapter);
	}

	pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << ET_RX_PFCTRL_MIN_PKT_SZ_SHIFT;
	pf_ctrl |= ET_RX_PFCTRL_FRAG_FILTER_ENABLE;

	if (adapter->registry_jumbo_packet > 8192)
		writel(0x41, &rxmac->mcif_ctrl_max_seg);
	else
		writel(0, &rxmac->mcif_ctrl_max_seg);

	writel(0, &rxmac->mcif_water_mark);
	writel(0, &rxmac->mif_ctrl);
	writel(0, &rxmac->space_avail);

	if (phydev && phydev->speed == SPEED_100)
		writel(0x30038, &rxmac->mif_ctrl);
	else
		writel(0x30030, &rxmac->mif_ctrl);

	writel(pf_ctrl, &rxmac->pf_ctrl);
	writel(ET_RX_CTRL_RXMAC_ENABLE | ET_RX_CTRL_WOL_DISABLE, &rxmac->ctrl);
}

static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
	struct txmac_regs __iomem *txmac = &adapter->regs->txmac;

	if (adapter->flow == FLOW_NONE)
		writel(0, &txmac->cf_param);
	else
		writel(0x40, &txmac->cf_param);
}

static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
	struct macstat_regs __iomem *macstat = &adapter->regs->macstat;
	u32 __iomem *reg;

	for (reg = &macstat->txrx_0_64_byte_frames;
	     reg <= &macstat->carry_reg2; reg++)
		writel(0, reg);

	writel(0xFFFFBE32, &macstat->carry_reg1_mask);
	writel(0xFFFE7E8B, &macstat->carry_reg2_mask);
}
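
/* Read a PHY register over the MII management interface.  The current
 * mii_mgmt address and command registers are saved up front and
 * restored before returning, so the read can interleave safely with
 * other MII traffic.
 */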
static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
			       u8 reg, u16 *value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	writel(0, &mac->mii_mgmt_cmd);

	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(0x1, &mac->mii_mgmt_cmd);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_WAIT) && delay < 50);

	if (delay == 50) {
		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be read\n", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);

		status = -EIO;
		goto out;
	}

	*value = readl(&mac->mii_mgmt_stat) & ET_MAC_MIIMGMT_STAT_PHYCRTL_MASK;

out:
	writel(0, &mac->mii_mgmt_cmd);

	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
	struct phy_device *phydev = adapter->netdev->phydev;

	if (!phydev)
		return -EIO;

	return et131x_phy_mii_read(adapter, phydev->mdio.addr, reg, value);
}

static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg,
			    u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	int status = 0;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	writel(0, &mac->mii_mgmt_cmd);

	writel(ET_MAC_MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	writel(value, &mac->mii_mgmt_ctrl);

	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & ET_MAC_MGMT_BUSY) && delay < 100);

	if (delay == 100) {
		u16 tmp;

		dev_warn(&adapter->pdev->dev,
			 "reg 0x%08x could not be written", reg);
		dev_warn(&adapter->pdev->dev, "status is 0x%08x\n",
			 mii_indicator);
		dev_warn(&adapter->pdev->dev, "command is 0x%08x\n",
			 readl(&mac->mii_mgmt_cmd));

		et131x_mii_read(adapter, reg, &tmp);

		status = -EIO;
	}

	writel(0, &mac->mii_mgmt_cmd);

	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
				    u16 regnum,
				    u16 bitnum,
				    u8 *value)
{
	u16 reg;
	u16 mask = 1 << bitnum;

	et131x_mii_read(adapter, regnum, &reg);

	*value = (reg & mask) >> bitnum;
}
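
/* Derive the flow control mode from the link partner's PAUSE and
 * asymmetric-PAUSE advertisement (MII register 5, bits 10 and 11),
 * constrained by the locally requested wanted_flow.  Half-duplex links
 * never use flow control.
 */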
static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->netdev->phydev;

	if (phydev->duplex == DUPLEX_HALF) {
		adapter->flow = FLOW_NONE;
	} else {
		char remote_pause, remote_async_pause;

		et1310_phy_read_mii_bit(adapter, 5, 10, &remote_pause);
		et1310_phy_read_mii_bit(adapter, 5, 11, &remote_async_pause);

		if (remote_pause && remote_async_pause) {
			adapter->flow = adapter->wanted_flow;
		} else if (remote_pause && !remote_async_pause) {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flow = FLOW_BOTH;
			else
				adapter->flow = FLOW_NONE;
		} else if (!remote_pause && !remote_async_pause) {
			adapter->flow = FLOW_NONE;
		} else {
			if (adapter->wanted_flow == FLOW_BOTH)
				adapter->flow = FLOW_RXONLY;
			else
				adapter->flow = FLOW_NONE;
		}
	}
}

static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
	struct ce_stats *stats = &adapter->stats;
	struct macstat_regs __iomem *macstat =
		&adapter->regs->macstat;

	stats->tx_collisions += readl(&macstat->tx_total_collisions);
	stats->tx_first_collisions += readl(&macstat->tx_single_collisions);
	stats->tx_deferred += readl(&macstat->tx_deferred);
	stats->tx_excessive_collisions +=
		readl(&macstat->tx_multiple_collisions);
	stats->tx_late_collisions += readl(&macstat->tx_late_collisions);
	stats->tx_underflows += readl(&macstat->tx_undersize_frames);
	stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames);

	stats->rx_align_errs += readl(&macstat->rx_align_errs);
	stats->rx_crc_errs += readl(&macstat->rx_code_errs);
	stats->rcvd_pkts_dropped += readl(&macstat->rx_drops);
	stats->rx_overflows += readl(&macstat->rx_oversize_packets);
	stats->rx_code_violations += readl(&macstat->rx_fcs_errs);
	stats->rx_length_errs += readl(&macstat->rx_frame_len_errs);
	stats->rx_other_errs += readl(&macstat->rx_fragment_packets);
}
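
/* One of the MACSTAT counters has crossed its interrupt threshold.
 * Reading the carry registers and writing the values back clears the
 * set bits; each set carry bit means the corresponding hardware counter
 * wrapped, so the wrap amount is folded into the running host
 * statistics.
 */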
static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
	u32 carry_reg1;
	u32 carry_reg2;

	carry_reg1 = readl(&adapter->regs->macstat.carry_reg1);
	carry_reg2 = readl(&adapter->regs->macstat.carry_reg2);

	writel(carry_reg1, &adapter->regs->macstat.carry_reg1);
	writel(carry_reg2, &adapter->regs->macstat.carry_reg2);

	if (carry_reg1 & (1 << 14))
		adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 8))
		adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg1 & (1 << 7))
		adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 2))
		adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 6))
		adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 3))
		adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT;
	if (carry_reg1 & (1 << 0))
		adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT;
	if (carry_reg2 & (1 << 16))
		adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 15))
		adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 6))
		adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 8))
		adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 5))
		adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 4))
		adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT;
	if (carry_reg2 & (1 << 2))
		adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}

static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);
	u16 value;
	int ret;

	ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value);

	if (ret < 0)
		return ret;

	return value;
}

static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
			     int reg, u16 value)
{
	struct net_device *netdev = bus->priv;
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return et131x_mii_write(adapter, phy_addr, reg, value);
}
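
/* Set or clear the PHY power-down bit in BMCR; down == true powers the
 * PHY down.
 */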
static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
{
	u16 data;
	struct phy_device *phydev = adapter->netdev->phydev;

	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, phydev->mdio.addr, MII_BMCR, data);
}

static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 lcr2;
	struct phy_device *phydev = adapter->netdev->phydev;

	if ((adapter->eeprom_data[1] & 0x4) == 0) {
		et131x_mii_read(adapter, PHY_LED_2, &lcr2);

		lcr2 &= (ET_LED2_LED_100TX | ET_LED2_LED_1000T);
		lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT);

		if ((adapter->eeprom_data[1] & 0x8) == 0)
			lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT);
		else
			lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT);

		et131x_mii_write(adapter, phydev->mdio.addr, PHY_LED_2, lcr2);
	}
}

static void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
	struct global_regs __iomem *regs = &adapter->regs->global;

	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}

	writel(0, &regs->loopback);

	writel(0, &regs->msi_config);

	writel(0, &regs->watchdog_timer);
}
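
/* Program the receive DMA engine: the status write-back address, the
 * packet status ring, and both free buffer rings (descriptor contents,
 * base addresses, sizes and low-water marks), followed by the packet
 * count / timeout interrupt coalescing parameters.
 */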
static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct fbr_desc *fbr_entry;
	u32 entry;
	u32 psr_num_des;
	unsigned long flags;
	u8 id;

	et131x_rx_dma_disable(adapter);

	writel(upper_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_hi);
	writel(lower_32_bits(rx_local->rx_status_bus), &rx_dma->dma_wb_base_lo);

	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	writel(upper_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_hi);
	writel(lower_32_bits(rx_local->ps_ring_physaddr), &rx_dma->psr_base_lo);
	writel(rx_local->psr_entries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	psr_num_des = readl(&rx_dma->psr_num_des) & ET_RXDMA_PSR_NUM_DES_MASK;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	rx_local->local_psr_full = 0;

	for (id = 0; id < NUM_FBRS; id++) {
		u32 __iomem *num_des;
		u32 __iomem *full_offset;
		u32 __iomem *min_des;
		u32 __iomem *base_hi;
		u32 __iomem *base_lo;
		struct fbr_lookup *fbr = rx_local->fbr[id];

		if (id == 0) {
			num_des = &rx_dma->fbr0_num_des;
			full_offset = &rx_dma->fbr0_full_offset;
			min_des = &rx_dma->fbr0_min_des;
			base_hi = &rx_dma->fbr0_base_hi;
			base_lo = &rx_dma->fbr0_base_lo;
		} else {
			num_des = &rx_dma->fbr1_num_des;
			full_offset = &rx_dma->fbr1_full_offset;
			min_des = &rx_dma->fbr1_min_des;
			base_hi = &rx_dma->fbr1_base_hi;
			base_lo = &rx_dma->fbr1_base_lo;
		}

		fbr_entry = fbr->ring_virtaddr;
		for (entry = 0; entry < fbr->num_entries; entry++) {
			fbr_entry->addr_hi = fbr->bus_high[entry];
			fbr_entry->addr_lo = fbr->bus_low[entry];
			fbr_entry->word2 = entry;
			fbr_entry++;
		}

		writel(upper_32_bits(fbr->ring_physaddr), base_hi);
		writel(lower_32_bits(fbr->ring_physaddr), base_lo);
		writel(fbr->num_entries - 1, num_des);
		writel(ET_DMA10_WRAP, full_offset);

		fbr->local_full = ET_DMA10_WRAP;
		writel(((fbr->num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
		       min_des);
	}

	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
}

static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	writel(upper_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_hi);
	writel(lower_32_bits(tx_ring->tx_desc_ring_pa), &txdma->pr_base_lo);

	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	writel(upper_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_hi);
	writel(lower_32_bits(tx_ring->tx_status_pa), &txdma->dma_wb_base_lo);

	*tx_ring->tx_status = 0;

	writel(0, &txdma->service_request);
	tx_ring->send_idx = 0;
}

static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
	et131x_configure_global_regs(adapter);
	et1310_config_mac_regs1(adapter);

	writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);

	et1310_config_rxmac_regs(adapter);
	et1310_config_txmac_regs(adapter);

	et131x_config_rx_dma_regs(adapter);
	et131x_config_tx_dma_regs(adapter);

	et1310_config_macstat_regs(adapter);

	et1310_phy_power_switch(adapter, 0);
	et131x_xcvr_init(adapter);
}

static void et131x_soft_reset(struct et131x_adapter *adapter)
{
	u32 reg;

	reg = ET_MAC_CFG1_SOFT_RESET | ET_MAC_CFG1_SIM_RESET |
	      ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);

	reg = ET_RESET_ALL;
	writel(reg, &adapter->regs->global.sw_reset);

	reg = ET_MAC_CFG1_RESET_RXMC | ET_MAC_CFG1_RESET_TXMC |
	      ET_MAC_CFG1_RESET_RXFUNC | ET_MAC_CFG1_RESET_TXFUNC;
	writel(reg, &adapter->regs->mac.cfg1);
	writel(0, &adapter->regs->mac.cfg1);
}

static void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
	u32 mask;

	if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
		mask = INT_MASK_ENABLE;
	else
		mask = INT_MASK_ENABLE_NO_FLOW;

	writel(mask, &adapter->regs->global.int_mask);
}

static void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}

static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	writel(ET_TXDMA_CSR_HALT | ET_TXDMA_SNGL_EPKT,
	       &adapter->regs->txdma.csr);
}

static void et131x_enable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	et131x_rx_dma_enable(adapter);
	et131x_tx_dma_enable(adapter);

	if (adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE)
		et131x_enable_interrupts(adapter);

	netif_start_queue(netdev);
}

static void et131x_disable_txrx(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	netif_stop_queue(netdev);

	et131x_rx_dma_disable(adapter);
	et131x_tx_dma_disable(adapter);

	et131x_disable_interrupts(adapter);
}

static void et131x_init_send(struct et131x_adapter *adapter)
{
	int i;
	struct tx_ring *tx_ring = &adapter->tx_ring;
	struct tcb *tcb = tx_ring->tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	for (i = 0; i < NUM_TCB; i++) {
		tcb->next = tcb + 1;
		tcb++;
	}

	tcb--;
	tx_ring->tcb_qtail = tcb;
	tcb->next = NULL;

	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}
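
/* Enter PHY coma mode, the chip's low-power state.  Transmit and
 * receive are stopped first; clearing the PM init bits and setting
 * ET_PM_PHY_SW_COMA in pm_csr then powers the device down until
 * et1310_disable_phy_coma() reverses the sequence.
 */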
static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr = readl(&adapter->regs->global.pm_csr);

	adapter->flags |= FMP_ADAPTER_LOWER_POWER;

	et131x_disable_txrx(adapter->netdev);

	pmcsr &= ~ET_PMCSR_INIT;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	pmcsr |= ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);
}

static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	pmcsr |= ET_PMCSR_INIT;
	pmcsr &= ~ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	et131x_init_send(adapter);

	et131x_soft_reset(adapter);

	et131x_adapter_setup(adapter);

	adapter->flags &= ~FMP_ADAPTER_LOWER_POWER;

	et131x_enable_txrx(adapter->netdev);
}
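
/* Advance a free buffer ring offset by one entry, toggling the wrap bit
 * whenever the 10-bit index moves past 'limit' (the last valid entry).
 * Returns the new offset in the hardware's index + wrap-bit format.
 */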
static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
{
	u32 tmp_free_buff_ring = *free_buff_ring;

	tmp_free_buff_ring++;

	if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) {
		tmp_free_buff_ring &= ~ET_DMA10_MASK;
		tmp_free_buff_ring ^= ET_DMA10_WRAP;
	}

	tmp_free_buff_ring &= (ET_DMA10_MASK | ET_DMA10_WRAP);
	*free_buff_ring = tmp_free_buff_ring;
	return tmp_free_buff_ring;
}
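
/* Allocate all receive-side DMA memory: the two free buffer ring lookup
 * tables and descriptor rings (entry counts and buffer sizes scale with
 * the configured jumbo packet size), the receive buffers themselves in
 * FBR_CHUNKS-sized coherent chunks, the packet status ring, and the
 * status write-back block.
 */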
static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	u8 id;
	u32 i, j;
	u32 bufsize;
	u32 psr_size;
	u32 fbr_chunksize;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct fbr_lookup *fbr;

	rx_ring->fbr[0] = kzalloc(sizeof(*fbr), GFP_KERNEL);
	if (rx_ring->fbr[0] == NULL)
		return -ENOMEM;
	rx_ring->fbr[1] = kzalloc(sizeof(*fbr), GFP_KERNEL);
	if (rx_ring->fbr[1] == NULL)
		return -ENOMEM;

	if (adapter->registry_jumbo_packet < 2048) {
		rx_ring->fbr[0]->buffsize = 256;
		rx_ring->fbr[0]->num_entries = 512;
		rx_ring->fbr[1]->buffsize = 2048;
		rx_ring->fbr[1]->num_entries = 512;
	} else if (adapter->registry_jumbo_packet < 4096) {
		rx_ring->fbr[0]->buffsize = 512;
		rx_ring->fbr[0]->num_entries = 1024;
		rx_ring->fbr[1]->buffsize = 4096;
		rx_ring->fbr[1]->num_entries = 512;
	} else {
		rx_ring->fbr[0]->buffsize = 1024;
		rx_ring->fbr[0]->num_entries = 768;
		rx_ring->fbr[1]->buffsize = 16384;
		rx_ring->fbr[1]->num_entries = 128;
	}

	rx_ring->psr_entries = rx_ring->fbr[0]->num_entries +
			       rx_ring->fbr[1]->num_entries;

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];

		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;
		fbr->ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
							bufsize,
							&fbr->ring_physaddr,
							GFP_KERNEL);
		if (!fbr->ring_virtaddr) {
			dev_err(&adapter->pdev->dev,
				"Cannot alloc memory for Free Buffer Ring %d\n",
				id);
			return -ENOMEM;
		}
	}

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];
		fbr_chunksize = (FBR_CHUNKS * fbr->buffsize);

		for (i = 0; i < fbr->num_entries / FBR_CHUNKS; i++) {
			dma_addr_t fbr_physaddr;

			fbr->mem_virtaddrs[i] = dma_alloc_coherent(
					&adapter->pdev->dev, fbr_chunksize,
					&fbr->mem_physaddrs[i],
					GFP_KERNEL);

			if (!fbr->mem_virtaddrs[i]) {
				dev_err(&adapter->pdev->dev,
					"Could not alloc memory\n");
				return -ENOMEM;
			}

			fbr_physaddr = fbr->mem_physaddrs[i];

			for (j = 0; j < FBR_CHUNKS; j++) {
				u32 k = (i * FBR_CHUNKS) + j;

				fbr->virt[k] = (u8 *)fbr->mem_virtaddrs[i] +
					       (j * fbr->buffsize);

				fbr->bus_high[k] = upper_32_bits(fbr_physaddr);
				fbr->bus_low[k] = lower_32_bits(fbr_physaddr);
				fbr_physaddr += fbr->buffsize;
			}
		}
	}

	psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;

	rx_ring->ps_ring_virtaddr = dma_alloc_coherent(&adapter->pdev->dev,
						       psr_size,
						       &rx_ring->ps_ring_physaddr,
						       GFP_KERNEL);

	if (!rx_ring->ps_ring_virtaddr) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Packet Status Ring\n");
		return -ENOMEM;
	}

	rx_ring->rx_status_block = dma_alloc_coherent(&adapter->pdev->dev,
						      sizeof(struct rx_status_block),
						      &rx_ring->rx_status_bus,
						      GFP_KERNEL);
	if (!rx_ring->rx_status_block) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Status Block\n");
		return -ENOMEM;
	}
	rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD;

	INIT_LIST_HEAD(&rx_ring->recv_list);
	return 0;
}

static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
	u8 id;
	u32 ii;
	u32 bufsize;
	u32 psr_size;
	struct rfd *rfd;
	struct rx_ring *rx_ring = &adapter->rx_ring;
	struct fbr_lookup *fbr;

	WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd);

	while (!list_empty(&rx_ring->recv_list)) {
		rfd = list_entry(rx_ring->recv_list.next,
				 struct rfd, list_node);

		list_del(&rfd->list_node);
		rfd->skb = NULL;
		kfree(rfd);
	}

	for (id = 0; id < NUM_FBRS; id++) {
		fbr = rx_ring->fbr[id];

		if (!fbr || !fbr->ring_virtaddr)
			continue;

		for (ii = 0; ii < fbr->num_entries / FBR_CHUNKS; ii++) {
			if (fbr->mem_virtaddrs[ii]) {
				bufsize = fbr->buffsize * FBR_CHUNKS;

				dma_free_coherent(&adapter->pdev->dev,
						  bufsize,
						  fbr->mem_virtaddrs[ii],
						  fbr->mem_physaddrs[ii]);

				fbr->mem_virtaddrs[ii] = NULL;
			}
		}

		bufsize = sizeof(struct fbr_desc) * fbr->num_entries;

		dma_free_coherent(&adapter->pdev->dev,
				  bufsize,
				  fbr->ring_virtaddr,
				  fbr->ring_physaddr);

		fbr->ring_virtaddr = NULL;
	}

	if (rx_ring->ps_ring_virtaddr) {
		psr_size = sizeof(struct pkt_stat_desc) * rx_ring->psr_entries;

		dma_free_coherent(&adapter->pdev->dev, psr_size,
				  rx_ring->ps_ring_virtaddr,
				  rx_ring->ps_ring_physaddr);

		rx_ring->ps_ring_virtaddr = NULL;
	}

	if (rx_ring->rx_status_block) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(struct rx_status_block),
				  rx_ring->rx_status_block,
				  rx_ring->rx_status_bus);
		rx_ring->rx_status_block = NULL;
	}

	kfree(rx_ring->fbr[0]);
	kfree(rx_ring->fbr[1]);

	rx_ring->num_ready_recv = 0;
}

static int et131x_init_recv(struct et131x_adapter *adapter)
{
	struct rfd *rfd;
	u32 rfdct;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
		rfd = kzalloc(sizeof(*rfd), GFP_ATOMIC | GFP_DMA);
		if (!rfd)
			return -ENOMEM;

		rfd->skb = NULL;

		list_add_tail(&rfd->list_node, &rx_ring->recv_list);

		rx_ring->num_ready_recv++;
	}

	return 0;
}

static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->netdev->phydev;

	if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
		writel(0, &adapter->regs->rxdma.max_pkt_time);
		writel(1, &adapter->regs->rxdma.num_pkt_done);
	}
}

static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	u16 buff_index = rfd->bufferindex;
	u8 ring_index = rfd->ringindex;
	unsigned long flags;
	struct fbr_lookup *fbr = rx_local->fbr[ring_index];

	if (buff_index < fbr->num_entries) {
		u32 free_buff_ring;
		u32 __iomem *offset;
		struct fbr_desc *next;

		if (ring_index == 0)
			offset = &rx_dma->fbr0_full_offset;
		else
			offset = &rx_dma->fbr1_full_offset;

		next = (struct fbr_desc *)(fbr->ring_virtaddr) +
		       INDEX10(fbr->local_full);

		next->addr_hi = fbr->bus_high[buff_index];
		next->addr_lo = fbr->bus_low[buff_index];
		next->word2 = buff_index;

		free_buff_ring = bump_free_buff_ring(&fbr->local_full,
						     fbr->num_entries - 1);
		writel(free_buff_ring, offset);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s illegal Buffer Index returned\n", __func__);
	}

	spin_lock_irqsave(&adapter->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->recv_list);
	rx_local->num_ready_recv++;
	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}
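
/* Handle one completed frame from the packet status ring: decode the
 * length, buffer index and ring index, detach an RFD from the receive
 * list, copy the data into a freshly allocated skb and pass it up the
 * stack, then return the buffer and RFD to the hardware via
 * nic_return_rfd().  Returns NULL when nothing is pending or on error.
 */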
static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rx_status_block *status;
	struct pkt_stat_desc *psr;
	struct rfd *rfd;
	unsigned long flags;
	struct list_head *element;
	u8 ring_index;
	u16 buff_index;
	u32 len;
	u32 word0;
	u32 word1;
	struct sk_buff *skb;
	struct fbr_lookup *fbr;

	status = rx_local->rx_status_block;
	word1 = status->word1 >> 16;

	if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF))
		return NULL;

	psr = (struct pkt_stat_desc *)(rx_local->ps_ring_virtaddr) +
	      (rx_local->local_psr_full & 0xFFF);

	len = psr->word1 & 0xFFFF;
	ring_index = (psr->word1 >> 26) & 0x03;
	fbr = rx_local->fbr[ring_index];
	buff_index = (psr->word1 >> 16) & 0x3FF;
	word0 = psr->word0;

	add_12bit(&rx_local->local_psr_full, 1);
	if ((rx_local->local_psr_full & 0xFFF) > rx_local->psr_entries - 1) {
		rx_local->local_psr_full &= ~0xFFF;
		rx_local->local_psr_full ^= 0x1000;
	}

	writel(rx_local->local_psr_full, &adapter->regs->rxdma.psr_full_offset);

	if (ring_index > 1 || buff_index > fbr->num_entries - 1) {
		dev_err(&adapter->pdev->dev,
			"NICRxPkts PSR Entry %d indicates length of %d and/or bad bi(%d)\n",
			rx_local->local_psr_full & 0xFFF, len, buff_index);
		return NULL;
	}

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	element = rx_local->recv_list.next;
	rfd = list_entry(element, struct rfd, list_node);

	if (!rfd) {
		spin_unlock_irqrestore(&adapter->rcv_lock, flags);
		return NULL;
	}

	list_del(&rfd->list_node);
	rx_local->num_ready_recv--;

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	rfd->bufferindex = buff_index;
	rfd->ringindex = ring_index;

	if (len < (NIC_MIN_PACKET_SIZE + 4)) {
		adapter->stats.rx_other_errs++;
		rfd->len = 0;
		goto out;
	}

	if ((word0 & ALCATEL_MULTICAST_PKT) && !(word0 & ALCATEL_BROADCAST_PKT))
		adapter->stats.multicast_pkts_rcvd++;

	rfd->len = len;

	skb = dev_alloc_skb(rfd->len + 2);
	if (!skb)
		return NULL;

	adapter->netdev->stats.rx_bytes += rfd->len;

	skb_put_data(skb, fbr->virt[buff_index], rfd->len);

	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->ip_summed = CHECKSUM_NONE;
	netif_receive_skb(skb);

out:
	nic_return_rfd(adapter, rfd);
	return rfd;
}

static int et131x_handle_recv_pkts(struct et131x_adapter *adapter, int budget)
{
	struct rfd *rfd = NULL;
	int count = 0;
	int limit = budget;
	bool done = true;
	struct rx_ring *rx_ring = &adapter->rx_ring;

	if (budget > MAX_PACKETS_HANDLED)
		limit = MAX_PACKETS_HANDLED;

	while (count < limit) {
		if (list_empty(&rx_ring->recv_list)) {
			WARN_ON(rx_ring->num_ready_recv != 0);
			done = false;
			break;
		}

		rfd = nic_rx_pkts(adapter);

		if (rfd == NULL)
			break;

		if (!adapter->packet_filter ||
		    !netif_carrier_ok(adapter->netdev) ||
		    rfd->len == 0)
			continue;

		adapter->netdev->stats.rx_packets++;

		if (rx_ring->num_ready_recv < RFD_LOW_WATER_MARK)
			dev_warn(&adapter->pdev->dev, "RFD's are running out\n");

		count++;
	}

	if (count == limit || !done) {
		rx_ring->unfinished_receives = true;
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	} else {
		rx_ring->unfinished_receives = false;
	}

	return count;
}
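
/* Allocate transmit-side memory: the TCB array (kept in kernel memory)
 * plus coherent DMA allocations for the TX descriptor ring and the
 * single-word TX status write-back block.
 */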
static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	tx_ring->tcb_ring = kcalloc(NUM_TCB, sizeof(struct tcb),
				    GFP_KERNEL | GFP_DMA);
	if (!tx_ring->tcb_ring)
		return -ENOMEM;

	desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
	tx_ring->tx_desc_ring = dma_alloc_coherent(&adapter->pdev->dev,
						   desc_size,
						   &tx_ring->tx_desc_ring_pa,
						   GFP_KERNEL);
	if (!tx_ring->tx_desc_ring) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx Ring\n");
		return -ENOMEM;
	}

	tx_ring->tx_status = dma_alloc_coherent(&adapter->pdev->dev,
						sizeof(u32),
						&tx_ring->tx_status_pa,
						GFP_KERNEL);
	if (!tx_ring->tx_status) {
		dev_err(&adapter->pdev->dev,
			"Cannot alloc memory for Tx status block\n");
		return -ENOMEM;
	}
	return 0;
}

static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
	int desc_size = 0;
	struct tx_ring *tx_ring = &adapter->tx_ring;

	if (tx_ring->tx_desc_ring) {
		desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX);
		dma_free_coherent(&adapter->pdev->dev,
				  desc_size,
				  tx_ring->tx_desc_ring,
				  tx_ring->tx_desc_ring_pa);
		tx_ring->tx_desc_ring = NULL;
	}

	if (tx_ring->tx_status) {
		dma_free_coherent(&adapter->pdev->dev,
				  sizeof(u32),
				  tx_ring->tx_status,
				  tx_ring->tx_status_pa);

		tx_ring->tx_status = NULL;
	}

	kfree(tx_ring->tcb_ring);
}
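
/* Map the skb and build its TX descriptors on the local stack: the
 * linear head first (split into two descriptors when it exceeds 1514
 * bytes, apparently as a hardware workaround), then one descriptor per
 * page fragment.  The block is copied into the descriptor ring in up to
 * two pieces around the wrap point, the TCB is queued on the send list,
 * and the new send index is written to the TXDMA service request
 * register.  Interrupt-on-completion is requested for every packet
 * except at gigabit speed, where it is requested only every
 * PARM_TX_NUM_BUFS_DEF packets and the watchdog timer covers the rest.
 */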
2421 static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
2422 {
2423 u32 i;
2424 struct tx_desc desc[24];
2425 u32 frag = 0;
2426 u32 thiscopy, remainder;
2427 struct sk_buff *skb = tcb->skb;
2428 u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
2429 skb_frag_t *frags = &skb_shinfo(skb)->frags[0];
2430 struct phy_device *phydev = adapter->netdev->phydev;
2431 dma_addr_t dma_addr;
2432 struct tx_ring *tx_ring = &adapter->tx_ring;
2433
2434
2435
2436
2437
2438
2439
2440 BUILD_BUG_ON(MAX_SKB_FRAGS + 1 > 23);
2441
2442 memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));
2443
2444 for (i = 0; i < nr_frags; i++) {
2445
2446
2447
2448 if (i == 0) {
2449
2450
2451
2452
2453
2454
2455
2456
2457
2458 if (skb_headlen(skb) <= 1514) {
2459
2460
2461
2462 desc[frag].len_vlan = skb_headlen(skb);
2463 dma_addr = dma_map_single(&adapter->pdev->dev,
2464 skb->data,
2465 skb_headlen(skb),
2466 DMA_TO_DEVICE);
2467 desc[frag].addr_lo = lower_32_bits(dma_addr);
2468 desc[frag].addr_hi = upper_32_bits(dma_addr);
2469 frag++;
2470 } else {
2471 desc[frag].len_vlan = skb_headlen(skb) / 2;
2472 dma_addr = dma_map_single(&adapter->pdev->dev,
2473 skb->data,
2474 skb_headlen(skb) / 2,
2475 DMA_TO_DEVICE);
2476 desc[frag].addr_lo = lower_32_bits(dma_addr);
2477 desc[frag].addr_hi = upper_32_bits(dma_addr);
2478 frag++;
2479
2480 desc[frag].len_vlan = skb_headlen(skb) / 2;
2481 dma_addr = dma_map_single(&adapter->pdev->dev,
2482 skb->data +
2483 skb_headlen(skb) / 2,
2484 skb_headlen(skb) / 2,
2485 DMA_TO_DEVICE);
2486 desc[frag].addr_lo = lower_32_bits(dma_addr);
2487 desc[frag].addr_hi = upper_32_bits(dma_addr);
2488 frag++;
2489 }
2490 } else {
2491 desc[frag].len_vlan = skb_frag_size(&frags[i - 1]);
2492 dma_addr = skb_frag_dma_map(&adapter->pdev->dev,
2493 &frags[i - 1],
2494 0,
2495 desc[frag].len_vlan,
2496 DMA_TO_DEVICE);
2497 desc[frag].addr_lo = lower_32_bits(dma_addr);
2498 desc[frag].addr_hi = upper_32_bits(dma_addr);
2499 frag++;
2500 }
2501 }
2502
2503 if (phydev && phydev->speed == SPEED_1000) {
2504 if (++tx_ring->since_irq == PARM_TX_NUM_BUFS_DEF) {
2505
2506 desc[frag - 1].flags =
2507 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
2508 tx_ring->since_irq = 0;
2509 } else {
2510 desc[frag - 1].flags = TXDESC_FLAG_LASTPKT;
2511 }
2512 } else {
2513 desc[frag - 1].flags =
2514 TXDESC_FLAG_INTPROC | TXDESC_FLAG_LASTPKT;
2515 }
2516
2517 desc[0].flags |= TXDESC_FLAG_FIRSTPKT;
2518
2519 tcb->index_start = tx_ring->send_idx;
2520 tcb->stale = 0;
2521
2522 thiscopy = NUM_DESC_PER_RING_TX - INDEX10(tx_ring->send_idx);
2523
2524 if (thiscopy >= frag) {
2525 remainder = 0;
2526 thiscopy = frag;
2527 } else {
2528 remainder = frag - thiscopy;
2529 }
2530
2531 memcpy(tx_ring->tx_desc_ring + INDEX10(tx_ring->send_idx),
2532 desc,
2533 sizeof(struct tx_desc) * thiscopy);
2534
2535 add_10bit(&tx_ring->send_idx, thiscopy);
2536
2537 if (INDEX10(tx_ring->send_idx) == 0 ||
2538 INDEX10(tx_ring->send_idx) == NUM_DESC_PER_RING_TX) {
2539 tx_ring->send_idx &= ~ET_DMA10_MASK;
2540 tx_ring->send_idx ^= ET_DMA10_WRAP;
2541 }
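/* Note: send_idx carries a 10-bit ring position (INDEX10/ET_DMA10_MASK)
 * plus a wrap indicator (ET_DMA10_WRAP). Toggling the wrap bit whenever
 * the position rolls over lets the completion logic in
 * et131x_handle_send_pkts() tell a full ring from an empty one when the
 * serviced index and a TCB index point at the same descriptor.
 */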
2542
2543 if (remainder) {
2544 memcpy(tx_ring->tx_desc_ring,
2545 desc + thiscopy,
2546 sizeof(struct tx_desc) * remainder);
2547
2548 add_10bit(&tx_ring->send_idx, remainder);
2549 }
2550 /* Record the descriptor index this packet ends on */
2551 if (INDEX10(tx_ring->send_idx) == 0) {
2552 if (tx_ring->send_idx)
2553 tcb->index = NUM_DESC_PER_RING_TX - 1;
2554 else
2555 tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
2556 } else {
2557 tcb->index = tx_ring->send_idx - 1;
2558 }
2559
2560 spin_lock(&adapter->tcb_send_qlock);
2561
2562 if (tx_ring->send_tail)
2563 tx_ring->send_tail->next = tcb;
2564 else
2565 tx_ring->send_head = tcb;
2566
2567 tx_ring->send_tail = tcb;
2568
2569 WARN_ON(tcb->next != NULL);
2570
2571 tx_ring->used++;
2572
2573 spin_unlock(&adapter->tcb_send_qlock);
2574
2575 /* Hand the new descriptors to the device by updating the service request */
2576 writel(tx_ring->send_idx, &adapter->regs->txdma.service_request);
2577
2578 /* For Gig only, we use Tx Interrupt coalescing. Enable the software
2579  * timer to wake us up if this packet isn't followed by N more.
2580  */
2581 if (phydev && phydev->speed == SPEED_1000) {
2582 writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
2583 &adapter->regs->global.watchdog_timer);
2584 }
2585 return 0;
2586 }
2587
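/* send_packet - Claim a TCB from the ready queue and hand the skb to the
 * NIC send handler, re-queueing the TCB if the send fails.
 */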
2588 static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
2589 {
2590 int status;
2591 struct tcb *tcb;
2592 unsigned long flags;
2593 struct tx_ring *tx_ring = &adapter->tx_ring;
2594
2595 /* All packets must have at least a MAC address and a protocol type */
2596 if (skb->len < ETH_HLEN)
2597 return -EIO;
2598
2599 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2600
2601 tcb = tx_ring->tcb_qhead;
2602
2603 if (tcb == NULL) {
2604 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2605 return -ENOMEM;
2606 }
2607
2608 tx_ring->tcb_qhead = tcb->next;
2609
2610 if (tx_ring->tcb_qhead == NULL)
2611 tx_ring->tcb_qtail = NULL;
2612
2613 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2614
2615 tcb->skb = skb;
2616 tcb->next = NULL;
2617
2618 status = nic_send_packet(adapter, tcb);
2619
2620 if (status != 0) {
2621 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2622
2623 if (tx_ring->tcb_qtail)
2624 tx_ring->tcb_qtail->next = tcb;
2625 else
2626 /* Apparently ready Q is empty */
2627 tx_ring->tcb_qhead = tcb;
2628
2629 tx_ring->tcb_qtail = tcb;
2630 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2631 return status;
2632 }
2633 WARN_ON(tx_ring->used > NUM_TCB);
2634 return 0;
2635 }
2636
2637 /* free_send_packet - Recycle a struct tcb, completing the packet if necessary */
2638 static inline void free_send_packet(struct et131x_adapter *adapter,
2639 struct tcb *tcb)
2640 {
2641 unsigned long flags;
2642 struct tx_desc *desc = NULL;
2643 struct net_device_stats *stats = &adapter->netdev->stats;
2644 struct tx_ring *tx_ring = &adapter->tx_ring;
2645 u64 dma_addr;
2646
2647 if (tcb->skb) {
2648 stats->tx_bytes += tcb->skb->len;
2649
2650 /* Iterate through the TX descriptors on the ring
2651  * corresponding to this packet and unmap the fragments
2652  * they point to
2653  */
2654 do {
2655 desc = tx_ring->tx_desc_ring +
2656 INDEX10(tcb->index_start);
2657
2658 dma_addr = desc->addr_lo;
2659 dma_addr |= (u64)desc->addr_hi << 32;
2660
2661 dma_unmap_single(&adapter->pdev->dev,
2662 dma_addr,
2663 desc->len_vlan, DMA_TO_DEVICE);
2664
2665 add_10bit(&tcb->index_start, 1);
2666 if (INDEX10(tcb->index_start) >=
2667 NUM_DESC_PER_RING_TX) {
2668 tcb->index_start &= ~ET_DMA10_MASK;
2669 tcb->index_start ^= ET_DMA10_WRAP;
2670 }
2671 } while (desc != tx_ring->tx_desc_ring + INDEX10(tcb->index));
2672
2673 dev_kfree_skb_any(tcb->skb);
2674 }
2675
2676 memset(tcb, 0, sizeof(struct tcb));
2677
2678 /* Add the TCB to the Ready Q */
2679 spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);
2680
2681 stats->tx_packets++;
2682
2683 if (tx_ring->tcb_qtail)
2684 tx_ring->tcb_qtail->next = tcb;
2685 else
2686 tx_ring->tcb_qhead = tcb;
2687
2688 tx_ring->tcb_qtail = tcb;
2689
2690 spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
2691 WARN_ON(tx_ring->used < 0);
2692 }
2693
2694 /* et131x_free_busy_send_packets - Free and complete the stopped active sends */
2695 static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
2696 {
2697 struct tcb *tcb;
2698 unsigned long flags;
2699 u32 freed = 0;
2700 struct tx_ring *tx_ring = &adapter->tx_ring;
2701
2702 /* Any packets being sent? Check the first TCB on the send list */
2703 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2704
2705 tcb = tx_ring->send_head;
2706
2707 while (tcb != NULL && freed < NUM_TCB) {
2708 struct tcb *next = tcb->next;
2709
2710 tx_ring->send_head = next;
2711
2712 if (next == NULL)
2713 tx_ring->send_tail = NULL;
2714
2715 tx_ring->used--;
2716
2717 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2718
2719 freed++;
2720 free_send_packet(adapter, tcb);
2721
2722 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2723
2724 tcb = tx_ring->send_head;
2725 }
2726
2727 WARN_ON(freed == NUM_TCB);
2728
2729 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2730
2731 tx_ring->used = 0;
2732 }
2733
2734
2735 /* et131x_handle_send_pkts - Interrupt handler for sending processing
2736  * Re-claim the send resources, complete sends and get more to send from
2737  * the send wait queue.
2738  */
2739 static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
2740 {
2741 unsigned long flags;
2742 u32 serviced;
2743 struct tcb *tcb;
2744 u32 index;
2745 struct tx_ring *tx_ring = &adapter->tx_ring;
2746
2747 serviced = readl(&adapter->regs->txdma.new_service_complete);
2748 index = INDEX10(serviced);
2749
2750 /* Has the ring wrapped? Process any descriptors that do not have
2751  * the same "wrap" indicator as the current completion indicator
2752  */
2753 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2754
2755 tcb = tx_ring->send_head;
2756
2757 while (tcb &&
2758 ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
2759 index < INDEX10(tcb->index)) {
2760 tx_ring->used--;
2761 tx_ring->send_head = tcb->next;
2762 if (tcb->next == NULL)
2763 tx_ring->send_tail = NULL;
2764
2765 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2766 free_send_packet(adapter, tcb);
2767 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2768
2769 /* Goto the next packet */
2770 tcb = tx_ring->send_head;
2771 }
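/* Same reclaim as above, for completions on the same side of the wrap:
 * descriptors are done once the serviced index has moved past them.
 */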
2772 while (tcb &&
2773 !((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
2774 index > (tcb->index & ET_DMA10_MASK)) {
2775 tx_ring->used--;
2776 tx_ring->send_head = tcb->next;
2777 if (tcb->next == NULL)
2778 tx_ring->send_tail = NULL;
2779
2780 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2781 free_send_packet(adapter, tcb);
2782 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
2783
2784 /* Goto the next packet */
2785 tcb = tx_ring->send_head;
2786 }
2787
2788 /* Wake up the queue when we hit a low-water mark */
2789 if (tx_ring->used <= NUM_TCB / 3)
2790 netif_wake_queue(adapter->netdev);
2791
2792 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
2793 }
2794
2795 static int et131x_get_regs_len(struct net_device *netdev)
2796 {
2797 #define ET131X_REGS_LEN 256
2798 return ET131X_REGS_LEN * sizeof(u32);
2799 }
2800
2801 static void et131x_get_regs(struct net_device *netdev,
2802 struct ethtool_regs *regs, void *regs_data)
2803 {
2804 struct et131x_adapter *adapter = netdev_priv(netdev);
2805 struct address_map __iomem *aregs = adapter->regs;
2806 u32 *regs_buff = regs_data;
2807 u32 num = 0;
2808 u16 tmp;
2809
2810 memset(regs_data, 0, et131x_get_regs_len(netdev));
2811 /* Build a version code from type 1, the chip revision and the PCI device id */
2812 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
2813 adapter->pdev->device;
2814
2815 /* PHY regs */
2816 et131x_mii_read(adapter, MII_BMCR, &tmp);
2817 regs_buff[num++] = tmp;
2818 et131x_mii_read(adapter, MII_BMSR, &tmp);
2819 regs_buff[num++] = tmp;
2820 et131x_mii_read(adapter, MII_PHYSID1, &tmp);
2821 regs_buff[num++] = tmp;
2822 et131x_mii_read(adapter, MII_PHYSID2, &tmp);
2823 regs_buff[num++] = tmp;
2824 et131x_mii_read(adapter, MII_ADVERTISE, &tmp);
2825 regs_buff[num++] = tmp;
2826 et131x_mii_read(adapter, MII_LPA, &tmp);
2827 regs_buff[num++] = tmp;
2828 et131x_mii_read(adapter, MII_EXPANSION, &tmp);
2829 regs_buff[num++] = tmp;
2830 /* Autoneg next page transmit reg */
2831 et131x_mii_read(adapter, 0x07, &tmp);
2832 regs_buff[num++] = tmp;
2833 /* Link partner next page reg */
2834 et131x_mii_read(adapter, 0x08, &tmp);
2835 regs_buff[num++] = tmp;
2836 et131x_mii_read(adapter, MII_CTRL1000, &tmp);
2837 regs_buff[num++] = tmp;
2838 et131x_mii_read(adapter, MII_STAT1000, &tmp);
2839 regs_buff[num++] = tmp;
2840 et131x_mii_read(adapter, 0x0b, &tmp);
2841 regs_buff[num++] = tmp;
2842 et131x_mii_read(adapter, 0x0c, &tmp);
2843 regs_buff[num++] = tmp;
2844 et131x_mii_read(adapter, MII_MMD_CTRL, &tmp);
2845 regs_buff[num++] = tmp;
2846 et131x_mii_read(adapter, MII_MMD_DATA, &tmp);
2847 regs_buff[num++] = tmp;
2848 et131x_mii_read(adapter, MII_ESTATUS, &tmp);
2849 regs_buff[num++] = tmp;
2850
2851 et131x_mii_read(adapter, PHY_INDEX_REG, &tmp);
2852 regs_buff[num++] = tmp;
2853 et131x_mii_read(adapter, PHY_DATA_REG, &tmp);
2854 regs_buff[num++] = tmp;
2855 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, &tmp);
2856 regs_buff[num++] = tmp;
2857 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, &tmp);
2858 regs_buff[num++] = tmp;
2859 et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL + 1, &tmp);
2860 regs_buff[num++] = tmp;
2861
2862 et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, &tmp);
2863 regs_buff[num++] = tmp;
2864 et131x_mii_read(adapter, PHY_CONFIG, &tmp);
2865 regs_buff[num++] = tmp;
2866 et131x_mii_read(adapter, PHY_PHY_CONTROL, &tmp);
2867 regs_buff[num++] = tmp;
2868 et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &tmp);
2869 regs_buff[num++] = tmp;
2870 et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &tmp);
2871 regs_buff[num++] = tmp;
2872 et131x_mii_read(adapter, PHY_PHY_STATUS, &tmp);
2873 regs_buff[num++] = tmp;
2874 et131x_mii_read(adapter, PHY_LED_1, &tmp);
2875 regs_buff[num++] = tmp;
2876 et131x_mii_read(adapter, PHY_LED_2, &tmp);
2877 regs_buff[num++] = tmp;
2878
2879 /* Global regs */
2880 regs_buff[num++] = readl(&aregs->global.txq_start_addr);
2881 regs_buff[num++] = readl(&aregs->global.txq_end_addr);
2882 regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
2883 regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
2884 regs_buff[num++] = readl(&aregs->global.pm_csr);
2885 regs_buff[num++] = adapter->stats.interrupt_status;
2886 regs_buff[num++] = readl(&aregs->global.int_mask);
2887 regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
2888 regs_buff[num++] = readl(&aregs->global.int_status_alias);
2889 regs_buff[num++] = readl(&aregs->global.sw_reset);
2890 regs_buff[num++] = readl(&aregs->global.slv_timer);
2891 regs_buff[num++] = readl(&aregs->global.msi_config);
2892 regs_buff[num++] = readl(&aregs->global.loopback);
2893 regs_buff[num++] = readl(&aregs->global.watchdog_timer);
2894
2895 /* TXDMA regs */
2896 regs_buff[num++] = readl(&aregs->txdma.csr);
2897 regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
2898 regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
2899 regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
2900 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
2901 regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
2902 regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
2903 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
2904 regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
2905 regs_buff[num++] = readl(&aregs->txdma.service_request);
2906 regs_buff[num++] = readl(&aregs->txdma.service_complete);
2907 regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
2908 regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
2909 regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
2910 regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
2911 regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
2912 regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
2913 regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
2914 regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
2915 regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
2916 regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
2917 regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
2918 regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
2919 regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
2920 regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
2921 regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);
2922
2923 /* RXDMA regs */
2924 regs_buff[num++] = readl(&aregs->rxdma.csr);
2925 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
2926 regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
2927 regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
2928 regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
2929 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
2930 regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
2931 regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
2932 regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
2933 regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
2934 regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
2935 regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset);
2936 regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset);
2937 regs_buff[num++] = readl(&aregs->rxdma.psr_access_index);
2938 regs_buff[num++] = readl(&aregs->rxdma.psr_min_des);
2939 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo);
2940 regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi);
2941 regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des);
2942 regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset);
2943 regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset);
2944 regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index);
2945 regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des);
2946 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo);
2947 regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi);
2948 regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des);
2949 regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset);
2950 regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset);
2951 regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index);
2952 regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des);
2953 }
2954
2955 static void et131x_get_drvinfo(struct net_device *netdev,
2956 struct ethtool_drvinfo *info)
2957 {
2958 struct et131x_adapter *adapter = netdev_priv(netdev);
2959
2960 strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
2961 strlcpy(info->version, DRIVER_VERSION, sizeof(info->version));
2962 strlcpy(info->bus_info, pci_name(adapter->pdev),
2963 sizeof(info->bus_info));
2964 }
2965
2966 static const struct ethtool_ops et131x_ethtool_ops = {
2967 .get_drvinfo = et131x_get_drvinfo,
2968 .get_regs_len = et131x_get_regs_len,
2969 .get_regs = et131x_get_regs,
2970 .get_link = ethtool_op_get_link,
2971 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2972 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2973 };
2974
2975 /* et131x_hwaddr_init - set up the MAC Address on the device */
2976 static void et131x_hwaddr_init(struct et131x_adapter *adapter)
2977 {
2978 /* If we have our default mac from init and no mac address from
2979  * EEPROM then we need to generate the last octet and set it on the
2980  * device
2981  */
2982 if (is_zero_ether_addr(adapter->rom_addr)) {
2983 /* We need to randomly generate the last octet so we
2984  * decrease our chances of setting the mac address to
2985  * same as another one of our cards in the system
2986  */
2987 get_random_bytes(&adapter->addr[5], 1);
2988 /* We have the default value in the register we are
2989  * working with so we need to copy the current
2990  * address into the permanent address
2991  */
2992 ether_addr_copy(adapter->rom_addr, adapter->addr);
2993 } else {
2994 /* We do not have an override address, so set the
2995  * current address to the permanent address and add
2996  * it to the device
2997  */
2998 ether_addr_copy(adapter->addr, adapter->rom_addr);
2999 }
3000 }
3001
3002 static int et131x_pci_init(struct et131x_adapter *adapter,
3003 struct pci_dev *pdev)
3004 {
3005 u16 max_payload;
3006 int i, rc;
3007
3008 rc = et131x_init_eeprom(adapter);
3009 if (rc < 0)
3010 goto out;
3011
3012 if (!pci_is_pcie(pdev)) {
3013 dev_err(&pdev->dev, "Missing PCIe capabilities\n");
3014 goto err_out;
3015 }
3016
3017 /* Let's set up the PORT LOGIC Register */
3018 max_payload = pdev->pcie_mpss;
3019
3020 if (max_payload < 2) {
3021 static const u16 acknak[2] = { 0x76, 0xD0 };
3022 static const u16 replay[2] = { 0x1E0, 0x2ED };
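/* The tables are indexed by the max payload size code (0 = 128 bytes,
 * 1 = 256 bytes), matching the ACK/NAK latency and replay timer values
 * to the supported payload size.
 */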
3023
3024 if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
3025 acknak[max_payload])) {
3026 dev_err(&pdev->dev,
3027 "Could not write PCI config space for ACK/NAK\n");
3028 goto err_out;
3029 }
3030 if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
3031 replay[max_payload])) {
3032 dev_err(&pdev->dev,
3033 "Could not write PCI config space for Replay Timer\n");
3034 goto err_out;
3035 }
3036 }
3037
3038 /* l0s and l1 latency timers. We are using default values.
3039  * Representing 001 for L0s and 010 for L1
3040  */
3041 if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
3042 dev_err(&pdev->dev,
3043 "Could not write PCI config space for Latency Timers\n");
3044 goto err_out;
3045 }
3046
3047 /* Change the max read size to 2k */
3048 if (pcie_set_readrq(pdev, 2048)) {
3049 dev_err(&pdev->dev,
3050 "Couldn't change PCI config space for Max read size\n");
3051 goto err_out;
3052 }
3053
3054 /* Get MAC address from config space if an eeprom exists, otherwise
3055  * the MAC address there will not be valid
3056  */
3057 if (!adapter->has_eeprom) {
3058 et131x_hwaddr_init(adapter);
3059 return 0;
3060 }
3061
3062 for (i = 0; i < ETH_ALEN; i++) {
3063 if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
3064 adapter->rom_addr + i)) {
3065 dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
3066 goto err_out;
3067 }
3068 }
3069 ether_addr_copy(adapter->addr, adapter->rom_addr);
3070 out:
3071 return rc;
3072 err_out:
3073 rc = -EIO;
3074 goto out;
3075 }
3076
3077 /* et131x_error_timer_handler
3078  * @t: timer list entry embedded in our adapter structure
3079  *
3080  * The routine called when the error timer expires, to track the number of
3081  * recurring errors.
3082  */
3083 static void et131x_error_timer_handler(struct timer_list *t)
3084 {
3085 struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
3086 struct phy_device *phydev = adapter->netdev->phydev;
3087
3088 if (et1310_in_phy_coma(adapter)) {
3089 /* Bring the device immediately out of coma, to
3090  * prevent it from sleeping indefinitely, this
3091  * mechanism could be improved!
3092  */
3093 et1310_disable_phy_coma(adapter);
3094 adapter->boot_coma = 20;
3095 } else {
3096 et1310_update_macstat_host_counters(adapter);
3097 }
3098
3099 if (!phydev->link && adapter->boot_coma < 11)
3100 adapter->boot_coma++;
3101
3102 if (adapter->boot_coma == 10) {
3103 if (!phydev->link) {
3104 if (!et1310_in_phy_coma(adapter)) {
3105 /* NOTE - This was originally a 'sync with
3106  * interrupt'. How to do that under Linux?
3107  */
3108 et131x_enable_interrupts(adapter);
3109 et1310_enable_phy_coma(adapter);
3110 }
3111 }
3112 }
3113
3114 /* This is a periodic timer, so reschedule */
3115 mod_timer(&adapter->error_timer, jiffies +
3116 msecs_to_jiffies(TX_ERROR_PERIOD));
3117 }
3118
3119 static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
3120 {
3121 et131x_tx_dma_memory_free(adapter);
3122 et131x_rx_dma_memory_free(adapter);
3123 }
3124
3125 static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
3126 {
3127 int status;
3128
3129 status = et131x_tx_dma_memory_alloc(adapter);
3130 if (status) {
3131 dev_err(&adapter->pdev->dev,
3132 "et131x_tx_dma_memory_alloc FAILED\n");
3133 et131x_tx_dma_memory_free(adapter);
3134 return status;
3135 }
3136
3137 status = et131x_rx_dma_memory_alloc(adapter);
3138 if (status) {
3139 dev_err(&adapter->pdev->dev,
3140 "et131x_rx_dma_memory_alloc FAILED\n");
3141 et131x_adapter_memory_free(adapter);
3142 return status;
3143 }
3144
3145 status = et131x_init_recv(adapter);
3146 if (status) {
3147 dev_err(&adapter->pdev->dev, "et131x_init_recv FAILED\n");
3148 et131x_adapter_memory_free(adapter);
3149 }
3150 return status;
3151 }
3152
3153 static void et131x_adjust_link(struct net_device *netdev)
3154 {
3155 struct et131x_adapter *adapter = netdev_priv(netdev);
3156 struct phy_device *phydev = netdev->phydev;
3157
3158 if (!phydev)
3159 return;
3160 if (phydev->link == adapter->link)
3161 return;
3162
3163 /* If we are in coma mode, disable it: we cannot read PHY values
3164  * until we are out.
3165  */
3166
3167 if (et1310_in_phy_coma(adapter))
3168 et1310_disable_phy_coma(adapter);
3169
3170 adapter->link = phydev->link;
3171 phy_print_status(phydev);
3172
3173 if (phydev->link) {
3174 adapter->boot_coma = 20;
3175 if (phydev->speed == SPEED_10) {
3176 u16 register18;
3177
3178 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3179 &register18);
3180 et131x_mii_write(adapter, phydev->mdio.addr,
3181 PHY_MPHY_CONTROL_REG,
3182 register18 | 0x4);
3183 et131x_mii_write(adapter, phydev->mdio.addr,
3184 PHY_INDEX_REG, register18 | 0x8402);
3185 et131x_mii_write(adapter, phydev->mdio.addr,
3186 PHY_DATA_REG, register18 | 511);
3187 et131x_mii_write(adapter, phydev->mdio.addr,
3188 PHY_MPHY_CONTROL_REG, register18);
3189 }
3190
3191 et1310_config_flow_control(adapter);
3192
3193 if (phydev->speed == SPEED_1000 &&
3194 adapter->registry_jumbo_packet > 2048) {
3195 u16 reg;
3196
3197 et131x_mii_read(adapter, PHY_CONFIG, &reg);
3198 reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
3199 reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
3200 et131x_mii_write(adapter, phydev->mdio.addr,
3201 PHY_CONFIG, reg);
3202 }
3203
3204 et131x_set_rx_dma_timer(adapter);
3205 et1310_config_mac_regs2(adapter);
3206 } else {
3207 adapter->boot_coma = 0;
3208
3209 if (phydev->speed == SPEED_10) {
3210 u16 register18;
3211
3212 et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
3213 &register18);
3214 et131x_mii_write(adapter, phydev->mdio.addr,
3215 PHY_MPHY_CONTROL_REG,
3216 register18 | 0x4);
3217 et131x_mii_write(adapter, phydev->mdio.addr,
3218 PHY_INDEX_REG, register18 | 0x8402);
3219 et131x_mii_write(adapter, phydev->mdio.addr,
3220 PHY_DATA_REG, register18 | 511);
3221 et131x_mii_write(adapter, phydev->mdio.addr,
3222 PHY_MPHY_CONTROL_REG, register18);
3223 }
3224
3225 et131x_free_busy_send_packets(adapter);
3226 et131x_init_send(adapter);
3227
3228
3229 /* Bring the device back to the state it was in during init, prior
3230  * to autonegotiation being complete. This way, when we get the
3231  * auto-neg complete interrupt, we can complete init by calling
3232  * config_mac_regs2. */
3233 et131x_soft_reset(adapter);
3234
3235 et131x_adapter_setup(adapter);
3236
3237 et131x_disable_txrx(netdev);
3238 et131x_enable_txrx(netdev);
3239 }
3240 }
3241
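/* et131x_mii_probe - Find and connect the first PHY on the MDIO bus,
 * capping the speed at 100Mbit unless this is the gigabit device.
 */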
3242 static int et131x_mii_probe(struct net_device *netdev)
3243 {
3244 struct et131x_adapter *adapter = netdev_priv(netdev);
3245 struct phy_device *phydev = NULL;
3246
3247 phydev = phy_find_first(adapter->mii_bus);
3248 if (!phydev) {
3249 dev_err(&adapter->pdev->dev, "no PHY found\n");
3250 return -ENODEV;
3251 }
3252
3253 phydev = phy_connect(netdev, phydev_name(phydev),
3254 &et131x_adjust_link, PHY_INTERFACE_MODE_MII);
3255
3256 if (IS_ERR(phydev)) {
3257 dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
3258 return PTR_ERR(phydev);
3259 }
3260
3261 phy_set_max_speed(phydev, SPEED_100);
3262
3263 if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
3264 phy_set_max_speed(phydev, SPEED_1000);
3265
3266 phydev->autoneg = AUTONEG_ENABLE;
3267
3268 phy_attached_info(phydev);
3269
3270 return 0;
3271 }
3272
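/* et131x_adapter_init - Initialize the adapter structure in the
 * net_device private area: locks, the jumbo packet default and the
 * default MAC address.
 */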
3273 static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
3274 struct pci_dev *pdev)
3275 {
3276 static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };
3277
3278 struct et131x_adapter *adapter;
3279
3280 adapter = netdev_priv(netdev);
3281 adapter->pdev = pci_dev_get(pdev);
3282 adapter->netdev = netdev;
3283
3284 spin_lock_init(&adapter->tcb_send_qlock);
3285 spin_lock_init(&adapter->tcb_ready_qlock);
3286 spin_lock_init(&adapter->rcv_lock);
3287
3288 adapter->registry_jumbo_packet = 1514;
3289
3290 ether_addr_copy(adapter->addr, default_mac);
3291
3292 return adapter;
3293 }
3294
3295 static void et131x_pci_remove(struct pci_dev *pdev)
3296 {
3297 struct net_device *netdev = pci_get_drvdata(pdev);
3298 struct et131x_adapter *adapter = netdev_priv(netdev);
3299
3300 unregister_netdev(netdev);
3301 netif_napi_del(&adapter->napi);
3302 phy_disconnect(netdev->phydev);
3303 mdiobus_unregister(adapter->mii_bus);
3304 mdiobus_free(adapter->mii_bus);
3305
3306 et131x_adapter_memory_free(adapter);
3307 iounmap(adapter->regs);
3308 pci_dev_put(pdev);
3309
3310 free_netdev(netdev);
3311 pci_release_regions(pdev);
3312 pci_disable_device(pdev);
3313 }
3314
3315 static void et131x_up(struct net_device *netdev)
3316 {
3317 et131x_enable_txrx(netdev);
3318 phy_start(netdev->phydev);
3319 }
3320
3321 static void et131x_down(struct net_device *netdev)
3322 {
3323 /* Save the timestamp for the TX watchdog, prevent a timeout */
3324 netif_trans_update(netdev);
3325
3326 phy_stop(netdev->phydev);
3327 et131x_disable_txrx(netdev);
3328 }
3329
3330 #ifdef CONFIG_PM_SLEEP
3331 static int et131x_suspend(struct device *dev)
3332 {
3333 struct pci_dev *pdev = to_pci_dev(dev);
3334 struct net_device *netdev = pci_get_drvdata(pdev);
3335
3336 if (netif_running(netdev)) {
3337 netif_device_detach(netdev);
3338 et131x_down(netdev);
3339 pci_save_state(pdev);
3340 }
3341
3342 return 0;
3343 }
3344
3345 static int et131x_resume(struct device *dev)
3346 {
3347 struct pci_dev *pdev = to_pci_dev(dev);
3348 struct net_device *netdev = pci_get_drvdata(pdev);
3349
3350 if (netif_running(netdev)) {
3351 pci_restore_state(pdev);
3352 et131x_up(netdev);
3353 netif_device_attach(netdev);
3354 }
3355
3356 return 0;
3357 }
3358 #endif
3359
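/* Routes system suspend/resume to the handlers above; without
 * CONFIG_PM_SLEEP the handlers compile out and the ops stay empty.
 */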
3360 static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
3361
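/* et131x_isr - The Interrupt Service Routine for the driver
 * @irq: the IRQ on which the interrupt was received
 * @dev_id: device-specific info (here a pointer to a net_device struct)
 *
 * Returns a value indicating if the interrupt was handled.
 */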
3362 static irqreturn_t et131x_isr(int irq, void *dev_id)
3363 {
3364 bool handled = true;
3365 bool enable_interrupts = true;
3366 struct net_device *netdev = dev_id;
3367 struct et131x_adapter *adapter = netdev_priv(netdev);
3368 struct address_map __iomem *iomem = adapter->regs;
3369 struct rx_ring *rx_ring = &adapter->rx_ring;
3370 struct tx_ring *tx_ring = &adapter->tx_ring;
3371 u32 status;
3372
3373 if (!netif_device_present(netdev)) {
3374 handled = false;
3375 enable_interrupts = false;
3376 goto out;
3377 }
3378
3379 et131x_disable_interrupts(adapter);
3380
3381 status = readl(&adapter->regs->global.int_status);
3382
3383 if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH)
3384 status &= ~INT_MASK_ENABLE;
3385 else
3386 status &= ~INT_MASK_ENABLE_NO_FLOW;
3387
3388 /* Make sure this is our interrupt */
3389 if (!status) {
3390 handled = false;
3391 et131x_enable_interrupts(adapter);
3392 goto out;
3393 }
3394
3395 /* This is our interrupt, so process accordingly */
3396 if (status & ET_INTR_WATCHDOG) {
3397 struct tcb *tcb = tx_ring->send_head;
3398
3399 if (tcb)
3400 if (++tcb->stale > 1)
3401 status |= ET_INTR_TXDMA_ISR;
3402
3403 if (rx_ring->unfinished_receives)
3404 status |= ET_INTR_RXDMA_XFR_DONE;
3405 else if (tcb == NULL)
3406 writel(0, &adapter->regs->global.watchdog_timer);
3407
3408 status &= ~ET_INTR_WATCHDOG;
3409 }
3410
3411 if (status & (ET_INTR_RXDMA_XFR_DONE | ET_INTR_TXDMA_ISR)) {
3412 enable_interrupts = false;
3413 napi_schedule(&adapter->napi);
3414 }
3415
3416 status &= ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE);
3417
3418 if (!status)
3419 goto out;
3420
3421 if (status & ET_INTR_TXDMA_ERR) {
3422 /* Following read also clears the register (COR) */
3423 u32 txdma_err = readl(&iomem->txdma.tx_dma_error);
3424
3425 dev_warn(&adapter->pdev->dev,
3426 "TXDMA_ERR interrupt, error = %d\n",
3427 txdma_err);
3428 }
3429
3430 if (status & (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
3431 /* This indicates the number of unused buffers in RXDMA free buffer
3432  * ring 0 is <= the limit you programmed. Free buffer resources need
3433  * to be returned. Free buffers are consumed as packets are passed
3434  * from the network to the host. The host becomes aware of the packets
3435  * from the contents of the packet status ring. This ring is queried
3436  * when the packet done interrupt occurs. Packets are then passed to
3437  * the OS. When the OS is done with the packets the resources can be
3438  * returned to the ET1310 for re-use. This interrupt is one method of
3439  * returning resources.
3440  */
3441
3442 /* If the user has flow control on, then we will
3443  * send a pause packet, otherwise just exit
3444  */
3445
3446 if (adapter->flow == FLOW_TXONLY || adapter->flow == FLOW_BOTH) {
3447 u32 pm_csr;
3448
3449 /* Tell the device to send a pause packet via the back
3450  * pressure register (bp req and bp xon/xoff)
3451  */
3452 pm_csr = readl(&iomem->global.pm_csr);
3453 if (!et1310_in_phy_coma(adapter))
3454 writel(3, &iomem->txmac.bp_ctrl);
3455 }
3456 }
3457
3458 /* Handle the Packet Status Ring Low Interrupt */
3459 if (status & ET_INTR_RXDMA_STAT_LOW) {
3460 /* Same idea as with the two Free Buffer Rings. Packets going
3461  * from the network to the host each consume a free buffer
3462  * resource and a packet status resource. These resources are
3463  * passed to the OS. When the OS is done with the resources,
3464  * they need to be returned to the ET1310. This is one method
3465  * of returning the resources.
3466  */
3467 }
3468
3469 if (status & ET_INTR_RXDMA_ERR) {
3470 /* The rxdma_error interrupt is sent when a time-out on a request
3471  * issued by the JAGCore has occurred or a completion is returned
3472  * with an un-successful status. In both cases the request is
3473  * considered complete. The JAGCore will automatically re-try the
3474  * request in question. Normally information on events like these
3475  * are sent to the host using the "Advanced Error Reporting"
3476  * capability. This interrupt is another way of getting similar
3477  * information. The only thing required is to clear the interrupt
3478  * by reading the ISR in the global resources. The JAGCore will do
3479  * a re-try on the request. Normally you should never see this
3480  * interrupt. If you start to see this interrupt occurring
3481  * frequently then something bad has occurred. A reset might be
3482  * the thing to do.
3483  */
3484
3485
3486 dev_warn(&adapter->pdev->dev, "RxDMA_ERR interrupt, error %x\n",
3487 readl(&iomem->txmac.tx_test));
3488 }
3489
3490
3491 if (status & ET_INTR_WOL) {
3492 /* This is a secondary interrupt for wake on LAN. The driver
3493  * should never see this, if it does, something serious is
3494  * wrong
3495  */
3496 dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
3497 }
3498
3499 if (status & ET_INTR_TXMAC) {
3500 u32 err = readl(&iomem->txmac.err);
3501
3502 /* When any of the errors occur and TXMAC generates an
3503  * interrupt to report these errors, it usually means that
3504  * TXMAC has detected an error in the data stream retrieved
3505  * from the on-chip Tx Q. All of these errors are catastrophic
3506  * and TXMAC won't be able to recover data when these errors
3507  * occur. In a nutshell, the whole Tx path will have to be reset
3508  * and re-configured afterwards. */
3509
3510 dev_warn(&adapter->pdev->dev, "TXMAC interrupt, error 0x%08x\n",
3511 err);
3512
3513 /* If we are debugging, we want to see this error, otherwise we
3514  * just want the device to be reset and continue
3515  */
3516 }
3517
3518 if (status & ET_INTR_RXMAC) {
3519 /* These interrupts are catastrophic to the device; what we
3520  * need to do is disable the interrupts and set the flag to
3521  * cause a reset, so we can solve this issue outside the
3522  * interrupt context. */
3523 dev_warn(&adapter->pdev->dev,
3524 "RXMAC interrupt, error 0x%08x. Requesting reset\n",
3525 readl(&iomem->rxmac.err_reg));
3526
3527 dev_warn(&adapter->pdev->dev,
3528 "Enable 0x%08x, Diag 0x%08x\n",
3529 readl(&iomem->rxmac.ctrl),
3530 readl(&iomem->rxmac.rxq_diag));
3531
3532 /* If we are debugging, we want to see this error, otherwise we
3533  * just want the device to be reset and continue
3534  */
3535 }
3536
3537 if (status & ET_INTR_MAC_STAT) {
3538 /* This means at least one of the un-masked counters in the
3539  * MAC_STAT block has rolled over. Use this to maintain the top,
3540  * software managed bits of the counter(s).
3541  */
3542 et1310_handle_macstat_interrupt(adapter);
3543 }
3544
3545 if (status & ET_INTR_SLV_TIMEOUT) {
3546 /* This means a timeout has occurred on a read or write request
3547  * to one of the JAGCore registers. The Global Resources block
3548  * has terminated the request and on a read request, returned a
3549  * "fake" value. The most likely reason is: Bad Address or the
3550  * addressed module is in a power-down state and can't respond.
3551  */
3552 }
3553
3554 out:
3555 if (enable_interrupts)
3556 et131x_enable_interrupts(adapter);
3557
3558 return IRQ_RETVAL(handled);
3559 }
3560
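/* et131x_poll - NAPI poll callback: service received packets up to the
 * budget, reclaim completed sends, and re-enable device interrupts only
 * when less than the full budget was consumed.
 */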
3561 static int et131x_poll(struct napi_struct *napi, int budget)
3562 {
3563 struct et131x_adapter *adapter =
3564 container_of(napi, struct et131x_adapter, napi);
3565 int work_done = et131x_handle_recv_pkts(adapter, budget);
3566
3567 et131x_handle_send_pkts(adapter);
3568
3569 if (work_done < budget) {
3570 napi_complete_done(&adapter->napi, work_done);
3571 et131x_enable_interrupts(adapter);
3572 }
3573
3574 return work_done;
3575 }
3576
3577 /* et131x_stats - Return the current device statistics */
3578 static struct net_device_stats *et131x_stats(struct net_device *netdev)
3579 {
3580 struct et131x_adapter *adapter = netdev_priv(netdev);
3581 struct net_device_stats *stats = &adapter->netdev->stats;
3582 struct ce_stats *devstat = &adapter->stats;
3583
3584 stats->rx_errors = devstat->rx_length_errs +
3585 devstat->rx_align_errs +
3586 devstat->rx_crc_errs +
3587 devstat->rx_code_violations +
3588 devstat->rx_other_errs;
3589 stats->tx_errors = devstat->tx_max_pkt_errs;
3590 stats->multicast = devstat->multicast_pkts_rcvd;
3591 stats->collisions = devstat->tx_collisions;
3592
3593 stats->rx_length_errors = devstat->rx_length_errs;
3594 stats->rx_over_errors = devstat->rx_overflows;
3595 stats->rx_crc_errors = devstat->rx_crc_errs;
3596 stats->rx_dropped = devstat->rcvd_pkts_dropped;
3597
3598 /* NOTE: Not used, can't find analogous device statistics:
3599  * stats->rx_frame_errors
3600  * stats->rx_fifo_errors
3601  * stats->rx_missed_errors
3602  *
3603  * stats->tx_aborted_errors
3604  * stats->tx_carrier_errors
3605  * stats->tx_fifo_errors
3606  * stats->tx_heartbeat_errors
3607  * stats->tx_window_errors */
3608 return stats;
3609 }
3610
3611 static int et131x_open(struct net_device *netdev)
3612 {
3613 struct et131x_adapter *adapter = netdev_priv(netdev);
3614 struct pci_dev *pdev = adapter->pdev;
3615 unsigned int irq = pdev->irq;
3616 int result;
3617
3618 /* Start the timer to track NIC errors */
3619 timer_setup(&adapter->error_timer, et131x_error_timer_handler, 0);
3620 adapter->error_timer.expires = jiffies +
3621 msecs_to_jiffies(TX_ERROR_PERIOD);
3622 add_timer(&adapter->error_timer);
3623
3624 result = request_irq(irq, et131x_isr,
3625 IRQF_SHARED, netdev->name, netdev);
3626 if (result) {
3627 dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
3628 return result;
3629 }
3630
3631 adapter->flags |= FMP_ADAPTER_INTERRUPT_IN_USE;
3632
3633 napi_enable(&adapter->napi);
3634
3635 et131x_up(netdev);
3636
3637 return result;
3638 }
3639
3640 static int et131x_close(struct net_device *netdev)
3641 {
3642 struct et131x_adapter *adapter = netdev_priv(netdev);
3643
3644 et131x_down(netdev);
3645 napi_disable(&adapter->napi);
3646
3647 adapter->flags &= ~FMP_ADAPTER_INTERRUPT_IN_USE;
3648 free_irq(adapter->pdev->irq, netdev);
3649
3650 /* Stop the error timer */
3651 return del_timer_sync(&adapter->error_timer);
3652 }
3653
3654 static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
3655 int cmd)
3656 {
3657 if (!netdev->phydev)
3658 return -EINVAL;
3659
3660 return phy_mii_ioctl(netdev->phydev, reqbuf, cmd);
3661 }
3662
3663 /* et131x_set_packet_filter - Configures the Rx Packet filtering */
3664 static int et131x_set_packet_filter(struct et131x_adapter *adapter)
3665 {
3666 int filter = adapter->packet_filter;
3667 u32 ctrl;
3668 u32 pf_ctrl;
3669
3670 ctrl = readl(&adapter->regs->rxmac.ctrl);
3671 pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl);
3672
3673 /* Default to disabled packet filtering */
3674 ctrl |= 0x04;
3675
3676 /* Set us to be in promiscuous mode so we receive everything, this
3677  * is also true when we get a packet filter of 0
3678  */
3679 if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0)
3680 pf_ctrl &= ~7;
3681 else {
3682 /* Set us up with Multicast packet filtering. Three cases are
3683  * possible - (1) we have a multi-cast list, (2) we receive ALL
3684  * multicast entries or (3) we receive none.
3685  */
3686 if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST)
3687 pf_ctrl &= ~2;
3688 else {
3689 et1310_setup_device_for_multicast(adapter);
3690 pf_ctrl |= 2;
3691 ctrl &= ~0x04;
3692 }
3693
3694 /* Set us up with Unicast packet filtering */
3695 if (filter & ET131X_PACKET_TYPE_DIRECTED) {
3696 et1310_setup_device_for_unicast(adapter);
3697 pf_ctrl |= 4;
3698 ctrl &= ~0x04;
3699 }
3700
3701 /* Set us up with Broadcast packet filtering */
3702 if (filter & ET131X_PACKET_TYPE_BROADCAST) {
3703 pf_ctrl |= 1;
3704 ctrl &= ~0x04;
3705 } else {
3706 pf_ctrl &= ~1;
3707 }
3708
3709 /* Setup the receive mac configuration registers - Packet
3710  * Filter control + the enable / disable for packet filter
3711  * in the control reg.
3712  */
3713 writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl);
3714 writel(ctrl, &adapter->regs->rxmac.ctrl);
3715 }
3716 return 0;
3717 }
3718
3719 static void et131x_multicast(struct net_device *netdev)
3720 {
3721 struct et131x_adapter *adapter = netdev_priv(netdev);
3722 int packet_filter;
3723 struct netdev_hw_addr *ha;
3724 int i;
3725
3726 /* Before we modify the platform-independent filter flags, store them
3727  * locally. This allows us to determine if anything's changed and if
3728  * we even need to bother the hardware
3729  */
3730 packet_filter = adapter->packet_filter;
3731
3732 /* Clear the 'multicast' flag locally; because we only have a single
3733  * flag to check multicast, and multiple multicast addresses can be
3734  * set, this is the easiest way to determine if more than one
3735  * multicast address is being set.
3736  */
3737 packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
3738
3739 /* Check the net_device flags and set the device independent flags
3740  * accordingly
3741  */
3742 if (netdev->flags & IFF_PROMISC)
3743 adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS;
3744 else
3745 adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS;
3746
3747 if ((netdev->flags & IFF_ALLMULTI) ||
3748 (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST))
3749 adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
3750
3751 if (netdev_mc_count(netdev) < 1) {
3752 adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
3753 adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST;
3754 } else {
3755 adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST;
3756 }
3757
3758 /* Set values in the private adapter struct */
3759 i = 0;
3760 netdev_for_each_mc_addr(ha, netdev) {
3761 if (i == NIC_MAX_MCAST_LIST)
3762 break;
3763 ether_addr_copy(adapter->multicast_list[i++], ha->addr);
3764 }
3765 adapter->multicast_addr_count = i;
3766
3767 /* Are the new flags different from the previous ones? If not, then no
3768  * action is required
3769  *
3770  * NOTE - This block will always update the multicast_list with the
3771  * hardware, even if the addresses aren't the same.
3772  */
3773 if (packet_filter != adapter->packet_filter)
3774 et131x_set_packet_filter(adapter);
3775 }
3776
3777 static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
3778 {
3779 struct et131x_adapter *adapter = netdev_priv(netdev);
3780 struct tx_ring *tx_ring = &adapter->tx_ring;
3781
3782 /* stop the queue if it's getting full */
3783 if (tx_ring->used >= NUM_TCB - 1 && !netif_queue_stopped(netdev))
3784 netif_stop_queue(netdev);
3785
3786 /* Save the timestamp for the TX timeout watchdog */
3787 netif_trans_update(netdev);
3788
3789 /* TCB is not available */
3790 if (tx_ring->used >= NUM_TCB)
3791 goto drop_err;
3792
3793 if ((adapter->flags & FMP_ADAPTER_FAIL_SEND_MASK) ||
3794 !netif_carrier_ok(netdev))
3795 goto drop_err;
3796
3797 if (send_packet(skb, adapter))
3798 goto drop_err;
3799
3800 return NETDEV_TX_OK;
3801
3802 drop_err:
3803 dev_kfree_skb_any(skb);
3804 adapter->netdev->stats.tx_dropped++;
3805 return NETDEV_TX_OK;
3806 }
3807
3808
3809 /* et131x_tx_timeout - Timeout handler
3810  * @netdev: a pointer to a net_device struct representing the device
3811  *
3812  * The handler called when a Tx request times out; the timeout period is
3813  * specified by the 'watchdog_timeo' element of the net_device struct. */
3814 static void et131x_tx_timeout(struct net_device *netdev)
3815 {
3816 struct et131x_adapter *adapter = netdev_priv(netdev);
3817 struct tx_ring *tx_ring = &adapter->tx_ring;
3818 struct tcb *tcb;
3819 unsigned long flags;
3820
3821 /* If the device is closed, ignore the timeout */
3822 if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
3823 return;
3824
3825
3826 /* Any nonrecoverable hardware error? Checks adapter->flags for any
3827  * failure during the PHY reads */
3828 if (adapter->flags & FMP_ADAPTER_NON_RECOVER_ERROR)
3829 return;
3830
3831 /* Hardware failure? */
3832 if (adapter->flags & FMP_ADAPTER_HARDWARE_ERROR) {
3833 dev_err(&adapter->pdev->dev, "hardware error - reset\n");
3834 return;
3835 }
3836
3837 /* Is send stuck? */
3838 spin_lock_irqsave(&adapter->tcb_send_qlock, flags);
3839 tcb = tx_ring->send_head;
3840 spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
3841
3842 if (tcb) {
3843 tcb->count++;
3844
3845 if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
3846 dev_warn(&adapter->pdev->dev,
3847 "Send stuck - reset. tcb->WrIndex %x\n",
3848 tcb->index);
3849
3850 adapter->netdev->stats.tx_errors++;
3851
3852 /* perform reset of tx/rx */
3853 et131x_disable_txrx(netdev);
3854 et131x_enable_txrx(netdev);
3855 }
3856 }
3857 }
3858
3859 static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
3860 {
3861 int result = 0;
3862 struct et131x_adapter *adapter = netdev_priv(netdev);
3863
3864 et131x_disable_txrx(netdev);
3865
3866 netdev->mtu = new_mtu;
3867
3868 et131x_adapter_memory_free(adapter);
3869
3870 /* Set the config parameter for Jumbo Packet support */
3871 adapter->registry_jumbo_packet = new_mtu + 14;
3872 et131x_soft_reset(adapter);
3873
3874 result = et131x_adapter_memory_alloc(adapter);
3875 if (result != 0) {
3876 dev_warn(&adapter->pdev->dev,
3877 "Change MTU failed; couldn't re-alloc DMA memory\n");
3878 return result;
3879 }
3880
3881 et131x_init_send(adapter);
3882 et131x_hwaddr_init(adapter);
3883 ether_addr_copy(netdev->dev_addr, adapter->addr);
3884
3885 /* Init the device with the new settings */
3886 et131x_adapter_setup(adapter);
3887 et131x_enable_txrx(netdev);
3888
3889 return result;
3890 }
3891
3892 static const struct net_device_ops et131x_netdev_ops = {
3893 .ndo_open = et131x_open,
3894 .ndo_stop = et131x_close,
3895 .ndo_start_xmit = et131x_tx,
3896 .ndo_set_rx_mode = et131x_multicast,
3897 .ndo_tx_timeout = et131x_tx_timeout,
3898 .ndo_change_mtu = et131x_change_mtu,
3899 .ndo_set_mac_address = eth_mac_addr,
3900 .ndo_validate_addr = eth_validate_addr,
3901 .ndo_get_stats = et131x_stats,
3902 .ndo_do_ioctl = et131x_ioctl,
3903 };
3904
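/* et131x_pci_setup - PCI probe: enable the device, map its registers,
 * allocate the DMA rings, register the MDIO bus, attach the PHY and
 * finally register the net_device.
 */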
3905 static int et131x_pci_setup(struct pci_dev *pdev,
3906 const struct pci_device_id *ent)
3907 {
3908 struct net_device *netdev;
3909 struct et131x_adapter *adapter;
3910 int rc;
3911
3912 rc = pci_enable_device(pdev);
3913 if (rc < 0) {
3914 dev_err(&pdev->dev, "pci_enable_device() failed\n");
3915 goto out;
3916 }
3917
3918 /* Perform some basic PCI checks */
3919 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
3920 dev_err(&pdev->dev, "Can't find PCI device's base address\n");
3921 rc = -ENODEV;
3922 goto err_disable;
3923 }
3924
3925 rc = pci_request_regions(pdev, DRIVER_NAME);
3926 if (rc < 0) {
3927 dev_err(&pdev->dev, "Can't get PCI resources\n");
3928 goto err_disable;
3929 }
3930
3931 pci_set_master(pdev);
3932
3933 /* Check the DMA addressing support of this device */
3934 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
3935 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
3936 dev_err(&pdev->dev, "No usable DMA addressing method\n");
3937 rc = -EIO;
3938 goto err_release_res;
3939 }
3940
3941 netdev = alloc_etherdev(sizeof(struct et131x_adapter));
3942 if (!netdev) {
3943 dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
3944 rc = -ENOMEM;
3945 goto err_release_res;
3946 }
3947
3948 netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
3949 netdev->netdev_ops = &et131x_netdev_ops;
3950 netdev->min_mtu = ET131X_MIN_MTU;
3951 netdev->max_mtu = ET131X_MAX_MTU;
3952
3953 SET_NETDEV_DEV(netdev, &pdev->dev);
3954 netdev->ethtool_ops = &et131x_ethtool_ops;
3955
3956 adapter = et131x_adapter_init(netdev, pdev);
3957
3958 rc = et131x_pci_init(adapter, pdev);
3959 if (rc < 0)
3960 goto err_free_dev;
3961
3962 /* Map the bus-relative registers to system virtual memory */
3963 adapter->regs = pci_ioremap_bar(pdev, 0);
3964 if (!adapter->regs) {
3965 dev_err(&pdev->dev, "Cannot map device registers\n");
3966 rc = -ENOMEM;
3967 goto err_free_dev;
3968 }
3969
3970 /* If Phy COMA mode was enabled when we went down, disable it here */
3971 writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr);
3972
3973 et131x_soft_reset(adapter);
3974 et131x_disable_interrupts(adapter);
3975
3976 rc = et131x_adapter_memory_alloc(adapter);
3977 if (rc < 0) {
3978 dev_err(&pdev->dev, "Could not alloc adapter memory (DMA)\n");
3979 goto err_iounmap;
3980 }
3981
3982 et131x_init_send(adapter);
3983
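/* Register the NAPI handler; 64 is the standard NAPI_POLL_WEIGHT budget
 * of packets per poll.
 */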
3984 netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
3985
3986 ether_addr_copy(netdev->dev_addr, adapter->addr);
3987
3988 rc = -ENOMEM;
3989
3990 adapter->mii_bus = mdiobus_alloc();
3991 if (!adapter->mii_bus) {
3992 dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n");
3993 goto err_mem_free;
3994 }
3995
3996 adapter->mii_bus->name = "et131x_eth_mii";
3997 snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x",
3998 (adapter->pdev->bus->number << 8) | adapter->pdev->devfn);
3999 adapter->mii_bus->priv = netdev;
4000 adapter->mii_bus->read = et131x_mdio_read;
4001 adapter->mii_bus->write = et131x_mdio_write;
4002
4003 rc = mdiobus_register(adapter->mii_bus);
4004 if (rc < 0) {
4005 dev_err(&pdev->dev, "failed to register MII bus\n");
4006 goto err_mdio_free;
4007 }
4008
4009 rc = et131x_mii_probe(netdev);
4010 if (rc < 0) {
4011 dev_err(&pdev->dev, "failed to probe MII bus\n");
4012 goto err_mdio_unregister;
4013 }
4014
4015 et131x_adapter_setup(adapter);
4016
4017 /* Init variable for counting how long we do not have link status */
4018 adapter->boot_coma = 0;
4019 et1310_disable_phy_coma(adapter);
4020
4021 /* We can enable interrupts now
4022  *
4023  * NOTE - Because registration of interrupt handler is done in the
4024  * device's open(), defer enabling device interrupts to that
4025  * point
4026  */
4027
4028 rc = register_netdev(netdev);
4029 if (rc < 0) {
4030 dev_err(&pdev->dev, "register_netdev() failed\n");
4031 goto err_phy_disconnect;
4032 }
4033
4034 /* Register the net_device struct with the PCI subsystem. Save a copy
4035  * of the PCI config space for this device now that the device has
4036  * been initialized, just in case it needs to be quickly restored.
4037  */
4038 pci_set_drvdata(pdev, netdev);
4039 out:
4040 return rc;
4041
4042 err_phy_disconnect:
4043 phy_disconnect(netdev->phydev);
4044 err_mdio_unregister:
4045 mdiobus_unregister(adapter->mii_bus);
4046 err_mdio_free:
4047 mdiobus_free(adapter->mii_bus);
4048 err_mem_free:
4049 et131x_adapter_memory_free(adapter);
4050 err_iounmap:
4051 iounmap(adapter->regs);
4052 err_free_dev:
4053 pci_dev_put(pdev);
4054 free_netdev(netdev);
4055 err_release_res:
4056 pci_release_regions(pdev);
4057 err_disable:
4058 pci_disable_device(pdev);
4059 goto out;
4060 }
4061
4062 static const struct pci_device_id et131x_pci_table[] = {
4063 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_GIG), 0UL},
4064 { PCI_VDEVICE(ATT, ET131X_PCI_DEVICE_ID_FAST), 0UL},
4065 { 0,}
4066 };
4067 MODULE_DEVICE_TABLE(pci, et131x_pci_table);
4068
4069 static struct pci_driver et131x_driver = {
4070 .name = DRIVER_NAME,
4071 .id_table = et131x_pci_table,
4072 .probe = et131x_pci_setup,
4073 .remove = et131x_pci_remove,
4074 .driver.pm = &et131x_pm_ops,
4075 };
4076
4077 module_pci_driver(et131x_driver);