This source file includes the following definitions:
- de4x5_hw_init
- de4x5_open
- de4x5_init
- de4x5_sw_reset
- de4x5_queue_pkt
- de4x5_interrupt
- de4x5_rx
- de4x5_free_tx_buff
- de4x5_tx
- de4x5_ast
- de4x5_txur
- de4x5_rx_ovfc
- de4x5_close
- de4x5_get_stats
- de4x5_local_stats
- load_packet
- set_multicast_list
- SetMulticastFilter
- de4x5_eisa_probe
- de4x5_eisa_remove
- srom_search
- de4x5_pci_probe
- de4x5_pci_remove
- autoconf_media
- dc21040_autoconf
- dc21040_state
- de4x5_suspect_state
- dc21041_autoconf
- dc21140m_autoconf
- dc2114x_autoconf
- srom_autoconf
- srom_map_media
- de4x5_init_connection
- de4x5_reset_phy
- test_media
- test_tp
- test_for_100Mb
- wait_for_link
- test_mii_reg
- is_spd_100
- is_100_up
- is_10_up
- is_anc_capable
- ping_media
- de4x5_alloc_rx_buff
- de4x5_free_rx_buffs
- de4x5_free_tx_buffs
- de4x5_save_skbs
- de4x5_rst_desc_ring
- de4x5_cache_state
- de4x5_put_cache
- de4x5_putb_cache
- de4x5_get_cache
- test_ans
- de4x5_setup_intr
- reset_init_sia
- create_packet
- EISA_signature
- PCI_signature
- DevicePresent
- enet_addr_rst
- get_hw_addr
- de4x5_bad_srom
- srom_repair
- test_bad_enet
- an_exception
- srom_rd
- srom_latch
- srom_command
- srom_address
- srom_data
- sendto_srom
- getfrom_srom
- srom_infoleaf_info
- srom_init
- srom_exec
- dc21041_infoleaf
- dc21140_infoleaf
- dc21142_infoleaf
- dc21143_infoleaf
- compact_infoblock
- type0_infoblock
- type1_infoblock
- type2_infoblock
- type3_infoblock
- type4_infoblock
- type5_infoblock
- mii_rd
- mii_wr
- mii_rdata
- mii_wdata
- mii_address
- mii_ta
- mii_swap
- sendto_mii
- getfrom_mii
- mii_get_oui
- mii_get_phy
- build_setup_frame
- disable_ast
- de4x5_switch_mac_port
- gep_wr
- gep_rd
- yawn
- de4x5_parse_params
- de4x5_dbg_open
- de4x5_dbg_mii
- de4x5_dbg_media
- de4x5_dbg_srom
- de4x5_dbg_rx
- de4x5_ioctl
- de4x5_module_init
- de4x5_module_exit
446 #include <linux/module.h>
447 #include <linux/kernel.h>
448 #include <linux/string.h>
449 #include <linux/interrupt.h>
450 #include <linux/ptrace.h>
451 #include <linux/errno.h>
452 #include <linux/ioport.h>
453 #include <linux/pci.h>
454 #include <linux/eisa.h>
455 #include <linux/delay.h>
456 #include <linux/init.h>
457 #include <linux/spinlock.h>
458 #include <linux/crc32.h>
459 #include <linux/netdevice.h>
460 #include <linux/etherdevice.h>
461 #include <linux/skbuff.h>
462 #include <linux/time.h>
463 #include <linux/types.h>
464 #include <linux/unistd.h>
465 #include <linux/ctype.h>
466 #include <linux/dma-mapping.h>
467 #include <linux/moduleparam.h>
468 #include <linux/bitops.h>
469 #include <linux/gfp.h>
470
471 #include <asm/io.h>
472 #include <asm/dma.h>
473 #include <asm/byteorder.h>
474 #include <asm/unaligned.h>
475 #include <linux/uaccess.h>
476 #ifdef CONFIG_PPC_PMAC
477 #include <asm/machdep.h>
478 #endif
479
480 #include "de4x5.h"
481
482 static const char version[] =
483 KERN_INFO "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
484
485 #define c_char const char
486
487
488
489
490 struct phy_table {
491 int reset;
492 int id;
493 int ta;
494 struct {
495 int reg;
496 int mask;
497 int value;
498 } spd;
499 };
500
501 struct mii_phy {
502 int reset;
503 int id;
504 int ta;
505 struct {
506 int reg;
507 int mask;
508 int value;
509 } spd;
510 int addr;
511 u_char *gep;
512 u_char *rst;
513 u_int mc;
514 u_int ana;
515 u_int fdx;
516 u_int ttm;
517 u_int mci;
518 };
519
520 #define DE4X5_MAX_PHY 8
521
522 struct sia_phy {
523 u_char mc;
524 u_char ext;
525 int csr13;
526 int csr14;
527 int csr15;
528 int gepc;
529 int gep;
530 };
531
532
533
534
535
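/*
** Known MII PHYs and, for each, the register/mask/value triple apparently
** used to detect a 100Mb/s link (compare is_spd_100()).
*/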
536 static struct phy_table phy_info[] = {
537 {0, NATIONAL_TX, 1, {0x19, 0x40, 0x00}},
538 {1, BROADCOM_T4, 1, {0x10, 0x02, 0x02}},
539 {0, SEEQ_T4 , 1, {0x12, 0x10, 0x10}},
540 {0, CYPRESS_T4 , 1, {0x05, 0x20, 0x20}},
541 {0, 0x7810 , 1, {0x14, 0x0800, 0x0800}}
542 };
543
544
545
546
547
548
549 #define GENERIC_REG 0x05
550 #define GENERIC_MASK MII_ANLPA_100M
551 #define GENERIC_VALUE MII_ANLPA_100M
552
553
554
555
556 static c_char enet_det[][ETH_ALEN] = {
557 {0x00, 0x00, 0xc0, 0x00, 0x00, 0x00},
558 {0x00, 0x00, 0xe8, 0x00, 0x00, 0x00}
559 };
560
561 #define SMC 1
562 #define ACCTON 2
563
564
565
566
567
568
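/*
** Replacement SROM image used by srom_repair() for boards known to ship
** with a broken SROM, apparently selected by the card indices (SMC/ACCTON)
** defined above.
*/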
569 static c_char srom_repair_info[][100] = {
570 {0x00,0x1e,0x00,0x00,0x00,0x08,
571 0x1f,0x01,0x8f,0x01,0x00,0x01,0x00,0x02,
572 0x01,0x00,0x00,0x78,0xe0,0x01,0x00,0x50,
573 0x00,0x18,}
574 };
575
576
577 #ifdef DE4X5_DEBUG
578 static int de4x5_debug = DE4X5_DEBUG;
579 #else
580
581 static int de4x5_debug = (DEBUG_MEDIA | DEBUG_VERSION);
582 #endif
583
584
585
586
587
588
589
590
591
592
593 #ifdef DE4X5_PARM
594 static char *args = DE4X5_PARM;
595 #else
596 static char *args;
597 #endif
598
599 struct parameters {
600 bool fdx;
601 int autosense;
602 };
603
604 #define DE4X5_AUTOSENSE_MS 250
605
606 #define DE4X5_NDA 0xffe0
607
608
609
610
611 #define PROBE_LENGTH 32
612 #define ETH_PROM_SIG 0xAA5500FFUL
613
614
615
616
617 #define PKT_BUF_SZ 1536
618 #define IEEE802_3_SZ 1518
619 #define MAX_PKT_SZ 1514
620 #define MAX_DAT_SZ 1500
621 #define MIN_DAT_SZ 1
622 #define PKT_HDR_LEN 14
623 #define FAKE_FRAME_LEN (MAX_PKT_SZ + 1)
624 #define QUEUE_PKT_TIMEOUT (3*HZ)
625
626
627
628
629
630 #define DE4X5_EISA_IO_PORTS 0x0c00
631 #define DE4X5_EISA_TOTAL_SIZE 0x100
632
633 #define EISA_ALLOWED_IRQ_LIST {5, 9, 10, 11}
634
635 #define DE4X5_SIGNATURE {"DE425","DE434","DE435","DE450","DE500"}
636 #define DE4X5_NAME_LENGTH 8
637
638 static c_char *de4x5_signatures[] = DE4X5_SIGNATURE;
639
640
641
642
643 #define PROBE_LENGTH 32
644 #define ETH_PROM_SIG 0xAA5500FFUL
645
646
647
648
649 #define PCI_MAX_BUS_NUM 8
650 #define DE4X5_PCI_TOTAL_SIZE 0x80
651 #define DE4X5_CLASS_CODE 0x00020000
652
653
654
655
656
657
658
659 #define DE4X5_ALIGN4 ((u_long)4 - 1)
660 #define DE4X5_ALIGN8 ((u_long)8 - 1)
661 #define DE4X5_ALIGN16 ((u_long)16 - 1)
662 #define DE4X5_ALIGN32 ((u_long)32 - 1)
663 #define DE4X5_ALIGN64 ((u_long)64 - 1)
664 #define DE4X5_ALIGN128 ((u_long)128 - 1)
665
666 #define DE4X5_ALIGN DE4X5_ALIGN32
667 #define DE4X5_CACHE_ALIGN CAL_16LONG
668 #define DESC_SKIP_LEN DSL_0
669
670 #define DESC_ALIGN
671
672 #ifndef DEC_ONLY
673 static int dec_only;
674 #else
675 static int dec_only = 1;
676 #endif
677
678
679
680
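/*
** Interrupt mask helpers: enable/disable the IRQ sources in lp->irq_en or
** lp->irq_mask by rewriting the interrupt mask register (DE4X5_IMR). They
** rely on 'imr', 'lp' and the register macros' I/O base being in scope.
*/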
681 #define ENABLE_IRQs { \
682 imr |= lp->irq_en;\
683 outl(imr, DE4X5_IMR); \
684 }
685
686 #define DISABLE_IRQs {\
687 imr = inl(DE4X5_IMR);\
688 imr &= ~lp->irq_en;\
689 outl(imr, DE4X5_IMR); \
690 }
691
692 #define UNMASK_IRQs {\
693 imr |= lp->irq_mask;\
694 outl(imr, DE4X5_IMR); \
695 }
696
697 #define MASK_IRQs {\
698 imr = inl(DE4X5_IMR);\
699 imr &= ~lp->irq_mask;\
700 outl(imr, DE4X5_IMR); \
701 }
702
703
704
705
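/*
** Start/stop the transmit and receive processes by toggling the OMR_ST and
** OMR_SR bits in the operating mode register (DE4X5_OMR); 'omr' must be
** declared by the caller.
*/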
706 #define START_DE4X5 {\
707 omr = inl(DE4X5_OMR);\
708 omr |= OMR_ST | OMR_SR;\
709 outl(omr, DE4X5_OMR); \
710 }
711
712 #define STOP_DE4X5 {\
713 omr = inl(DE4X5_OMR);\
714 omr &= ~(OMR_ST|OMR_SR);\
715 outl(omr, DE4X5_OMR); \
716 }
717
718
719
720
721 #define RESET_SIA outl(0, DE4X5_SICR);
722
723
724
725
726 #define DE4X5_AUTOSENSE_MS 250
727
728
729
730
731 struct de4x5_srom {
732 char sub_vendor_id[2];
733 char sub_system_id[2];
734 char reserved[12];
735 char id_block_crc;
736 char reserved2;
737 char version;
738 char num_controllers;
739 char ieee_addr[6];
740 char info[100];
741 short chksum;
742 };
743 #define SUB_VENDOR_ID 0x500a
744
745
746
747
748
749
750
751
752
753 #define NUM_RX_DESC 8
754 #define NUM_TX_DESC 32
755 #define RX_BUFF_SZ 1536
756
757
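/*
** Descriptor layout shared by the RX and TX rings. The fields are read and
** written by the chip, hence the little-endian (__le32) types; DESC_ALIGN
** (empty here) can pad each entry when a descriptor skip length is used.
*/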
758 struct de4x5_desc {
759 volatile __le32 status;
760 __le32 des1;
761 __le32 buf;
762 __le32 next;
763 DESC_ALIGN
764 };
765
766
767
768
769 #define DE4X5_PKT_STAT_SZ 16
770 #define DE4X5_PKT_BIN_SZ 128
771
772
773 struct pkt_stats {
774 u_int bins[DE4X5_PKT_STAT_SZ];
775 u_int unicast;
776 u_int multicast;
777 u_int broadcast;
778 u_int excessive_collisions;
779 u_int tx_underruns;
780 u_int excessive_underruns;
781 u_int rx_runt_frames;
782 u_int rx_collision;
783 u_int rx_dribble;
784 u_int rx_overflow;
785 };
786
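/*
** Per-adapter private data: descriptor rings and their buffers, media and
** autosense state, the SROM image, and the software packet cache used
** while the ring is unavailable (e.g. during media autosense).
*/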
787 struct de4x5_private {
788 char adapter_name[80];
789 u_long interrupt;
790 struct de4x5_desc *rx_ring;
791 struct de4x5_desc *tx_ring;
792 struct sk_buff *tx_skb[NUM_TX_DESC];
793 struct sk_buff *rx_skb[NUM_RX_DESC];
794 int rx_new, rx_old;
795 int tx_new, tx_old;
796 char setup_frame[SETUP_FRAME_LEN];
797 char frame[64];
798 spinlock_t lock;
799 struct net_device_stats stats;
800 struct pkt_stats pktStats;
801 char rxRingSize;
802 char txRingSize;
803 int bus;
804 int bus_num;
805 int device;
806 int state;
807 int chipset;
808 s32 irq_mask;
809 s32 irq_en;
810 int media;
811 int c_media;
812 bool fdx;
813 int linkOK;
814 int autosense;
815 bool tx_enable;
816 int setup_f;
817 int local_state;
818 struct mii_phy phy[DE4X5_MAX_PHY];
819 struct sia_phy sia;
820 int active;
821 int mii_cnt;
822 int timeout;
823 struct timer_list timer;
824 int tmp;
825 struct {
826 u_long lock;
827 s32 csr0;
828 s32 csr6;
829 s32 csr7;
830 s32 gep;
831 s32 gepc;
832 s32 csr13;
833 s32 csr14;
834 s32 csr15;
835 int save_cnt;
836 struct sk_buff_head queue;
837 } cache;
838 struct de4x5_srom srom;
839 int cfrv;
840 int rx_ovf;
841 bool useSROM;
842 bool useMII;
843 int asBitValid;
844 int asPolarity;
845 int asBit;
846 int defMedium;
847 int tcount;
848 int infoblock_init;
849 int infoleaf_offset;
850 s32 infoblock_csr6;
851 int infoblock_media;
852 int (*infoleaf_fn)(struct net_device *);
853 u_char *rst;
854 u_char ibn;
855 struct parameters params;
856 struct device *gendev;
857 dma_addr_t dma_rings;
858 int dma_size;
859 char *rx_bufs;
860 };
861
862
863
864
865
866
867
868
869
870
871
872
873
874 static struct {
875 int chipset;
876 int bus;
877 int irq;
878 u_char addr[ETH_ALEN];
879 } last = {0,};
880
881
882
883
884
885
886
887
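/*
** Free slots in the TX ring; one entry is always left unused so that
** tx_old == tx_new unambiguously means the ring is empty.
*/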
888 #define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
889 lp->tx_old+lp->txRingSize-lp->tx_new-1:\
890 lp->tx_old -lp->tx_new-1)
891
892 #define TX_PKT_PENDING (lp->tx_old != lp->tx_new)
893
894
895
896
897 static int de4x5_open(struct net_device *dev);
898 static netdev_tx_t de4x5_queue_pkt(struct sk_buff *skb,
899 struct net_device *dev);
900 static irqreturn_t de4x5_interrupt(int irq, void *dev_id);
901 static int de4x5_close(struct net_device *dev);
902 static struct net_device_stats *de4x5_get_stats(struct net_device *dev);
903 static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len);
904 static void set_multicast_list(struct net_device *dev);
905 static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
906
907
908
909
910 static int de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev);
911 static int de4x5_init(struct net_device *dev);
912 static int de4x5_sw_reset(struct net_device *dev);
913 static int de4x5_rx(struct net_device *dev);
914 static int de4x5_tx(struct net_device *dev);
915 static void de4x5_ast(struct timer_list *t);
916 static int de4x5_txur(struct net_device *dev);
917 static int de4x5_rx_ovfc(struct net_device *dev);
918
919 static int autoconf_media(struct net_device *dev);
920 static void create_packet(struct net_device *dev, char *frame, int len);
921 static void load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb);
922 static int dc21040_autoconf(struct net_device *dev);
923 static int dc21041_autoconf(struct net_device *dev);
924 static int dc21140m_autoconf(struct net_device *dev);
925 static int dc2114x_autoconf(struct net_device *dev);
926 static int srom_autoconf(struct net_device *dev);
927 static int de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state, int (*fn)(struct net_device *, int), int (*asfn)(struct net_device *));
928 static int dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout, int next_state, int suspect_state, int (*fn)(struct net_device *, int));
929 static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
930 static int test_for_100Mb(struct net_device *dev, int msec);
931 static int wait_for_link(struct net_device *dev);
932 static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
933 static int is_spd_100(struct net_device *dev);
934 static int is_100_up(struct net_device *dev);
935 static int is_10_up(struct net_device *dev);
936 static int is_anc_capable(struct net_device *dev);
937 static int ping_media(struct net_device *dev, int msec);
938 static struct sk_buff *de4x5_alloc_rx_buff(struct net_device *dev, int index, int len);
939 static void de4x5_free_rx_buffs(struct net_device *dev);
940 static void de4x5_free_tx_buffs(struct net_device *dev);
941 static void de4x5_save_skbs(struct net_device *dev);
942 static void de4x5_rst_desc_ring(struct net_device *dev);
943 static void de4x5_cache_state(struct net_device *dev, int flag);
944 static void de4x5_put_cache(struct net_device *dev, struct sk_buff *skb);
945 static void de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb);
946 static struct sk_buff *de4x5_get_cache(struct net_device *dev);
947 static void de4x5_setup_intr(struct net_device *dev);
948 static void de4x5_init_connection(struct net_device *dev);
949 static int de4x5_reset_phy(struct net_device *dev);
950 static void reset_init_sia(struct net_device *dev, s32 sicr, s32 strr, s32 sigr);
951 static int test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec);
952 static int test_tp(struct net_device *dev, s32 msec);
953 static int EISA_signature(char *name, struct device *device);
954 static int PCI_signature(char *name, struct de4x5_private *lp);
955 static void DevicePresent(struct net_device *dev, u_long iobase);
956 static void enet_addr_rst(u_long aprom_addr);
957 static int de4x5_bad_srom(struct de4x5_private *lp);
958 static short srom_rd(u_long address, u_char offset);
959 static void srom_latch(u_int command, u_long address);
960 static void srom_command(u_int command, u_long address);
961 static void srom_address(u_int command, u_long address, u_char offset);
962 static short srom_data(u_int command, u_long address);
963
964 static void sendto_srom(u_int command, u_long addr);
965 static int getfrom_srom(u_long addr);
966 static int srom_map_media(struct net_device *dev);
967 static int srom_infoleaf_info(struct net_device *dev);
968 static void srom_init(struct net_device *dev);
969 static void srom_exec(struct net_device *dev, u_char *p);
970 static int mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr);
971 static void mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr);
972 static int mii_rdata(u_long ioaddr);
973 static void mii_wdata(int data, int len, u_long ioaddr);
974 static void mii_ta(u_long rw, u_long ioaddr);
975 static int mii_swap(int data, int len);
976 static void mii_address(u_char addr, u_long ioaddr);
977 static void sendto_mii(u32 command, int data, u_long ioaddr);
978 static int getfrom_mii(u32 command, u_long ioaddr);
979 static int mii_get_oui(u_char phyaddr, u_long ioaddr);
980 static int mii_get_phy(struct net_device *dev);
981 static void SetMulticastFilter(struct net_device *dev);
982 static int get_hw_addr(struct net_device *dev);
983 static void srom_repair(struct net_device *dev, int card);
984 static int test_bad_enet(struct net_device *dev, int status);
985 static int an_exception(struct de4x5_private *lp);
986 static char *build_setup_frame(struct net_device *dev, int mode);
987 static void disable_ast(struct net_device *dev);
988 static long de4x5_switch_mac_port(struct net_device *dev);
989 static int gep_rd(struct net_device *dev);
990 static void gep_wr(s32 data, struct net_device *dev);
991 static void yawn(struct net_device *dev, int state);
992 static void de4x5_parse_params(struct net_device *dev);
993 static void de4x5_dbg_open(struct net_device *dev);
994 static void de4x5_dbg_mii(struct net_device *dev, int k);
995 static void de4x5_dbg_media(struct net_device *dev);
996 static void de4x5_dbg_srom(struct de4x5_srom *p);
997 static void de4x5_dbg_rx(struct sk_buff *skb, int len);
998 static int dc21041_infoleaf(struct net_device *dev);
999 static int dc21140_infoleaf(struct net_device *dev);
1000 static int dc21142_infoleaf(struct net_device *dev);
1001 static int dc21143_infoleaf(struct net_device *dev);
1002 static int type0_infoblock(struct net_device *dev, u_char count, u_char *p);
1003 static int type1_infoblock(struct net_device *dev, u_char count, u_char *p);
1004 static int type2_infoblock(struct net_device *dev, u_char count, u_char *p);
1005 static int type3_infoblock(struct net_device *dev, u_char count, u_char *p);
1006 static int type4_infoblock(struct net_device *dev, u_char count, u_char *p);
1007 static int type5_infoblock(struct net_device *dev, u_char count, u_char *p);
1008 static int compact_infoblock(struct net_device *dev, u_char count, u_char *p);
1009
1010
1011
1012
1013
1014
1015
1016 static int io=0x0;
1017
1018 module_param_hw(io, int, ioport, 0);
1019 module_param(de4x5_debug, int, 0);
1020 module_param(dec_only, int, 0);
1021 module_param(args, charp, 0);
1022
1023 MODULE_PARM_DESC(io, "de4x5 I/O base address");
1024 MODULE_PARM_DESC(de4x5_debug, "de4x5 debug mask");
1025 MODULE_PARM_DESC(dec_only, "de4x5 probe only for Digital boards (0-1)");
1026 MODULE_PARM_DESC(args, "de4x5 full duplex and media type settings; see de4x5.c for details");
1027 MODULE_LICENSE("GPL");
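/*
** Illustrative load example (parameter values are hypothetical; see
** de4x5_parse_params() for the keywords actually accepted):
**
**     insmod de4x5 args='eth0:fdx autosense=TP'
*/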
1028
1029
1030
1031
1032 struct InfoLeaf {
1033 int chipset;
1034 int (*fn)(struct net_device *);
1035 };
1036 static struct InfoLeaf infoleaf_array[] = {
1037 {DC21041, dc21041_infoleaf},
1038 {DC21140, dc21140_infoleaf},
1039 {DC21142, dc21142_infoleaf},
1040 {DC21143, dc21143_infoleaf}
1041 };
1042 #define INFOLEAF_SIZE ARRAY_SIZE(infoleaf_array)
1043
1044
1045
1046
1047 static int (*dc_infoblock[])(struct net_device *dev, u_char, u_char *) = {
1048 type0_infoblock,
1049 type1_infoblock,
1050 type2_infoblock,
1051 type3_infoblock,
1052 type4_infoblock,
1053 type5_infoblock,
1054 compact_infoblock
1055 };
1056
1057 #define COMPACT (ARRAY_SIZE(dc_infoblock) - 1)
1058
1059
1060
1061
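/*
** Software-reset the chip via the BMR_SWR bit in the bus mode register,
** then restore the original BMR value; the trailing reads and delays give
** the device time to settle.
*/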
1062 #define RESET_DE4X5 {\
1063 int i;\
1064 i=inl(DE4X5_BMR);\
1065 mdelay(1);\
1066 outl(i | BMR_SWR, DE4X5_BMR);\
1067 mdelay(1);\
1068 outl(i, DE4X5_BMR);\
1069 mdelay(1);\
1070 for (i=0;i<5;i++) {inl(DE4X5_BMR); mdelay(1);}\
1071 mdelay(1);\
1072 }
1073
1074 #define PHY_HARD_RESET {\
1075 outl(GEP_HRST, DE4X5_GEP); \
1076 mdelay(1); \
1077 outl(0x00, DE4X5_GEP);\
1078 mdelay(2); \
1079 }
1080
1081 static const struct net_device_ops de4x5_netdev_ops = {
1082 .ndo_open = de4x5_open,
1083 .ndo_stop = de4x5_close,
1084 .ndo_start_xmit = de4x5_queue_pkt,
1085 .ndo_get_stats = de4x5_get_stats,
1086 .ndo_set_rx_mode = set_multicast_list,
1087 .ndo_do_ioctl = de4x5_ioctl,
1088 .ndo_set_mac_address= eth_mac_addr,
1089 .ndo_validate_addr = eth_validate_addr,
1090 };
1091
1092
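/*
** One-time adapter initialisation: wake the chip, software-reset it,
** identify the board and read its station address, allocate the DMA
** descriptor rings, then register the net device and put it back to sleep.
*/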
1093 static int
1094 de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1095 {
1096 char name[DE4X5_NAME_LENGTH + 1];
1097 struct de4x5_private *lp = netdev_priv(dev);
1098 struct pci_dev *pdev = NULL;
1099 int i, status=0;
1100
1101 dev_set_drvdata(gendev, dev);
1102
1103
1104 if (lp->bus == EISA) {
1105 outb(WAKEUP, PCI_CFPM);
1106 } else {
1107 pdev = to_pci_dev (gendev);
1108 pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
1109 }
1110 mdelay(10);
1111
1112 RESET_DE4X5;
1113
1114 if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
1115 return -ENXIO;
1116 }
1117
1118
1119
1120
1121 lp->useSROM = false;
1122 if (lp->bus == PCI) {
1123 PCI_signature(name, lp);
1124 } else {
1125 EISA_signature(name, gendev);
1126 }
1127
1128 if (*name == '\0') {
1129 return -ENXIO;
1130 }
1131
1132 dev->base_addr = iobase;
1133 printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);
1134
1135 status = get_hw_addr(dev);
1136 printk(", h/w address %pM\n", dev->dev_addr);
1137
1138 if (status != 0) {
1139 printk(" which has an Ethernet PROM CRC error.\n");
1140 return -ENXIO;
1141 } else {
1142 skb_queue_head_init(&lp->cache.queue);
1143 lp->cache.gepc = GEP_INIT;
1144 lp->asBit = GEP_SLNK;
1145 lp->asPolarity = GEP_SLNK;
1146 lp->asBitValid = ~0;
1147 lp->timeout = -1;
1148 lp->gendev = gendev;
1149 spin_lock_init(&lp->lock);
1150 timer_setup(&lp->timer, de4x5_ast, 0);
1151 de4x5_parse_params(dev);
1152
1153
1154
1155
1156 lp->autosense = lp->params.autosense;
1157 if (lp->chipset != DC21140) {
1158 if ((lp->chipset==DC21040) && (lp->params.autosense&TP_NW)) {
1159 lp->params.autosense = TP;
1160 }
1161 if ((lp->chipset==DC21041) && (lp->params.autosense&BNC_AUI)) {
1162 lp->params.autosense = BNC;
1163 }
1164 }
1165 lp->fdx = lp->params.fdx;
1166 sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));
1167
1168 lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);
1169 #if defined(__alpha__) || defined(__powerpc__) || defined(CONFIG_SPARC) || defined(DE4X5_DO_MEMCPY)
1170 lp->dma_size += RX_BUFF_SZ * NUM_RX_DESC + DE4X5_ALIGN;
1171 #endif
1172 lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
1173 &lp->dma_rings, GFP_ATOMIC);
1174 if (lp->rx_ring == NULL) {
1175 return -ENOMEM;
1176 }
1177
1178 lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
1179
1180
1181
1182
1183
1184 #if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
1185 for (i=0; i<NUM_RX_DESC; i++) {
1186 lp->rx_ring[i].status = 0;
1187 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
1188 lp->rx_ring[i].buf = 0;
1189 lp->rx_ring[i].next = 0;
1190 lp->rx_skb[i] = (struct sk_buff *) 1;
1191 }
1192
1193 #else
1194 {
1195 dma_addr_t dma_rx_bufs;
1196
1197 dma_rx_bufs = lp->dma_rings + (NUM_RX_DESC + NUM_TX_DESC)
1198 * sizeof(struct de4x5_desc);
1199 dma_rx_bufs = (dma_rx_bufs + DE4X5_ALIGN) & ~DE4X5_ALIGN;
1200 lp->rx_bufs = (char *)(((long)(lp->rx_ring + NUM_RX_DESC
1201 + NUM_TX_DESC) + DE4X5_ALIGN) & ~DE4X5_ALIGN);
1202 for (i=0; i<NUM_RX_DESC; i++) {
1203 lp->rx_ring[i].status = 0;
1204 lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
1205 lp->rx_ring[i].buf =
1206 cpu_to_le32(dma_rx_bufs+i*RX_BUFF_SZ);
1207 lp->rx_ring[i].next = 0;
1208 lp->rx_skb[i] = (struct sk_buff *) 1;
1209 }
1210
1211 }
1212 #endif
1213
1214 barrier();
1215
1216 lp->rxRingSize = NUM_RX_DESC;
1217 lp->txRingSize = NUM_TX_DESC;
1218
1219
1220 lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
1221 lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
1222
1223
1224 outl(lp->dma_rings, DE4X5_RRBA);
1225 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
1226 DE4X5_TRBA);
1227
1228
1229 lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
1230 lp->irq_en = IMR_NIM | IMR_AIM;
1231
1232
1233 create_packet(dev, lp->frame, sizeof(lp->frame));
1234
1235
1236 i = lp->cfrv & 0x000000fe;
1237 if ((lp->chipset == DC21140) && (i == 0x20)) {
1238 lp->rx_ovf = 1;
1239 }
1240
1241
1242 if (lp->useSROM) {
1243 lp->state = INITIALISED;
1244 if (srom_infoleaf_info(dev)) {
1245 dma_free_coherent (gendev, lp->dma_size,
1246 lp->rx_ring, lp->dma_rings);
1247 return -ENXIO;
1248 }
1249 srom_init(dev);
1250 }
1251
1252 lp->state = CLOSED;
1253
1254
1255
1256
1257 if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
1258 mii_get_phy(dev);
1259 }
1260
1261 printk(" and requires IRQ%d (provided by %s).\n", dev->irq,
1262 ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
1263 }
1264
1265 if (de4x5_debug & DEBUG_VERSION) {
1266 printk(version);
1267 }
1268
1269
1270 SET_NETDEV_DEV(dev, gendev);
1271 dev->netdev_ops = &de4x5_netdev_ops;
1272 dev->mem_start = 0;
1273
1274
1275 if ((status = register_netdev (dev))) {
1276 dma_free_coherent (gendev, lp->dma_size,
1277 lp->rx_ring, lp->dma_rings);
1278 return status;
1279 }
1280
1281
1282 yawn(dev, SLEEP);
1283
1284 return status;
1285 }
1286
1287
1288 static int
1289 de4x5_open(struct net_device *dev)
1290 {
1291 struct de4x5_private *lp = netdev_priv(dev);
1292 u_long iobase = dev->base_addr;
1293 int i, status = 0;
1294 s32 omr;
1295
1296
1297 for (i=0; i<lp->rxRingSize; i++) {
1298 if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
1299 de4x5_free_rx_buffs(dev);
1300 return -EAGAIN;
1301 }
1302 }
1303
1304
1305
1306
1307 yawn(dev, WAKEUP);
1308
1309
1310
1311
1312 status = de4x5_init(dev);
1313 spin_lock_init(&lp->lock);
1314 lp->state = OPEN;
1315 de4x5_dbg_open(dev);
1316
1317 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1318 lp->adapter_name, dev)) {
1319 printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
1320 if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
1321 lp->adapter_name, dev)) {
1322 printk("\n Cannot get IRQ- reconfigure your hardware.\n");
1323 disable_ast(dev);
1324 de4x5_free_rx_buffs(dev);
1325 de4x5_free_tx_buffs(dev);
1326 yawn(dev, SLEEP);
1327 lp->state = CLOSED;
1328 return -EAGAIN;
1329 } else {
1330 printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
1331 printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
1332 }
1333 }
1334
1335 lp->interrupt = UNMASK_INTERRUPTS;
1336 netif_trans_update(dev);
1337
1338 START_DE4X5;
1339
1340 de4x5_setup_intr(dev);
1341
1342 if (de4x5_debug & DEBUG_OPEN) {
1343 printk("\tsts: 0x%08x\n", inl(DE4X5_STS));
1344 printk("\tbmr: 0x%08x\n", inl(DE4X5_BMR));
1345 printk("\timr: 0x%08x\n", inl(DE4X5_IMR));
1346 printk("\tomr: 0x%08x\n", inl(DE4X5_OMR));
1347 printk("\tsisr: 0x%08x\n", inl(DE4X5_SISR));
1348 printk("\tsicr: 0x%08x\n", inl(DE4X5_SICR));
1349 printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
1350 printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
1351 }
1352
1353 return status;
1354 }
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364 static int
1365 de4x5_init(struct net_device *dev)
1366 {
1367
1368 netif_stop_queue(dev);
1369
1370 de4x5_sw_reset(dev);
1371
1372
1373 autoconf_media(dev);
1374
1375 return 0;
1376 }
1377
1378 static int
1379 de4x5_sw_reset(struct net_device *dev)
1380 {
1381 struct de4x5_private *lp = netdev_priv(dev);
1382 u_long iobase = dev->base_addr;
1383 int i, j, status = 0;
1384 s32 bmr, omr;
1385
1386
1387 if (!lp->useSROM) {
1388 if (lp->phy[lp->active].id != 0) {
1389 lp->infoblock_csr6 = OMR_SDP | OMR_PS | OMR_HBD;
1390 } else {
1391 lp->infoblock_csr6 = OMR_SDP | OMR_TTM;
1392 }
1393 de4x5_switch_mac_port(dev);
1394 }
1395
1396
1397
1398
1399
1400
1401 bmr = (lp->chipset==DC21140 ? PBL_8 : PBL_4) | DESC_SKIP_LEN | DE4X5_CACHE_ALIGN;
1402 bmr |= ((lp->chipset & ~0x00ff)==DC2114x ? BMR_RML : 0);
1403 outl(bmr, DE4X5_BMR);
1404
1405 omr = inl(DE4X5_OMR) & ~OMR_PR;
1406 if (lp->chipset == DC21140) {
1407 omr |= (OMR_SDP | OMR_SB);
1408 }
1409 lp->setup_f = PERFECT;
1410 outl(lp->dma_rings, DE4X5_RRBA);
1411 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
1412 DE4X5_TRBA);
1413
1414 lp->rx_new = lp->rx_old = 0;
1415 lp->tx_new = lp->tx_old = 0;
1416
1417 for (i = 0; i < lp->rxRingSize; i++) {
1418 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
1419 }
1420
1421 for (i = 0; i < lp->txRingSize; i++) {
1422 lp->tx_ring[i].status = cpu_to_le32(0);
1423 }
1424
1425 barrier();
1426
1427
1428 SetMulticastFilter(dev);
1429
1430 load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
1431 outl(omr|OMR_ST, DE4X5_OMR);
1432
1433
1434
1435 for (j=0, i=0;(i<500) && (j==0);i++) {
1436 mdelay(1);
1437 if ((s32)le32_to_cpu(lp->tx_ring[lp->tx_new].status) >= 0) j=1;
1438 }
1439 outl(omr, DE4X5_OMR);
1440
1441 if (j == 0) {
1442 printk("%s: Setup frame timed out, status %08x\n", dev->name,
1443 inl(DE4X5_STS));
1444 status = -EIO;
1445 }
1446
1447 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1448 lp->tx_old = lp->tx_new;
1449
1450 return status;
1451 }
1452
1453
1454
1455
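/*
** Hard-start transmit. Packets are parked in lp->cache when the ring is
** busy or an interrupt is being serviced; otherwise they are loaded into
** the TX ring and the chip is prodded with a transmit poll demand.
*/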
1456 static netdev_tx_t
1457 de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1458 {
1459 struct de4x5_private *lp = netdev_priv(dev);
1460 u_long iobase = dev->base_addr;
1461 u_long flags = 0;
1462
1463 netif_stop_queue(dev);
1464 if (!lp->tx_enable)
1465 goto tx_err;
1466
1467
1468
1469
1470
1471
1472 spin_lock_irqsave(&lp->lock, flags);
1473 de4x5_tx(dev);
1474 spin_unlock_irqrestore(&lp->lock, flags);
1475
1476
1477 if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
1478 goto tx_err;
1479
1480
1481 if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
1482 if (lp->interrupt) {
1483 de4x5_putb_cache(dev, skb);
1484 } else {
1485 de4x5_put_cache(dev, skb);
1486 }
1487 if (de4x5_debug & DEBUG_TX) {
1488 printk("%s: transmit busy, lost media or stale skb found:\n STS:%08x\n tbusy:%d\n IMR:%08x\n OMR:%08x\n Stale skb: %s\n",dev->name, inl(DE4X5_STS), netif_queue_stopped(dev), inl(DE4X5_IMR), inl(DE4X5_OMR), ((u_long) lp->tx_skb[lp->tx_new] > 1) ? "YES" : "NO");
1489 }
1490 } else if (skb->len > 0) {
1491
1492 if (!skb_queue_empty(&lp->cache.queue) && !lp->interrupt) {
1493 de4x5_put_cache(dev, skb);
1494 skb = de4x5_get_cache(dev);
1495 }
1496
1497 while (skb && !netif_queue_stopped(dev) &&
1498 (u_long) lp->tx_skb[lp->tx_new] <= 1) {
1499 spin_lock_irqsave(&lp->lock, flags);
1500 netif_stop_queue(dev);
1501 load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
1502 lp->stats.tx_bytes += skb->len;
1503 outl(POLL_DEMAND, DE4X5_TPD);
1504
1505 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1506
1507 if (TX_BUFFS_AVAIL) {
1508 netif_start_queue(dev);
1509 }
1510 skb = de4x5_get_cache(dev);
1511 spin_unlock_irqrestore(&lp->lock, flags);
1512 }
1513 if (skb) de4x5_putb_cache(dev, skb);
1514 }
1515
1516 lp->cache.lock = 0;
1517
1518 return NETDEV_TX_OK;
1519 tx_err:
1520 dev_kfree_skb_any(skb);
1521 return NETDEV_TX_OK;
1522 }
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
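/*
** Interrupt handler: acknowledge the status bits, service RX/TX
** completions, recover from transmit underruns, stop the chip on a fatal
** bus error, then drain any packets cached while the driver was busy.
*/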
1535 static irqreturn_t
1536 de4x5_interrupt(int irq, void *dev_id)
1537 {
1538 struct net_device *dev = dev_id;
1539 struct de4x5_private *lp;
1540 s32 imr, omr, sts, limit;
1541 u_long iobase;
1542 unsigned int handled = 0;
1543
1544 lp = netdev_priv(dev);
1545 spin_lock(&lp->lock);
1546 iobase = dev->base_addr;
1547
1548 DISABLE_IRQs;
1549
1550 if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
1551 printk("%s: Re-entering the interrupt handler.\n", dev->name);
1552
1553 synchronize_irq(dev->irq);
1554
1555 for (limit=0; limit<8; limit++) {
1556 sts = inl(DE4X5_STS);
1557 outl(sts, DE4X5_STS);
1558
1559 if (!(sts & lp->irq_mask)) break;
1560 handled = 1;
1561
1562 if (sts & (STS_RI | STS_RU))
1563 de4x5_rx(dev);
1564
1565 if (sts & (STS_TI | STS_TU))
1566 de4x5_tx(dev);
1567
1568 if (sts & STS_LNF) {
1569 lp->irq_mask &= ~IMR_LFM;
1570 }
1571
1572 if (sts & STS_UNF) {
1573 de4x5_txur(dev);
1574 }
1575
1576 if (sts & STS_SE) {
1577 STOP_DE4X5;
1578 printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
1579 dev->name, sts);
1580 spin_unlock(&lp->lock);
1581 return IRQ_HANDLED;
1582 }
1583 }
1584
1585
1586 if (!test_and_set_bit(0, (void *)&lp->cache.lock)) {
1587 while (!skb_queue_empty(&lp->cache.queue) && !netif_queue_stopped(dev) && lp->tx_enable) {
1588 de4x5_queue_pkt(de4x5_get_cache(dev), dev);
1589 }
1590 lp->cache.lock = 0;
1591 }
1592
1593 lp->interrupt = UNMASK_INTERRUPTS;
1594 ENABLE_IRQs;
1595 spin_unlock(&lp->lock);
1596
1597 return IRQ_RETVAL(handled);
1598 }
1599
1600 static int
1601 de4x5_rx(struct net_device *dev)
1602 {
1603 struct de4x5_private *lp = netdev_priv(dev);
1604 u_long iobase = dev->base_addr;
1605 int entry;
1606 s32 status;
1607
1608 for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
1609 entry=lp->rx_new) {
1610 status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
1611
1612 if (lp->rx_ovf) {
1613 if (inl(DE4X5_MFC) & MFC_FOCM) {
1614 de4x5_rx_ovfc(dev);
1615 break;
1616 }
1617 }
1618
1619 if (status & RD_FS) {
1620 lp->rx_old = entry;
1621 }
1622
1623 if (status & RD_LS) {
1624 if (lp->tx_enable) lp->linkOK++;
1625 if (status & RD_ES) {
1626 lp->stats.rx_errors++;
1627 if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
1628 if (status & RD_CE) lp->stats.rx_crc_errors++;
1629 if (status & RD_OF) lp->stats.rx_fifo_errors++;
1630 if (status & RD_TL) lp->stats.rx_length_errors++;
1631 if (status & RD_RF) lp->pktStats.rx_runt_frames++;
1632 if (status & RD_CS) lp->pktStats.rx_collision++;
1633 if (status & RD_DB) lp->pktStats.rx_dribble++;
1634 if (status & RD_OF) lp->pktStats.rx_overflow++;
1635 } else {
1636 struct sk_buff *skb;
1637 short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
1638 >> 16) - 4;
1639
1640 if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
1641 printk("%s: Insufficient memory; nuking packet.\n",
1642 dev->name);
1643 lp->stats.rx_dropped++;
1644 } else {
1645 de4x5_dbg_rx(skb, pkt_len);
1646
1647
1648 skb->protocol=eth_type_trans(skb,dev);
1649 de4x5_local_stats(dev, skb->data, pkt_len);
1650 netif_rx(skb);
1651
1652
1653 lp->stats.rx_packets++;
1654 lp->stats.rx_bytes += pkt_len;
1655 }
1656 }
1657
1658
1659 for (;lp->rx_old!=entry;lp->rx_old=(lp->rx_old + 1)%lp->rxRingSize) {
1660 lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
1661 barrier();
1662 }
1663 lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
1664 barrier();
1665 }
1666
1667
1668
1669
1670 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1671 }
1672
1673 return 0;
1674 }
1675
1676 static inline void
1677 de4x5_free_tx_buff(struct de4x5_private *lp, int entry)
1678 {
1679 dma_unmap_single(lp->gendev, le32_to_cpu(lp->tx_ring[entry].buf),
1680 le32_to_cpu(lp->tx_ring[entry].des1) & TD_TBS1,
1681 DMA_TO_DEVICE);
1682 if ((u_long) lp->tx_skb[entry] > 1)
1683 dev_kfree_skb_irq(lp->tx_skb[entry]);
1684 lp->tx_skb[entry] = NULL;
1685 }
1686
1687
1688
1689
1690 static int
1691 de4x5_tx(struct net_device *dev)
1692 {
1693 struct de4x5_private *lp = netdev_priv(dev);
1694 u_long iobase = dev->base_addr;
1695 int entry;
1696 s32 status;
1697
1698 for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
1699 status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
1700 if (status < 0) {
1701 break;
1702 } else if (status != 0x7fffffff) {
1703 if (status & TD_ES) {
1704 lp->stats.tx_errors++;
1705 if (status & TD_NC) lp->stats.tx_carrier_errors++;
1706 if (status & TD_LC) lp->stats.tx_window_errors++;
1707 if (status & TD_UF) lp->stats.tx_fifo_errors++;
1708 if (status & TD_EC) lp->pktStats.excessive_collisions++;
1709 if (status & TD_DE) lp->stats.tx_aborted_errors++;
1710
1711 if (TX_PKT_PENDING) {
1712 outl(POLL_DEMAND, DE4X5_TPD);
1713 }
1714 } else {
1715 lp->stats.tx_packets++;
1716 if (lp->tx_enable) lp->linkOK++;
1717 }
1718
1719 lp->stats.collisions += ((status & TD_EC) ? 16 :
1720 ((status & TD_CC) >> 3));
1721
1722
1723 if (lp->tx_skb[entry] != NULL)
1724 de4x5_free_tx_buff(lp, entry);
1725 }
1726
1727
1728 lp->tx_old = (lp->tx_old + 1) % lp->txRingSize;
1729 }
1730
1731
1732 if (TX_BUFFS_AVAIL && netif_queue_stopped(dev)) {
1733 if (lp->interrupt)
1734 netif_wake_queue(dev);
1735 else
1736 netif_start_queue(dev);
1737 }
1738
1739 return 0;
1740 }
1741
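/*
** Autosense timer callback: run the chip-specific media autoconf routine
** and re-arm the timer with the interval (in ms) that it returns.
*/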
1742 static void
1743 de4x5_ast(struct timer_list *t)
1744 {
1745 struct de4x5_private *lp = from_timer(lp, t, timer);
1746 struct net_device *dev = dev_get_drvdata(lp->gendev);
1747 int next_tick = DE4X5_AUTOSENSE_MS;
1748 int dt;
1749
1750 if (lp->useSROM)
1751 next_tick = srom_autoconf(dev);
1752 else if (lp->chipset == DC21140)
1753 next_tick = dc21140m_autoconf(dev);
1754 else if (lp->chipset == DC21041)
1755 next_tick = dc21041_autoconf(dev);
1756 else if (lp->chipset == DC21040)
1757 next_tick = dc21040_autoconf(dev);
1758 lp->linkOK = 0;
1759
1760 dt = (next_tick * HZ) / 1000;
1761
1762 if (!dt)
1763 dt = 1;
1764
1765 mod_timer(&lp->timer, jiffies + dt);
1766 }
1767
1768 static int
1769 de4x5_txur(struct net_device *dev)
1770 {
1771 struct de4x5_private *lp = netdev_priv(dev);
1772 u_long iobase = dev->base_addr;
1773 int omr;
1774
1775 omr = inl(DE4X5_OMR);
1776 if (!(omr & OMR_SF) || (lp->chipset==DC21041) || (lp->chipset==DC21040)) {
1777 omr &= ~(OMR_ST|OMR_SR);
1778 outl(omr, DE4X5_OMR);
1779 while (inl(DE4X5_STS) & STS_TS);
1780 if ((omr & OMR_TR) < OMR_TR) {
1781 omr += 0x4000;
1782 } else {
1783 omr |= OMR_SF;
1784 }
1785 outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
1786 }
1787
1788 return 0;
1789 }
1790
1791 static int
1792 de4x5_rx_ovfc(struct net_device *dev)
1793 {
1794 struct de4x5_private *lp = netdev_priv(dev);
1795 u_long iobase = dev->base_addr;
1796 int omr;
1797
1798 omr = inl(DE4X5_OMR);
1799 outl(omr & ~OMR_SR, DE4X5_OMR);
1800 while (inl(DE4X5_STS) & STS_RS);
1801
1802 for (; (s32)le32_to_cpu(lp->rx_ring[lp->rx_new].status)>=0;) {
1803 lp->rx_ring[lp->rx_new].status = cpu_to_le32(R_OWN);
1804 lp->rx_new = (lp->rx_new + 1) % lp->rxRingSize;
1805 }
1806
1807 outl(omr, DE4X5_OMR);
1808
1809 return 0;
1810 }
1811
1812 static int
1813 de4x5_close(struct net_device *dev)
1814 {
1815 struct de4x5_private *lp = netdev_priv(dev);
1816 u_long iobase = dev->base_addr;
1817 s32 imr, omr;
1818
1819 disable_ast(dev);
1820
1821 netif_stop_queue(dev);
1822
1823 if (de4x5_debug & DEBUG_CLOSE) {
1824 printk("%s: Shutting down ethercard, status was %8.8x.\n",
1825 dev->name, inl(DE4X5_STS));
1826 }
1827
1828
1829
1830
1831 DISABLE_IRQs;
1832 STOP_DE4X5;
1833
1834
1835 free_irq(dev->irq, dev);
1836 lp->state = CLOSED;
1837
1838
1839 de4x5_free_rx_buffs(dev);
1840 de4x5_free_tx_buffs(dev);
1841
1842
1843 yawn(dev, SLEEP);
1844
1845 return 0;
1846 }
1847
1848 static struct net_device_stats *
1849 de4x5_get_stats(struct net_device *dev)
1850 {
1851 struct de4x5_private *lp = netdev_priv(dev);
1852 u_long iobase = dev->base_addr;
1853
1854 lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
1855
1856 return &lp->stats;
1857 }
1858
1859 static void
1860 de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
1861 {
1862 struct de4x5_private *lp = netdev_priv(dev);
1863 int i;
1864
1865 for (i=1; i<DE4X5_PKT_STAT_SZ-1; i++) {
1866 if (pkt_len < (i*DE4X5_PKT_BIN_SZ)) {
1867 lp->pktStats.bins[i]++;
1868 i = DE4X5_PKT_STAT_SZ;
1869 }
1870 }
1871 if (is_multicast_ether_addr(buf)) {
1872 if (is_broadcast_ether_addr(buf)) {
1873 lp->pktStats.broadcast++;
1874 } else {
1875 lp->pktStats.multicast++;
1876 }
1877 } else if (ether_addr_equal(buf, dev->dev_addr)) {
1878 lp->pktStats.unicast++;
1879 }
1880
1881 lp->pktStats.bins[0]++;
1882 if (lp->pktStats.bins[0] == 0) {
1883 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1884 }
1885 }
1886
1887
1888
1889
1890
1891
1892
1893
1894
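/*
** Place a buffer (packet or setup frame) in the next TX descriptor. The
** buffer address and control bits are written first; ownership is handed
** to the chip last by setting T_OWN, with barriers preserving that order.
*/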
1895 static void
1896 load_packet(struct net_device *dev, char *buf, u32 flags, struct sk_buff *skb)
1897 {
1898 struct de4x5_private *lp = netdev_priv(dev);
1899 int entry = (lp->tx_new ? lp->tx_new-1 : lp->txRingSize-1);
1900 dma_addr_t buf_dma = dma_map_single(lp->gendev, buf, flags & TD_TBS1, DMA_TO_DEVICE);
1901
1902 lp->tx_ring[lp->tx_new].buf = cpu_to_le32(buf_dma);
1903 lp->tx_ring[lp->tx_new].des1 &= cpu_to_le32(TD_TER);
1904 lp->tx_ring[lp->tx_new].des1 |= cpu_to_le32(flags);
1905 lp->tx_skb[lp->tx_new] = skb;
1906 lp->tx_ring[entry].des1 &= cpu_to_le32(~TD_IC);
1907 barrier();
1908
1909 lp->tx_ring[lp->tx_new].status = cpu_to_le32(T_OWN);
1910 barrier();
1911 }
1912
1913
1914
1915
1916 static void
1917 set_multicast_list(struct net_device *dev)
1918 {
1919 struct de4x5_private *lp = netdev_priv(dev);
1920 u_long iobase = dev->base_addr;
1921
1922
1923 if (lp->state == OPEN) {
1924 if (dev->flags & IFF_PROMISC) {
1925 u32 omr;
1926 omr = inl(DE4X5_OMR);
1927 omr |= OMR_PR;
1928 outl(omr, DE4X5_OMR);
1929 } else {
1930 SetMulticastFilter(dev);
1931 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
1932 SETUP_FRAME_LEN, (struct sk_buff *)1);
1933
1934 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
1935 outl(POLL_DEMAND, DE4X5_TPD);
1936 netif_trans_update(dev);
1937 }
1938 }
1939 }
1940
1941
1942
1943
1944
1945
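/*
** Build the setup frame: a hash filter when lp->setup_f == HASH_PERF,
** otherwise a perfect filter of the multicast list; fall back to
** pass-all-multicast when the list exceeds 14 addresses or IFF_ALLMULTI
** is set.
*/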
1946 static void
1947 SetMulticastFilter(struct net_device *dev)
1948 {
1949 struct de4x5_private *lp = netdev_priv(dev);
1950 struct netdev_hw_addr *ha;
1951 u_long iobase = dev->base_addr;
1952 int i, bit, byte;
1953 u16 hashcode;
1954 u32 omr, crc;
1955 char *pa;
1956 unsigned char *addrs;
1957
1958 omr = inl(DE4X5_OMR);
1959 omr &= ~(OMR_PR | OMR_PM);
1960 pa = build_setup_frame(dev, ALL);
1961
1962 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
1963 omr |= OMR_PM;
1964 } else if (lp->setup_f == HASH_PERF) {
1965 netdev_for_each_mc_addr(ha, dev) {
1966 crc = ether_crc_le(ETH_ALEN, ha->addr);
1967 hashcode = crc & DE4X5_HASH_BITS;
1968
1969 byte = hashcode >> 3;
1970 bit = 1 << (hashcode & 0x07);
1971
1972 byte <<= 1;
1973 if (byte & 0x02) {
1974 byte -= 1;
1975 }
1976 lp->setup_frame[byte] |= bit;
1977 }
1978 } else {
1979 netdev_for_each_mc_addr(ha, dev) {
1980 addrs = ha->addr;
1981 for (i=0; i<ETH_ALEN; i++) {
1982 *(pa + (i&1)) = *addrs++;
1983 if (i & 0x01) pa += 4;
1984 }
1985 }
1986 }
1987 outl(omr, DE4X5_OMR);
1988 }
1989
1990 #ifdef CONFIG_EISA
1991
1992 static u_char de4x5_irq[] = EISA_ALLOWED_IRQ_LIST;
1993
1994 static int de4x5_eisa_probe(struct device *gendev)
1995 {
1996 struct eisa_device *edev;
1997 u_long iobase;
1998 u_char irq, regval;
1999 u_short vendor;
2000 u32 cfid;
2001 int status, device;
2002 struct net_device *dev;
2003 struct de4x5_private *lp;
2004
2005 edev = to_eisa_device (gendev);
2006 iobase = edev->base_addr;
2007
2008 if (!request_region (iobase, DE4X5_EISA_TOTAL_SIZE, "de4x5"))
2009 return -EBUSY;
2010
2011 if (!request_region (iobase + DE4X5_EISA_IO_PORTS,
2012 DE4X5_EISA_TOTAL_SIZE, "de4x5")) {
2013 status = -EBUSY;
2014 goto release_reg_1;
2015 }
2016
2017 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2018 status = -ENOMEM;
2019 goto release_reg_2;
2020 }
2021 lp = netdev_priv(dev);
2022
2023 cfid = (u32) inl(PCI_CFID);
2024 lp->cfrv = (u_short) inl(PCI_CFRV);
2025 device = (cfid >> 8) & 0x00ffff00;
2026 vendor = (u_short) cfid;
2027
2028
2029 regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
2030 #ifdef CONFIG_ALPHA
2031
2032
2033
2034
2035
2036
2037 outb (ER1_IAM | 1, EISA_REG1);
2038 mdelay (1);
2039
2040
2041 outb (ER1_IAM, EISA_REG1);
2042 mdelay (1);
2043
2044
2045 outb (ER3_BWE | ER3_BRE, EISA_REG3);
2046
2047
2048 outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
2049 #endif
2050 irq = de4x5_irq[(regval >> 1) & 0x03];
2051
2052 if (is_DC2114x) {
2053 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2054 }
2055 lp->chipset = device;
2056 lp->bus = EISA;
2057
2058
2059 outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
2060 outl(0x00006000, PCI_CFLT);
2061 outl(iobase, PCI_CBIO);
2062
2063 DevicePresent(dev, EISA_APROM);
2064
2065 dev->irq = irq;
2066
2067 if (!(status = de4x5_hw_init (dev, iobase, gendev))) {
2068 return 0;
2069 }
2070
2071 free_netdev (dev);
2072 release_reg_2:
2073 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2074 release_reg_1:
2075 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2076
2077 return status;
2078 }
2079
2080 static int de4x5_eisa_remove(struct device *device)
2081 {
2082 struct net_device *dev;
2083 u_long iobase;
2084
2085 dev = dev_get_drvdata(device);
2086 iobase = dev->base_addr;
2087
2088 unregister_netdev (dev);
2089 free_netdev (dev);
2090 release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
2091 release_region (iobase, DE4X5_EISA_TOTAL_SIZE);
2092
2093 return 0;
2094 }
2095
2096 static const struct eisa_device_id de4x5_eisa_ids[] = {
2097 { "DEC4250", 0 },
2098 { "" }
2099 };
2100 MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
2101
2102 static struct eisa_driver de4x5_eisa_driver = {
2103 .id_table = de4x5_eisa_ids,
2104 .driver = {
2105 .name = "de4x5",
2106 .probe = de4x5_eisa_probe,
2107 .remove = de4x5_eisa_remove,
2108 }
2109 };
2110 #endif
2111
2112 #ifdef CONFIG_PCI
2113
2114
2115
2116
2117
2118
2119
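/*
** Scan the same PCI bus for another DC21x4x device with a usable SROM and
** record its chipset, bus, IRQ and MAC address in 'last' (apparently used
** when deriving addresses for multi-port boards).
*/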
2120 static void
2121 srom_search(struct net_device *dev, struct pci_dev *pdev)
2122 {
2123 u_char pb;
2124 u_short vendor, status;
2125 u_int irq = 0, device;
2126 u_long iobase = 0;
2127 int i, j;
2128 struct de4x5_private *lp = netdev_priv(dev);
2129 struct pci_dev *this_dev;
2130
2131 list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
2132 vendor = this_dev->vendor;
2133 device = this_dev->device << 8;
2134 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
2135
2136
2137 pb = this_dev->bus->number;
2138
2139
2140 lp->device = PCI_SLOT(this_dev->devfn);
2141 lp->bus_num = pb;
2142
2143
2144 if (is_DC2114x) {
2145 device = ((this_dev->revision & CFRV_RN) < DC2114x_BRK
2146 ? DC21142 : DC21143);
2147 }
2148 lp->chipset = device;
2149
2150
2151 iobase = pci_resource_start(this_dev, 0);
2152
2153
2154 irq = this_dev->irq;
2155 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
2156
2157
2158 pci_read_config_word(this_dev, PCI_COMMAND, &status);
2159 if (!(status & PCI_COMMAND_IO)) continue;
2160
2161
2162 DevicePresent(dev, DE4X5_APROM);
2163 for (j=0, i=0; i<ETH_ALEN; i++) {
2164 j += (u_char) *((u_char *)&lp->srom + SROM_HWADD + i);
2165 }
2166 if (j != 0 && j != 6 * 0xff) {
2167 last.chipset = device;
2168 last.bus = pb;
2169 last.irq = irq;
2170 for (i=0; i<ETH_ALEN; i++) {
2171 last.addr[i] = (u_char)*((u_char *)&lp->srom + SROM_HWADD + i);
2172 }
2173 return;
2174 }
2175 }
2176 }
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194 static int de4x5_pci_probe(struct pci_dev *pdev,
2195 const struct pci_device_id *ent)
2196 {
2197 u_char pb, pbus = 0, dev_num, dnum = 0, timer;
2198 u_short vendor, status;
2199 u_int irq = 0, device;
2200 u_long iobase = 0;
2201 int error;
2202 struct net_device *dev;
2203 struct de4x5_private *lp;
2204
2205 dev_num = PCI_SLOT(pdev->devfn);
2206 pb = pdev->bus->number;
2207
2208 if (io) {
2209 pbus = (u_short)(io >> 8);
2210 dnum = (u_short)(io & 0xff);
2211 if ((pbus != pb) || (dnum != dev_num))
2212 return -ENODEV;
2213 }
2214
2215 vendor = pdev->vendor;
2216 device = pdev->device << 8;
2217 if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x))
2218 return -ENODEV;
2219
2220
2221 if ((error = pci_enable_device (pdev)))
2222 return error;
2223
2224 if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
2225 error = -ENOMEM;
2226 goto disable_dev;
2227 }
2228
2229 lp = netdev_priv(dev);
2230 lp->bus = PCI;
2231 lp->bus_num = 0;
2232
2233
2234 if (lp->bus_num != pb) {
2235 lp->bus_num = pb;
2236 srom_search(dev, pdev);
2237 }
2238
2239
2240 lp->cfrv = pdev->revision;
2241
2242
2243 lp->device = dev_num;
2244 lp->bus_num = pb;
2245
2246
2247 if (is_DC2114x) {
2248 device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
2249 }
2250 lp->chipset = device;
2251
2252
2253 iobase = pci_resource_start(pdev, 0);
2254
2255
2256 irq = pdev->irq;
2257 if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
2258 error = -ENODEV;
2259 goto free_dev;
2260 }
2261
2262
2263 pci_read_config_word(pdev, PCI_COMMAND, &status);
2264 #ifdef __powerpc__
2265 if (!(status & PCI_COMMAND_IO)) {
2266 status |= PCI_COMMAND_IO;
2267 pci_write_config_word(pdev, PCI_COMMAND, status);
2268 pci_read_config_word(pdev, PCI_COMMAND, &status);
2269 }
2270 #endif
2271 if (!(status & PCI_COMMAND_IO)) {
2272 error = -ENODEV;
2273 goto free_dev;
2274 }
2275
2276 if (!(status & PCI_COMMAND_MASTER)) {
2277 status |= PCI_COMMAND_MASTER;
2278 pci_write_config_word(pdev, PCI_COMMAND, status);
2279 pci_read_config_word(pdev, PCI_COMMAND, &status);
2280 }
2281 if (!(status & PCI_COMMAND_MASTER)) {
2282 error = -ENODEV;
2283 goto free_dev;
2284 }
2285
2286
2287 pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &timer);
2288 if (timer < 0x60) {
2289 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x60);
2290 }
2291
2292 DevicePresent(dev, DE4X5_APROM);
2293
2294 if (!request_region (iobase, DE4X5_PCI_TOTAL_SIZE, "de4x5")) {
2295 error = -EBUSY;
2296 goto free_dev;
2297 }
2298
2299 dev->irq = irq;
2300
2301 if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
2302 goto release;
2303 }
2304
2305 return 0;
2306
2307 release:
2308 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2309 free_dev:
2310 free_netdev (dev);
2311 disable_dev:
2312 pci_disable_device (pdev);
2313 return error;
2314 }
2315
2316 static void de4x5_pci_remove(struct pci_dev *pdev)
2317 {
2318 struct net_device *dev;
2319 u_long iobase;
2320
2321 dev = pci_get_drvdata(pdev);
2322 iobase = dev->base_addr;
2323
2324 unregister_netdev (dev);
2325 free_netdev (dev);
2326 release_region (iobase, DE4X5_PCI_TOTAL_SIZE);
2327 pci_disable_device (pdev);
2328 }
2329
2330 static const struct pci_device_id de4x5_pci_tbl[] = {
2331 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
2332 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
2333 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
2334 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
2335 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
2336 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
2337 { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142,
2338 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
2339 { },
2340 };
2341
2342 static struct pci_driver de4x5_pci_driver = {
2343 .name = "de4x5",
2344 .id_table = de4x5_pci_tbl,
2345 .probe = de4x5_pci_probe,
2346 .remove = de4x5_pci_remove,
2347 };
2348
2349 #endif
2350
2351
2352
2353
2354
2355
2356
2357
2358 static int
2359 autoconf_media(struct net_device *dev)
2360 {
2361 struct de4x5_private *lp = netdev_priv(dev);
2362 u_long iobase = dev->base_addr;
2363
2364 disable_ast(dev);
2365
2366 lp->c_media = AUTO;
2367 inl(DE4X5_MFC);
2368 lp->media = INIT;
2369 lp->tcount = 0;
2370
2371 de4x5_ast(&lp->timer);
2372
2373 return lp->media;
2374 }
2375
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
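/*
** DC21040 media autosense state machine. Each invocation advances
** lp->media (INIT -> TP -> BNC_AUI -> EXT_SIA -> NC) and returns the delay
** in ms before the timer should call it again; the *_SUSPECT states
** re-test a link that has gone quiet.
*/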
2388 static int
2389 dc21040_autoconf(struct net_device *dev)
2390 {
2391 struct de4x5_private *lp = netdev_priv(dev);
2392 u_long iobase = dev->base_addr;
2393 int next_tick = DE4X5_AUTOSENSE_MS;
2394 s32 imr;
2395
2396 switch (lp->media) {
2397 case INIT:
2398 DISABLE_IRQs;
2399 lp->tx_enable = false;
2400 lp->timeout = -1;
2401 de4x5_save_skbs(dev);
2402 if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
2403 lp->media = TP;
2404 } else if ((lp->autosense == BNC) || (lp->autosense == AUI) || (lp->autosense == BNC_AUI)) {
2405 lp->media = BNC_AUI;
2406 } else if (lp->autosense == EXT_SIA) {
2407 lp->media = EXT_SIA;
2408 } else {
2409 lp->media = NC;
2410 }
2411 lp->local_state = 0;
2412 next_tick = dc21040_autoconf(dev);
2413 break;
2414
2415 case TP:
2416 next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
2417 TP_SUSPECT, test_tp);
2418 break;
2419
2420 case TP_SUSPECT:
2421 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
2422 break;
2423
2424 case BNC:
2425 case AUI:
2426 case BNC_AUI:
2427 next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
2428 BNC_AUI_SUSPECT, ping_media);
2429 break;
2430
2431 case BNC_AUI_SUSPECT:
2432 next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
2433 break;
2434
2435 case EXT_SIA:
2436 next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
2437 NC, EXT_SIA_SUSPECT, ping_media);
2438 break;
2439
2440 case EXT_SIA_SUSPECT:
2441 next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
2442 break;
2443
2444 case NC:
2445
2446 reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
2447 if (lp->media != lp->c_media) {
2448 de4x5_dbg_media(dev);
2449 lp->c_media = lp->media;
2450 }
2451 lp->media = INIT;
2452 lp->tx_enable = false;
2453 break;
2454 }
2455
2456 return next_tick;
2457 }
2458
2459 static int
2460 dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
2461 int next_state, int suspect_state,
2462 int (*fn)(struct net_device *, int))
2463 {
2464 struct de4x5_private *lp = netdev_priv(dev);
2465 int next_tick = DE4X5_AUTOSENSE_MS;
2466 int linkBad;
2467
2468 switch (lp->local_state) {
2469 case 0:
2470 reset_init_sia(dev, csr13, csr14, csr15);
2471 lp->local_state++;
2472 next_tick = 500;
2473 break;
2474
2475 case 1:
2476 if (!lp->tx_enable) {
2477 linkBad = fn(dev, timeout);
2478 if (linkBad < 0) {
2479 next_tick = linkBad & ~TIMER_CB;
2480 } else {
2481 if (linkBad && (lp->autosense == AUTO)) {
2482 lp->local_state = 0;
2483 lp->media = next_state;
2484 } else {
2485 de4x5_init_connection(dev);
2486 }
2487 }
2488 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2489 lp->media = suspect_state;
2490 next_tick = 3000;
2491 }
2492 break;
2493 }
2494
2495 return next_tick;
2496 }
2497
2498 static int
2499 de4x5_suspect_state(struct net_device *dev, int timeout, int prev_state,
2500 int (*fn)(struct net_device *, int),
2501 int (*asfn)(struct net_device *))
2502 {
2503 struct de4x5_private *lp = netdev_priv(dev);
2504 int next_tick = DE4X5_AUTOSENSE_MS;
2505 int linkBad;
2506
2507 switch (lp->local_state) {
2508 case 1:
2509 if (lp->linkOK) {
2510 lp->media = prev_state;
2511 } else {
2512 lp->local_state++;
2513 next_tick = asfn(dev);
2514 }
2515 break;
2516
2517 case 2:
2518 linkBad = fn(dev, timeout);
2519 if (linkBad < 0) {
2520 next_tick = linkBad & ~TIMER_CB;
2521 } else if (!linkBad) {
2522 lp->local_state--;
2523 lp->media = prev_state;
2524 } else {
2525 lp->media = INIT;
2526 lp->tcount++;
2527 }
2528 }
2529
2530 return next_tick;
2531 }
2532
2533
2534
2535
2536
2537
2538
2539
2540
2541
2542 static int
2543 dc21041_autoconf(struct net_device *dev)
2544 {
2545 struct de4x5_private *lp = netdev_priv(dev);
2546 u_long iobase = dev->base_addr;
2547 s32 sts, irqs, irq_mask, imr, omr;
2548 int next_tick = DE4X5_AUTOSENSE_MS;
2549
2550 switch (lp->media) {
2551 case INIT:
2552 DISABLE_IRQs;
2553 lp->tx_enable = false;
2554 lp->timeout = -1;
2555 de4x5_save_skbs(dev);
2556 if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
2557 lp->media = TP;
2558 } else if (lp->autosense == TP) {
2559 lp->media = TP;
2560 } else if (lp->autosense == BNC) {
2561 lp->media = BNC;
2562 } else if (lp->autosense == AUI) {
2563 lp->media = AUI;
2564 } else {
2565 lp->media = NC;
2566 }
2567 lp->local_state = 0;
2568 next_tick = dc21041_autoconf(dev);
2569 break;
2570
2571 case TP_NW:
2572 if (lp->timeout < 0) {
2573 omr = inl(DE4X5_OMR);
2574 outl(omr | OMR_FDX, DE4X5_OMR);
2575 }
2576 irqs = STS_LNF | STS_LNP;
2577 irq_mask = IMR_LFM | IMR_LPM;
2578 sts = test_media(dev, irqs, irq_mask, 0xef01, 0xffff, 0x0008, 2400);
2579 if (sts < 0) {
2580 next_tick = sts & ~TIMER_CB;
2581 } else {
2582 if (sts & STS_LNP) {
2583 lp->media = ANS;
2584 } else {
2585 lp->media = AUI;
2586 }
2587 next_tick = dc21041_autoconf(dev);
2588 }
2589 break;
2590
2591 case ANS:
2592 if (!lp->tx_enable) {
2593 irqs = STS_LNP;
2594 irq_mask = IMR_LPM;
2595 sts = test_ans(dev, irqs, irq_mask, 3000);
2596 if (sts < 0) {
2597 next_tick = sts & ~TIMER_CB;
2598 } else {
2599 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2600 lp->media = TP;
2601 next_tick = dc21041_autoconf(dev);
2602 } else {
2603 lp->local_state = 1;
2604 de4x5_init_connection(dev);
2605 }
2606 }
2607 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2608 lp->media = ANS_SUSPECT;
2609 next_tick = 3000;
2610 }
2611 break;
2612
2613 case ANS_SUSPECT:
2614 next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
2615 break;
2616
2617 case TP:
2618 if (!lp->tx_enable) {
2619 if (lp->timeout < 0) {
2620 omr = inl(DE4X5_OMR);
2621 outl(omr & ~OMR_FDX, DE4X5_OMR);
2622 }
2623 irqs = STS_LNF | STS_LNP;
2624 irq_mask = IMR_LFM | IMR_LPM;
2625 sts = test_media(dev,irqs, irq_mask, 0xef01, 0xff3f, 0x0008, 2400);
2626 if (sts < 0) {
2627 next_tick = sts & ~TIMER_CB;
2628 } else {
2629 if (!(sts & STS_LNP) && (lp->autosense == AUTO)) {
2630 if (inl(DE4X5_SISR) & SISR_NRA) {
2631 lp->media = AUI;
2632 } else {
2633 lp->media = BNC;
2634 }
2635 next_tick = dc21041_autoconf(dev);
2636 } else {
2637 lp->local_state = 1;
2638 de4x5_init_connection(dev);
2639 }
2640 }
2641 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2642 lp->media = TP_SUSPECT;
2643 next_tick = 3000;
2644 }
2645 break;
2646
2647 case TP_SUSPECT:
2648 next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
2649 break;
2650
2651 case AUI:
2652 if (!lp->tx_enable) {
2653 if (lp->timeout < 0) {
2654 omr = inl(DE4X5_OMR);
2655 outl(omr & ~OMR_FDX, DE4X5_OMR);
2656 }
2657 irqs = 0;
2658 irq_mask = 0;
2659 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x000e, 1000);
2660 if (sts < 0) {
2661 next_tick = sts & ~TIMER_CB;
2662 } else {
2663 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
2664 lp->media = BNC;
2665 next_tick = dc21041_autoconf(dev);
2666 } else {
2667 lp->local_state = 1;
2668 de4x5_init_connection(dev);
2669 }
2670 }
2671 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2672 lp->media = AUI_SUSPECT;
2673 next_tick = 3000;
2674 }
2675 break;
2676
2677 case AUI_SUSPECT:
2678 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
2679 break;
2680
2681 case BNC:
2682 switch (lp->local_state) {
2683 case 0:
2684 if (lp->timeout < 0) {
2685 omr = inl(DE4X5_OMR);
2686 outl(omr & ~OMR_FDX, DE4X5_OMR);
2687 }
2688 irqs = 0;
2689 irq_mask = 0;
2690 sts = test_media(dev,irqs, irq_mask, 0xef09, 0xf73d, 0x0006, 1000);
2691 if (sts < 0) {
2692 next_tick = sts & ~TIMER_CB;
2693 } else {
2694 lp->local_state++;
2695 next_tick = dc21041_autoconf(dev);
2696 }
2697 break;
2698
2699 case 1:
2700 if (!lp->tx_enable) {
2701 if ((sts = ping_media(dev, 3000)) < 0) {
2702 next_tick = sts & ~TIMER_CB;
2703 } else {
2704 if (sts) {
2705 lp->local_state = 0;
2706 lp->media = NC;
2707 } else {
2708 de4x5_init_connection(dev);
2709 }
2710 }
2711 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
2712 lp->media = BNC_SUSPECT;
2713 next_tick = 3000;
2714 }
2715 break;
2716 }
2717 break;
2718
2719 case BNC_SUSPECT:
2720 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
2721 break;
2722
2723 case NC:
2724 omr = inl(DE4X5_OMR);
2725 outl(omr | OMR_FDX, DE4X5_OMR);
2726 reset_init_sia(dev, 0xef01, 0xffff, 0x0008);
2727 if (lp->media != lp->c_media) {
2728 de4x5_dbg_media(dev);
2729 lp->c_media = lp->media;
2730 }
2731 lp->media = INIT;
2732 lp->tx_enable = false;
2733 break;
2734 }
2735
2736 return next_tick;
2737 }
2738
2739
2740
2741
2742
2743
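/*
** DC21140[A] media autosense: selects 10Mb or 100Mb operation through the MII
** PHY, either via NWay autonegotiation (ANS) or parallel speed detection
** (SPD_DET), using the SROM GEP sequences when useSROM is set.
*/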
2744 static int
2745 dc21140m_autoconf(struct net_device *dev)
2746 {
2747 struct de4x5_private *lp = netdev_priv(dev);
2748 int ana, anlpa, cap, cr, slnk, sr;
2749 int next_tick = DE4X5_AUTOSENSE_MS;
2750 u_long imr, omr, iobase = dev->base_addr;
2751
2752 switch(lp->media) {
2753 case INIT:
2754 if (lp->timeout < 0) {
2755 DISABLE_IRQs;
2756 lp->tx_enable = false;
2757 lp->linkOK = 0;
2758 de4x5_save_skbs(dev);
2759 }
2760 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2761 next_tick &= ~TIMER_CB;
2762 } else {
2763 if (lp->useSROM) {
2764 if (srom_map_media(dev) < 0) {
2765 lp->tcount++;
2766 return next_tick;
2767 }
2768 srom_exec(dev, lp->phy[lp->active].gep);
2769 if (lp->infoblock_media == ANS) {
2770 ana = lp->phy[lp->active].ana | MII_ANA_CSMA;
2771 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2772 }
2773 } else {
2774 lp->tmp = MII_SR_ASSC;
2775 SET_10Mb;
2776 if (lp->autosense == _100Mb) {
2777 lp->media = _100Mb;
2778 } else if (lp->autosense == _10Mb) {
2779 lp->media = _10Mb;
2780 } else if ((lp->autosense == AUTO) &&
2781 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2782 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2783 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2784 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2785 lp->media = ANS;
2786 } else if (lp->autosense == AUTO) {
2787 lp->media = SPD_DET;
2788 } else if (is_spd_100(dev) && is_100_up(dev)) {
2789 lp->media = _100Mb;
2790 } else {
2791 lp->media = NC;
2792 }
2793 }
2794 lp->local_state = 0;
2795 next_tick = dc21140m_autoconf(dev);
2796 }
2797 break;
2798
2799 case ANS:
2800 switch (lp->local_state) {
2801 case 0:
2802 if (lp->timeout < 0) {
2803 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2804 }
2805 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2806 if (cr < 0) {
2807 next_tick = cr & ~TIMER_CB;
2808 } else {
2809 if (cr) {
2810 lp->local_state = 0;
2811 lp->media = SPD_DET;
2812 } else {
2813 lp->local_state++;
2814 }
2815 next_tick = dc21140m_autoconf(dev);
2816 }
2817 break;
2818
2819 case 1:
2820 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
2821 next_tick = sr & ~TIMER_CB;
2822 } else {
2823 lp->media = SPD_DET;
2824 lp->local_state = 0;
2825 if (sr) {
2826 lp->tmp = MII_SR_ASSC;
2827 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
2828 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2829 if (!(anlpa & MII_ANLPA_RF) &&
2830 (cap = anlpa & MII_ANLPA_TAF & ana)) {
2831 if (cap & MII_ANA_100M) {
2832 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
2833 lp->media = _100Mb;
2834 } else if (cap & MII_ANA_10M) {
2835 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
2836
2837 lp->media = _10Mb;
2838 }
2839 }
2840 }
2841 next_tick = dc21140m_autoconf(dev);
2842 }
2843 break;
2844 }
2845 break;
2846
2847 case SPD_DET:
2848 if (lp->timeout < 0) {
2849 lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
2850 (~gep_rd(dev) & GEP_LNP));
2851 SET_100Mb_PDET;
2852 }
2853 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
2854 next_tick = slnk & ~TIMER_CB;
2855 } else {
2856 if (is_spd_100(dev) && is_100_up(dev)) {
2857 lp->media = _100Mb;
2858 } else if (!is_spd_100(dev) && (is_10_up(dev) & lp->tmp)) {
2859 lp->media = _10Mb;
2860 } else {
2861 lp->media = NC;
2862 }
2863 next_tick = dc21140m_autoconf(dev);
2864 }
2865 break;
2866
2867 case _100Mb:
2868 next_tick = 3000;
2869 if (!lp->tx_enable) {
2870 SET_100Mb;
2871 de4x5_init_connection(dev);
2872 } else {
2873 if (!lp->linkOK && (lp->autosense == AUTO)) {
2874 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
2875 lp->media = INIT;
2876 lp->tcount++;
2877 next_tick = DE4X5_AUTOSENSE_MS;
2878 }
2879 }
2880 }
2881 break;
2882
2883 case BNC:
2884 case AUI:
2885 case _10Mb:
2886 next_tick = 3000;
2887 if (!lp->tx_enable) {
2888 SET_10Mb;
2889 de4x5_init_connection(dev);
2890 } else {
2891 if (!lp->linkOK && (lp->autosense == AUTO)) {
2892 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
2893 lp->media = INIT;
2894 lp->tcount++;
2895 next_tick = DE4X5_AUTOSENSE_MS;
2896 }
2897 }
2898 }
2899 break;
2900
2901 case NC:
2902 if (lp->media != lp->c_media) {
2903 de4x5_dbg_media(dev);
2904 lp->c_media = lp->media;
2905 }
2906 lp->media = INIT;
2907 lp->tx_enable = false;
2908 break;
2909 }
2910
2911 return next_tick;
2912 }
2913
2914
2915
2916
2917
2918
2919
2920
2921
2922
2923
2924
2925
2926
2927
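/*
** DC21142/DC21143 media autosense: as for the DC21140 but also covering the
** on-chip SIA media (TP, AUI, BNC) described by the SROM info blocks.
** Returns the next autosense timer interval in ms.
*/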
2928 static int
2929 dc2114x_autoconf(struct net_device *dev)
2930 {
2931 struct de4x5_private *lp = netdev_priv(dev);
2932 u_long iobase = dev->base_addr;
2933 s32 cr, anlpa, ana, cap, irqs, irq_mask, imr, omr, slnk, sr, sts;
2934 int next_tick = DE4X5_AUTOSENSE_MS;
2935
2936 switch (lp->media) {
2937 case INIT:
2938 if (lp->timeout < 0) {
2939 DISABLE_IRQs;
2940 lp->tx_enable = false;
2941 lp->linkOK = 0;
2942 lp->timeout = -1;
2943 de4x5_save_skbs(dev);
2944 if (lp->params.autosense & ~AUTO) {
2945 srom_map_media(dev);
2946 if (lp->media != lp->params.autosense) {
2947 lp->tcount++;
2948 lp->media = INIT;
2949 return next_tick;
2950 }
2951 lp->media = INIT;
2952 }
2953 }
2954 if ((next_tick = de4x5_reset_phy(dev)) < 0) {
2955 next_tick &= ~TIMER_CB;
2956 } else {
2957 if (lp->autosense == _100Mb) {
2958 lp->media = _100Mb;
2959 } else if (lp->autosense == _10Mb) {
2960 lp->media = _10Mb;
2961 } else if (lp->autosense == TP) {
2962 lp->media = TP;
2963 } else if (lp->autosense == BNC) {
2964 lp->media = BNC;
2965 } else if (lp->autosense == AUI) {
2966 lp->media = AUI;
2967 } else {
2968 lp->media = SPD_DET;
2969 if ((lp->infoblock_media == ANS) &&
2970 ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
2971 ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
2972 ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
2973 mii_wr(ana, MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
2974 lp->media = ANS;
2975 }
2976 }
2977 lp->local_state = 0;
2978 next_tick = dc2114x_autoconf(dev);
2979 }
2980 break;
2981
2982 case ANS:
2983 switch (lp->local_state) {
2984 case 0:
2985 if (lp->timeout < 0) {
2986 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2987 }
2988 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2989 if (cr < 0) {
2990 next_tick = cr & ~TIMER_CB;
2991 } else {
2992 if (cr) {
2993 lp->local_state = 0;
2994 lp->media = SPD_DET;
2995 } else {
2996 lp->local_state++;
2997 }
2998 next_tick = dc2114x_autoconf(dev);
2999 }
3000 break;
3001
3002 case 1:
3003 sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
3004 if (sr < 0) {
3005 next_tick = sr & ~TIMER_CB;
3006 } else {
3007 lp->media = SPD_DET;
3008 lp->local_state = 0;
3009 if (sr) {
3010 lp->tmp = MII_SR_ASSC;
3011 anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
3012 ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
3013 if (!(anlpa & MII_ANLPA_RF) &&
3014 (cap = anlpa & MII_ANLPA_TAF & ana)) {
3015 if (cap & MII_ANA_100M) {
3016 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
3017 lp->media = _100Mb;
3018 } else if (cap & MII_ANA_10M) {
3019 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
3020 lp->media = _10Mb;
3021 }
3022 }
3023 }
3024 next_tick = dc2114x_autoconf(dev);
3025 }
3026 break;
3027 }
3028 break;
3029
3030 case AUI:
3031 if (!lp->tx_enable) {
3032 if (lp->timeout < 0) {
3033 omr = inl(DE4X5_OMR);
3034 outl(omr & ~OMR_FDX, DE4X5_OMR);
3035 }
3036 irqs = 0;
3037 irq_mask = 0;
3038 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3039 if (sts < 0) {
3040 next_tick = sts & ~TIMER_CB;
3041 } else {
3042 if (!(inl(DE4X5_SISR) & SISR_SRA) && (lp->autosense == AUTO)) {
3043 lp->media = BNC;
3044 next_tick = dc2114x_autoconf(dev);
3045 } else {
3046 lp->local_state = 1;
3047 de4x5_init_connection(dev);
3048 }
3049 }
3050 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3051 lp->media = AUI_SUSPECT;
3052 next_tick = 3000;
3053 }
3054 break;
3055
3056 case AUI_SUSPECT:
3057 next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
3058 break;
3059
3060 case BNC:
3061 switch (lp->local_state) {
3062 case 0:
3063 if (lp->timeout < 0) {
3064 omr = inl(DE4X5_OMR);
3065 outl(omr & ~OMR_FDX, DE4X5_OMR);
3066 }
3067 irqs = 0;
3068 irq_mask = 0;
3069 sts = test_media(dev,irqs, irq_mask, 0, 0, 0, 1000);
3070 if (sts < 0) {
3071 next_tick = sts & ~TIMER_CB;
3072 } else {
3073 lp->local_state++;
3074 next_tick = dc2114x_autoconf(dev);
3075 }
3076 break;
3077
3078 case 1:
3079 if (!lp->tx_enable) {
3080 if ((sts = ping_media(dev, 3000)) < 0) {
3081 next_tick = sts & ~TIMER_CB;
3082 } else {
3083 if (sts) {
3084 lp->local_state = 0;
3085 lp->tcount++;
3086 lp->media = INIT;
3087 } else {
3088 de4x5_init_connection(dev);
3089 }
3090 }
3091 } else if (!lp->linkOK && (lp->autosense == AUTO)) {
3092 lp->media = BNC_SUSPECT;
3093 next_tick = 3000;
3094 }
3095 break;
3096 }
3097 break;
3098
3099 case BNC_SUSPECT:
3100 next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
3101 break;
3102
3103 case SPD_DET:
3104 if (srom_map_media(dev) < 0) {
3105 lp->tcount++;
3106 lp->media = INIT;
3107 return next_tick;
3108 }
3109 if (lp->media == _100Mb) {
3110 if ((slnk = test_for_100Mb(dev, 6500)) < 0) {
3111 lp->media = SPD_DET;
3112 return slnk & ~TIMER_CB;
3113 }
3114 } else {
3115 if (wait_for_link(dev) < 0) {
3116 lp->media = SPD_DET;
3117 return PDET_LINK_WAIT;
3118 }
3119 }
3120 if (lp->media == ANS) {
3121 if (is_spd_100(dev)) {
3122 lp->media = _100Mb;
3123 } else {
3124 lp->media = _10Mb;
3125 }
3126 next_tick = dc2114x_autoconf(dev);
3127 } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
3128 (((lp->media == _10Mb) || (lp->media == TP) ||
3129 (lp->media == BNC) || (lp->media == AUI)) &&
3130 is_10_up(dev))) {
3131 next_tick = dc2114x_autoconf(dev);
3132 } else {
3133 lp->tcount++;
3134 lp->media = INIT;
3135 }
3136 break;
3137
3138 case _10Mb:
3139 next_tick = 3000;
3140 if (!lp->tx_enable) {
3141 SET_10Mb;
3142 de4x5_init_connection(dev);
3143 } else {
3144 if (!lp->linkOK && (lp->autosense == AUTO)) {
3145 if (!is_10_up(dev) || (!lp->useSROM && is_spd_100(dev))) {
3146 lp->media = INIT;
3147 lp->tcount++;
3148 next_tick = DE4X5_AUTOSENSE_MS;
3149 }
3150 }
3151 }
3152 break;
3153
3154 case _100Mb:
3155 next_tick = 3000;
3156 if (!lp->tx_enable) {
3157 SET_100Mb;
3158 de4x5_init_connection(dev);
3159 } else {
3160 if (!lp->linkOK && (lp->autosense == AUTO)) {
3161 if (!is_100_up(dev) || (!lp->useSROM && !is_spd_100(dev))) {
3162 lp->media = INIT;
3163 lp->tcount++;
3164 next_tick = DE4X5_AUTOSENSE_MS;
3165 }
3166 }
3167 }
3168 break;
3169
3170 default:
3171 lp->tcount++;
3172 printk("Huh?: media:%02x\n", lp->media);
3173 lp->media = INIT;
3174 break;
3175 }
3176
3177 return next_tick;
3178 }
3179
3180 static int
3181 srom_autoconf(struct net_device *dev)
3182 {
3183 struct de4x5_private *lp = netdev_priv(dev);
3184
3185 return lp->infoleaf_fn(dev);
3186 }
3187
3188
3189
3190
3191
3192
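/*
** Map the SROM info block media code onto the driver's media type, honouring
** the user's full duplex request. Returns -1 if the requested duplex is not
** available on that medium or the media code is unknown.
*/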
3193 static int
3194 srom_map_media(struct net_device *dev)
3195 {
3196 struct de4x5_private *lp = netdev_priv(dev);
3197
3198 lp->fdx = false;
3199 if (lp->infoblock_media == lp->media)
3200 return 0;
3201
3202 switch(lp->infoblock_media) {
3203 case SROM_10BASETF:
3204 if (!lp->params.fdx) return -1;
3205 lp->fdx = true;
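/* fall through */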
3206
3207
3208 case SROM_10BASET:
3209 if (lp->params.fdx && !lp->fdx) return -1;
3210 if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
3211 lp->media = _10Mb;
3212 } else {
3213 lp->media = TP;
3214 }
3215 break;
3216
3217 case SROM_10BASE2:
3218 lp->media = BNC;
3219 break;
3220
3221 case SROM_10BASE5:
3222 lp->media = AUI;
3223 break;
3224
3225 case SROM_100BASETF:
3226 if (!lp->params.fdx) return -1;
3227 lp->fdx = true;
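/* fall through */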
3228
3229
3230 case SROM_100BASET:
3231 if (lp->params.fdx && !lp->fdx) return -1;
3232 lp->media = _100Mb;
3233 break;
3234
3235 case SROM_100BASET4:
3236 lp->media = _100Mb;
3237 break;
3238
3239 case SROM_100BASEFF:
3240 if (!lp->params.fdx) return -1;
3241 lp->fdx = true;
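/* fall through */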
3242
3243
3244 case SROM_100BASEF:
3245 if (lp->params.fdx && !lp->fdx) return -1;
3246 lp->media = _100Mb;
3247 break;
3248
3249 case ANS:
3250 lp->media = ANS;
3251 lp->fdx = lp->params.fdx;
3252 break;
3253
3254 default:
3255 printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
3256 lp->infoblock_media);
3257 return -1;
3258 }
3259
3260 return 0;
3261 }
3262
3263 static void
3264 de4x5_init_connection(struct net_device *dev)
3265 {
3266 struct de4x5_private *lp = netdev_priv(dev);
3267 u_long iobase = dev->base_addr;
3268 u_long flags = 0;
3269
3270 if (lp->media != lp->c_media) {
3271 de4x5_dbg_media(dev);
3272 lp->c_media = lp->media;
3273 }
3274
3275 spin_lock_irqsave(&lp->lock, flags);
3276 de4x5_rst_desc_ring(dev);
3277 de4x5_setup_intr(dev);
3278 lp->tx_enable = true;
3279 spin_unlock_irqrestore(&lp->lock, flags);
3280 outl(POLL_DEMAND, DE4X5_TPD);
3281
3282 netif_wake_queue(dev);
3283 }
3284
3285
3286
3287
3288
3289
3290 static int
3291 de4x5_reset_phy(struct net_device *dev)
3292 {
3293 struct de4x5_private *lp = netdev_priv(dev);
3294 u_long iobase = dev->base_addr;
3295 int next_tick = 0;
3296
3297 if ((lp->useSROM) || (lp->phy[lp->active].id)) {
3298 if (lp->timeout < 0) {
3299 if (lp->useSROM) {
3300 if (lp->phy[lp->active].rst) {
3301 srom_exec(dev, lp->phy[lp->active].rst);
3302 srom_exec(dev, lp->phy[lp->active].rst);
3303 } else if (lp->rst) {
3304 srom_exec(dev, lp->rst);
3305 srom_exec(dev, lp->rst);
3306 }
3307 } else {
3308 PHY_HARD_RESET;
3309 }
3310 if (lp->useMII) {
3311 mii_wr(MII_CR_RST, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
3312 }
3313 }
3314 if (lp->useMII) {
3315 next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
3316 }
3317 } else if (lp->chipset == DC21140) {
3318 PHY_HARD_RESET;
3319 }
3320
3321 return next_tick;
3322 }
3323
3324 static int
3325 test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec)
3326 {
3327 struct de4x5_private *lp = netdev_priv(dev);
3328 u_long iobase = dev->base_addr;
3329 s32 sts, csr12;
3330
3331 if (lp->timeout < 0) {
3332 lp->timeout = msec/100;
3333 if (!lp->useSROM) {
3334 reset_init_sia(dev, csr13, csr14, csr15);
3335 }
3336
3337
3338 outl(irq_mask, DE4X5_IMR);
3339
3340
3341 sts = inl(DE4X5_STS);
3342 outl(sts, DE4X5_STS);
3343
3344
3345 if ((lp->chipset == DC21041) || lp->useSROM) {
3346 csr12 = inl(DE4X5_SISR);
3347 outl(csr12, DE4X5_SISR);
3348 }
3349 }
3350
3351 sts = inl(DE4X5_STS) & ~TIMER_CB;
3352
3353 if (!(sts & irqs) && --lp->timeout) {
3354 sts = 100 | TIMER_CB;
3355 } else {
3356 lp->timeout = -1;
3357 }
3358
3359 return sts;
3360 }
3361
3362 static int
3363 test_tp(struct net_device *dev, s32 msec)
3364 {
3365 struct de4x5_private *lp = netdev_priv(dev);
3366 u_long iobase = dev->base_addr;
3367 int sisr;
3368
3369 if (lp->timeout < 0) {
3370 lp->timeout = msec/100;
3371 }
3372
3373 sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
3374
3375 if (sisr && --lp->timeout) {
3376 sisr = 100 | TIMER_CB;
3377 } else {
3378 lp->timeout = -1;
3379 }
3380
3381 return sisr;
3382 }
3383
3384
3385
3386
3387
3388
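/*
** Look for a 100Mb/s link, sampling every SAMPLE_INTERVAL ms (after an
** initial SAMPLE_DELAY when the allowed time is long enough). Uses the PHY
** status when a MII/SROM PHY is present, otherwise the GEP link pins.
*/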
3389 #define SAMPLE_INTERVAL 500
3390 #define SAMPLE_DELAY 2000
3391 static int
3392 test_for_100Mb(struct net_device *dev, int msec)
3393 {
3394 struct de4x5_private *lp = netdev_priv(dev);
3395 int gep = 0, ret = ((lp->chipset & ~0x00ff) == DC2114x ? -1 : GEP_SLNK);
3396
3397 if (lp->timeout < 0) {
3398 if ((msec/SAMPLE_INTERVAL) <= 0) return 0;
3399 if (msec > SAMPLE_DELAY) {
3400 lp->timeout = (msec - SAMPLE_DELAY)/SAMPLE_INTERVAL;
3401 gep = SAMPLE_DELAY | TIMER_CB;
3402 return gep;
3403 } else {
3404 lp->timeout = msec/SAMPLE_INTERVAL;
3405 }
3406 }
3407
3408 if (lp->phy[lp->active].id || lp->useSROM) {
3409 gep = is_100_up(dev) | is_spd_100(dev);
3410 } else {
3411 gep = (~gep_rd(dev) & (GEP_SLNK | GEP_LNP));
3412 }
3413 if (!(gep & ret) && --lp->timeout) {
3414 gep = SAMPLE_INTERVAL | TIMER_CB;
3415 } else {
3416 lp->timeout = -1;
3417 }
3418
3419 return gep;
3420 }
3421
3422 static int
3423 wait_for_link(struct net_device *dev)
3424 {
3425 struct de4x5_private *lp = netdev_priv(dev);
3426
3427 if (lp->timeout < 0) {
3428 lp->timeout = 1;
3429 }
3430
3431 if (lp->timeout--) {
3432 return TIMER_CB;
3433 } else {
3434 lp->timeout = -1;
3435 }
3436
3437 return 0;
3438 }
3439
3440
3441
3442
3443
3444 static int
3445 test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
3446 {
3447 struct de4x5_private *lp = netdev_priv(dev);
3448 int test;
3449 u_long iobase = dev->base_addr;
3450
3451 if (lp->timeout < 0) {
3452 lp->timeout = msec/100;
3453 }
3454
3455 reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
3456 test = (reg ^ (pol ? ~0 : 0)) & mask;
3457
3458 if (test && --lp->timeout) {
3459 reg = 100 | TIMER_CB;
3460 } else {
3461 lp->timeout = -1;
3462 }
3463
3464 return reg;
3465 }
3466
3467 static int
3468 is_spd_100(struct net_device *dev)
3469 {
3470 struct de4x5_private *lp = netdev_priv(dev);
3471 u_long iobase = dev->base_addr;
3472 int spd;
3473
3474 if (lp->useMII) {
3475 spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
3476 spd = ~(spd ^ lp->phy[lp->active].spd.value);
3477 spd &= lp->phy[lp->active].spd.mask;
3478 } else if (!lp->useSROM) {
3479 spd = ((~gep_rd(dev)) & GEP_SLNK);
3480 } else {
3481 if ((lp->ibn == 2) || !lp->asBitValid)
3482 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3483
3484 spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
3485 (lp->linkOK & ~lp->asBitValid);
3486 }
3487
3488 return spd;
3489 }
3490
3491 static int
3492 is_100_up(struct net_device *dev)
3493 {
3494 struct de4x5_private *lp = netdev_priv(dev);
3495 u_long iobase = dev->base_addr;
3496
3497 if (lp->useMII) {
3498
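/* MII_SR latches link failures - read twice to get the current link state */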
3499 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3500 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3501 } else if (!lp->useSROM) {
3502 return (~gep_rd(dev)) & GEP_SLNK;
3503 } else {
3504 if ((lp->ibn == 2) || !lp->asBitValid)
3505 return (lp->chipset == DC21143) ? (~inl(DE4X5_SISR)&SISR_LS100) : 0;
3506
3507 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3508 (lp->linkOK & ~lp->asBitValid);
3509 }
3510 }
3511
3512 static int
3513 is_10_up(struct net_device *dev)
3514 {
3515 struct de4x5_private *lp = netdev_priv(dev);
3516 u_long iobase = dev->base_addr;
3517
3518 if (lp->useMII) {
3519
3520 mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3521 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII) & MII_SR_LKS;
3522 } else if (!lp->useSROM) {
3523 return (~gep_rd(dev)) & GEP_LNP;
3524 } else {
3525 if ((lp->ibn == 2) || !lp->asBitValid)
3526 return ((lp->chipset & ~0x00ff) == DC2114x) ?
3527 (~inl(DE4X5_SISR)&SISR_LS10):
3528 0;
3529
3530 return (lp->asBitValid&(lp->asPolarity^(gep_rd(dev)&lp->asBit))) |
3531 (lp->linkOK & ~lp->asBitValid);
3532 }
3533 }
3534
3535 static int
3536 is_anc_capable(struct net_device *dev)
3537 {
3538 struct de4x5_private *lp = netdev_priv(dev);
3539 u_long iobase = dev->base_addr;
3540
3541 if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
3542 return mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
3543 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3544 return (inl(DE4X5_SISR) & SISR_LPN) >> 12;
3545 } else {
3546 return 0;
3547 }
3548 }
3549
3550
3551
3552
3553
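/*
** Check that a (BNC/AUI) medium is actually passing traffic: queue the test
** frame in lp->frame, issue a transmit poll demand and watch SISR_NCR and the
** descriptor status. Returns 0 on success, 1 on failure, or a TIMER_CB-tagged
** retry interval while the result is still pending.
*/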
3554 static int
3555 ping_media(struct net_device *dev, int msec)
3556 {
3557 struct de4x5_private *lp = netdev_priv(dev);
3558 u_long iobase = dev->base_addr;
3559 int sisr;
3560
3561 if (lp->timeout < 0) {
3562 lp->timeout = msec/100;
3563
3564 lp->tmp = lp->tx_new;
3565 load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
3566 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
3567 outl(POLL_DEMAND, DE4X5_TPD);
3568 }
3569
3570 sisr = inl(DE4X5_SISR);
3571
3572 if ((!(sisr & SISR_NCR)) &&
3573 ((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
3574 (--lp->timeout)) {
3575 sisr = 100 | TIMER_CB;
3576 } else {
3577 if ((!(sisr & SISR_NCR)) &&
3578 !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
3579 lp->timeout) {
3580 sisr = 0;
3581 } else {
3582 sisr = 1;
3583 }
3584 lp->timeout = -1;
3585 }
3586
3587 return sisr;
3588 }
3589
3590
3591
3592
3593
3594
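/*
** Supply a receive buffer for descriptor 'index'. On most platforms a new,
** DE4X5_ALIGN-aligned skb is handed to the descriptor and the filled one is
** returned (zero copy); on alpha/powerpc/sparc or with DE4X5_DO_MEMCPY the
** data is copied out of the fixed receive buffer area instead.
*/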
3595 static struct sk_buff *
3596 de4x5_alloc_rx_buff(struct net_device *dev, int index, int len)
3597 {
3598 struct de4x5_private *lp = netdev_priv(dev);
3599 struct sk_buff *p;
3600
3601 #if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
3602 struct sk_buff *ret;
3603 u_long i=0, tmp;
3604
3605 p = netdev_alloc_skb(dev, IEEE802_3_SZ + DE4X5_ALIGN + 2);
3606 if (!p) return NULL;
3607
3608 tmp = virt_to_bus(p->data);
3609 i = ((tmp + DE4X5_ALIGN) & ~DE4X5_ALIGN) - tmp;
3610 skb_reserve(p, i);
3611 lp->rx_ring[index].buf = cpu_to_le32(tmp + i);
3612
3613 ret = lp->rx_skb[index];
3614 lp->rx_skb[index] = p;
3615
3616 if ((u_long) ret > 1) {
3617 skb_put(ret, len);
3618 }
3619
3620 return ret;
3621
3622 #else
3623 if (lp->state != OPEN) return (struct sk_buff *)1;
3624
3625 p = netdev_alloc_skb(dev, len + 2);
3626 if (!p) return NULL;
3627
3628 skb_reserve(p, 2);
3629 if (index < lp->rx_old) {
3630 short tlen = (lp->rxRingSize - lp->rx_old) * RX_BUFF_SZ;
3631 skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, tlen);
3632 skb_put_data(p, lp->rx_bufs, len - tlen);
3633 } else {
3634 skb_put_data(p, lp->rx_bufs + lp->rx_old * RX_BUFF_SZ, len);
3635 }
3636
3637 return p;
3638 #endif
3639 }
3640
3641 static void
3642 de4x5_free_rx_buffs(struct net_device *dev)
3643 {
3644 struct de4x5_private *lp = netdev_priv(dev);
3645 int i;
3646
3647 for (i=0; i<lp->rxRingSize; i++) {
3648 if ((u_long) lp->rx_skb[i] > 1) {
3649 dev_kfree_skb(lp->rx_skb[i]);
3650 }
3651 lp->rx_ring[i].status = 0;
3652 lp->rx_skb[i] = (struct sk_buff *)1;
3653 }
3654 }
3655
3656 static void
3657 de4x5_free_tx_buffs(struct net_device *dev)
3658 {
3659 struct de4x5_private *lp = netdev_priv(dev);
3660 int i;
3661
3662 for (i=0; i<lp->txRingSize; i++) {
3663 if (lp->tx_skb[i])
3664 de4x5_free_tx_buff(lp, i);
3665 lp->tx_ring[i].status = 0;
3666 }
3667
3668
3669 __skb_queue_purge(&lp->cache.queue);
3670 }
3671
3672
3673
3674
3675
3676
3677
3678
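/*
** Quiesce the chip around a media change: reclaim completed transmits,
** release the remaining transmit buffers and preserve CSR0/6/7 across the
** software reset. Balanced by de4x5_rst_desc_ring().
*/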
3679 static void
3680 de4x5_save_skbs(struct net_device *dev)
3681 {
3682 struct de4x5_private *lp = netdev_priv(dev);
3683 u_long iobase = dev->base_addr;
3684 s32 omr;
3685
3686 if (!lp->cache.save_cnt) {
3687 STOP_DE4X5;
3688 de4x5_tx(dev);
3689 de4x5_free_tx_buffs(dev);
3690 de4x5_cache_state(dev, DE4X5_SAVE_STATE);
3691 de4x5_sw_reset(dev);
3692 de4x5_cache_state(dev, DE4X5_RESTORE_STATE);
3693 lp->cache.save_cnt++;
3694 START_DE4X5;
3695 }
3696 }
3697
3698 static void
3699 de4x5_rst_desc_ring(struct net_device *dev)
3700 {
3701 struct de4x5_private *lp = netdev_priv(dev);
3702 u_long iobase = dev->base_addr;
3703 int i;
3704 s32 omr;
3705
3706 if (lp->cache.save_cnt) {
3707 STOP_DE4X5;
3708 outl(lp->dma_rings, DE4X5_RRBA);
3709 outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
3710 DE4X5_TRBA);
3711
3712 lp->rx_new = lp->rx_old = 0;
3713 lp->tx_new = lp->tx_old = 0;
3714
3715 for (i = 0; i < lp->rxRingSize; i++) {
3716 lp->rx_ring[i].status = cpu_to_le32(R_OWN);
3717 }
3718
3719 for (i = 0; i < lp->txRingSize; i++) {
3720 lp->tx_ring[i].status = cpu_to_le32(0);
3721 }
3722
3723 barrier();
3724 lp->cache.save_cnt--;
3725 START_DE4X5;
3726 }
3727 }
3728
3729 static void
3730 de4x5_cache_state(struct net_device *dev, int flag)
3731 {
3732 struct de4x5_private *lp = netdev_priv(dev);
3733 u_long iobase = dev->base_addr;
3734
3735 switch(flag) {
3736 case DE4X5_SAVE_STATE:
3737 lp->cache.csr0 = inl(DE4X5_BMR);
3738 lp->cache.csr6 = (inl(DE4X5_OMR) & ~(OMR_ST | OMR_SR));
3739 lp->cache.csr7 = inl(DE4X5_IMR);
3740 break;
3741
3742 case DE4X5_RESTORE_STATE:
3743 outl(lp->cache.csr0, DE4X5_BMR);
3744 outl(lp->cache.csr6, DE4X5_OMR);
3745 outl(lp->cache.csr7, DE4X5_IMR);
3746 if (lp->chipset == DC21140) {
3747 gep_wr(lp->cache.gepc, dev);
3748 gep_wr(lp->cache.gep, dev);
3749 } else {
3750 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
3751 lp->cache.csr15);
3752 }
3753 break;
3754 }
3755 }
3756
3757 static void
3758 de4x5_put_cache(struct net_device *dev, struct sk_buff *skb)
3759 {
3760 struct de4x5_private *lp = netdev_priv(dev);
3761
3762 __skb_queue_tail(&lp->cache.queue, skb);
3763 }
3764
3765 static void
3766 de4x5_putb_cache(struct net_device *dev, struct sk_buff *skb)
3767 {
3768 struct de4x5_private *lp = netdev_priv(dev);
3769
3770 __skb_queue_head(&lp->cache.queue, skb);
3771 }
3772
3773 static struct sk_buff *
3774 de4x5_get_cache(struct net_device *dev)
3775 {
3776 struct de4x5_private *lp = netdev_priv(dev);
3777
3778 return __skb_dequeue(&lp->cache.queue);
3779 }
3780
3781
3782
3783
3784
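/*
** Poll until NWay autonegotiation reports completion (SISR_ANS == ANS_NWOK)
** or one of the requested status bits in 'irqs' appears, within 'msec' ms.
*/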
3785 static int
3786 test_ans(struct net_device *dev, s32 irqs, s32 irq_mask, s32 msec)
3787 {
3788 struct de4x5_private *lp = netdev_priv(dev);
3789 u_long iobase = dev->base_addr;
3790 s32 sts, ans;
3791
3792 if (lp->timeout < 0) {
3793 lp->timeout = msec/100;
3794 outl(irq_mask, DE4X5_IMR);
3795
3796
3797 sts = inl(DE4X5_STS);
3798 outl(sts, DE4X5_STS);
3799 }
3800
3801 ans = inl(DE4X5_SISR) & SISR_ANS;
3802 sts = inl(DE4X5_STS) & ~TIMER_CB;
3803
3804 if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
3805 sts = 100 | TIMER_CB;
3806 } else {
3807 lp->timeout = -1;
3808 }
3809
3810 return sts;
3811 }
3812
3813 static void
3814 de4x5_setup_intr(struct net_device *dev)
3815 {
3816 struct de4x5_private *lp = netdev_priv(dev);
3817 u_long iobase = dev->base_addr;
3818 s32 imr, sts;
3819
3820 if (inl(DE4X5_OMR) & OMR_SR) {
3821 imr = 0;
3822 UNMASK_IRQs;
3823 sts = inl(DE4X5_STS);
3824 outl(sts, DE4X5_STS);
3825 ENABLE_IRQs;
3826 }
3827 }
3828
3829
3830
3831
3832 static void
3833 reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
3834 {
3835 struct de4x5_private *lp = netdev_priv(dev);
3836 u_long iobase = dev->base_addr;
3837
3838 RESET_SIA;
3839 if (lp->useSROM) {
3840 if (lp->ibn == 3) {
3841 srom_exec(dev, lp->phy[lp->active].rst);
3842 srom_exec(dev, lp->phy[lp->active].gep);
3843 outl(1, DE4X5_SICR);
3844 return;
3845 } else {
3846 csr15 = lp->cache.csr15;
3847 csr14 = lp->cache.csr14;
3848 csr13 = lp->cache.csr13;
3849 outl(csr15 | lp->cache.gepc, DE4X5_SIGR);
3850 outl(csr15 | lp->cache.gep, DE4X5_SIGR);
3851 }
3852 } else {
3853 outl(csr15, DE4X5_SIGR);
3854 }
3855 outl(csr14, DE4X5_STRR);
3856 outl(csr13, DE4X5_SICR);
3857
3858 mdelay(10);
3859 }
3860
3861
3862
3863
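/*
** Build a minimal test frame addressed from and to our own station address,
** with a 2-byte length field of 0x0001.
*/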
3864 static void
3865 create_packet(struct net_device *dev, char *frame, int len)
3866 {
3867 int i;
3868 char *buf = frame;
3869
3870 for (i=0; i<ETH_ALEN; i++) {
3871 *buf++ = dev->dev_addr[i];
3872 }
3873 for (i=0; i<ETH_ALEN; i++) {
3874 *buf++ = dev->dev_addr[i];
3875 }
3876
3877 *buf++ = 0;
3878 *buf++ = 1;
3879 }
3880
3881
3882
3883
3884 static int
3885 EISA_signature(char *name, struct device *device)
3886 {
3887 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3888 struct eisa_device *edev;
3889
3890 *name = '\0';
3891 edev = to_eisa_device (device);
3892 i = edev->id.driver_data;
3893
3894 if (i >= 0 && i < siglen) {
3895 strcpy (name, de4x5_signatures[i]);
3896 status = 1;
3897 }
3898
3899 return status;
3900 }
3901
3902
3903
3904
3905 static int
3906 PCI_signature(char *name, struct de4x5_private *lp)
3907 {
3908 int i, status = 0, siglen = ARRAY_SIZE(de4x5_signatures);
3909
3910 if (lp->chipset == DC21040) {
3911 strcpy(name, "DE434/5");
3912 return status;
3913 } else {
3914 int tmp = *((char *)&lp->srom + 19) * 3;
3915 strncpy(name, (char *)&lp->srom + 26 + tmp, 8);
3916 }
3917 name[8] = '\0';
3918 for (i=0; i<siglen; i++) {
3919 if (strstr(name,de4x5_signatures[i])!=NULL) break;
3920 }
3921 if (i == siglen) {
3922 if (dec_only) {
3923 *name = '\0';
3924 } else {
3925 strcpy(name, (((lp->chipset == DC21040) ? "DC21040" :
3926 ((lp->chipset == DC21041) ? "DC21041" :
3927 ((lp->chipset == DC21140) ? "DC21140" :
3928 ((lp->chipset == DC21142) ? "DC21142" :
3929 ((lp->chipset == DC21143) ? "DC21143" : "UNKNOWN"
3930 )))))));
3931 }
3932 if (lp->chipset != DC21041) {
3933 lp->useSROM = true;
3934 }
3935 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3936 lp->useSROM = true;
3937 }
3938
3939 return status;
3940 }
3941
3942
3943
3944
3945
3946
3947
3948
3949
3950 static void
3951 DevicePresent(struct net_device *dev, u_long aprom_addr)
3952 {
3953 int i, j=0;
3954 struct de4x5_private *lp = netdev_priv(dev);
3955
3956 if (lp->chipset == DC21040) {
3957 if (lp->bus == EISA) {
3958 enet_addr_rst(aprom_addr);
3959 } else {
3960 outl(0, aprom_addr);
3961 }
3962 } else {
3963 u_short tmp;
3964 __le16 *p = (__le16 *)((char *)&lp->srom + SROM_HWADD);
3965 for (i=0; i<(ETH_ALEN>>1); i++) {
3966 tmp = srom_rd(aprom_addr, (SROM_HWADD>>1) + i);
3967 j += tmp;
3968 *p = cpu_to_le16(tmp);
3969 }
3970 if (j == 0 || j == 3 * 0xffff) {
3971
3972 return;
3973 }
3974
3975 p = (__le16 *)&lp->srom;
3976 for (i=0; i<(sizeof(struct de4x5_srom)>>1); i++) {
3977 tmp = srom_rd(aprom_addr, i);
3978 *p++ = cpu_to_le16(tmp);
3979 }
3980 de4x5_dbg_srom(&lp->srom);
3981 }
3982 }
3983
3984
3985
3986
3987
3988
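/*
** Scan the EISA address PROM for the repeated ETH_PROM_SIG signature so that
** subsequent station address reads start from a known offset.
*/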
3989 static void
3990 enet_addr_rst(u_long aprom_addr)
3991 {
3992 union {
3993 struct {
3994 u32 a;
3995 u32 b;
3996 } llsig;
3997 char Sig[sizeof(u32) << 1];
3998 } dev;
3999 short sigLength=0;
4000 s8 data;
4001 int i, j;
4002
4003 dev.llsig.a = ETH_PROM_SIG;
4004 dev.llsig.b = ETH_PROM_SIG;
4005 sigLength = sizeof(u32) << 1;
4006
4007 for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
4008 data = inb(aprom_addr);
4009 if (dev.Sig[j] == data) {
4010 j++;
4011 } else {
4012 if (data == dev.Sig[0]) {
4013 j=1;
4014 } else {
4015 j=0;
4016 }
4017 }
4018 }
4019 }
4020
4021
4022
4023
4024
4025
4026
4027 static int
4028 get_hw_addr(struct net_device *dev)
4029 {
4030 u_long iobase = dev->base_addr;
4031 int broken, i, k, tmp, status = 0;
4032 u_short j,chksum;
4033 struct de4x5_private *lp = netdev_priv(dev);
4034
4035 broken = de4x5_bad_srom(lp);
4036
4037 for (i=0,k=0,j=0;j<3;j++) {
4038 k <<= 1;
4039 if (k > 0xffff) k-=0xffff;
4040
4041 if (lp->bus == PCI) {
4042 if (lp->chipset == DC21040) {
4043 while ((tmp = inl(DE4X5_APROM)) < 0);
4044 k += (u_char) tmp;
4045 dev->dev_addr[i++] = (u_char) tmp;
4046 while ((tmp = inl(DE4X5_APROM)) < 0);
4047 k += (u_short) (tmp << 8);
4048 dev->dev_addr[i++] = (u_char) tmp;
4049 } else if (!broken) {
4050 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4051 dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
4052 } else if ((broken == SMC) || (broken == ACCTON)) {
4053 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4054 dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
4055 }
4056 } else {
4057 k += (u_char) (tmp = inb(EISA_APROM));
4058 dev->dev_addr[i++] = (u_char) tmp;
4059 k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
4060 dev->dev_addr[i++] = (u_char) tmp;
4061 }
4062
4063 if (k > 0xffff) k-=0xffff;
4064 }
4065 if (k == 0xffff) k=0;
4066
4067 if (lp->bus == PCI) {
4068 if (lp->chipset == DC21040) {
4069 while ((tmp = inl(DE4X5_APROM)) < 0);
4070 chksum = (u_char) tmp;
4071 while ((tmp = inl(DE4X5_APROM)) < 0);
4072 chksum |= (u_short) (tmp << 8);
4073 if ((k != chksum) && (dec_only)) status = -1;
4074 }
4075 } else {
4076 chksum = (u_char) inb(EISA_APROM);
4077 chksum |= (u_short) (inb(EISA_APROM) << 8);
4078 if ((k != chksum) && (dec_only)) status = -1;
4079 }
4080
4081
4082 srom_repair(dev, broken);
4083
4084 #ifdef CONFIG_PPC_PMAC
4085
4086
4087
4088
4089 if ( machine_is(powermac) &&
4090 (dev->dev_addr[0] == 0) &&
4091 (dev->dev_addr[1] == 0xa0) )
4092 {
4093 for (i = 0; i < ETH_ALEN; ++i)
4094 {
4095 int x = dev->dev_addr[i];
4096 x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
4097 x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
4098 dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
4099 }
4100 }
4101 #endif
4102
4103
4104 status = test_bad_enet(dev, status);
4105
4106 return status;
4107 }
4108
4109
4110
4111
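/*
** Detect known broken SROM layouts: returns SMC or ACCTON when the matching
** vendor prefix from enet_det[] appears at both offset 0 and offset 0x10 of
** the SROM image, otherwise 0.
*/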
4112 static int
4113 de4x5_bad_srom(struct de4x5_private *lp)
4114 {
4115 int i, status = 0;
4116
4117 for (i = 0; i < ARRAY_SIZE(enet_det); i++) {
4118 if (!memcmp(&lp->srom, &enet_det[i], 3) &&
4119 !memcmp((char *)&lp->srom+0x10, &enet_det[i], 3)) {
4120 if (i == 0) {
4121 status = SMC;
4122 } else if (i == 1) {
4123 status = ACCTON;
4124 }
4125 break;
4126 }
4127 }
4128
4129 return status;
4130 }
4131
4132 static void
4133 srom_repair(struct net_device *dev, int card)
4134 {
4135 struct de4x5_private *lp = netdev_priv(dev);
4136
4137 switch(card) {
4138 case SMC:
4139 memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
4140 memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
4141 memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
4142 lp->useSROM = true;
4143 break;
4144 }
4145 }
4146
4147
4148
4149
4150
4151 static int
4152 test_bad_enet(struct net_device *dev, int status)
4153 {
4154 struct de4x5_private *lp = netdev_priv(dev);
4155 int i, tmp;
4156
4157 for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
4158 if ((tmp == 0) || (tmp == 0x5fa)) {
4159 if ((lp->chipset == last.chipset) &&
4160 (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
4161 for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
4162 for (i=ETH_ALEN-1; i>2; --i) {
4163 dev->dev_addr[i] += 1;
4164 if (dev->dev_addr[i] != 0) break;
4165 }
4166 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4167 if (!an_exception(lp)) {
4168 dev->irq = last.irq;
4169 }
4170
4171 status = 0;
4172 }
4173 } else if (!status) {
4174 last.chipset = lp->chipset;
4175 last.bus = lp->bus_num;
4176 last.irq = dev->irq;
4177 for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
4178 }
4179
4180 return status;
4181 }
4182
4183
4184
4185
4186 static int
4187 an_exception(struct de4x5_private *lp)
4188 {
4189 if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
4190 (*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
4191 return -1;
4192 }
4193
4194 return 0;
4195 }
4196
4197
4198
4199
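/*
** Bit-banged read of one 16-bit word from the MicroWire-style serial SROM:
** assert chip select, clock out the read command and the 6-bit word address,
** then clock in 16 data bits (srom_data).
*/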
4200 static short
4201 srom_rd(u_long addr, u_char offset)
4202 {
4203 sendto_srom(SROM_RD | SROM_SR, addr);
4204
4205 srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
4206 srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
4207 srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
4208
4209 return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
4210 }
4211
4212 static void
4213 srom_latch(u_int command, u_long addr)
4214 {
4215 sendto_srom(command, addr);
4216 sendto_srom(command | DT_CLK, addr);
4217 sendto_srom(command, addr);
4218 }
4219
4220 static void
4221 srom_command(u_int command, u_long addr)
4222 {
4223 srom_latch(command, addr);
4224 srom_latch(command, addr);
4225 srom_latch((command & 0x0000ff00) | DT_CS, addr);
4226 }
4227
4228 static void
4229 srom_address(u_int command, u_long addr, u_char offset)
4230 {
4231 int i, a;
4232
4233 a = offset << 2;
4234 for (i=0; i<6; i++, a <<= 1) {
4235 srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
4236 }
4237 udelay(1);
4238
4239 i = (getfrom_srom(addr) >> 3) & 0x01;
4240 }
4241
4242 static short
4243 srom_data(u_int command, u_long addr)
4244 {
4245 int i;
4246 short word = 0;
4247 s32 tmp;
4248
4249 for (i=0; i<16; i++) {
4250 sendto_srom(command | DT_CLK, addr);
4251 tmp = getfrom_srom(addr);
4252 sendto_srom(command, addr);
4253
4254 word = (word << 1) | ((tmp >> 3) & 0x01);
4255 }
4256
4257 sendto_srom(command & 0x0000ff00, addr);
4258
4259 return word;
4260 }
4261
4262
4263
4264
4265
4266
4267
4268
4269
4270
4271
4272
4273
4274
4275
4276 static void
4277 sendto_srom(u_int command, u_long addr)
4278 {
4279 outl(command, addr);
4280 udelay(1);
4281 }
4282
4283 static int
4284 getfrom_srom(u_long addr)
4285 {
4286 s32 tmp;
4287
4288 tmp = inl(addr);
4289 udelay(1);
4290
4291 return tmp;
4292 }
4293
4294 static int
4295 srom_infoleaf_info(struct net_device *dev)
4296 {
4297 struct de4x5_private *lp = netdev_priv(dev);
4298 int i, count;
4299 u_char *p;
4300
4301
4302 for (i=0; i<INFOLEAF_SIZE; i++) {
4303 if (lp->chipset == infoleaf_array[i].chipset) break;
4304 }
4305 if (i == INFOLEAF_SIZE) {
4306 lp->useSROM = false;
4307 printk("%s: Cannot find correct chipset for SROM decoding!\n",
4308 dev->name);
4309 return -ENXIO;
4310 }
4311
4312 lp->infoleaf_fn = infoleaf_array[i].fn;
4313
4314
4315 count = *((u_char *)&lp->srom + 19);
4316 p = (u_char *)&lp->srom + 26;
4317
4318 if (count > 1) {
4319 for (i=count; i; --i, p+=3) {
4320 if (lp->device == *p) break;
4321 }
4322 if (i == 0) {
4323 lp->useSROM = false;
4324 printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
4325 dev->name, lp->device);
4326 return -ENXIO;
4327 }
4328 }
4329
4330 lp->infoleaf_offset = get_unaligned_le16(p + 1);
4331
4332 return 0;
4333 }
4334
4335
4336
4337
4338
4339
4340
4341
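/*
** Walk the SROM info leaf once at initialisation: set up the DC21140 GEP
** control word and let the type 1, 3 and 5 info blocks record their PHY
** parameters and run their reset sequences.
*/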
4342 static void
4343 srom_init(struct net_device *dev)
4344 {
4345 struct de4x5_private *lp = netdev_priv(dev);
4346 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4347 u_char count;
4348
4349 p+=2;
4350 if (lp->chipset == DC21140) {
4351 lp->cache.gepc = (*p++ | GEP_CTRL);
4352 gep_wr(lp->cache.gepc, dev);
4353 }
4354
4355
4356 count = *p++;
4357
4358
4359 for (;count; --count) {
4360 if (*p < 128) {
4361 p += COMPACT_LEN;
4362 } else if (*(p+1) == 5) {
4363 type5_infoblock(dev, 1, p);
4364 p += ((*p & BLOCK_LEN) + 1);
4365 } else if (*(p+1) == 4) {
4366 p += ((*p & BLOCK_LEN) + 1);
4367 } else if (*(p+1) == 3) {
4368 type3_infoblock(dev, 1, p);
4369 p += ((*p & BLOCK_LEN) + 1);
4370 } else if (*(p+1) == 2) {
4371 p += ((*p & BLOCK_LEN) + 1);
4372 } else if (*(p+1) == 1) {
4373 type1_infoblock(dev, 1, p);
4374 p += ((*p & BLOCK_LEN) + 1);
4375 } else {
4376 p += ((*p & BLOCK_LEN) + 1);
4377 }
4378 }
4379 }
4380
4381
4382
4383
4384
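/*
** Execute a GEP/reset sequence from an SROM info block: each element is
** written via gep_wr() (GEP on the DC21140, CSR15 otherwise) with a 2ms
** delay, after which CSR13/14 are restored on non-DC21140 chips.
*/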
4385 static void
4386 srom_exec(struct net_device *dev, u_char *p)
4387 {
4388 struct de4x5_private *lp = netdev_priv(dev);
4389 u_long iobase = dev->base_addr;
4390 u_char count = (p ? *p++ : 0);
4391 u_short *w = (u_short *)p;
4392
4393 if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
4394
4395 if (lp->chipset != DC21140) RESET_SIA;
4396
4397 while (count--) {
4398 gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
4399 *p++ : get_unaligned_le16(w++)), dev);
4400 mdelay(2);
4401 }
4402
4403 if (lp->chipset != DC21140) {
4404 outl(lp->cache.csr14, DE4X5_STRR);
4405 outl(lp->cache.csr13, DE4X5_SICR);
4406 }
4407 }
4408
4409
4410
4411
4412
4413
4414 static int
4415 dc21041_infoleaf(struct net_device *dev)
4416 {
4417 return DE4X5_AUTOSENSE_MS;
4418 }
4419
4420 static int
4421 dc21140_infoleaf(struct net_device *dev)
4422 {
4423 struct de4x5_private *lp = netdev_priv(dev);
4424 u_char count = 0;
4425 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4426 int next_tick = DE4X5_AUTOSENSE_MS;
4427
4428
4429 p+=2;
4430
4431
4432 lp->cache.gepc = (*p++ | GEP_CTRL);
4433
4434
4435 count = *p++;
4436
4437
4438 if (*p < 128) {
4439 next_tick = dc_infoblock[COMPACT](dev, count, p);
4440 } else {
4441 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4442 }
4443
4444 if (lp->tcount == count) {
4445 lp->media = NC;
4446 if (lp->media != lp->c_media) {
4447 de4x5_dbg_media(dev);
4448 lp->c_media = lp->media;
4449 }
4450 lp->media = INIT;
4451 lp->tcount = 0;
4452 lp->tx_enable = false;
4453 }
4454
4455 return next_tick & ~TIMER_CB;
4456 }
4457
4458 static int
4459 dc21142_infoleaf(struct net_device *dev)
4460 {
4461 struct de4x5_private *lp = netdev_priv(dev);
4462 u_char count = 0;
4463 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4464 int next_tick = DE4X5_AUTOSENSE_MS;
4465
4466
4467 p+=2;
4468
4469
4470 count = *p++;
4471
4472
4473 if (*p < 128) {
4474 next_tick = dc_infoblock[COMPACT](dev, count, p);
4475 } else {
4476 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4477 }
4478
4479 if (lp->tcount == count) {
4480 lp->media = NC;
4481 if (lp->media != lp->c_media) {
4482 de4x5_dbg_media(dev);
4483 lp->c_media = lp->media;
4484 }
4485 lp->media = INIT;
4486 lp->tcount = 0;
4487 lp->tx_enable = false;
4488 }
4489
4490 return next_tick & ~TIMER_CB;
4491 }
4492
4493 static int
4494 dc21143_infoleaf(struct net_device *dev)
4495 {
4496 struct de4x5_private *lp = netdev_priv(dev);
4497 u_char count = 0;
4498 u_char *p = (u_char *)&lp->srom + lp->infoleaf_offset;
4499 int next_tick = DE4X5_AUTOSENSE_MS;
4500
4501
4502 p+=2;
4503
4504
4505 count = *p++;
4506
4507
4508 if (*p < 128) {
4509 next_tick = dc_infoblock[COMPACT](dev, count, p);
4510 } else {
4511 next_tick = dc_infoblock[*(p+1)](dev, count, p);
4512 }
4513 if (lp->tcount == count) {
4514 lp->media = NC;
4515 if (lp->media != lp->c_media) {
4516 de4x5_dbg_media(dev);
4517 lp->c_media = lp->media;
4518 }
4519 lp->media = INIT;
4520 lp->tcount = 0;
4521 lp->tx_enable = false;
4522 }
4523
4524 return next_tick & ~TIMER_CB;
4525 }
4526
4527
4528
4529
4530
4531 static int
4532 compact_infoblock(struct net_device *dev, u_char count, u_char *p)
4533 {
4534 struct de4x5_private *lp = netdev_priv(dev);
4535 u_char flags, csr6;
4536
4537
4538 if (--count > lp->tcount) {
4539 if (*(p+COMPACT_LEN) < 128) {
4540 return dc_infoblock[COMPACT](dev, count, p+COMPACT_LEN);
4541 } else {
4542 return dc_infoblock[*(p+COMPACT_LEN+1)](dev, count, p+COMPACT_LEN);
4543 }
4544 }
4545
4546 if ((lp->media == INIT) && (lp->timeout < 0)) {
4547 lp->ibn = COMPACT;
4548 lp->active = 0;
4549 gep_wr(lp->cache.gepc, dev);
4550 lp->infoblock_media = (*p++) & COMPACT_MC;
4551 lp->cache.gep = *p++;
4552 csr6 = *p++;
4553 flags = *p++;
4554
4555 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4556 lp->defMedium = (flags & 0x40) ? -1 : 0;
4557 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4558 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4559 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4560 lp->useMII = false;
4561
4562 de4x5_switch_mac_port(dev);
4563 }
4564
4565 return dc21140m_autoconf(dev);
4566 }
4567
4568
4569
4570
4571 static int
4572 type0_infoblock(struct net_device *dev, u_char count, u_char *p)
4573 {
4574 struct de4x5_private *lp = netdev_priv(dev);
4575 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4576
4577
4578 if (--count > lp->tcount) {
4579 if (*(p+len) < 128) {
4580 return dc_infoblock[COMPACT](dev, count, p+len);
4581 } else {
4582 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4583 }
4584 }
4585
4586 if ((lp->media == INIT) && (lp->timeout < 0)) {
4587 lp->ibn = 0;
4588 lp->active = 0;
4589 gep_wr(lp->cache.gepc, dev);
4590 p+=2;
4591 lp->infoblock_media = (*p++) & BLOCK0_MC;
4592 lp->cache.gep = *p++;
4593 csr6 = *p++;
4594 flags = *p++;
4595
4596 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4597 lp->defMedium = (flags & 0x40) ? -1 : 0;
4598 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4599 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4600 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4601 lp->useMII = false;
4602
4603 de4x5_switch_mac_port(dev);
4604 }
4605
4606 return dc21140m_autoconf(dev);
4607 }
4608
4609
4610
4611 static int
4612 type1_infoblock(struct net_device *dev, u_char count, u_char *p)
4613 {
4614 struct de4x5_private *lp = netdev_priv(dev);
4615 u_char len = (*p & BLOCK_LEN)+1;
4616
4617
4618 if (--count > lp->tcount) {
4619 if (*(p+len) < 128) {
4620 return dc_infoblock[COMPACT](dev, count, p+len);
4621 } else {
4622 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4623 }
4624 }
4625
4626 p += 2;
4627 if (lp->state == INITIALISED) {
4628 lp->ibn = 1;
4629 lp->active = *p++;
4630 lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
4631 lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
4632 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4633 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4634 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4635 lp->phy[lp->active].ttm = get_unaligned_le16(p);
4636 return 0;
4637 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4638 lp->ibn = 1;
4639 lp->active = *p;
4640 lp->infoblock_csr6 = OMR_MII_100;
4641 lp->useMII = true;
4642 lp->infoblock_media = ANS;
4643
4644 de4x5_switch_mac_port(dev);
4645 }
4646
4647 return dc21140m_autoconf(dev);
4648 }
4649
4650 static int
4651 type2_infoblock(struct net_device *dev, u_char count, u_char *p)
4652 {
4653 struct de4x5_private *lp = netdev_priv(dev);
4654 u_char len = (*p & BLOCK_LEN)+1;
4655
4656
4657 if (--count > lp->tcount) {
4658 if (*(p+len) < 128) {
4659 return dc_infoblock[COMPACT](dev, count, p+len);
4660 } else {
4661 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4662 }
4663 }
4664
4665 if ((lp->media == INIT) && (lp->timeout < 0)) {
4666 lp->ibn = 2;
4667 lp->active = 0;
4668 p += 2;
4669 lp->infoblock_media = (*p) & MEDIA_CODE;
4670
4671 if ((*p++) & EXT_FIELD) {
4672 lp->cache.csr13 = get_unaligned_le16(p); p += 2;
4673 lp->cache.csr14 = get_unaligned_le16(p); p += 2;
4674 lp->cache.csr15 = get_unaligned_le16(p); p += 2;
4675 } else {
4676 lp->cache.csr13 = CSR13;
4677 lp->cache.csr14 = CSR14;
4678 lp->cache.csr15 = CSR15;
4679 }
4680 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4681 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16);
4682 lp->infoblock_csr6 = OMR_SIA;
4683 lp->useMII = false;
4684
4685 de4x5_switch_mac_port(dev);
4686 }
4687
4688 return dc2114x_autoconf(dev);
4689 }
4690
4691 static int
4692 type3_infoblock(struct net_device *dev, u_char count, u_char *p)
4693 {
4694 struct de4x5_private *lp = netdev_priv(dev);
4695 u_char len = (*p & BLOCK_LEN)+1;
4696
4697
4698 if (--count > lp->tcount) {
4699 if (*(p+len) < 128) {
4700 return dc_infoblock[COMPACT](dev, count, p+len);
4701 } else {
4702 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4703 }
4704 }
4705
4706 p += 2;
4707 if (lp->state == INITIALISED) {
4708 lp->ibn = 3;
4709 lp->active = *p++;
4710 if (MOTO_SROM_BUG) lp->active = 0;
4711 lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
4712 lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
4713 lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2;
4714 lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
4715 lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
4716 lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
4717 lp->phy[lp->active].mci = *p;
4718 return 0;
4719 } else if ((lp->media == INIT) && (lp->timeout < 0)) {
4720 lp->ibn = 3;
4721 lp->active = *p;
4722 if (MOTO_SROM_BUG) lp->active = 0;
4723 lp->infoblock_csr6 = OMR_MII_100;
4724 lp->useMII = true;
4725 lp->infoblock_media = ANS;
4726
4727 de4x5_switch_mac_port(dev);
4728 }
4729
4730 return dc2114x_autoconf(dev);
4731 }
4732
4733 static int
4734 type4_infoblock(struct net_device *dev, u_char count, u_char *p)
4735 {
4736 struct de4x5_private *lp = netdev_priv(dev);
4737 u_char flags, csr6, len = (*p & BLOCK_LEN)+1;
4738
4739
4740 if (--count > lp->tcount) {
4741 if (*(p+len) < 128) {
4742 return dc_infoblock[COMPACT](dev, count, p+len);
4743 } else {
4744 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4745 }
4746 }
4747
4748 if ((lp->media == INIT) && (lp->timeout < 0)) {
4749 lp->ibn = 4;
4750 lp->active = 0;
4751 p+=2;
4752 lp->infoblock_media = (*p++) & MEDIA_CODE;
4753 lp->cache.csr13 = CSR13;
4754 lp->cache.csr14 = CSR14;
4755 lp->cache.csr15 = CSR15;
4756 lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4757 lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
4758 csr6 = *p++;
4759 flags = *p++;
4760
4761 lp->asBitValid = (flags & 0x80) ? 0 : -1;
4762 lp->defMedium = (flags & 0x40) ? -1 : 0;
4763 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4764 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4765 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4766 lp->useMII = false;
4767
4768 de4x5_switch_mac_port(dev);
4769 }
4770
4771 return dc2114x_autoconf(dev);
4772 }
4773
4774
4775
4776
4777
4778 static int
4779 type5_infoblock(struct net_device *dev, u_char count, u_char *p)
4780 {
4781 struct de4x5_private *lp = netdev_priv(dev);
4782 u_char len = (*p & BLOCK_LEN)+1;
4783
4784
4785 if (--count > lp->tcount) {
4786 if (*(p+len) < 128) {
4787 return dc_infoblock[COMPACT](dev, count, p+len);
4788 } else {
4789 return dc_infoblock[*(p+len+1)](dev, count, p+len);
4790 }
4791 }
4792
4793
4794 if ((lp->state == INITIALISED) || (lp->media == INIT)) {
4795 p+=2;
4796 lp->rst = p;
4797 srom_exec(dev, lp->rst);
4798 }
4799
4800 return DE4X5_AUTOSENSE_MS;
4801 }
4802
4803
4804
4805
4806
4807 static int
4808 mii_rd(u_char phyreg, u_char phyaddr, u_long ioaddr)
4809 {
4810 mii_wdata(MII_PREAMBLE, 2, ioaddr);
4811 mii_wdata(MII_PREAMBLE, 32, ioaddr);
4812 mii_wdata(MII_STRD, 4, ioaddr);
4813 mii_address(phyaddr, ioaddr);
4814 mii_address(phyreg, ioaddr);
4815 mii_ta(MII_STRD, ioaddr);
4816
4817 return mii_rdata(ioaddr);
4818 }
4819
4820 static void
4821 mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
4822 {
4823 mii_wdata(MII_PREAMBLE, 2, ioaddr);
4824 mii_wdata(MII_PREAMBLE, 32, ioaddr);
4825 mii_wdata(MII_STWR, 4, ioaddr);
4826 mii_address(phyaddr, ioaddr);
4827 mii_address(phyreg, ioaddr);
4828 mii_ta(MII_STWR, ioaddr);
4829 data = mii_swap(data, 16);
4830 mii_wdata(data, 16, ioaddr);
4831 }
4832
4833 static int
4834 mii_rdata(u_long ioaddr)
4835 {
4836 int i;
4837 s32 tmp = 0;
4838
4839 for (i=0; i<16; i++) {
4840 tmp <<= 1;
4841 tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
4842 }
4843
4844 return tmp;
4845 }
4846
4847 static void
4848 mii_wdata(int data, int len, u_long ioaddr)
4849 {
4850 int i;
4851
4852 for (i=0; i<len; i++) {
4853 sendto_mii(MII_MWR | MII_WR, data, ioaddr);
4854 data >>= 1;
4855 }
4856 }
4857
4858 static void
4859 mii_address(u_char addr, u_long ioaddr)
4860 {
4861 int i;
4862
4863 addr = mii_swap(addr, 5);
4864 for (i=0; i<5; i++) {
4865 sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
4866 addr >>= 1;
4867 }
4868 }
4869
4870 static void
4871 mii_ta(u_long rw, u_long ioaddr)
4872 {
4873 if (rw == MII_STWR) {
4874 sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
4875 sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
4876 } else {
4877 getfrom_mii(MII_MRD | MII_RD, ioaddr);
4878 }
4879 }
4880
4881 static int
4882 mii_swap(int data, int len)
4883 {
4884 int i, tmp = 0;
4885
4886 for (i=0; i<len; i++) {
4887 tmp <<= 1;
4888 tmp |= (data & 1);
4889 data >>= 1;
4890 }
4891
4892 return tmp;
4893 }
4894
4895 static void
4896 sendto_mii(u32 command, int data, u_long ioaddr)
4897 {
4898 u32 j;
4899
4900 j = (data & 1) << 17;
4901 outl(command | j, ioaddr);
4902 udelay(1);
4903 outl(command | MII_MDC | j, ioaddr);
4904 udelay(1);
4905 }
4906
4907 static int
4908 getfrom_mii(u32 command, u_long ioaddr)
4909 {
4910 outl(command, ioaddr);
4911 udelay(1);
4912 outl(command | MII_MDC, ioaddr);
4913 udelay(1);
4914
4915 return (inl(ioaddr) >> 19) & 1;
4916 }
4917
4918
4919
4920
4921 static int
4922 mii_get_oui(u_char phyaddr, u_long ioaddr)
4923 {
4924
4925
4926
4927
4928
4929
4930 int r2, r3;
4931
4932
4933 r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
4934 r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
4935
4936
4937
4938
4939
4940
4941
4942
4943
4944
4945
4946
4947
4948
4949
4950
4951
4952
4953
4954
4955
4956
4957
4958
4959
4960
4961
4962 return r2;
4963 }
4964
4965
4966
4967
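/*
** Search all MII addresses for responding PHYs, match each OUI against the
** phy_info[] table and fall back to a generic speed register entry (with a
** debug dump) for unrecognised devices. Returns the number of PHYs found.
*/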
4968 static int
4969 mii_get_phy(struct net_device *dev)
4970 {
4971 struct de4x5_private *lp = netdev_priv(dev);
4972 u_long iobase = dev->base_addr;
4973 int i, j, k, n, limit=ARRAY_SIZE(phy_info);
4974 int id;
4975
4976 lp->active = 0;
4977 lp->useMII = true;
4978
4979
4980 for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
4981 lp->phy[lp->active].addr = i;
4982 if (i==0) n++;
4983 while (de4x5_reset_phy(dev)<0) udelay(100);
4984 id = mii_get_oui(i, DE4X5_MII);
4985 if ((id == 0) || (id == 65535)) continue;
4986 for (j=0; j<limit; j++) {
4987 if (id != phy_info[j].id) continue;
4988 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
4989 if (k < DE4X5_MAX_PHY) {
4990 memcpy((char *)&lp->phy[k],
4991 (char *)&phy_info[j], sizeof(struct phy_table));
4992 lp->phy[k].addr = i;
4993 lp->mii_cnt++;
4994 lp->active++;
4995 } else {
4996 goto purgatory;
4997 }
4998 break;
4999 }
5000 if ((j == limit) && (i < DE4X5_MAX_MII)) {
5001 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++);
5002 lp->phy[k].addr = i;
5003 lp->phy[k].id = id;
5004 lp->phy[k].spd.reg = GENERIC_REG;
5005 lp->phy[k].spd.mask = GENERIC_MASK;
5006 lp->phy[k].spd.value = GENERIC_VALUE;
5007 lp->mii_cnt++;
5008 lp->active++;
5009 printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
5010 j = de4x5_debug;
5011 de4x5_debug |= DEBUG_MII;
5012 de4x5_dbg_mii(dev, k);
5013 de4x5_debug = j;
5014 printk("\n");
5015 }
5016 }
5017 purgatory:
5018 lp->active = 0;
5019 if (lp->phy[0].id) {
5020 for (k=0; k < DE4X5_MAX_PHY && lp->phy[k].id; k++) {
5021 mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
5022 while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
5023
5024 de4x5_dbg_mii(dev, k);
5025 }
5026 }
5027 if (!lp->mii_cnt) lp->useMII = false;
5028
5029 return lp->mii_cnt;
5030 }
5031
5032 static char *
5033 build_setup_frame(struct net_device *dev, int mode)
5034 {
5035 struct de4x5_private *lp = netdev_priv(dev);
5036 int i;
5037 char *pa = lp->setup_frame;
5038
5039
5040 if (mode == ALL) {
5041 memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
5042 }
5043
5044 if (lp->setup_f == HASH_PERF) {
5045 for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
5046 *(pa + i) = dev->dev_addr[i];
5047 if (i & 0x01) pa += 2;
5048 }
5049 *(lp->setup_frame + (DE4X5_HASH_TABLE_LEN >> 3) - 3) = 0x80;
5050 } else {
5051 for (i=0; i<ETH_ALEN; i++) {
5052 *(pa + (i&1)) = dev->dev_addr[i];
5053 if (i & 0x01) pa += 4;
5054 }
5055 for (i=0; i<ETH_ALEN; i++) {
5056 *(pa + (i&1)) = (char) 0xff;
5057 if (i & 0x01) pa += 4;
5058 }
5059 }
5060
5061 return pa;
5062 }
5063
5064 static void
5065 disable_ast(struct net_device *dev)
5066 {
5067 struct de4x5_private *lp = netdev_priv(dev);
5068 del_timer_sync(&lp->timer);
5069 }
5070
5071 static long
5072 de4x5_switch_mac_port(struct net_device *dev)
5073 {
5074 struct de4x5_private *lp = netdev_priv(dev);
5075 u_long iobase = dev->base_addr;
5076 s32 omr;
5077
5078 STOP_DE4X5;
5079
5080
5081 omr = (inl(DE4X5_OMR) & ~(OMR_PS | OMR_HBD | OMR_TTM | OMR_PCS | OMR_SCR |
5082 OMR_FDX));
5083 omr |= lp->infoblock_csr6;
5084 if (omr & OMR_PS) omr |= OMR_HBD;
5085 outl(omr, DE4X5_OMR);
5086
5087
5088 RESET_DE4X5;
5089
5090
5091 if (lp->chipset == DC21140) {
5092 gep_wr(lp->cache.gepc, dev);
5093 gep_wr(lp->cache.gep, dev);
5094 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5095 reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, lp->cache.csr15);
5096 }
5097
5098
5099 outl(omr, DE4X5_OMR);
5100
5101
5102 inl(DE4X5_MFC);
5103
5104 return omr;
5105 }
5106
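/*
** General purpose port access. The 21140 has a dedicated GEP register; on
** the 21142/21143 the port lives in the SIA general register, so gep_wr()
** shifts the data into the top half of SIGR (merged with the cached CSR15
** value) and gep_rd() returns the low 20 bits of SIGR.
*/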
5107 static void
5108 gep_wr(s32 data, struct net_device *dev)
5109 {
5110 struct de4x5_private *lp = netdev_priv(dev);
5111 u_long iobase = dev->base_addr;
5112
5113 if (lp->chipset == DC21140) {
5114 outl(data, DE4X5_GEP);
5115 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5116 outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
5117 }
5118 }
5119
5120 static int
5121 gep_rd(struct net_device *dev)
5122 {
5123 struct de4x5_private *lp = netdev_priv(dev);
5124 u_long iobase = dev->base_addr;
5125
5126 if (lp->chipset == DC21140) {
5127 return inl(DE4X5_GEP);
5128 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5129 return inl(DE4X5_SIGR) & 0x000fffff;
5130 }
5131
5132 return 0;
5133 }
5134
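/*
** Power management helper: put the chip into the WAKEUP, SNOOZE or SLEEP
** state. The 21040 and 21140 are left alone, EISA boards are driven through
** the PCI_CFPM port and PCI boards through the PCI_CFDA_PSM configuration
** register; entering SLEEP also shuts the SIA down first.
*/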
5135 static void
5136 yawn(struct net_device *dev, int state)
5137 {
5138 struct de4x5_private *lp = netdev_priv(dev);
5139 u_long iobase = dev->base_addr;
5140
5141 if ((lp->chipset == DC21040) || (lp->chipset == DC21140)) return;
5142
5143 if(lp->bus == EISA) {
5144 switch(state) {
5145 case WAKEUP:
5146 outb(WAKEUP, PCI_CFPM);
5147 mdelay(10);
5148 break;
5149
5150 case SNOOZE:
5151 outb(SNOOZE, PCI_CFPM);
5152 break;
5153
5154 case SLEEP:
5155 outl(0, DE4X5_SICR);
5156 outb(SLEEP, PCI_CFPM);
5157 break;
5158 }
5159 } else {
5160 struct pci_dev *pdev = to_pci_dev (lp->gendev);
5161 switch(state) {
5162 case WAKEUP:
5163 pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
5164 mdelay(10);
5165 break;
5166
5167 case SNOOZE:
5168 pci_write_config_byte(pdev, PCI_CFDA_PSM, SNOOZE);
5169 break;
5170
5171 case SLEEP:
5172 outl(0, DE4X5_SICR);
5173 pci_write_config_byte(pdev, PCI_CFDA_PSM, SLEEP);
5174 break;
5175 }
5176 }
5177 }
5178
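/*
** Parse the boot/module load time options for this interface. The global
** 'args' string is searched for the device name followed by the keywords
** "fdx" (force full duplex) and "autosense" with one of TP, TP_NW, BNC,
** AUI, BNC_AUI, 10Mb, 100Mb or AUTO. For example, following the form used
** in the driver documentation (assumed here, not taken from this file):
**
**     insmod de4x5 args='eth0:fdx autosense=AUTO'
**
** Note that a BNC_AUI request is mapped onto plain BNC by the code below.
*/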
5179 static void
5180 de4x5_parse_params(struct net_device *dev)
5181 {
5182 struct de4x5_private *lp = netdev_priv(dev);
5183 char *p, *q, t;
5184
5185 lp->params.fdx = false;
5186 lp->params.autosense = AUTO;
5187
5188 if (args == NULL) return;
5189
5190 if ((p = strstr(args, dev->name))) {
5191 if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
5192 t = *q;
5193 *q = '\0';
5194
5195 if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
5196
5197 if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
5198 if (strstr(p, "TP_NW")) {
5199 lp->params.autosense = TP_NW;
5200 } else if (strstr(p, "TP")) {
5201 lp->params.autosense = TP;
5202 } else if (strstr(p, "BNC_AUI")) {
5203 lp->params.autosense = BNC;
5204 } else if (strstr(p, "BNC")) {
5205 lp->params.autosense = BNC;
5206 } else if (strstr(p, "AUI")) {
5207 lp->params.autosense = AUI;
5208 } else if (strstr(p, "10Mb")) {
5209 lp->params.autosense = _10Mb;
5210 } else if (strstr(p, "100Mb")) {
5211 lp->params.autosense = _100Mb;
5212 } else if (strstr(p, "AUTO")) {
5213 lp->params.autosense = AUTO;
5214 }
5215 }
5216 *q = t;
5217 }
5218 }
5219
5220 static void
5221 de4x5_dbg_open(struct net_device *dev)
5222 {
5223 struct de4x5_private *lp = netdev_priv(dev);
5224 int i;
5225
5226 if (de4x5_debug & DEBUG_OPEN) {
5227 printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
5228 printk("\tphysical address: %pM\n", dev->dev_addr);
5229 printk("Descriptor head addresses:\n");
5230 printk("\t0x%8.8lx 0x%8.8lx\n",(u_long)lp->rx_ring,(u_long)lp->tx_ring);
5231 printk("Descriptor addresses:\nRX: ");
5232 for (i=0;i<lp->rxRingSize-1;i++){
5233 if (i < 3) {
5234 printk("0x%8.8lx ",(u_long)&lp->rx_ring[i].status);
5235 }
5236 }
5237 printk("...0x%8.8lx\n",(u_long)&lp->rx_ring[i].status);
5238 printk("TX: ");
5239 for (i=0;i<lp->txRingSize-1;i++){
5240 if (i < 3) {
5241 printk("0x%8.8lx ", (u_long)&lp->tx_ring[i].status);
5242 }
5243 }
5244 printk("...0x%8.8lx\n", (u_long)&lp->tx_ring[i].status);
5245 printk("Descriptor buffers:\nRX: ");
5246 for (i=0;i<lp->rxRingSize-1;i++){
5247 if (i < 3) {
5248 printk("0x%8.8x ",le32_to_cpu(lp->rx_ring[i].buf));
5249 }
5250 }
5251 printk("...0x%8.8x\n",le32_to_cpu(lp->rx_ring[i].buf));
5252 printk("TX: ");
5253 for (i=0;i<lp->txRingSize-1;i++){
5254 if (i < 3) {
5255 printk("0x%8.8x ", le32_to_cpu(lp->tx_ring[i].buf));
5256 }
5257 }
5258 printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
5259 printk("Ring size:\nRX: %d\nTX: %d\n",
5260 (short)lp->rxRingSize,
5261 (short)lp->txRingSize);
5262 }
5263 }
5264
5265 static void
5266 de4x5_dbg_mii(struct net_device *dev, int k)
5267 {
5268 struct de4x5_private *lp = netdev_priv(dev);
5269 u_long iobase = dev->base_addr;
5270
5271 if (de4x5_debug & DEBUG_MII) {
5272 printk("\nMII device address: %d\n", lp->phy[k].addr);
5273 printk("MII CR: %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
5274 printk("MII SR: %x\n",mii_rd(MII_SR,lp->phy[k].addr,DE4X5_MII));
5275 printk("MII ID0: %x\n",mii_rd(MII_ID0,lp->phy[k].addr,DE4X5_MII));
5276 printk("MII ID1: %x\n",mii_rd(MII_ID1,lp->phy[k].addr,DE4X5_MII));
5277 if (lp->phy[k].id != BROADCOM_T4) {
5278 printk("MII ANA: %x\n",mii_rd(0x04,lp->phy[k].addr,DE4X5_MII));
5279 printk("MII ANC: %x\n",mii_rd(0x05,lp->phy[k].addr,DE4X5_MII));
5280 }
5281 printk("MII 16: %x\n",mii_rd(0x10,lp->phy[k].addr,DE4X5_MII));
5282 if (lp->phy[k].id != BROADCOM_T4) {
5283 printk("MII 17: %x\n",mii_rd(0x11,lp->phy[k].addr,DE4X5_MII));
5284 printk("MII 18: %x\n",mii_rd(0x12,lp->phy[k].addr,DE4X5_MII));
5285 } else {
5286 printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
5287 }
5288 }
5289 }
5290
5291 static void
5292 de4x5_dbg_media(struct net_device *dev)
5293 {
5294 struct de4x5_private *lp = netdev_priv(dev);
5295
5296 if (lp->media != lp->c_media) {
5297 if (de4x5_debug & DEBUG_MEDIA) {
5298 printk("%s: media is %s%s\n", dev->name,
5299 (lp->media == NC ? "unconnected, link down or incompatible connection" :
5300 (lp->media == TP ? "TP" :
5301 (lp->media == ANS ? "TP/Nway" :
5302 (lp->media == BNC ? "BNC" :
5303 (lp->media == AUI ? "AUI" :
5304 (lp->media == BNC_AUI ? "BNC/AUI" :
5305 (lp->media == EXT_SIA ? "EXT SIA" :
5306 (lp->media == _100Mb ? "100Mb/s" :
5307 (lp->media == _10Mb ? "10Mb/s" :
5308 "???"
5309 ))))))))), (lp->fdx?" full duplex.":"."));
5310 }
5311 lp->c_media = lp->media;
5312 }
5313 }
5314
5315 static void
5316 de4x5_dbg_srom(struct de4x5_srom *p)
5317 {
5318 int i;
5319
5320 if (de4x5_debug & DEBUG_SROM) {
5321 printk("Sub-system Vendor ID: %04x\n", *((u_short *)p->sub_vendor_id));
5322 printk("Sub-system ID: %04x\n", *((u_short *)p->sub_system_id));
5323 printk("ID Block CRC: %02x\n", (u_char)(p->id_block_crc));
5324 printk("SROM version: %02x\n", (u_char)(p->version));
5325 printk("# controllers: %02x\n", (u_char)(p->num_controllers));
5326
5327 printk("Hardware Address: %pM\n", p->ieee_addr);
5328 printk("CRC checksum: %04x\n", (u_short)(p->chksum));
5329 for (i=0; i<64; i++) {
5330 printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
5331 }
5332 }
5333 }
5334
5335 static void
5336 de4x5_dbg_rx(struct sk_buff *skb, int len)
5337 {
5338 int i, j;
5339
5340 if (de4x5_debug & DEBUG_RX) {
5341 printk("R: %pM <- %pM len/SAP:%02x%02x [%d]\n",
5342 skb->data, &skb->data[6],
5343 (u_char)skb->data[12],
5344 (u_char)skb->data[13],
5345 len);
5346 for (j=0; len>0;j+=16, len-=16) {
5347 printk(" %03x: ",j);
5348 for (i=0; i<16 && i<len; i++) {
5349 printk("%02x ",(u_char)skb->data[i+j]);
5350 }
5351 printk("\n");
5352 }
5353 }
5354 }
5355
5356
5357
5358
5359
5360
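/*
** Driver private ioctls. Commands that modify device state, and SAY_BOO,
** require CAP_NET_ADMIN; the remaining GET_* commands only copy data back
** to the caller.
*/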
5361 static int
5362 de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5363 {
5364 struct de4x5_private *lp = netdev_priv(dev);
5365 struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru;
5366 u_long iobase = dev->base_addr;
5367 int i, j, status = 0;
5368 s32 omr;
5369 union {
5370 u8 addr[144];
5371 u16 sval[72];
5372 u32 lval[36];
5373 } tmp;
5374 u_long flags = 0;
5375
5376 switch(ioc->cmd) {
5377 case DE4X5_GET_HWADDR:
5378 ioc->len = ETH_ALEN;
5379 for (i=0; i<ETH_ALEN; i++) {
5380 tmp.addr[i] = dev->dev_addr[i];
5381 }
5382 if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
5383 break;
5384
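/* Change the station address at runtime: stop the transmit queue, rebuild
   the perfect filter setup frame and queue it to the chip as a setup
   packet, then restart the queue. */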
5385 case DE4X5_SET_HWADDR:
5386 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5387 if (copy_from_user(tmp.addr, ioc->data, ETH_ALEN)) return -EFAULT;
5388 if (netif_queue_stopped(dev))
5389 return -EBUSY;
5390 netif_stop_queue(dev);
5391 for (i=0; i<ETH_ALEN; i++) {
5392 dev->dev_addr[i] = tmp.addr[i];
5393 }
5394 build_setup_frame(dev, PHYS_ADDR_ONLY);
5395
5396 load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
5397 SETUP_FRAME_LEN, (struct sk_buff *)1);
5398 lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
5399 outl(POLL_DEMAND, DE4X5_TPD);
5400 netif_wake_queue(dev);
5401 break;
5402
5403 case DE4X5_SAY_BOO:
5404 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5405 printk("%s: Boo!\n", dev->name);
5406 break;
5407
5408 case DE4X5_MCA_EN:
5409 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5410 omr = inl(DE4X5_OMR);
5411 omr |= OMR_PM;
5412 outl(omr, DE4X5_OMR);
5413 break;
5414
5415 case DE4X5_GET_STATS:
5416 {
5417 struct pkt_stats statbuf;
5418 ioc->len = sizeof(statbuf);
5419 spin_lock_irqsave(&lp->lock, flags);
5420 memcpy(&statbuf, &lp->pktStats, ioc->len);
5421 spin_unlock_irqrestore(&lp->lock, flags);
5422 if (copy_to_user(ioc->data, &statbuf, ioc->len))
5423 return -EFAULT;
5424 break;
5425 }
5426 case DE4X5_CLR_STATS:
5427 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5428 spin_lock_irqsave(&lp->lock, flags);
5429 memset(&lp->pktStats, 0, sizeof(lp->pktStats));
5430 spin_unlock_irqrestore(&lp->lock, flags);
5431 break;
5432
5433 case DE4X5_GET_OMR:
5434 tmp.addr[0] = inl(DE4X5_OMR);
5435 if (copy_to_user(ioc->data, tmp.addr, 1)) return -EFAULT;
5436 break;
5437
5438 case DE4X5_SET_OMR:
5439 if (!capable(CAP_NET_ADMIN)) return -EPERM;
5440 if (copy_from_user(tmp.addr, ioc->data, 1)) return -EFAULT;
5441 outl(tmp.addr[0], DE4X5_OMR);
5442 break;
5443
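/* Return a snapshot of the main CSRs (bus mode, status, interrupt mask,
   operating mode and the four SIA registers) as eight 32-bit words. */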
5444 case DE4X5_GET_REG:
5445 j = 0;
5446 tmp.lval[0] = inl(DE4X5_STS); j+=4;
5447 tmp.lval[1] = inl(DE4X5_BMR); j+=4;
5448 tmp.lval[2] = inl(DE4X5_IMR); j+=4;
5449 tmp.lval[3] = inl(DE4X5_OMR); j+=4;
5450 tmp.lval[4] = inl(DE4X5_SISR); j+=4;
5451 tmp.lval[5] = inl(DE4X5_SICR); j+=4;
5452 tmp.lval[6] = inl(DE4X5_STRR); j+=4;
5453 tmp.lval[7] = inl(DE4X5_SIGR); j+=4;
5454 ioc->len = j;
5455 if (copy_to_user(ioc->data, tmp.lval, ioc->len))
5456 return -EFAULT;
5457 break;
5458
5459 #define DE4X5_DUMP 0x0f
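/* There is no case for DE4X5_DUMP below, so requesting it returns
   -EOPNOTSUPP via the default branch. */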
5549 default:
5550 return -EOPNOTSUPP;
5551 }
5552
5553 return status;
5554 }
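/*
** Userland usage sketch. This is an illustrative assumption: the layout of
** struct de4x5_ioctl comes from the companion de4x5.h header and the
** handler above is assumed to be reached through the device private ioctl
** range; neither is defined in this file.
**
**     struct de4x5_ioctl {
**         unsigned short cmd;      // one of the DE4X5_* commands above
**         unsigned short len;      // length of the data buffer
**         unsigned char *data;     // user space buffer
**     };
**
**     struct ifreq ifr;
**     unsigned char hwaddr[6];
**     struct de4x5_ioctl *ioc = (struct de4x5_ioctl *)&ifr.ifr_ifru;
**
**     strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
**     ioc->cmd  = DE4X5_GET_HWADDR;
**     ioc->len  = sizeof(hwaddr);
**     ioc->data = hwaddr;
**     ioctl(sock_fd, SIOCDEVPRIVATE, &ifr);  // hwaddr now holds the MAC
*/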
5555
5556 static int __init de4x5_module_init (void)
5557 {
5558 int err = 0;
5559
5560 #ifdef CONFIG_PCI
5561 err = pci_register_driver(&de4x5_pci_driver);
5562 #endif
5563 #ifdef CONFIG_EISA
5564 err |= eisa_driver_register (&de4x5_eisa_driver);
5565 #endif
5566
5567 return err;
5568 }
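/*
** Note: when both PCI and EISA support are configured the two registration
** results above are OR-ed together, so a failure from either bus is
** reported even if the other registration succeeded.
*/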
5569
5570 static void __exit de4x5_module_exit (void)
5571 {
5572 #ifdef CONFIG_PCI
5573 pci_unregister_driver (&de4x5_pci_driver);
5574 #endif
5575 #ifdef CONFIG_EISA
5576 eisa_driver_unregister (&de4x5_eisa_driver);
5577 #endif
5578 }
5579
5580 module_init (de4x5_module_init);
5581 module_exit (de4x5_module_exit);