This source file includes following definitions.
- ks_check_endian
- ks_rdreg16
- ks_wrreg16
- ks_inblk
- ks_outblk
- ks_disable_int
- ks_enable_int
- ks_tx_fifo_space
- ks_save_cmd_reg
- ks_restore_cmd_reg
- ks_set_powermode
- ks_read_config
- ks_soft_reset
- ks_enable_qmu
- ks_disable_qmu
- ks_read_qmu
- ks_rcv
- ks_update_link_status
- ks_irq
- ks_net_open
- ks_net_stop
- ks_write_qmu
- ks_start_xmit
- ks_start_rx
- ks_stop_rx
- ether_gen_crc
- ks_set_grpaddr
- ks_clear_mcast
- ks_set_promis
- ks_set_mcast
- ks_set_rx_mode
- ks_set_mac
- ks_set_mac_address
- ks_net_ioctl
- ks_get_drvinfo
- ks_get_msglevel
- ks_set_msglevel
- ks_get_link_ksettings
- ks_set_link_ksettings
- ks_get_link
- ks_nway_reset
- ks_phy_reg
- ks_phy_read
- ks_phy_write
- ks_read_selftest
- ks_setup
- ks_setup_int
- ks_hw_init
- ks8851_probe
- ks8851_remove
1
2
3
4
5
6
7
8
9
10
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12
13 #include <linux/interrupt.h>
14 #include <linux/module.h>
15 #include <linux/kernel.h>
16 #include <linux/netdevice.h>
17 #include <linux/etherdevice.h>
18 #include <linux/ethtool.h>
19 #include <linux/cache.h>
20 #include <linux/crc32.h>
21 #include <linux/crc32poly.h>
22 #include <linux/mii.h>
23 #include <linux/platform_device.h>
24 #include <linux/delay.h>
25 #include <linux/slab.h>
26 #include <linux/ks8851_mll.h>
27 #include <linux/of.h>
28 #include <linux/of_device.h>
29 #include <linux/of_net.h>
30
31 #include "ks8851.h"
32
#define	DRV_NAME	"ks8851_mll"

/* Fallback MAC used by ks_hw_init() until probe programs a real one. */
static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
#define MAX_RECV_FRAMES			255	/* max RX headers cached per IRQ */
#define MAX_BUF_SIZE			2048
#define TX_BUF_SIZE			2000
#define RX_BUF_SIZE			2000	/* upper bound for a valid RX length */

/* RXCR1 bits cleared before selecting an address-filtering mode. */
#define RXCR1_FILTER_MASK    		(RXCR1_RXINVF | RXCR1_RXAE | \
				      RXCR1_RXMAFMA | RXCR1_RXPAFMA)
/* Default RXQCR: frame-count threshold + auto-dequeue of error frames. */
#define	RXQCR_CMD_CNTL                	(RXQCR_RXFCTE|RXQCR_ADRFE)

/* Host bus width detected from CCR (see ks_read_config()). */
#define	ENUM_BUS_NONE			0
#define	ENUM_BUS_8BIT			1
#define	ENUM_BUS_16BIT			2
#define	ENUM_BUS_32BIT			3

#define MAX_MCAST_LST			32	/* software multicast list capacity */
#define HW_MCAST_SIZE			8	/* bytes in the hardware hash bitmap */
52
53
54
55
56
57
58
59
60
61
/* TX frame prologue (control word + byte count), accessible as raw
 * bytes or as the two little-endian words the chip expects. */
union ks_tx_hdr {
	u8      txb[4];
	__le16  txw[2];
};
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
/* Cached per-frame RX header: status (from KS_RXFHSR) and byte
 * count (from KS_RXFHBCR), gathered up-front in ks_rcv(). */
struct type_frame_head {
	u16	sts;         /* Frame status */
	u16	len;         /* Byte count */
};
113
/* Per-device private state for one KS8851-16MLL. */
struct ks_net {
	struct net_device	*netdev;
	void __iomem    	*hw_addr;	/* mapped data register */
	void __iomem    	*hw_addr_cmd;	/* mapped command/address register */
	union ks_tx_hdr		txh ____cacheline_aligned;	/* scratch TX header */
	struct mutex      	lock;		/* spinlock to be interrupt safe */
	struct platform_device *pdev;
	struct mii_if_info	mii;		/* generic MII glue for ethtool/ioctl */
	struct type_frame_head	*frame_head_info;	/* RX header cache (MAX_RECV_FRAMES) */
	spinlock_t		statelock;	/* serialises IRQ handler vs xmit */
	u32			msg_enable;	/* netif message level bitmask */
	u32			frame_cnt;	/* frames pending in the RX queue */
	int			bus_width;	/* ENUM_BUS_* derived from CCR */

	u16			rc_rxqcr;	/* cached RXQCR base value */
	u16			rc_txcr;	/* cached TXCR value (unused in visible paths) */
	u16			rc_ier;		/* IER mask written by ks_enable_int() */
	u16			sharedbus;	/* CCR reports a shared host bus */
	u16			cmd_reg_cache;	/* last word written to hw_addr_cmd */
	u16			cmd_reg_cache_int;	/* cmd reg saved across the IRQ handler */
	u16			promiscuous;	/* promiscuous filtering requested */
	u16			all_mcast;	/* accept-all-multicast requested */
	u16			mcast_lst_size;	/* entries used in mcast_lst */
	u8			mcast_lst[MAX_MCAST_LST][ETH_ALEN];	/* cached mcast addresses */
	u8			mcast_bits[HW_MCAST_SIZE];	/* hardware hash-table bitmap */
	u8			mac_addr[6];
	u8                      fid;		/* frame id (not used by the paths shown here) */
	u8			extra_byte;	/* dummy bytes ahead of RX data (bus width) */
	u8			enabled;	/* QMU TX/RX currently enabled */
};
144
/* Module parameter "message": seeds netif_msg_init() in probe. */
static int msg_enable;

/* Byte-enable lanes in the 16-bit command word (one bit per data byte). */
#define BE3             0x8000      /* Byte Enable 3 */
#define BE2             0x4000      /* Byte Enable 2 */
#define BE1             0x2000      /* Byte Enable 1 */
#define BE0             0x1000      /* Byte Enable 0 */
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
/*
 * ks_check_endian - verify the chip's endian strap matches the driver
 * @ks: The chip information
 *
 * Issues a read of KS_CIDER with only the upper byte-enable lanes
 * (BE3|BE2) selected.  With the strapping this driver supports, that
 * access reads back zero; a nonzero value indicates the EESK endian
 * strap is set the other way (per the error message below) and the
 * driver refuses to bind.  NOTE(review): exact datasheet rationale
 * not visible here — confirm against the KS8851-16MLL manual.
 *
 * Returns 0 when the strapping is acceptable, -EINVAL otherwise.
 */
static int ks_check_endian(struct ks_net *ks)
{
	u16 cider;

	iowrite16(BE3 | BE2 | KS_CIDER, ks->hw_addr_cmd);
	cider = ioread16(ks->hw_addr);
	if (!cider)
		return 0;

	netdev_err(ks->netdev, "incorrect EESK endian strap setting\n");

	return -EINVAL;
}
202
203
204
205
206
207
208
209
210
211 static u16 ks_rdreg16(struct ks_net *ks, int offset)
212 {
213 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
214 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
215 return ioread16(ks->hw_addr);
216 }
217
218
219
220
221
222
223
224
225
226 static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
227 {
228 ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
229 iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
230 iowrite16(value, ks->hw_addr);
231 }
232
233
234
235
236
237
238
239
240 static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
241 {
242 len >>= 1;
243 while (len--)
244 *wptr++ = (u16)ioread16(ks->hw_addr);
245 }
246
247
248
249
250
251
252
253
254 static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
255 {
256 len >>= 1;
257 while (len--)
258 iowrite16(*wptr++, ks->hw_addr);
259 }
260
261 static void ks_disable_int(struct ks_net *ks)
262 {
263 ks_wrreg16(ks, KS_IER, 0x0000);
264 }
265
266 static void ks_enable_int(struct ks_net *ks)
267 {
268 ks_wrreg16(ks, KS_IER, ks->rc_ier);
269 }
270
271
272
273
274
275
276 static inline u16 ks_tx_fifo_space(struct ks_net *ks)
277 {
278 return ks_rdreg16(ks, KS_TXMIR) & 0x1fff;
279 }
280
281
282
283
284
285
/*
 * ks_save_cmd_reg - snapshot the command register on IRQ entry
 * @ks: device state
 *
 * The IRQ handler changes the chip's register selection; the value
 * cached here is written back by ks_restore_cmd_reg() so that the
 * interrupted context resumes with its selection intact.
 */
static inline void ks_save_cmd_reg(struct ks_net *ks)
{
	ks->cmd_reg_cache_int = ks->cmd_reg_cache;
}
293
294
295
296
297
298
299
/*
 * ks_restore_cmd_reg - restore the pre-IRQ command register
 * @ks: device state
 *
 * Writes the value saved by ks_save_cmd_reg() back to the command
 * register; called as the last chip access in the IRQ handler.
 */
static inline void ks_restore_cmd_reg(struct ks_net *ks)
{
	ks->cmd_reg_cache = ks->cmd_reg_cache_int;
	iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
}
305
306
307
308
309
310
311
312
/*
 * ks_set_powermode - set the chip power-management mode
 * @ks: device state
 * @pwrmode: one of the PMECR_PM_* mode values
 *
 * Replaces only the PM mode field of PMECR, preserving the other bits.
 */
static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
{
	unsigned pmecr;

	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);

	/* Dummy read of GRR — presumably required before touching PMECR;
	 * TODO confirm against the datasheet. */
	ks_rdreg16(ks, KS_GRR);
	pmecr = ks_rdreg16(ks, KS_PMECR);
	pmecr &= ~PMECR_PM_MASK;
	pmecr |= pwrmode;

	ks_wrreg16(ks, KS_PMECR, pmecr);
}
326
327
328
329
330
331
332 static void ks_read_config(struct ks_net *ks)
333 {
334 u16 reg_data = 0;
335
336
337 reg_data = ks_rdreg16(ks, KS_CCR);
338
339
340 ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
341
342
343
344
345
346 if (reg_data & CCR_8BIT) {
347 ks->bus_width = ENUM_BUS_8BIT;
348 ks->extra_byte = 1;
349 } else if (reg_data & CCR_16BIT) {
350 ks->bus_width = ENUM_BUS_16BIT;
351 ks->extra_byte = 2;
352 } else {
353 ks->bus_width = ENUM_BUS_32BIT;
354 ks->extra_byte = 4;
355 }
356 }
357
358
359
360
361
362
363
364
365
366
367
368
369
370
/*
 * ks_soft_reset - issue one of the chip's soft resets
 * @ks: device state
 * @op: GRR reset bits to assert (e.g. GRR_GSR)
 *
 * Interrupts are masked first since the reset clears chip state.
 * The bits are held for 10 ms, then released with a 1 ms settle time.
 */
static void ks_soft_reset(struct ks_net *ks, unsigned op)
{
	/* Disable interrupt first */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_GRR, op);
	mdelay(10);	/* wait a short time to effect reset */
	ks_wrreg16(ks, KS_GRR, 0);
	mdelay(1);	/* wait for condition to clear */
}
380
381
/* Enable the QMU transmit and receive engines and mark the device enabled. */
static void ks_enable_qmu(struct ks_net *ks)
{
	u16 w;

	/* Enable QMU transmit. */
	w = ks_rdreg16(ks, KS_TXCR);
	ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);

	/*
	 * Enable the RX frame-count threshold so the chip interrupts
	 * once the threshold programmed in KS_RXFCTR (see ks_setup())
	 * is reached.
	 */
	w = ks_rdreg16(ks, KS_RXQCR);
	ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);

	/* Enable QMU receive. */
	w = ks_rdreg16(ks, KS_RXCR1);
	ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
	ks->enabled = true;
}
403
/* Disable the QMU transmit and receive engines. */
static void ks_disable_qmu(struct ks_net *ks)
{
	u16 w;

	/* Disable QMU transmit. */
	w = ks_rdreg16(ks, KS_TXCR);
	w &= ~TXCR_TXE;
	ks_wrreg16(ks, KS_TXCR, w);

	/* Disable QMU receive. */
	w = ks_rdreg16(ks, KS_RXCR1);
	w &= ~RXCR1_RXE;
	ks_wrreg16(ks, KS_RXCR1, w);

	ks->enabled = false;

}
422
423
424
425
426
427
428
429
430
431
432
433
/*
 * ks_read_qmu - read a frame out of the RX queue in "pseudo DMA" mode
 * @ks: device state
 * @buf: destination buffer
 * @len: packet data length in bytes
 *
 * The frame prologue consists of extra_byte dummy bytes (bus width),
 * a 2-byte status word and a 2-byte byte count, all of which are
 * discarded before the payload is read.  Note both ks_inblk() calls
 * start at @buf, so the prologue words are overwritten by the payload.
 */
static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
{
	/* r is 1 when an odd leading byte must be consumed (8-bit bus);
	 * w is the remaining dummy bytes, readable as whole words. */
	u32 r =  ks->extra_byte & 0x1;
	u32 w = ks->extra_byte - r;

	/* 1. set sudo DMA mode */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);

	/* 2. discard the frame prologue: dummy bytes + status + length */
	if (unlikely(r))
		ioread8(ks->hw_addr);
	ks_inblk(ks, buf, w + 2 + 2);

	/* 3. read the packet data, padded to a dword boundary */
	ks_inblk(ks, buf, ALIGN(len, 4));

	/* 4. reset sudo DMA Mode */
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
}
460
461
462
463
464
465
466
467
468
469
/*
 * ks_rcv - drain all pending frames from the RX queue
 * @ks: device state
 * @netdev: network device receiving the frames
 *
 * Snapshots the status and length of every pending frame first, then
 * processes them: valid frames are copied into fresh skbs and passed
 * to the stack, invalid or out-of-range frames are released in-chip
 * via RXQCR_RRXEF and counted as drops.
 */
static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
{
	u32	i;
	struct type_frame_head *frame_hdr = ks->frame_head_info;
	struct sk_buff *skb;

	/* Upper byte of RXFCTR = number of frames waiting in the queue. */
	ks->frame_cnt = ks_rdreg16(ks, KS_RXFCTR) >> 8;

	/* read all header information */
	for (i = 0; i < ks->frame_cnt; i++) {
		/* Checking Received packet status */
		frame_hdr->sts = ks_rdreg16(ks, KS_RXFHSR);
		/* Get packet len from hardware */
		frame_hdr->len = ks_rdreg16(ks, KS_RXFHBCR);
		frame_hdr++;
	}

	frame_hdr = ks->frame_head_info;
	while (ks->frame_cnt--) {
		/* Drop frames without the valid bit or with a length
		 * outside (0, RX_BUF_SIZE). */
		if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
			     frame_hdr->len >= RX_BUF_SIZE ||
			     frame_hdr->len <= 0)) {

			/* discard an invalid packet */
			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
			netdev->stats.rx_dropped++;
			if (!(frame_hdr->sts & RXFSHR_RXFV))
				netdev->stats.rx_frame_errors++;
			else
				netdev->stats.rx_length_errors++;
			frame_hdr++;
			continue;
		}

		skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
		if (likely(skb)) {
			/* 2-byte shift keeps the IP header word-aligned. */
			skb_reserve(skb, 2);
			/* read data block including CRC 4 bytes */
			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
			/* strip the 4-byte trailing CRC from the length */
			skb_put(skb, frame_hdr->len - 4);
			skb->protocol = eth_type_trans(skb, netdev);
			netif_rx(skb);
			/* exclude CRC size */
			netdev->stats.rx_bytes += frame_hdr->len - 4;
			netdev->stats.rx_packets++;
		} else {
			/* out of memory: release the frame in-chip */
			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
			netdev->stats.rx_dropped++;
		}
		frame_hdr++;
	}
}
522
523
524
525
526
527
528
529
530 static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
531 {
532
533 u32 link_up_status;
534 if (ks_rdreg16(ks, KS_P1SR) & P1SR_LINK_GOOD) {
535 netif_carrier_on(netdev);
536 link_up_status = true;
537 } else {
538 netif_carrier_off(netdev);
539 link_up_status = false;
540 }
541 netif_dbg(ks, link, ks->netdev,
542 "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
543 }
544
545
546
547
548
549
550
551
552
553
554
555
/*
 * ks_irq - device interrupt handler
 * @irq: interrupt number
 * @pw: the net_device passed to request_irq()
 *
 * Runs under statelock to keep register access exclusive against the
 * xmit path.  The command register is saved on entry and restored on
 * exit so any interrupted register sequence resumes correctly.
 */
static irqreturn_t ks_irq(int irq, void *pw)
{
	struct net_device *netdev = pw;
	struct ks_net *ks = netdev_priv(netdev);
	unsigned long flags;
	u16 status;

	spin_lock_irqsave(&ks->statelock, flags);
	/*this should be the first in IRQ handler */
	ks_save_cmd_reg(ks);

	status = ks_rdreg16(ks, KS_ISR);
	if (unlikely(!status)) {
		/* nothing pending — not our interrupt */
		ks_restore_cmd_reg(ks);
		spin_unlock_irqrestore(&ks->statelock, flags);
		return IRQ_NONE;
	}

	/* acknowledge every source we observed */
	ks_wrreg16(ks, KS_ISR, status);

	if (likely(status & IRQ_RXI))
		ks_rcv(ks, netdev);

	if (unlikely(status & IRQ_LCI))
		ks_update_link_status(netdev, ks);

	if (unlikely(status & IRQ_TXI))
		netif_wake_queue(netdev);

	if (unlikely(status & IRQ_LDI)) {
		/* clear the link-change wake-up event in PMECR */
		u16 pmecr = ks_rdreg16(ks, KS_PMECR);
		pmecr &= ~PMECR_WKEVT_MASK;
		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
	}

	if (unlikely(status & IRQ_RXOI))
		ks->netdev->stats.rx_over_errors++;

	/* this should be the last in IRQ handler*/
	ks_restore_cmd_reg(ks);
	spin_unlock_irqrestore(&ks->statelock, flags);
	return IRQ_HANDLED;
}
599
600
601
602
603
604
605
606
607
/*
 * ks_net_open - .ndo_open: bring the interface up
 * @netdev: network device
 *
 * Requests the IRQ, wakes the chip to normal power, acknowledges any
 * stale interrupts, then enables interrupts and the QMU.
 *
 * Returns 0 on success or the request_irq() error.
 */
static int ks_net_open(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	int err;

#define	KS_INT_FLAGS	IRQF_TRIGGER_LOW
	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);

	err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);

	if (err) {
		pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
		return err;
	}

	/* wake up powermode to normal mode */
	ks_set_powermode(ks, PMECR_PM_NORMAL);
	mdelay(1);	/* wait for normal mode to take effect */

	/* ack any stale interrupt status, then enable IRQs and the QMU */
	ks_wrreg16(ks, KS_ISR, 0xffff);
	ks_enable_int(ks);
	ks_enable_qmu(ks);
	netif_start_queue(ks->netdev);

	netif_dbg(ks, ifup, ks->netdev, "network device up\n");

	return 0;
}
641
642
643
644
645
646
647
648
649
/*
 * ks_net_stop - .ndo_stop: shut the interface down
 * @netdev: network device
 *
 * Stops the TX queue, masks and acknowledges interrupts, disables the
 * QMU, drops the chip into soft power-down and releases the IRQ.
 */
static int ks_net_stop(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);

	netif_info(ks, ifdown, netdev, "shutting down\n");

	netif_stop_queue(netdev);

	mutex_lock(&ks->lock);

	/* turn off the IRQs and ack any outstanding */
	ks_wrreg16(ks, KS_IER, 0x0000);
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* shutdown RX/TX QMU */
	ks_disable_qmu(ks);
	ks_disable_int(ks);

	/* set powermode to soft power down to save power */
	ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
	free_irq(netdev->irq, netdev);
	mutex_unlock(&ks->lock);
	return 0;
}
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
/*
 * ks_write_qmu - push one frame into the TX queue in "pseudo DMA" mode
 * @ks: device state
 * @pdata: packet data
 * @len: packet length in bytes
 *
 * Writes the 4-byte TX header (control word 0 + little-endian length)
 * followed by the dword-padded payload, then triggers the enqueue and
 * busy-waits for the chip to accept the frame.
 */
static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
{
	/* start header at txb[0] to align txw entries */
	ks->txh.txw[0] = 0;
	ks->txh.txw[1] = cpu_to_le16(len);

	/* 1. set sudo-DMA mode */
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
	/* 2. write status/length info */
	ks_outblk(ks, ks->txh.txw, 4);
	/* 3. write pkt data */
	ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
	/* 4. reset sudo-DMA mode */
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
	/* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
	ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
	/* 6. wait until TXQCR_METFE is auto-cleared */
	while (ks_rdreg16(ks, KS_TXQCR) & TXQCR_METFE)
		;
}
709
710
711
712
713
714
715
716
717
718
/*
 * ks_start_xmit - .ndo_start_xmit: transmit one skb
 * @skb: packet to send
 * @netdev: network device
 *
 * Takes statelock so register access is exclusive against the IRQ
 * handler.  Returns NETDEV_TX_BUSY (requeue) when the TX FIFO cannot
 * hold the frame plus overhead.
 */
static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	netdev_tx_t retv = NETDEV_TX_OK;
	struct ks_net *ks = netdev_priv(netdev);
	unsigned long flags;

	spin_lock_irqsave(&ks->statelock, flags);

	/* 12 extra bytes cover the 4-byte TX header plus padding —
	 * presumably a safety margin; confirm against the QMU docs. */
	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
		ks_write_qmu(ks, skb->data, skb->len);
		/* add tx statistics */
		netdev->stats.tx_bytes += skb->len;
		netdev->stats.tx_packets++;
		dev_kfree_skb(skb);
	} else
		retv = NETDEV_TX_BUSY;
	spin_unlock_irqrestore(&ks->statelock, flags);
	return retv;
}
742
743
744
745
746
747
748 static void ks_start_rx(struct ks_net *ks)
749 {
750 u16 cntl;
751
752
753 cntl = ks_rdreg16(ks, KS_RXCR1);
754 cntl |= RXCR1_RXE ;
755 ks_wrreg16(ks, KS_RXCR1, cntl);
756 }
757
758
759
760
761
762
763 static void ks_stop_rx(struct ks_net *ks)
764 {
765 u16 cntl;
766
767
768 cntl = ks_rdreg16(ks, KS_RXCR1);
769 cntl &= ~RXCR1_RXE ;
770 ks_wrreg16(ks, KS_RXCR1, cntl);
771
772 }
773
774 static unsigned long const ethernet_polynomial = CRC32_POLY_BE;
775
776 static unsigned long ether_gen_crc(int length, u8 *data)
777 {
778 long crc = -1;
779 while (--length >= 0) {
780 u8 current_octet = *data++;
781 int bit;
782
783 for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
784 crc = (crc << 1) ^
785 ((crc < 0) ^ (current_octet & 1) ?
786 ethernet_polynomial : 0);
787 }
788 }
789 return (unsigned long)crc;
790 }
791
792
793
794
795
796
/*
 * ks_set_grpaddr - program the hardware multicast hash table
 * @ks: device state
 *
 * Rebuilds the 64-bit hash bitmap from the cached multicast list and
 * writes it to the MAHTR0..3 registers, two bytes per 16-bit write.
 */
static void ks_set_grpaddr(struct ks_net *ks)
{
	u8	i;
	u32	index, position, value;

	memset(ks->mcast_bits, 0, sizeof(u8) * HW_MCAST_SIZE);

	for (i = 0; i < ks->mcast_lst_size; i++) {
		/* top 6 bits of the address CRC select one of 64 bins */
		position = (ether_gen_crc(6, ks->mcast_lst[i]) >> 26) & 0x3f;
		index = position >> 3;
		value = 1 << (position & 7);
		ks->mcast_bits[index] |= (u8)value;
	}

	/* write each odd/even byte pair as one 16-bit register value */
	for (i  = 0; i < HW_MCAST_SIZE; i++) {
		if (i & 1) {
			ks_wrreg16(ks, (u16)((KS_MAHTR0 + i) & ~1),
				(ks->mcast_bits[i] << 8) |
				ks->mcast_bits[i - 1]);
		}
	}
}
819
820
821
822
823
824
825
826
827 static void ks_clear_mcast(struct ks_net *ks)
828 {
829 u16 i, mcast_size;
830 for (i = 0; i < HW_MCAST_SIZE; i++)
831 ks->mcast_bits[i] = 0;
832
833 mcast_size = HW_MCAST_SIZE >> 2;
834 for (i = 0; i < mcast_size; i++)
835 ks_wrreg16(ks, KS_MAHTR0 + (2*i), 0);
836 }
837
/*
 * ks_set_promis - enable/disable promiscuous filtering
 * @ks: device state
 * @promiscuous_mode: nonzero to accept all frames
 *
 * RX is stopped while RXCR1 is reprogrammed and only restarted if the
 * device was enabled.
 */
static void ks_set_promis(struct ks_net *ks, u16 promiscuous_mode)
{
	u16		cntl;
	ks->promiscuous = promiscuous_mode;
	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
	cntl = ks_rdreg16(ks, KS_RXCR1);

	cntl &= ~RXCR1_FILTER_MASK;
	if (promiscuous_mode)
		/* Enable Promiscuous mode */
		cntl |= RXCR1_RXAE | RXCR1_RXINVF;
	else
		/* Disable Promiscuous mode (default normal mode) */
		cntl |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, cntl);

	if (ks->enabled)
		ks_start_rx(ks);

}
859
/*
 * ks_set_mcast - enable/disable accept-all-multicast mode
 * @ks: device state
 * @mcast: nonzero to accept all multicast frames
 *
 * RX is stopped while RXCR1 is reprogrammed and only restarted if the
 * device was enabled.
 */
static void ks_set_mcast(struct ks_net *ks, u16 mcast)
{
	u16	cntl;

	ks->all_mcast = mcast;
	ks_stop_rx(ks);  /* Stop receiving for reconfiguration */
	cntl = ks_rdreg16(ks, KS_RXCR1);
	cntl &= ~RXCR1_FILTER_MASK;
	if (mcast)
		/* Enable "Perfect with Multicast address passed mode" */
		cntl |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else
		/*
		 * Disable "Perfect with Multicast address passed
		 * mode" (normal mode).
		 */
		cntl |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, cntl);

	if (ks->enabled)
		ks_start_rx(ks);
}
883
/*
 * ks_set_rx_mode - .ndo_set_rx_mode: apply the netdev filter flags
 * @netdev: network device
 *
 * Selects promiscuous, all-multicast or normal filtering, then either
 * programs the hardware multicast hash from the device's list, falls
 * back to all-multicast when the list is too large, or clears the
 * hash when multicast is off.
 */
static void ks_set_rx_mode(struct net_device *netdev)
{
	struct ks_net *ks = netdev_priv(netdev);
	struct netdev_hw_addr *ha;

	/* Turn on/off promiscuous mode. */
	if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
		ks_set_promis(ks,
			(u16)((netdev->flags & IFF_PROMISC) == IFF_PROMISC));
	/* Turn on/off all mcast mode. */
	else if ((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI)
		ks_set_mcast(ks,
			(u16)((netdev->flags & IFF_ALLMULTI) == IFF_ALLMULTI));
	else
		ks_set_promis(ks, false);

	if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
		if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
			int i = 0;

			/* cache the addresses, then program the hash */
			netdev_for_each_mc_addr(ha, netdev) {
				if (i >= MAX_MCAST_LST)
					break;
				memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
			}
			ks->mcast_lst_size = (u8)i;
			ks_set_grpaddr(ks);
		} else {
			/*
			 * List won't fit in the hardware hash filter:
			 * fall back to accepting all multicast frames.
			 */
			ks->mcast_lst_size = MAX_MCAST_LST;
			ks_set_mcast(ks, true);
		}
	} else {
		ks->mcast_lst_size = 0;
		ks_clear_mcast(ks);
	}
}
924
925 static void ks_set_mac(struct ks_net *ks, u8 *data)
926 {
927 u16 *pw = (u16 *)data;
928 u16 w, u;
929
930 ks_stop_rx(ks);
931
932 u = *pw++;
933 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
934 ks_wrreg16(ks, KS_MARH, w);
935
936 u = *pw++;
937 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
938 ks_wrreg16(ks, KS_MARM, w);
939
940 u = *pw;
941 w = ((u & 0xFF) << 8) | ((u >> 8) & 0xFF);
942 ks_wrreg16(ks, KS_MARL, w);
943
944 memcpy(ks->mac_addr, data, ETH_ALEN);
945
946 if (ks->enabled)
947 ks_start_rx(ks);
948 }
949
950 static int ks_set_mac_address(struct net_device *netdev, void *paddr)
951 {
952 struct ks_net *ks = netdev_priv(netdev);
953 struct sockaddr *addr = paddr;
954 u8 *da;
955
956 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
957
958 da = (u8 *)netdev->dev_addr;
959
960 ks_set_mac(ks, da);
961 return 0;
962 }
963
964 static int ks_net_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
965 {
966 struct ks_net *ks = netdev_priv(netdev);
967
968 if (!netif_running(netdev))
969 return -EINVAL;
970
971 return generic_mii_ioctl(&ks->mii, if_mii(req), cmd, NULL);
972 }
973
/* Netdev callbacks wired up in ks8851_probe(). */
static const struct net_device_ops ks_netdev_ops = {
	.ndo_open		= ks_net_open,
	.ndo_stop		= ks_net_stop,
	.ndo_do_ioctl		= ks_net_ioctl,
	.ndo_start_xmit		= ks_start_xmit,
	.ndo_set_mac_address	= ks_set_mac_address,
	.ndo_set_rx_mode	= ks_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
};
983
984
985
/* ethtool: report driver name, a fixed version string and the bus device. */
static void ks_get_drvinfo(struct net_device *netdev,
			   struct ethtool_drvinfo *di)
{
	strlcpy(di->driver, DRV_NAME, sizeof(di->driver));
	strlcpy(di->version, "1.00", sizeof(di->version));
	strlcpy(di->bus_info, dev_name(netdev->dev.parent),
		sizeof(di->bus_info));
}
994
995 static u32 ks_get_msglevel(struct net_device *netdev)
996 {
997 struct ks_net *ks = netdev_priv(netdev);
998 return ks->msg_enable;
999 }
1000
1001 static void ks_set_msglevel(struct net_device *netdev, u32 to)
1002 {
1003 struct ks_net *ks = netdev_priv(netdev);
1004 ks->msg_enable = to;
1005 }
1006
1007 static int ks_get_link_ksettings(struct net_device *netdev,
1008 struct ethtool_link_ksettings *cmd)
1009 {
1010 struct ks_net *ks = netdev_priv(netdev);
1011
1012 mii_ethtool_get_link_ksettings(&ks->mii, cmd);
1013
1014 return 0;
1015 }
1016
1017 static int ks_set_link_ksettings(struct net_device *netdev,
1018 const struct ethtool_link_ksettings *cmd)
1019 {
1020 struct ks_net *ks = netdev_priv(netdev);
1021 return mii_ethtool_set_link_ksettings(&ks->mii, cmd);
1022 }
1023
1024 static u32 ks_get_link(struct net_device *netdev)
1025 {
1026 struct ks_net *ks = netdev_priv(netdev);
1027 return mii_link_ok(&ks->mii);
1028 }
1029
1030 static int ks_nway_reset(struct net_device *netdev)
1031 {
1032 struct ks_net *ks = netdev_priv(netdev);
1033 return mii_nway_restart(&ks->mii);
1034 }
1035
/* ethtool callbacks wired up in ks8851_probe(). */
static const struct ethtool_ops ks_ethtool_ops = {
	.get_drvinfo	= ks_get_drvinfo,
	.get_msglevel	= ks_get_msglevel,
	.set_msglevel	= ks_set_msglevel,
	.get_link	= ks_get_link,
	.nway_reset	= ks_nway_reset,
	.get_link_ksettings = ks_get_link_ksettings,
	.set_link_ksettings = ks_set_link_ksettings,
};
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056 static int ks_phy_reg(int reg)
1057 {
1058 switch (reg) {
1059 case MII_BMCR:
1060 return KS_P1MBCR;
1061 case MII_BMSR:
1062 return KS_P1MBSR;
1063 case MII_PHYSID1:
1064 return KS_PHY1ILR;
1065 case MII_PHYSID2:
1066 return KS_PHY1IHR;
1067 case MII_ADVERTISE:
1068 return KS_P1ANAR;
1069 case MII_LPA:
1070 return KS_P1ANLPR;
1071 }
1072
1073 return 0x0;
1074 }
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091 static int ks_phy_read(struct net_device *netdev, int phy_addr, int reg)
1092 {
1093 struct ks_net *ks = netdev_priv(netdev);
1094 int ksreg;
1095 int result;
1096
1097 ksreg = ks_phy_reg(reg);
1098 if (!ksreg)
1099 return 0x0;
1100
1101 mutex_lock(&ks->lock);
1102 result = ks_rdreg16(ks, ksreg);
1103 mutex_unlock(&ks->lock);
1104
1105 return result;
1106 }
1107
1108 static void ks_phy_write(struct net_device *netdev,
1109 int phy, int reg, int value)
1110 {
1111 struct ks_net *ks = netdev_priv(netdev);
1112 int ksreg;
1113
1114 ksreg = ks_phy_reg(reg);
1115 if (ksreg) {
1116 mutex_lock(&ks->lock);
1117 ks_wrreg16(ks, ksreg, value);
1118 mutex_unlock(&ks->lock);
1119 }
1120 }
1121
1122
1123
1124
1125
1126
1127
1128 static int ks_read_selftest(struct ks_net *ks)
1129 {
1130 unsigned both_done = MBIR_TXMBF | MBIR_RXMBF;
1131 int ret = 0;
1132 unsigned rd;
1133
1134 rd = ks_rdreg16(ks, KS_MBIR);
1135
1136 if ((rd & both_done) != both_done) {
1137 netdev_warn(ks->netdev, "Memory selftest not finished\n");
1138 return 0;
1139 }
1140
1141 if (rd & MBIR_TXMBFA) {
1142 netdev_err(ks->netdev, "TX memory selftest fails\n");
1143 ret |= 1;
1144 }
1145
1146 if (rd & MBIR_RXMBFA) {
1147 netdev_err(ks->netdev, "RX memory selftest fails\n");
1148 ret |= 2;
1149 }
1150
1151 netdev_info(ks->netdev, "the selftest passes\n");
1152 return ret;
1153 }
1154
/*
 * ks_setup - default hardware configuration of the QMU and RX filter
 * @ks: device state
 */
static void ks_setup(struct ks_net *ks)
{
	u16	w;

	/*
	 * Configure QMU Transmit
	 */

	/* Setup Transmit Frame Data Pointer Auto-Increment (TXFDPR) */
	ks_wrreg16(ks, KS_TXFDPR, TXFDPR_TXFPAI);

	/* Setup Receive Frame Data Pointer Auto-Increment */
	ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);

	/* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
	ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);

	/* Setup RxQ Command Control (RXQCR) */
	ks->rc_rxqcr = RXQCR_CMD_CNTL;
	ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);

	/*
	 * Force half duplex in the PHY control register — presumably a
	 * workaround for switches that fall back to half duplex when
	 * autonegotiation fails; TODO confirm rationale.
	 */
	w = ks_rdreg16(ks, KS_P1MBCR);
	w &= ~BMCR_FULLDPLX;
	ks_wrreg16(ks, KS_P1MBCR, w);

	/* TX: flow control, padding, CRC generation, IP checksum */
	w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
	ks_wrreg16(ks, KS_TXCR, w);

	/* RX: flow control, broadcast/unicast/multicast, IP checksum */
	w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;

	if (ks->promiscuous)         /* bPromiscuous */
		w |= (RXCR1_RXAE | RXCR1_RXINVF);
	else if (ks->all_mcast) /* Multicast address passed mode */
		w |= (RXCR1_RXAE | RXCR1_RXMAFMA | RXCR1_RXPAFMA);
	else                                   /* Normal mode */
		w |= RXCR1_RXPAFMA;

	ks_wrreg16(ks, KS_RXCR1, w);
}
1200
1201
/*
 * ks_setup_int - acknowledge stale interrupts and select sources
 * @ks: device state
 *
 * Caches the desired IER mask in rc_ier; the mask is written to the
 * hardware later by ks_enable_int().
 */
static void ks_setup_int(struct ks_net *ks)
{
	ks->rc_ier = 0x00;
	/* Clear the interrupts status of the hardware. */
	ks_wrreg16(ks, KS_ISR, 0xffff);

	/* Enables the interrupts of the hardware. */
	ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
}
1211
/*
 * ks_hw_init - software init: allocate the RX header cache, set defaults
 * @ks: device state
 *
 * Returns true on success, false if the header cache allocation
 * failed (boolean-style return, not an errno — callers must treat a
 * false return as fatal since the RX path dereferences the cache).
 */
static int ks_hw_init(struct ks_net *ks)
{
#define	MHEADER_SIZE	(sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
	ks->promiscuous = 0;
	ks->all_mcast = 0;
	ks->mcast_lst_size = 0;

	/* cache for up to MAX_RECV_FRAMES RX frame headers per IRQ */
	ks->frame_head_info = devm_kmalloc(&ks->pdev->dev, MHEADER_SIZE,
					   GFP_KERNEL);
	if (!ks->frame_head_info)
		return false;

	ks_set_mac(ks, KS_DEFAULT_MAC_ADDRESS);
	return true;
}
1227
#if defined(CONFIG_OF)
/* Devicetree match table: binds on "micrel,ks8851-mll". */
static const struct of_device_id ks8851_ml_dt_ids[] = {
	{ .compatible = "micrel,ks8851-mll" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
#endif
1235
1236 static int ks8851_probe(struct platform_device *pdev)
1237 {
1238 int err;
1239 struct net_device *netdev;
1240 struct ks_net *ks;
1241 u16 id, data;
1242 const char *mac;
1243
1244 netdev = alloc_etherdev(sizeof(struct ks_net));
1245 if (!netdev)
1246 return -ENOMEM;
1247
1248 SET_NETDEV_DEV(netdev, &pdev->dev);
1249
1250 ks = netdev_priv(netdev);
1251 ks->netdev = netdev;
1252
1253 ks->hw_addr = devm_platform_ioremap_resource(pdev, 0);
1254 if (IS_ERR(ks->hw_addr)) {
1255 err = PTR_ERR(ks->hw_addr);
1256 goto err_free;
1257 }
1258
1259 ks->hw_addr_cmd = devm_platform_ioremap_resource(pdev, 1);
1260 if (IS_ERR(ks->hw_addr_cmd)) {
1261 err = PTR_ERR(ks->hw_addr_cmd);
1262 goto err_free;
1263 }
1264
1265 err = ks_check_endian(ks);
1266 if (err)
1267 goto err_free;
1268
1269 netdev->irq = platform_get_irq(pdev, 0);
1270
1271 if ((int)netdev->irq < 0) {
1272 err = netdev->irq;
1273 goto err_free;
1274 }
1275
1276 ks->pdev = pdev;
1277
1278 mutex_init(&ks->lock);
1279 spin_lock_init(&ks->statelock);
1280
1281 netdev->netdev_ops = &ks_netdev_ops;
1282 netdev->ethtool_ops = &ks_ethtool_ops;
1283
1284
1285 ks->mii.dev = netdev;
1286 ks->mii.phy_id = 1,
1287 ks->mii.phy_id_mask = 1;
1288 ks->mii.reg_num_mask = 0xf;
1289 ks->mii.mdio_read = ks_phy_read;
1290 ks->mii.mdio_write = ks_phy_write;
1291
1292 netdev_info(netdev, "message enable is %d\n", msg_enable);
1293
1294 ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1295 NETIF_MSG_PROBE |
1296 NETIF_MSG_LINK));
1297 ks_read_config(ks);
1298
1299
1300 if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1301 netdev_err(netdev, "failed to read device ID\n");
1302 err = -ENODEV;
1303 goto err_free;
1304 }
1305
1306 if (ks_read_selftest(ks)) {
1307 netdev_err(netdev, "failed to read device ID\n");
1308 err = -ENODEV;
1309 goto err_free;
1310 }
1311
1312 err = register_netdev(netdev);
1313 if (err)
1314 goto err_free;
1315
1316 platform_set_drvdata(pdev, netdev);
1317
1318 ks_soft_reset(ks, GRR_GSR);
1319 ks_hw_init(ks);
1320 ks_disable_qmu(ks);
1321 ks_setup(ks);
1322 ks_setup_int(ks);
1323
1324 data = ks_rdreg16(ks, KS_OBCR);
1325 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);
1326
1327
1328 if (pdev->dev.of_node) {
1329 mac = of_get_mac_address(pdev->dev.of_node);
1330 if (!IS_ERR(mac))
1331 ether_addr_copy(ks->mac_addr, mac);
1332 } else {
1333 struct ks8851_mll_platform_data *pdata;
1334
1335 pdata = dev_get_platdata(&pdev->dev);
1336 if (!pdata) {
1337 netdev_err(netdev, "No platform data\n");
1338 err = -ENODEV;
1339 goto err_pdata;
1340 }
1341 memcpy(ks->mac_addr, pdata->mac_addr, ETH_ALEN);
1342 }
1343 if (!is_valid_ether_addr(ks->mac_addr)) {
1344
1345 eth_random_addr(ks->mac_addr);
1346 netdev_info(netdev, "Using random mac address\n");
1347 }
1348 netdev_info(netdev, "Mac address is: %pM\n", ks->mac_addr);
1349
1350 memcpy(netdev->dev_addr, ks->mac_addr, ETH_ALEN);
1351
1352 ks_set_mac(ks, netdev->dev_addr);
1353
1354 id = ks_rdreg16(ks, KS_CIDER);
1355
1356 netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1357 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1358 return 0;
1359
1360 err_pdata:
1361 unregister_netdev(netdev);
1362 err_free:
1363 free_netdev(netdev);
1364 return err;
1365 }
1366
/* Platform unbind: tear down in reverse order of probe. */
static int ks8851_remove(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);

	unregister_netdev(netdev);
	free_netdev(netdev);
	return 0;
}
1376
/* Platform driver glue: binds by name or via the OF match table above. */
static struct platform_driver ks8851_platform_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table	= of_match_ptr(ks8851_ml_dt_ids),
	},
	.probe = ks8851_probe,
	.remove = ks8851_remove,
};

module_platform_driver(ks8851_platform_driver);

MODULE_DESCRIPTION("KS8851 MLL Network driver");
MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
MODULE_LICENSE("GPL");
/* "message" seeds the netif msg_enable bitmask (see ks8851_probe). */
module_param_named(message, msg_enable, int, 0);
MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
1393