This source file includes following definitions.
- e1000e_get_bus_info_pcie
- e1000_set_lan_id_multi_port_pcie
- e1000_set_lan_id_single_port
- e1000_clear_vfta_generic
- e1000_write_vfta_generic
- e1000e_init_rx_addrs
- e1000_check_alt_mac_addr_generic
- e1000e_rar_get_count_generic
- e1000e_rar_set_generic
- e1000_hash_mc_addr
- e1000e_update_mc_addr_list_generic
- e1000e_clear_hw_cntrs_base
- e1000e_check_for_copper_link
- e1000e_check_for_fiber_link
- e1000e_check_for_serdes_link
- e1000_set_default_fc_generic
- e1000e_setup_link_generic
- e1000_commit_fc_settings_generic
- e1000_poll_fiber_serdes_link_generic
- e1000e_setup_fiber_serdes_link
- e1000e_config_collision_dist_generic
- e1000e_set_fc_watermarks
- e1000e_force_mac_fc
- e1000e_config_fc_after_link_up
- e1000e_get_speed_and_duplex_copper
- e1000e_get_speed_and_duplex_fiber_serdes
- e1000e_get_hw_semaphore
- e1000e_put_hw_semaphore
- e1000e_get_auto_rd_done
- e1000e_valid_led_default
- e1000e_id_led_init_generic
- e1000e_setup_led_generic
- e1000e_cleanup_led_generic
- e1000e_blink_led_generic
- e1000e_led_on_generic
- e1000e_led_off_generic
- e1000e_set_pcie_no_snoop
- e1000e_disable_pcie_master
- e1000e_reset_adaptive
- e1000e_update_adaptive
1
2
3
4 #include "e1000.h"
5
6
7
8
9
10
11
12
13
/**
 *  e1000e_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Reads the negotiated PCIe link width from the device's PCIe capability
 *  and stores it in hw->bus.width, then calls the MAC-specific set_lan_id
 *  callback to record which LAN function this device represents.
 *
 *  Returns 0 always; an absent PCIe capability only leaves the width
 *  marked as unknown.
 */
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_bus_info *bus = &hw->bus;
	struct e1000_adapter *adapter = hw->adapter;
	u16 pcie_link_status, cap_offset;

	cap_offset = adapter->pdev->pcie_cap;
	if (!cap_offset) {
		/* No PCIe capability: link width cannot be determined */
		bus->width = e1000_bus_width_unknown;
	} else {
		/* Extract the negotiated link width field from the PCIe
		 * Link Status register.
		 */
		pci_read_config_word(adapter->pdev,
				     cap_offset + PCIE_LINK_STATUS,
				     &pcie_link_status);
		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCIE_LINK_WIDTH_MASK) >>
						    PCIE_LINK_WIDTH_SHIFT);
	}

	mac->ops.set_lan_id(hw);

	return 0;
}
37
38
39
40
41
42
43
44
45
/**
 *  e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
 *  @hw: pointer to the HW structure
 *
 *  Determines the LAN function id by reading the function-number field of
 *  the device STATUS register and stores it in hw->bus.func.
 */
void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	u32 reg;

	/* The function number is reported directly by the hardware in the
	 * STATUS register.
	 */
	reg = er32(STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
}
57
58
59
60
61
62
63
64 void e1000_set_lan_id_single_port(struct e1000_hw *hw)
65 {
66 struct e1000_bus_info *bus = &hw->bus;
67
68 bus->func = 0;
69 }
70
71
72
73
74
75
76
77
/**
 *  e1000_clear_vfta_generic - Clear VLAN filter table array
 *  @hw: pointer to the HW structure
 *
 *  Zeroes every entry of the VLAN filter table array, flushing after each
 *  write so the hardware sees each individual register update.
 */
void e1000_clear_vfta_generic(struct e1000_hw *hw)
{
	u32 offset;

	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
		e1e_flush();
	}
}
87
88
89
90
91
92
93
94
95
96
/**
 *  e1000_write_vfta_generic - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes @value at @offset in the VLAN filter table array and flushes
 *  so the write reaches the hardware immediately.
 */
void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
{
	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
	e1e_flush();
}
102
103
104
105
106
107
108
109
110
111
/**
 *  e1000e_init_rx_addrs - Initialize receive addresses
 *  @hw: pointer to the HW structure
 *  @rar_count: number of receive address registers available
 *
 *  Programs the device MAC address (hw->mac.addr) into receive address
 *  register 0, then clears the remaining RAR entries by writing an
 *  all-zero address to each.
 */
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;
	u8 mac_addr[ETH_ALEN] = { 0 };

	/* Setup the receive address */
	e_dbg("Programming MAC Address into RAR[0]\n");

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_entry_count - 1) receive addresses */
	e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
	for (i = 1; i < rar_count; i++)
		hw->mac.ops.rar_set(hw, mac_addr, i);
}
127
128
129
130
131
132
133
134
135
136
137
138
139
/**
 *  e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the NVM for an alternate MAC address and, if a valid one is
 *  found, programs it into RAR[0] so it overrides the factory address.
 *  Silently does nothing (returns 0) when no alternate address is
 *  present, the device is an 82573, or the address has the multicast
 *  bit set.  Returns a negative error code only on NVM read failure.
 */
s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* Read the compatibility word; only the read status is used here,
	 * the value itself is discarded.
	 */
	ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
	if (ret_val)
		return ret_val;

	/* 82573 is skipped entirely - alternate MAC address handling is
	 * not applied on that device.
	 */
	if (hw->mac.type == e1000_82573)
		return 0;

	ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				 &nvm_alt_mac_addr_offset);
	if (ret_val) {
		e_dbg("NVM Read Error\n");
		return ret_val;
	}

	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no alternate MAC address in the NVM */
		return 0;

	/* LAN function 1 uses a second address block after LAN 0's */
	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	for (i = 0; i < ETH_ALEN; i += 2) {
		/* Each NVM word holds two address bytes, low byte first */
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
		if (ret_val) {
			e_dbg("NVM Read Error\n");
			return ret_val;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		return 0;
	}

	/* Program the alternate address into RAR[0]; rar_set is expected
	 * to also keep any driver-side copy of the address in sync.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

	return 0;
}
195
196 u32 e1000e_rar_get_count_generic(struct e1000_hw *hw)
197 {
198 return hw->mac.rar_entry_count;
199 }
200
201
202
203
204
205
206
207
208
209
/**
 *  e1000e_rar_set_generic - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: 6-byte MAC address to program
 *  @index: receive address register to write
 *
 *  Packs @addr into the RAL/RAH register pair at @index.  The address
 *  valid bit is set only for a non-zero address, so writing all zeros
 *  disables the entry.  Returns 0.
 */
int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));

	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Write low first, flushing between writes, so the entry is never
	 * momentarily valid with a half-updated address.
	 */
	ew32(RAL(index), rar_low);
	e1e_flush();
	ew32(RAH(index), rar_high);
	e1e_flush();

	return 0;
}
237
238
239
240
241
242
243
244
245
246 static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
247 {
248 u32 hash_value, hash_mask;
249 u8 bit_shift = 0;
250
251
252 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
253
254
255
256
257 while (hash_mask >> bit_shift != 0xFF)
258 bit_shift++;
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285 switch (hw->mac.mc_filter_type) {
286 default:
287 case 0:
288 break;
289 case 1:
290 bit_shift += 1;
291 break;
292 case 2:
293 bit_shift += 2;
294 break;
295 case 3:
296 bit_shift += 4;
297 break;
298 }
299
300 hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
301 (((u16)mc_addr[5]) << bit_shift)));
302
303 return hash_value;
304 }
305
306
307
308
309
310
311
312
313
314
/**
 *  e1000e_update_mc_addr_list_generic - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program (ETH_ALEN each)
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Rebuilds the multicast table array (MTA) from @mc_addr_list: hashes
 *  each address into a register/bit position, accumulates the bits in a
 *  software shadow, then writes the whole shadow to hardware.
 */
void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
					u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32)i < mc_addr_count; i++) {
		hash_value = e1000_hash_mc_addr(hw, mc_addr_list);

		/* Upper hash bits pick the MTA register, low 5 the bit */
		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
		mc_addr_list += (ETH_ALEN);
	}

	/* replace the entire MTA table; written high register first */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
	e1e_flush();
}
340
341
342
343
344
345
346
/**
 *  e1000e_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Reads the full set of base statistics registers, discarding the
 *  values.  The reads themselves are the point: these counters are
 *  cleared by reading them (clear-on-read semantics - confirm against
 *  the device datasheet for any new MAC type).
 */
void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	er32(CRCERRS);
	er32(SYMERRS);
	er32(MPC);
	er32(SCC);
	er32(ECOL);
	er32(MCC);
	er32(LATECOL);
	er32(COLC);
	er32(DC);
	er32(SEC);
	er32(RLEC);
	er32(XONRXC);
	er32(XONTXC);
	er32(XOFFRXC);
	er32(XOFFTXC);
	er32(FCRUC);
	er32(GPRC);
	er32(BPRC);
	er32(MPRC);
	er32(GPTC);
	er32(GORCL);
	er32(GORCH);
	er32(GOTCL);
	er32(GOTCH);
	er32(RNBC);
	er32(RUC);
	er32(RFC);
	er32(ROC);
	er32(RJC);
	er32(TORL);
	er32(TORH);
	er32(TOTL);
	er32(TOTH);
	er32(TPR);
	er32(TPT);
	er32(MPTC);
	er32(BPTC);
}
387
388
389
390
391
392
393
394
395
/**
 *  e1000e_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks whether link has been established on a copper PHY.  If link
 *  just came up, configures the collision distance and flow control.
 *  The get_link_status flag acts as a latch: it is cleared optimistically
 *  up front and re-set on the exit path if link is not actually up, so
 *  the next poll re-checks the PHY.
 *
 *  Returns 0 on success, -E1000_ERR_CONFIG when autoneg is disabled
 *  (forced speed/duplex is not handled here), or a PHY access error.
 */
s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status)
		return 0;
	mac->get_link_status = false;

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
	if (ret_val || !link)
		goto out;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	e1000e_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg)
		return -E1000_ERR_CONFIG;

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration.  So we only need to
	 * configure Collision Distance in the MAC.
	 */
	mac->ops.config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = e1000e_config_fc_after_link_up(hw);
	if (ret_val)
		e_dbg("Error configuring flow control\n");

	return ret_val;

out:
	/* Link not up (or PHY read failed): re-arm the latch so the next
	 * check queries the PHY again.
	 */
	mac->get_link_status = true;
	return ret_val;
}
451
452
453
454
455
456
457
458
/**
 *  e1000e_check_for_fiber_link - Check for link (Fiber)
 *  @hw: pointer to the HW structure
 *
 *  Checks for link on fiber media.  If the link partner is not sending
 *  configuration ordered sets (/C/) and autoneg has already had one
 *  chance, autoneg is disabled and link is forced.  Conversely, if we
 *  are forcing link but now receive /C/, autoneg is re-enabled.
 *
 *  Returns 0, or an error from flow control configuration.
 */
s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	s32 ret_val;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	/* If we don't have link (auto-negotiation failed or link partner
	 * cannot auto-negotiate), the cable is plugged in (we have signal),
	 * and our link partner is not trying to auto-negotiate with us (we
	 * are receiving idles or data), we need to force link up. We also
	 * need to give auto-negotiation time to complete, in case the cable
	 * was just plugged in. The autoneg_failed flag does this.
	 */
	if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) &&
	    !(rxcw & E1000_RXCW_C)) {
		if (!mac->autoneg_failed) {
			/* First failure: give autoneg another poll cycle */
			mac->autoneg_failed = true;
			return 0;
		}
		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");

		/* Disable auto-negotiation in the TXCW register */
		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

		/* Force link-up and also force full-duplex. */
		ctrl = er32(CTRL);
		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
		ew32(CTRL, ctrl);

		/* Configure Flow Control after forcing link up. */
		ret_val = e1000e_config_fc_after_link_up(hw);
		if (ret_val) {
			e_dbg("Error configuring flow control\n");
			return ret_val;
		}
	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
		/* If we are forcing link and we are receiving /C/ ordered
		 * sets, re-enable auto-negotiation in the TXCW register
		 * and disable forced link in the Device Control register
		 * in an attempt to auto-negotiate with our link partner.
		 */
		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
		ew32(TXCW, mac->txcw);
		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

		mac->serdes_has_link = true;
	}

	return 0;
}
516
517
518
519
520
521
522
523
/**
 *  e1000e_check_for_serdes_link - Check for link (Serdes)
 *  @hw: pointer to the HW structure
 *
 *  Checks for link on internal serdes media and updates
 *  mac->serdes_has_link accordingly.  Handles three situations:
 *  forcing link when the partner does not send /C/ ordered sets,
 *  re-enabling autoneg when /C/ reappears while link is forced, and
 *  validating link via RXCW sync/invalid bits in both the forced and
 *  the autoneg case.
 *
 *  Returns 0, or an error from flow control configuration.
 */
s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 rxcw;
	u32 ctrl;
	u32 status;
	s32 ret_val;

	ctrl = er32(CTRL);
	status = er32(STATUS);
	rxcw = er32(RXCW);

	/* If we don't have link (auto-negotiation failed or link partner
	 * cannot auto-negotiate), and our link partner is not trying to
	 * auto-negotiate with us (we are receiving idles or data),
	 * we need to force link up. We also need to give auto-negotiation
	 * time to complete.
	 */
	if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) {
		if (!mac->autoneg_failed) {
			/* First failure: give autoneg another poll cycle */
			mac->autoneg_failed = true;
			return 0;
		}
		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");

		/* Disable auto-negotiation in the TXCW register */
		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));

		/* Force link-up and also force full-duplex. */
		ctrl = er32(CTRL);
		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
		ew32(CTRL, ctrl);

		/* Configure Flow Control after forcing link up. */
		ret_val = e1000e_config_fc_after_link_up(hw);
		if (ret_val) {
			e_dbg("Error configuring flow control\n");
			return ret_val;
		}
	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
		/* If we are forcing link and we are receiving /C/ ordered
		 * sets, re-enable auto-negotiation and disable forced link
		 * to attempt autoneg with the partner.
		 */
		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
		ew32(TXCW, mac->txcw);
		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));

		mac->serdes_has_link = true;
	} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
		/* Forced-link mode (autoneg disabled in TXCW): validate the
		 * receive path directly.  A fresh RXCW read is taken after a
		 * short delay to let the bits settle.
		 */
		usleep_range(10, 20);
		rxcw = er32(RXCW);
		if (rxcw & E1000_RXCW_SYNCH) {
			if (!(rxcw & E1000_RXCW_IV)) {
				mac->serdes_has_link = true;
				e_dbg("SERDES: Link up - forced.\n");
			}
		} else {
			mac->serdes_has_link = false;
			e_dbg("SERDES: Link down - force failed.\n");
		}
	}

	if (E1000_TXCW_ANE & er32(TXCW)) {
		/* Autoneg enabled: link is valid only if LU is set and the
		 * received codewords are in sync and not marked invalid.
		 */
		status = er32(STATUS);
		if (status & E1000_STATUS_LU) {
			/* SYNCH bit and IV bit are sticky. */
			usleep_range(10, 20);
			rxcw = er32(RXCW);
			if (rxcw & E1000_RXCW_SYNCH) {
				if (!(rxcw & E1000_RXCW_IV)) {
					mac->serdes_has_link = true;
					e_dbg("SERDES: Link up - autoneg completed successfully.\n");
				} else {
					mac->serdes_has_link = false;
					e_dbg("SERDES: Link down - invalid codewords detected in autoneg.\n");
				}
			} else {
				mac->serdes_has_link = false;
				e_dbg("SERDES: Link down - no sync.\n");
			}
		} else {
			mac->serdes_has_link = false;
			e_dbg("SERDES: Link down - autoneg failed\n");
		}
	}

	return 0;
}
620
621
622
623
624
625
626
627
628 static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
629 {
630 s32 ret_val;
631 u16 nvm_data;
632
633
634
635
636
637
638
639
640
641 ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
642
643 if (ret_val) {
644 e_dbg("NVM Read Error\n");
645 return ret_val;
646 }
647
648 if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
649 hw->fc.requested_mode = e1000_fc_none;
650 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
651 hw->fc.requested_mode = e1000_fc_tx_pause;
652 else
653 hw->fc.requested_mode = e1000_fc_full;
654
655 return 0;
656 }
657
658
659
660
661
662
663
664
665
666
667
/**
 *  e1000e_setup_link_generic - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, configures the
 *  physical interface via the MAC-specific callback, then programs the
 *  flow control address/type/timer registers and the watermarks.
 *
 *  Returns 0 on success or an error from NVM access, the physical
 *  interface setup, or the watermark configuration.
 */
s32 e1000e_setup_link_generic(struct e1000_hw *hw)
{
	s32 ret_val;

	/* In the case of the phy reset being blocked, we already have
	 * link.  We do not need to set it up again.
	 */
	if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
		return 0;

	/* If requested flow control is set to default, set flow control
	 * from the EEPROM/NVM.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = e1000_set_default_fc_generic(hw);
		if (ret_val)
			return ret_val;
	}

	/* Save off the requested flow control mode for use later.  Depending
	 * on the link partner's capabilities, we may or may not use this mode.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		return ret_val;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not cause the adapter to
	 * generate flow control frames on its own.
	 */
	e_dbg("Initializing the Flow Control address, type and timer regs\n");
	ew32(FCT, FLOW_CONTROL_TYPE);
	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);

	ew32(FCTTV, hw->fc.pause_time);

	return e1000e_set_fc_watermarks(hw);
}
713
714
715
716
717
718
719
720
721 static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
722 {
723 struct e1000_mac_info *mac = &hw->mac;
724 u32 txcw;
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742 switch (hw->fc.current_mode) {
743 case e1000_fc_none:
744
745 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
746 break;
747 case e1000_fc_rx_pause:
748
749
750
751
752
753
754
755 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
756 break;
757 case e1000_fc_tx_pause:
758
759
760
761 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
762 break;
763 case e1000_fc_full:
764
765
766
767 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
768 break;
769 default:
770 e_dbg("Flow control param set incorrectly\n");
771 return -E1000_ERR_CONFIG;
772 }
773
774 ew32(TXCW, txcw);
775 mac->txcw = txcw;
776
777 return 0;
778 }
779
780
781
782
783
784
785
786
/**
 *  e1000_poll_fiber_serdes_link_generic - Poll for link up
 *  @hw: pointer to the HW structure
 *
 *  Polls the STATUS register for link-up for up to FIBER_LINK_UP_LIMIT
 *  iterations (10ms each).  If link never comes up, marks autoneg as
 *  failed and invokes check_for_link so that link can be forced.
 *
 *  Returns 0 on success or the error from check_for_link.
 */
static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	u32 i, status;
	s32 ret_val;

	/* If we have a signal (the cable is plugged in, or assumed true for
	 * serdes media) then poll for a "Link-Up" indication in the Device
	 * Status Register.  Time-out if a link isn't seen in 500 milliseconds
	 * seconds (Auto-negotiation should complete in less than 500
	 * milliseconds even if the other end is doing it in SW).
	 */
	for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
		usleep_range(10000, 11000);
		status = er32(STATUS);
		if (status & E1000_STATUS_LU)
			break;
	}
	if (i == FIBER_LINK_UP_LIMIT) {
		e_dbg("Never got a valid link from auto-neg!!!\n");
		mac->autoneg_failed = true;
		/* AutoNeg failed to achieve a link, so we'll call
		 * mac->check_for_link. This routine will force the
		 * link up if we detect a signal. This will allow us to
		 * communicate with non-autonegotiating link partners.
		 */
		ret_val = mac->ops.check_for_link(hw);
		if (ret_val) {
			e_dbg("Error while checking for link\n");
			return ret_val;
		}
		mac->autoneg_failed = false;
	} else {
		mac->autoneg_failed = false;
		e_dbg("Valid Link Found\n");
	}

	return 0;
}
826
827
828
829
830
831
832
833
/**
 *  e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
 *  @hw: pointer to the HW structure
 *
 *  Configures collision distance and flow control, takes the link out
 *  of reset, and - if a signal is present (or media is serdes, where
 *  signal is assumed) - polls for link to come up via autoneg.
 *
 *  Returns 0 on success, or an error from flow control commit or from
 *  the link poll.  Note that ret_val is 0 when the "No signal detected"
 *  path is taken, since the earlier commit succeeded.
 */
s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	ctrl = er32(CTRL);

	/* Take the link out of reset */
	ctrl &= ~E1000_CTRL_LRST;

	hw->mac.ops.config_collision_dist(hw);

	ret_val = e1000_commit_fc_settings_generic(hw);
	if (ret_val)
		return ret_val;

	/* Since auto-negotiation is enabled, take the link out of reset (the
	 * link will be in reset, because we previously reset the chip). This
	 * will restart auto-negotiation.  If auto-negotiation is successful
	 * then the link-up status bit will be set and the flow control enable
	 * bits (RFCE and TFCE) will be set according to their negotiated value.
	 */
	e_dbg("Auto-negotiation enabled\n");

	ew32(CTRL, ctrl);
	e1e_flush();
	usleep_range(1000, 2000);

	/* For these adapters, the SW definable pin 1 is set when the optics
	 * detect a signal.  If we have a signal, then poll for a "Link-Up"
	 * indication.
	 */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
	} else {
		e_dbg("No signal detected\n");
	}

	return ret_val;
}
875
876
877
878
879
880
881
882
/**
 *  e1000e_config_collision_dist_generic - Configure collision distance
 *  @hw: pointer to the HW structure
 *
 *  Programs the standard collision distance into the COLD field of the
 *  transmit control register and flushes the write.
 */
void e1000e_config_collision_dist_generic(struct e1000_hw *hw)
{
	u32 tctl;

	tctl = er32(TCTL);

	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	ew32(TCTL, tctl);
	e1e_flush();
}
895
896
897
898
899
900
901
902
903
/**
 *  e1000e_set_fc_watermarks - Set flow control high/low watermarks
 *  @hw: pointer to the HW structure
 *
 *  Programs the receive-threshold high/low registers that trigger
 *  XON/XOFF PAUSE frames.  Watermarks are only programmed when the
 *  current flow control mode includes transmitting PAUSE frames
 *  (note the bitwise test: e1000_fc_tx_pause and e1000_fc_full both
 *  have the tx_pause bit set); otherwise both are zeroed.
 *
 *  Returns 0.
 */
s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
{
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (hw->fc.current_mode & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= E1000_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	ew32(FCRTL, fcrtl);
	ew32(FCRTH, fcrth);

	return 0;
}
930
931
932
933
934
935
936
937
938
939
940
/**
 *  e1000e_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Forces the MAC's receive/transmit flow control enable bits (RFCE and
 *  TFCE in CTRL) to match hw->fc.current_mode, bypassing whatever the
 *  autoneg result programmed.  Used after link is forced or after flow
 *  control has been resolved in software.
 *
 *  Returns 0 on success, -E1000_ERR_CONFIG for an invalid mode.
 */
s32 e1000e_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;

	ctrl = er32(CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit an
	 * receive flow control.
	 *
	 * The "Case" statement below enables/disable flow control
	 * according to the "hw->fc.current_mode" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause
	 *          frames but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          frames but we do not receive pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
	 *  other:  No other values should be possible at this point.
	 */
	e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		e_dbg("Flow control param set incorrectly\n");
		return -E1000_ERR_CONFIG;
	}

	ew32(CTRL, ctrl);

	return 0;
}
990
991
992
993
994
995
996
997
998
999
1000
/**
 *  e1000e_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Resolves the final flow control mode once link is established.  When
 *  link was forced (or media is copper), the MAC flow control bits are
 *  forced directly.  When autoneg completed, the local and link-partner
 *  pause advertisements are compared (per the IEEE 802.3 Annex 28B
 *  resolution table) to pick the operating mode, which is then forced
 *  into the MAC.  Half duplex always resolves to no flow control.
 *
 *  Returns 0 on success or a PHY/register access error.
 */
s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_fiber ||
		    hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = e1000e_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = e1000e_force_mac_fc(hw);
	}

	if (ret_val) {
		e_dbg("Error forcing flow control settings\n");
		return ret_val;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg);
		if (ret_val)
			return ret_val;

		if (!(mii_status_reg & BMSR_ANEGCOMPLETE)) {
			e_dbg("Copper PHY and Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_nway_adv_reg);
		if (ret_val)
			return ret_val;
		ret_val = e1e_rphy(hw, MII_LPA, &mii_nway_lp_ability_reg);
		if (ret_val)
			return ret_val;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (PAUSE and ASM_DIR) and two bits in the Auto Negotiation
		 * Base Page Ability Register (also PAUSE and ASM_DIR)
		 * determine flow control for both the local device and the
		 * link partner.  The resolution below follows the standard
		 * 802.3 Annex 28B pause resolution table.
		 */

		/* Both sides advertise symmetric PAUSE: use full flow
		 * control if we requested it, otherwise rx-pause only.
		 */
		if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
		    (mii_nway_lp_ability_reg & LPA_PAUSE_CAP)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				e_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				e_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
			 (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
			 (mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
			 (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			e_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) &&
			 (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) &&
			 !(mii_nway_lp_ability_reg & LPA_PAUSE_CAP) &&
			 (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			e_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			e_dbg("Flow Control = NONE.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
		if (ret_val) {
			e_dbg("Error getting link speed and duplex\n");
			return ret_val;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = e1000e_force_mac_fc(hw);
		if (ret_val) {
			e_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner has
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes) &&
	    mac->autoneg) {
		/* Read the PCS_LSTS and check to see if AutoNeg
		 * has completed.
		 */
		pcs_status_reg = er32(PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			e_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto_Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = er32(PCS_ANADV);
		pcs_lp_ability_reg = er32(PCS_LPAB);

		/* Same 802.3 Annex 28B resolution as the copper case,
		 * using the PCS PAUSE/ASM_DIR advertisement bits.
		 */

		/* Both sides advertise symmetric PAUSE */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				e_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				e_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			e_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			e_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			e_dbg("Flow Control = NONE.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		pcs_ctrl_reg = er32(PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		ew32(PCS_LCTL, pcs_ctrl_reg);

		ret_val = e1000e_force_mac_fc(hw);
		if (ret_val) {
			e_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

	return 0;
}
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305 s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
1306 u16 *duplex)
1307 {
1308 u32 status;
1309
1310 status = er32(STATUS);
1311 if (status & E1000_STATUS_SPEED_1000)
1312 *speed = SPEED_1000;
1313 else if (status & E1000_STATUS_SPEED_100)
1314 *speed = SPEED_100;
1315 else
1316 *speed = SPEED_10;
1317
1318 if (status & E1000_STATUS_FD)
1319 *duplex = FULL_DUPLEX;
1320 else
1321 *duplex = HALF_DUPLEX;
1322
1323 e_dbg("%u Mbps, %s Duplex\n",
1324 *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
1325 *duplex == FULL_DUPLEX ? "Full" : "Half");
1326
1327 return 0;
1328 }
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339 s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw __always_unused
1340 *hw, u16 *speed, u16 *duplex)
1341 {
1342 *speed = SPEED_1000;
1343 *duplex = FULL_DUPLEX;
1344
1345 return 0;
1346 }
1347
1348
1349
1350
1351
1352
1353
/**
 *  e1000e_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquires the HW semaphore used to synchronize NVM/register access
 *  between software agents and firmware.  First waits for the SMBI bit
 *  to clear, then claims the software side by setting SWESMBI and
 *  reading it back.  Both phases time out after nvm.word_size + 1
 *  iterations.
 *
 *  Returns 0 on success, -E1000_ERR_NVM on timeout.
 */
s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = er32(SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		usleep_range(50, 100);
		i++;
	}

	if (i == timeout) {
		e_dbg("Driver can't access device - SMBI bit is set.\n");
		return -E1000_ERR_NVM;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = er32(SWSM);
		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (er32(SWSM) & E1000_SWSM_SWESMBI)
			break;

		usleep_range(50, 100);
	}

	if (i == timeout) {
		/* Release semaphores */
		e1000e_put_hw_semaphore(hw);
		e_dbg("Driver can't access the NVM\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}
1396
1397
1398
1399
1400
1401
1402
/**
 *  e1000e_put_hw_semaphore - Release hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Releases the HW semaphore by clearing both the SMBI and SWESMBI bits
 *  in the SWSM register.
 */
void e1000e_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = er32(SWSM);
	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
	ew32(SWSM, swsm);
}
1411
1412
1413
1414
1415
1416
1417
/**
 *  e1000e_get_auto_rd_done - Check for auto read completion
 *  @hw: pointer to the HW structure
 *
 *  Polls the EECD register until the hardware signals that its
 *  automatic read of the NVM has completed, sleeping ~1ms per
 *  iteration up to AUTO_READ_DONE_TIMEOUT tries.
 *
 *  Returns 0 on success, -E1000_ERR_RESET on timeout.
 */
s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (er32(EECD) & E1000_EECD_AUTO_RD)
			break;
		usleep_range(1000, 2000);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		e_dbg("Auto read by HW from NVM has not completed.\n");
		return -E1000_ERR_RESET;
	}

	return 0;
}
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445 s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
1446 {
1447 s32 ret_val;
1448
1449 ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
1450 if (ret_val) {
1451 e_dbg("NVM Read Error\n");
1452 return ret_val;
1453 }
1454
1455 if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
1456 *data = ID_LED_DEFAULT;
1457
1458 return 0;
1459 }
1460
1461
1462
1463
1464
1465
/**
 *  e1000e_id_led_init_generic - Initialize ID LED settings
 *  @hw: pointer to the HW structure
 *
 *  Reads the LED ID configuration from the NVM and pre-computes two
 *  LEDCTL register values: ledctl_mode1 (LED-off identify state) and
 *  ledctl_mode2 (LED-on identify state).  Each of the four LEDs is
 *  described by one nibble of the NVM word; the nibble selects whether
 *  that LED is forced on, forced off, or left at its default in each
 *  mode.
 *
 *  Returns 0 on success or an error from valid_led_default.
 */
s32 e1000e_id_led_init_generic(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
	if (ret_val)
		return ret_val;

	/* Snapshot the current LEDCTL as the baseline for both modes */
	mac->ledctl_default = er32(LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		/* One 4-bit field per LED; one byte of LEDCTL per LED */
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing - keep the hardware default */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing - keep the hardware default */
			break;
		}
	}

	return 0;
}
1524
1525
1526
1527
1528
1529
1530
1531
/**
 *  e1000e_setup_led_generic - Configure SW controllable LED
 *  @hw: pointer to the HW structure
 *
 *  Prepares the LED for software control.  On fiber, saves the current
 *  LEDCTL value and turns LED0 off; on copper, programs the pre-computed
 *  ledctl_mode1 value.  Refuses (-E1000_ERR_CONFIG) to run if a MAC-
 *  specific setup_led override is installed, since that override should
 *  be used instead.
 */
s32 e1000e_setup_led_generic(struct e1000_hw *hw)
{
	u32 ledctl;

	if (hw->mac.ops.setup_led != e1000e_setup_led_generic)
		return -E1000_ERR_CONFIG;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		ledctl = er32(LEDCTL);
		hw->mac.ledctl_default = ledctl;
		/* Turn off LED0 */
		ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK |
			    E1000_LEDCTL_LED0_MODE_MASK);
		ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
			   E1000_LEDCTL_LED0_MODE_SHIFT);
		ew32(LEDCTL, ledctl);
	} else if (hw->phy.media_type == e1000_media_type_copper) {
		ew32(LEDCTL, hw->mac.ledctl_mode1);
	}

	return 0;
}
1554
1555
1556
1557
1558
1559
1560
1561
/**
 *  e1000e_cleanup_led_generic - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Restores the LEDCTL register to the value saved before software
 *  took control of the LEDs.  Returns 0.
 */
s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
{
	ew32(LEDCTL, hw->mac.ledctl_default);
	return 0;
}
1567
1568
1569
1570
1571
1572
1573
/**
 *  e1000e_blink_led_generic - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Makes the LED(s) blink.  On fiber, LED0 is simply set to blink.  On
 *  other media the mode2 (LED-on) configuration is used as a base, and
 *  every LED that would be lit in that configuration (accounting for
 *  per-LED inversion) is switched to hardware blink instead.
 *
 *  Returns 0.
 */
s32 e1000e_blink_led_generic(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		    (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (LED_ON)
		 * (or "off" if inverted) in ledctl_mode2.  The blink
		 * logic in hardware only works when mode is set to "on",
		 * so it must be changed accordingly when the mode is
		 * "off" and inverted.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			/* Each LED occupies one byte of LEDCTL */
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	ew32(LEDCTL, ledctl_blink);

	return 0;
}
1612
1613
1614
1615
1616
1617
1618
/**
 *  e1000e_led_on_generic - Turn LED on
 *  @hw: pointer to the HW structure
 *
 *  Turns the LED on: fiber drives SW definable pin 0 low via CTRL,
 *  copper writes the pre-computed LED-on (mode2) LEDCTL value.
 *  Returns 0.
 */
s32 e1000e_led_on_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->phy.media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode2);
		break;
	default:
		break;
	}

	return 0;
}
1639
1640
1641
1642
1643
1644
1645
/**
 *  e1000e_led_off_generic - Turn LED off
 *  @hw: pointer to the HW structure
 *
 *  Turns the LED off: fiber drives SW definable pin 0 high via CTRL,
 *  copper writes the pre-computed LED-off (mode1) LEDCTL value.
 *  Returns 0.
 */
s32 e1000e_led_off_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->phy.media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}
1666
1667
1668
1669
1670
1671
1672
1673
/**
 *  e1000e_set_pcie_no_snoop - Set PCI-express capabilities
 *  @hw: pointer to the HW structure
 *  @no_snoop: bitmap of snoop events to enable
 *
 *  Replaces the no-snoop bits in the GCR register with @no_snoop.
 *  A zero @no_snoop leaves the register untouched.
 */
void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
{
	u32 gcr;

	if (no_snoop) {
		gcr = er32(GCR);
		gcr &= ~(PCIE_NO_SNOOP_ALL);
		gcr |= no_snoop;
		ew32(GCR, gcr);
	}
}
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
/**
 *  e1000e_disable_pcie_master - Disables PCI-express master access
 *  @hw: pointer to the HW structure
 *
 *  Requests that the device stop initiating PCIe master transactions
 *  (GIO master disable), then polls the STATUS register until the
 *  hardware confirms all outstanding requests have drained.
 *
 *  Returns 0 on success, -E1000_ERR_MASTER_REQUESTS_PENDING if
 *  requests are still outstanding after MASTER_DISABLE_TIMEOUT polls.
 */
s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;

	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	ew32(CTRL, ctrl);

	while (timeout) {
		if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		usleep_range(100, 200);
		timeout--;
	}

	if (!timeout) {
		e_dbg("Master requests are pending.\n");
		return -E1000_ERR_MASTER_REQUESTS_PENDING;
	}

	return 0;
}
1720
1721
1722
1723
1724
1725
1726
1727 void e1000e_reset_adaptive(struct e1000_hw *hw)
1728 {
1729 struct e1000_mac_info *mac = &hw->mac;
1730
1731 if (!mac->adaptive_ifs) {
1732 e_dbg("Not in Adaptive IFS mode!\n");
1733 return;
1734 }
1735
1736 mac->current_ifs_val = 0;
1737 mac->ifs_min_val = IFS_MIN;
1738 mac->ifs_max_val = IFS_MAX;
1739 mac->ifs_step_size = IFS_STEP;
1740 mac->ifs_ratio = IFS_RATIO;
1741
1742 mac->in_ifs_mode = false;
1743 ew32(AIT, 0);
1744 }
1745
1746
1747
1748
1749
1750
1751
1752
/**
 *  e1000e_update_adaptive - Update Adaptive Interframe Spacing
 *  @hw: pointer to the HW structure
 *
 *  Adjusts the Adaptive IFS Throttle based on recent collision and
 *  transmit counts: when collisions are high relative to transmits, the
 *  IFS value is ramped up (bounded by ifs_max_val); when they drop, IFS
 *  is reset to zero.  A no-op when adaptive IFS is not enabled.
 */
void e1000e_update_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	if (!mac->adaptive_ifs) {
		e_dbg("Not in Adaptive IFS mode!\n");
		return;
	}

	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
		/* Collisions high enough to justify throttling, but only
		 * act when there was meaningful transmit activity.
		 */
		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
			mac->in_ifs_mode = true;
			if (mac->current_ifs_val < mac->ifs_max_val) {
				/* Start at the minimum, then step up */
				if (!mac->current_ifs_val)
					mac->current_ifs_val = mac->ifs_min_val;
				else
					mac->current_ifs_val +=
					    mac->ifs_step_size;
				ew32(AIT, mac->current_ifs_val);
			}
		}
	} else {
		/* Collisions subsided: drop back out of IFS mode */
		if (mac->in_ifs_mode &&
		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
			mac->current_ifs_val = 0;
			mac->in_ifs_mode = false;
			ew32(AIT, 0);
		}
	}
}