This source file includes the following definitions:
- igb_get_bus_info_pcie
- igb_clear_vfta
- igb_write_vfta
- igb_init_rx_addrs
- igb_find_vlvf_slot
- igb_vfta_set
- igb_check_alt_mac_addr
- igb_rar_set
- igb_mta_set
- igb_hash_mc_addr
- igb_update_mc_addr_list
- igb_clear_hw_cntrs_base
- igb_check_for_copper_link
- igb_setup_link
- igb_config_collision_dist
- igb_set_fc_watermarks
- igb_set_default_fc
- igb_force_mac_fc
- igb_config_fc_after_link_up
- igb_get_speed_and_duplex_copper
- igb_get_hw_semaphore
- igb_put_hw_semaphore
- igb_get_auto_rd_done
- igb_valid_led_default
- igb_id_led_init
- igb_cleanup_led
- igb_blink_led
- igb_led_off
- igb_disable_pcie_master
- igb_validate_mdi_setting
- igb_write_8bit_ctrl_reg
- igb_enable_mng_pass_thru
#include <linux/if_ether.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "e1000_mac.h"

#include "igb.h"

static s32 igb_set_default_fc(struct e1000_hw *hw);
static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
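
/**
 *  igb_get_bus_info_pcie - Get PCIe bus information
 *  @hw: pointer to the HW structure
 *
 *  Determines and stores the system bus information for a particular
 *  network interface: bus speed, bus width, type (PCIe), and PCIe function.
 **/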
s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
{
	struct e1000_bus_info *bus = &hw->bus;
	s32 ret_val;
	u32 reg;
	u16 pcie_link_status;

	bus->type = e1000_bus_type_pci_express;

	ret_val = igb_read_pcie_cap_reg(hw,
					PCI_EXP_LNKSTA,
					&pcie_link_status);
	if (ret_val) {
		bus->width = e1000_bus_width_unknown;
		bus->speed = e1000_bus_speed_unknown;
	} else {
		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
		case PCI_EXP_LNKSTA_CLS_2_5GB:
			bus->speed = e1000_bus_speed_2500;
			break;
		case PCI_EXP_LNKSTA_CLS_5_0GB:
			bus->speed = e1000_bus_speed_5000;
			break;
		default:
			bus->speed = e1000_bus_speed_unknown;
			break;
		}

		bus->width = (enum e1000_bus_width)((pcie_link_status &
						     PCI_EXP_LNKSTA_NLW) >>
						     PCI_EXP_LNKSTA_NLW_SHIFT);
	}

	reg = rd32(E1000_STATUS);
	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;

	return 0;
}
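
/**
 *  igb_clear_vfta - Clear VLAN filter table
 *  @hw: pointer to the HW structure
 *
 *  Clears the register array which contains the VLAN filter table by
 *  setting all the values to 0.
 **/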
void igb_clear_vfta(struct e1000_hw *hw)
{
	u32 offset;

	for (offset = E1000_VLAN_FILTER_TBL_SIZE; offset--;)
		hw->mac.ops.write_vfta(hw, offset, 0);
}
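
/**
 *  igb_write_vfta - Write value to VLAN filter table
 *  @hw: pointer to the HW structure
 *  @offset: register offset in VLAN filter table
 *  @value: register value written to VLAN filter table
 *
 *  Writes value at the given offset in the register array which stores
 *  the VLAN filter table, and mirrors it into the driver's shadow copy.
 **/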
void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
{
	struct igb_adapter *adapter = hw->back;

	array_wr32(E1000_VFTA, offset, value);
	wrfl();

	adapter->shadow_vfta[offset] = value;
}
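
/**
 *  igb_init_rx_addrs - Initialize receive address registers
 *  @hw: pointer to the HW structure
 *  @rar_count: receive address registers to initialize
 *
 *  Sets up the receive address registers by setting the base receive address
 *  register (RAR[0]) to the permanent MAC address and clearing the rest.
 **/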
void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
{
	u32 i;
	u8 mac_addr[ETH_ALEN] = {0};

	/* Setup the receive address */
	hw_dbg("Programming MAC Address into RAR[0]\n");

	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);

	/* Zero out the other (rar_count - 1) receive addresses */
	hw_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
	for (i = 1; i < rar_count; i++)
		hw->mac.ops.rar_set(hw, mac_addr, i);
}
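
/**
 *  igb_find_vlvf_slot - find the VLAN id or the first empty slot
 *  @hw: pointer to hardware structure
 *  @vlan: VLAN id to write to VLAN filter
 *  @vlvf_bypass: true to find vlanid only, false returns first empty slot if
 *		  vlanid not found
 *
 *  Returns the VLVF index of a matching VLAN id, or, if no match is found,
 *  the index of the first empty slot (unless @vlvf_bypass is set, in which
 *  case -E1000_ERR_NO_SPACE is returned instead of an empty slot).
 **/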
static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
{
	s32 regindex, first_empty_slot;
	u32 bits;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* if vlvf_bypass is set we don't want to use an empty slot, we
	 * will simply bypass the VLVF if there are no entries present in the
	 * VLVF that contain our VLAN
	 */
	first_empty_slot = vlvf_bypass ? -E1000_ERR_NO_SPACE : 0;

	/* Search for the VLAN id in the VLVF entries, skipping entry 0.
	 * Save off the first empty slot found along the way.
	 */
	for (regindex = E1000_VLVF_ARRAY_SIZE; --regindex > 0;) {
		bits = rd32(E1000_VLVF(regindex)) & E1000_VLVF_VLANID_MASK;
		if (bits == vlan)
			return regindex;
		if (!first_empty_slot && !bits)
			first_empty_slot = regindex;
	}

	return first_empty_slot ? : -E1000_ERR_NO_SPACE;
}
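
/**
 *  igb_vfta_set - enable or disable vlan in VLAN filter table
 *  @hw: pointer to the HW structure
 *  @vlan: VLAN id to add or remove
 *  @vind: VMDq output index that maps queue to VLAN id
 *  @vlan_on: if true add filter, if false remove
 *  @vlvf_bypass: skip VLVF if no match is found
 *
 *  Sets or clears a bit in the VLAN filter table array based on VLAN id
 *  and if we are adding or removing the filter.
 **/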
s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
		 bool vlan_on, bool vlvf_bypass)
{
	struct igb_adapter *adapter = hw->back;
	u32 regidx, vfta_delta, vfta, bits;
	s32 vlvf_index;

	if ((vlan > 4095) || (vind > 7))
		return -E1000_ERR_PARAM;

	/* This is a two part operation - first the VFTA, then the
	 * VLVF if VT Mode is set.  We don't write the VFTA until we
	 * know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regidx = vlan / 32;
	vfta_delta = BIT(vlan % 32);
	vfta = adapter->shadow_vfta[regidx];

	/* vfta_delta represents the difference between the current value
	 * of vfta and the value we want in the register.  Since the diff
	 * is an XOR mask we can just update vfta similarly.
	 */
	vfta_delta &= vlan_on ? ~vfta : vfta;
	vfta ^= vfta_delta;

	/* Part 2
	 * If VT Mode is set
	 *   Either vlan_on
	 *     make sure the VLAN is in VLVF
	 *     set the vind bit in the matching VLVF pool select
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	if (!adapter->vfs_allocated_count)
		goto vfta_update;

	vlvf_index = igb_find_vlvf_slot(hw, vlan, vlvf_bypass);
	if (vlvf_index < 0) {
		if (vlvf_bypass)
			goto vfta_update;
		return vlvf_index;
	}

	bits = rd32(E1000_VLVF(vlvf_index));

	/* set the pool bit */
	bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
	if (vlan_on)
		goto vlvf_update;

	/* clear the pool bit */
	bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);

	if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
		/* Clear VFTA first, then disable VLVF.  Otherwise
		 * we run the risk of stray packets leaking into
		 * the PF via the default pool
		 */
		if (vfta_delta)
			hw->mac.ops.write_vfta(hw, regidx, vfta);

		/* disable VLVF and clear remaining bit from pool */
		wr32(E1000_VLVF(vlvf_index), 0);

		return 0;
	}

	/* If there are still bits set in the pool select field for the
	 * VLAN id indicated, we need to see if the caller is requesting
	 * that we clear the VFTA entry bit.  If the caller has requested
	 * that we clear the VFTA entry bit but there are still pools/VFs
	 * using this VLAN id, then ignore the request: we cannot clear
	 * the VFTA bit until all pools/VFs using that VLAN id have also
	 * been cleared.
	 */
	vfta_delta = 0;

vlvf_update:
	/* record pool change and enable VLAN id if not already enabled */
	wr32(E1000_VLVF(vlvf_index), bits | vlan | E1000_VLVF_VLANID_ENABLE);

vfta_update:
	/* bit was set/cleared before we started */
	if (vfta_delta)
		hw->mac.ops.write_vfta(hw, regidx, vfta);

	return 0;
}
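
/**
 *  igb_check_alt_mac_addr - Check for alternate MAC addr
 *  @hw: pointer to the HW structure
 *
 *  Checks the nvm for an alternate MAC address.  An alternate MAC address
 *  can be setup by pre-boot software and must be treated like a permanent
 *  address and must override the actual permanent MAC address.
 **/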
s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
{
	u32 i;
	s32 ret_val = 0;
	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
	u8 alt_mac_addr[ETH_ALEN];

	/* Alternate MAC address is handled by the option ROM for 82580
	 * and newer. SW support not required.
	 */
	if (hw->mac.type >= e1000_82580)
		goto out;

	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
				   &nvm_alt_mac_addr_offset);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
	    (nvm_alt_mac_addr_offset == 0x0000))
		/* There is no Alternate MAC Address */
		goto out;

	if (hw->bus.func == E1000_FUNC_1)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
	if (hw->bus.func == E1000_FUNC_2)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
	if (hw->bus.func == E1000_FUNC_3)
		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = nvm_alt_mac_addr_offset + (i >> 1);
		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg("NVM Read Error\n");
			goto out;
		}

		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
	}

	/* if multicast bit is set, the alternate address will not be used */
	if (is_multicast_ether_addr(alt_mac_addr)) {
		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
		goto out;
	}

	/* We have a valid alternate MAC address, and we want to treat it the
	 * same as the normal permanent MAC address stored by the HW into the
	 * RAR. Do this by mapping this address into RAR0.
	 */
	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);

out:
	return ret_val;
}
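
/**
 *  igb_rar_set - Set receive address register
 *  @hw: pointer to the HW structure
 *  @addr: pointer to the receive address
 *  @index: receive address array register
 *
 *  Sets the receive address array register at index to the address passed
 *  in by addr.
 **/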
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
	u32 rar_low, rar_high;

	/* HW expects these in little endian so we reverse the byte order
	 * from network order (big endian) to little endian
	 */
	rar_low = ((u32) addr[0] |
		   ((u32) addr[1] << 8) |
		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));

	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));

	/* If MAC address zero, no need to set the AV bit */
	if (rar_low || rar_high)
		rar_high |= E1000_RAH_AV;

	/* Some bridges will combine consecutive 32-bit writes into
	 * a single burst write, which will malfunction on some parts.
	 * The flushes avoid this.
	 */
	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}
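
/**
 *  igb_mta_set - Set multicast filter table address
 *  @hw: pointer to the HW structure
 *  @hash_value: determines the MTA register and bit to set
 *
 *  The multicast table address is a register array of 32-bit registers.
 *  The hash_value is used to determine what register the bit is in, the
 *  current value is read, the new bit is OR'd in and the new value is
 *  written back into the register.
 **/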
void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
{
	u32 hash_bit, hash_reg, mta;

	/* The MTA is a register array of 32-bit registers. It is
	 * treated like an array of (32*mta_reg_count) bits.  We want to
	 * set bit BitArray[hash_value]. So we figure out what register
	 * the bit is in, read it, OR in the new bit, then write
	 * back the new value.  The (hw->mac.mta_reg_count - 1) serves as a
	 * mask to bits 31:5 of the hash value which gives us the
	 * register we're modifying.  The hash bit within that register
	 * is determined by the lower 5 bits of the hash value.
	 */
	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
	hash_bit = hash_value & 0x1F;

	mta = array_rd32(E1000_MTA, hash_reg);

	mta |= BIT(hash_bit);

	array_wr32(E1000_MTA, hash_reg, mta);
	wrfl();
}
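
/**
 *  igb_hash_mc_addr - Generate a multicast hash value
 *  @hw: pointer to the HW structure
 *  @mc_addr: pointer to a multicast address
 *
 *  Generates a multicast address hash value which is used to determine
 *  the multicast filter table array address and new table value.
 **/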
static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
{
	u32 hash_value, hash_mask;
	u8 bit_shift = 0;

	/* Register count multiplied by bits per register */
	hash_mask = (hw->mac.mta_reg_count * 32) - 1;

	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
	 * where 0xFF would still fall within the hash mask.
	 */
	while (hash_mask >> bit_shift != 0xFF)
		bit_shift++;

	/* The portion of the address that is used for the hash table
	 * is determined by the mc_filter_type setting.  The algorithm
	 * uses a total of 8 bits of shifting: the bit_shift computed
	 * above is how far mc_addr[5] is shifted left, and mc_addr[4]
	 * is shifted right by the remaining (8 - bit_shift) bits.  The
	 * non-zero filter types raise the shift applied to mc_addr[5]
	 * while keeping the 8-bit shifting total, selecting a different
	 * window of the address as the hash.
	 */
	switch (hw->mac.mc_filter_type) {
	default:
	case 0:
		break;
	case 1:
		bit_shift += 1;
		break;
	case 2:
		bit_shift += 2;
		break;
	case 3:
		bit_shift += 4;
		break;
	}

	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
				  (((u16) mc_addr[5]) << bit_shift)));

	return hash_value;
}
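
/**
 *  igb_update_mc_addr_list - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @mc_addr_list: array of multicast addresses to program
 *  @mc_addr_count: number of multicast addresses to program
 *
 *  Updates the entire Multicast Table Array.  The caller must supply a
 *  packed mc_addr_list of multicast addresses.
 **/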
void igb_update_mc_addr_list(struct e1000_hw *hw,
			     u8 *mc_addr_list, u32 mc_addr_count)
{
	u32 hash_value, hash_bit, hash_reg;
	int i;

	/* clear mta_shadow */
	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));

	/* update mta_shadow from mc_addr_list */
	for (i = 0; (u32) i < mc_addr_count; i++) {
		hash_value = igb_hash_mc_addr(hw, mc_addr_list);

		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
		hash_bit = hash_value & 0x1F;

		hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
		mc_addr_list += (ETH_ALEN);
	}

	/* replace the entire MTA table */
	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
	wrfl();
}
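
/**
 *  igb_clear_hw_cntrs_base - Clear base hardware counters
 *  @hw: pointer to the HW structure
 *
 *  Clears the base hardware counters by reading the counter registers
 *  (the statistics counters are clear-on-read).
 **/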
void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
{
	rd32(E1000_CRCERRS);
	rd32(E1000_SYMERRS);
	rd32(E1000_MPC);
	rd32(E1000_SCC);
	rd32(E1000_ECOL);
	rd32(E1000_MCC);
	rd32(E1000_LATECOL);
	rd32(E1000_COLC);
	rd32(E1000_DC);
	rd32(E1000_SEC);
	rd32(E1000_RLEC);
	rd32(E1000_XONRXC);
	rd32(E1000_XONTXC);
	rd32(E1000_XOFFRXC);
	rd32(E1000_XOFFTXC);
	rd32(E1000_FCRUC);
	rd32(E1000_GPRC);
	rd32(E1000_BPRC);
	rd32(E1000_MPRC);
	rd32(E1000_GPTC);
	rd32(E1000_GORCL);
	rd32(E1000_GORCH);
	rd32(E1000_GOTCL);
	rd32(E1000_GOTCH);
	rd32(E1000_RNBC);
	rd32(E1000_RUC);
	rd32(E1000_RFC);
	rd32(E1000_ROC);
	rd32(E1000_RJC);
	rd32(E1000_TORL);
	rd32(E1000_TORH);
	rd32(E1000_TOTL);
	rd32(E1000_TOTH);
	rd32(E1000_TPR);
	rd32(E1000_TPT);
	rd32(E1000_MPTC);
	rd32(E1000_BPTC);
}
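
/**
 *  igb_check_for_copper_link - Check for link (Copper)
 *  @hw: pointer to the HW structure
 *
 *  Checks to determine if the link state has changed.  If a change in link
 *  state has been detected, the flow control and speed/duplex configuration
 *  is validated.
 **/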
s32 igb_check_for_copper_link(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	bool link;

	/* We only want to go out to the PHY registers to see if Auto-Neg
	 * has completed and/or if our link status has changed.  The
	 * get_link_status flag is set upon receiving a Link Status
	 * Change or Rx Sequence Error interrupt.
	 */
	if (!mac->get_link_status) {
		ret_val = 0;
		goto out;
	}

	/* First we want to see if the MII Status Register reports
	 * link.  If so, then we want to get the current speed/duplex
	 * of the PHY.
	 */
	ret_val = igb_phy_has_link(hw, 1, 0, &link);
	if (ret_val)
		goto out;

	if (!link)
		goto out; /* No link detected */

	mac->get_link_status = false;

	/* Check if there was DownShift, must be checked
	 * immediately after link-up
	 */
	igb_check_downshift(hw);

	/* If we are forcing speed/duplex, then we simply return since
	 * we have already determined whether we have link or not.
	 */
	if (!mac->autoneg) {
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	/* Auto-Neg is enabled.  Auto Speed Detection takes care
	 * of MAC speed/duplex configuration, so we only need to
	 * configure Collision Distance in the MAC.
	 */
	igb_config_collision_dist(hw);

	/* Configure Flow Control now that Auto-Neg has completed.
	 * First, we need to restore the desired flow control
	 * settings because we may have had to re-autoneg with a
	 * different link partner.
	 */
	ret_val = igb_config_fc_after_link_up(hw);
	if (ret_val)
		hw_dbg("Error configuring flow control\n");

out:
	return ret_val;
}
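
/**
 *  igb_setup_link - Setup flow control and link settings
 *  @hw: pointer to the HW structure
 *
 *  Determines which flow control settings to use, then configures flow
 *  control.  Calls the appropriate media-specific link configuration
 *  function.  Assuming the adapter has a valid link partner, a valid link
 *  should be established.  Assumes the hardware has previously been reset
 *  and the transmitter and receiver are not enabled.
 **/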
s32 igb_setup_link(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* In the case of the phy reset being blocked, we already have a link.
	 * We do not need to set it up again.
	 */
	if (igb_check_reset_block(hw))
		goto out;

	/* If requested flow control is set to default, set flow control
	 * based on the EEPROM flow control settings.
	 */
	if (hw->fc.requested_mode == e1000_fc_default) {
		ret_val = igb_set_default_fc(hw);
		if (ret_val)
			goto out;
	}

	/* We want to save off the original Flow Control configuration just
	 * in case we get disconnected and then reconnected into a different
	 * hub or switch with different Flow Control capabilities.
	 */
	hw->fc.current_mode = hw->fc.requested_mode;

	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);

	/* Call the necessary media_type subroutine to configure the link. */
	ret_val = hw->mac.ops.setup_physical_interface(hw);
	if (ret_val)
		goto out;

	/* Initialize the flow control address, type, and PAUSE timer
	 * registers to their default values.  This is done even if flow
	 * control is disabled, because it does not hurt anything to
	 * initialize these registers.
	 */
	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);

	wr32(E1000_FCTTV, hw->fc.pause_time);

	ret_val = igb_set_fc_watermarks(hw);

out:
	return ret_val;
}
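
/**
 *  igb_config_collision_dist - Configure collision distance
 *  @hw: pointer to the HW structure
 *
 *  Configures the collision distance to the default value and is used
 *  during link setup.
 **/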
void igb_config_collision_dist(struct e1000_hw *hw)
{
	u32 tctl;

	tctl = rd32(E1000_TCTL);

	tctl &= ~E1000_TCTL_COLD;
	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;

	wr32(E1000_TCTL, tctl);
	wrfl();
}
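
/**
 *  igb_set_fc_watermarks - Set flow control high/low watermarks
 *  @hw: pointer to the HW structure
 *
 *  Sets the flow control high/low threshold (watermark) registers.  If
 *  flow control XON frame transmission is enabled, then set XON frame
 *  transmission as well.
 **/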
static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 fcrtl = 0, fcrth = 0;

	/* Set the flow control receive threshold registers.  Normally,
	 * these registers will be set to a default threshold that may be
	 * adjusted later by the driver's runtime code.  However, if the
	 * ability to transmit pause frames is not enabled, then these
	 * registers will be set to 0.
	 */
	if (hw->fc.current_mode & e1000_fc_tx_pause) {
		/* We need to set up the Receive Threshold high and low water
		 * marks as well as (optionally) enabling the transmission of
		 * XON frames.
		 */
		fcrtl = hw->fc.low_water;
		if (hw->fc.send_xon)
			fcrtl |= E1000_FCRTL_XONE;

		fcrth = hw->fc.high_water;
	}
	wr32(E1000_FCRTL, fcrtl);
	wr32(E1000_FCRTH, fcrth);

	return ret_val;
}
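
/**
 *  igb_set_default_fc - Set flow control default values
 *  @hw: pointer to the HW structure
 *
 *  Read the EEPROM for the default values for flow control and store the
 *  values.
 **/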
static s32 igb_set_default_fc(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 lan_offset;
	u16 nvm_data;

	/* Read and store word 0x0F of the EEPROM.  This word contains bits
	 * that determine the hardware's default PAUSE (flow control) mode,
	 * a bit that determines whether the HW defaults to enabling or
	 * disabling auto-negotiation, and the direction of the
	 * SW defined pins.  If there is no SW over-ride of the flow
	 * control setting, then the variable hw->fc will
	 * be initialized based on a value in the EEPROM.
	 */
	if (hw->mac.type == e1000_i350)
		lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
	else
		lan_offset = 0;

	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG + lan_offset,
				   1, &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
		hw->fc.requested_mode = e1000_fc_none;
	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
		hw->fc.requested_mode = e1000_fc_tx_pause;
	else
		hw->fc.requested_mode = e1000_fc_full;

out:
	return ret_val;
}
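
/**
 *  igb_force_mac_fc - Force the MAC's flow control settings
 *  @hw: pointer to the HW structure
 *
 *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in
 *  the device control register to reflect the adapter settings.  TFCE and
 *  RFCE need to be explicitly set by software when a copper PHY is used
 *  because autonegotiation is managed by the PHY rather than the MAC.
 *  Software must also configure these bits when link is forced on a fiber
 *  connection.
 **/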
s32 igb_force_mac_fc(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val = 0;

	ctrl = rd32(E1000_CTRL);

	/* Because we didn't get link via the internal auto-negotiation
	 * mechanism (we either forced link or we got link via PHY
	 * auto-neg), we have to manually enable/disable transmit and
	 * receive flow control.
	 *
	 * The "Case" statement below enables/disables flow control
	 * according to the "hw->fc.current_mode" parameter.
	 *
	 * The possible values of the "fc" parameter are:
	 *      0:  Flow control is completely disabled
	 *      1:  Rx flow control is enabled (we can receive pause
	 *          frames but not send pause frames).
	 *      2:  Tx flow control is enabled (we can send pause frames
	 *          but we do not receive pause frames).
	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
	 *  other:  No other values should be possible at this point.
	 */
	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);

	switch (hw->fc.current_mode) {
	case e1000_fc_none:
		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
		break;
	case e1000_fc_rx_pause:
		ctrl &= (~E1000_CTRL_TFCE);
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:
		ctrl &= (~E1000_CTRL_RFCE);
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:
		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	default:
		hw_dbg("Flow control param set incorrectly\n");
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

	wr32(E1000_CTRL, ctrl);

out:
	return ret_val;
}
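
/**
 *  igb_config_fc_after_link_up - Configures flow control after link
 *  @hw: pointer to the HW structure
 *
 *  Checks the status of auto-negotiation after link up to ensure that the
 *  speed and duplex were not forced.  If the link needed to be forced, then
 *  flow control needs to be forced also.  If auto-negotiation is enabled
 *  and did not fail, then we configure flow control based on our link
 *  partner.
 **/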
s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
	u16 speed, duplex;

	/* Check for the case where we have fiber media and auto-neg failed
	 * so we had to force link.  In this case, we need to force the
	 * configuration of the MAC to match the "fc" parameter.
	 */
	if (mac->autoneg_failed) {
		if (hw->phy.media_type == e1000_media_type_internal_serdes)
			ret_val = igb_force_mac_fc(hw);
	} else {
		if (hw->phy.media_type == e1000_media_type_copper)
			ret_val = igb_force_mac_fc(hw);
	}

	if (ret_val) {
		hw_dbg("Error forcing flow control settings\n");
		goto out;
	}

	/* Check for the case where we have copper media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
		/* Read the MII Status Register and check to see if AutoNeg
		 * has completed.  We read this twice because this reg has
		 * some "sticky" (latched) bits.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
					       &mii_status_reg);
		if (ret_val)
			goto out;

		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
			goto out;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (Address 4) and the Auto_Negotiation Base
		 * Page Ability Register (Address 5) to determine how
		 * flow control was negotiated.
		 */
		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
					       &mii_nway_adv_reg);
		if (ret_val)
			goto out;
		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
					       &mii_nway_lp_ability_reg);
		if (ret_val)
			goto out;

		/* Two bits in the Auto Negotiation Advertisement Register
		 * (Address 4) and two bits in the Auto Negotiation Base
		 * Page Ability Register (Address 5) determine flow control
		 * for both the PHY and the link partner.  The IEEE 802.3
		 * PAUSE resolution is as follows (DC = Don't Care):
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
		 *-------|---------|-------|---------|--------------------
		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
		 *   0   |    1    |   0   |   DC    | e1000_fc_none
		 *   0   |    1    |   1   |    0    | e1000_fc_none
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 *   1   |    0    |   0   |   DC    | e1000_fc_none
		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
		 *   1   |    1    |   0   |    0    | e1000_fc_none
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 *
		 * Are both PAUSE bits set to 1?  If so, this implies
		 * Symmetric Flow Control is enabled at both ends.  The
		 * ASM_DIR bits are irrelevant per the spec.
		 */
		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = RX PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
		 */
		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = TX PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *
		 *   LOCAL DEVICE  |   LINK PARTNER
		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
		 *-------|---------|-------|---------|--------------------
		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
		 */
		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled.  However, we want to consider that we could be
		 * connected to a legacy switch that doesn't advertise
		 * desired flow control, but can be forced on the link
		 * partner.  So if we advertised no flow control, that is
		 * what we will resolve to.  If we advertised some kind of
		 * receive capability (Rx Pause Only or Full Flow Control)
		 * and the link partner advertised none, we will configure
		 * ourselves to enable Rx Flow Control only.  This is safe:
		 * if the partner really didn't want flow control, enabling
		 * Rx does no harm since no PAUSE frames will arrive; if it
		 * did, we can at least receive and process pause frames.
		 */
		else if ((hw->fc.requested_mode == e1000_fc_none) ||
			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
			 (hw->fc.strict_ieee)) {
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}

		/* Now we need to do one last check...  If we auto-
		 * negotiated to HALF DUPLEX, flow control should not be
		 * enabled per IEEE 802.3 spec.
		 */
		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
		if (ret_val) {
			hw_dbg("Error getting link speed and duplex\n");
			goto out;
		}

		if (duplex == HALF_DUPLEX)
			hw->fc.current_mode = e1000_fc_none;

		/* Now we call a subroutine to actually force the MAC
		 * controller to use the correct flow control settings.
		 */
		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			goto out;
		}
	}
	/* Check for the case where we have SerDes media and auto-neg is
	 * enabled.  In this case, we need to check and see if Auto-Neg
	 * has completed, and if so, how the PHY and link partner have
	 * flow control configured.
	 */
	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
	    && mac->autoneg) {
		/* Read the PCS link status register and check to see if
		 * AutoNeg has completed.
		 */
		pcs_status_reg = rd32(E1000_PCS_LSTAT);

		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
			hw_dbg("PCS Auto Neg has not completed.\n");
			return ret_val;
		}

		/* The AutoNeg process has completed, so we now need to
		 * read both the Auto Negotiation Advertisement
		 * Register (PCS_ANADV) and the Auto Negotiation Base
		 * Page Ability Register (PCS_LPAB) to determine how
		 * flow control was negotiated.
		 */
		pcs_adv_reg = rd32(E1000_PCS_ANADV);
		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);

		/* The PAUSE/ASM_DIR bits in these registers resolve flow
		 * control the same way as for copper, per the table above.
		 * Are both PAUSE bits set to 1?  If so, Symmetric Flow
		 * Control is enabled at both ends.
		 */
		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
			/* Now we need to check if the user selected Rx ONLY
			 * of pause frames.  In this case, we had to advertise
			 * FULL flow control because we could not advertise Rx
			 * ONLY.  Hence, we must now check to see if we need to
			 * turn OFF the TRANSMISSION of PAUSE frames.
			 */
			if (hw->fc.requested_mode == e1000_fc_full) {
				hw->fc.current_mode = e1000_fc_full;
				hw_dbg("Flow Control = FULL.\n");
			} else {
				hw->fc.current_mode = e1000_fc_rx_pause;
				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
			}
		}
		/* For receiving PAUSE frames ONLY. */
		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY. */
		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
		} else {
			/* Per the IEEE spec, at this point flow control
			 * should be disabled.
			 */
			hw->fc.current_mode = e1000_fc_none;
			hw_dbg("Flow Control = NONE.\n");
		}

		/* Force the resolved flow control settings in the PCS. */
		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);

		ret_val = igb_force_mac_fc(hw);
		if (ret_val) {
			hw_dbg("Error forcing flow control settings\n");
			return ret_val;
		}
	}

out:
	return ret_val;
}
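
/**
 *  igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
 *  @hw: pointer to the HW structure
 *  @speed: stores the current speed
 *  @duplex: stores the current duplex
 *
 *  Read the status register for the current speed/duplex and store the
 *  current speed and duplex for copper connections.
 **/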
s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
				    u16 *duplex)
{
	u32 status;

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_SPEED_1000) {
		*speed = SPEED_1000;
		hw_dbg("1000 Mbps, ");
	} else if (status & E1000_STATUS_SPEED_100) {
		*speed = SPEED_100;
		hw_dbg("100 Mbps, ");
	} else {
		*speed = SPEED_10;
		hw_dbg("10 Mbps, ");
	}

	if (status & E1000_STATUS_FD) {
		*duplex = FULL_DUPLEX;
		hw_dbg("Full Duplex\n");
	} else {
		*duplex = HALF_DUPLEX;
		hw_dbg("Half Duplex\n");
	}

	return 0;
}
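
/**
 *  igb_get_hw_semaphore - Acquire hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Acquire the HW semaphore to access the PHY or NVM
 **/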
s32 igb_get_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;
	s32 ret_val = 0;
	s32 timeout = hw->nvm.word_size + 1;
	s32 i = 0;

	/* Get the SW semaphore */
	while (i < timeout) {
		swsm = rd32(E1000_SWSM);
		if (!(swsm & E1000_SWSM_SMBI))
			break;

		udelay(50);
		i++;
	}

	if (i == timeout) {
		hw_dbg("Driver can't access device - SMBI bit is set.\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

	/* Get the FW semaphore. */
	for (i = 0; i < timeout; i++) {
		swsm = rd32(E1000_SWSM);
		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);

		/* Semaphore acquired if bit latched */
		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
			break;

		udelay(50);
	}

	if (i == timeout) {
		/* Release semaphores */
		igb_put_hw_semaphore(hw);
		hw_dbg("Driver can't access the NVM\n");
		ret_val = -E1000_ERR_NVM;
		goto out;
	}

out:
	return ret_val;
}
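
/**
 *  igb_put_hw_semaphore - Release hardware semaphore
 *  @hw: pointer to the HW structure
 *
 *  Release hardware semaphore used to access the PHY or NVM
 **/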
void igb_put_hw_semaphore(struct e1000_hw *hw)
{
	u32 swsm;

	swsm = rd32(E1000_SWSM);

	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);

	wr32(E1000_SWSM, swsm);
}
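
/**
 *  igb_get_auto_rd_done - Check for auto read completion
 *  @hw: pointer to the HW structure
 *
 *  Check EEPROM for Auto Read done bit.
 **/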
s32 igb_get_auto_rd_done(struct e1000_hw *hw)
{
	s32 i = 0;
	s32 ret_val = 0;

	while (i < AUTO_READ_DONE_TIMEOUT) {
		if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
			break;
		usleep_range(1000, 2000);
		i++;
	}

	if (i == AUTO_READ_DONE_TIMEOUT) {
		hw_dbg("Auto read by HW from NVM has not completed.\n");
		ret_val = -E1000_ERR_RESET;
		goto out;
	}

out:
	return ret_val;
}
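
/**
 *  igb_valid_led_default - Verify a valid default LED config
 *  @hw: pointer to the HW structure
 *  @data: pointer to the NVM (EEPROM)
 *
 *  Read the EEPROM for the current default LED configuration.  If the
 *  LED configuration is not valid, set to a valid LED configuration.
 **/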
static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
{
	s32 ret_val;

	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
		switch (hw->phy.media_type) {
		case e1000_media_type_internal_serdes:
			*data = ID_LED_DEFAULT_82575_SERDES;
			break;
		case e1000_media_type_copper:
		default:
			*data = ID_LED_DEFAULT;
			break;
		}
	}
out:
	return ret_val;
}
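
/**
 *  igb_id_led_init - Initialize LED identification settings
 *  @hw: pointer to the HW structure
 *
 *  Stores the default LEDCTL value and computes the LEDCTL values used
 *  for LED identification (mode 1 for the "on" state, mode 2 for "off")
 *  from the ID LED settings read out of the NVM.
 **/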
s32 igb_id_led_init(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	const u32 ledctl_mask = 0x000000FF;
	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
	u16 data, i, temp;
	const u16 led_mask = 0x0F;

	/* i210 and i211 devices have a different LED mechanism */
	if ((hw->mac.type == e1000_i210) ||
	    (hw->mac.type == e1000_i211))
		ret_val = igb_valid_led_default_i210(hw, &data);
	else
		ret_val = igb_valid_led_default(hw, &data);

	if (ret_val)
		goto out;

	mac->ledctl_default = rd32(E1000_LEDCTL);
	mac->ledctl_mode1 = mac->ledctl_default;
	mac->ledctl_mode2 = mac->ledctl_default;

	for (i = 0; i < 4; i++) {
		temp = (data >> (i << 2)) & led_mask;
		switch (temp) {
		case ID_LED_ON1_DEF2:
		case ID_LED_ON1_ON2:
		case ID_LED_ON1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_on << (i << 3);
			break;
		case ID_LED_OFF1_DEF2:
		case ID_LED_OFF1_ON2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode1 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
		switch (temp) {
		case ID_LED_DEF1_ON2:
		case ID_LED_ON1_ON2:
		case ID_LED_OFF1_ON2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_on << (i << 3);
			break;
		case ID_LED_DEF1_OFF2:
		case ID_LED_ON1_OFF2:
		case ID_LED_OFF1_OFF2:
			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
			mac->ledctl_mode2 |= ledctl_off << (i << 3);
			break;
		default:
			/* Do nothing */
			break;
		}
	}

out:
	return ret_val;
}
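
/**
 *  igb_cleanup_led - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Remove the current LED configuration and set the LED configuration
 *  to the default value, saved from the EEPROM.
 **/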
s32 igb_cleanup_led(struct e1000_hw *hw)
{
	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
	return 0;
}
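
/**
 *  igb_blink_led - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blink the LEDs which are set to be on.
 **/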
s32 igb_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->phy.media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* Set the blink bit for each LED that's "on" (0x0E)
		 * (or "off" if inverted) in ledctl_mode2.  The blink
		 * logic in hardware only works when mode is set to "on"
		 * so it must be changed accordingly when the mode is
		 * "off" and inverted.
		 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 32; i += 8) {
			u32 mode = (hw->mac.ledctl_mode2 >> i) &
			    E1000_LEDCTL_LED0_MODE_MASK;
			u32 led_default = hw->mac.ledctl_default >> i;

			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
				ledctl_blink &=
				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
						 E1000_LEDCTL_MODE_LED_ON) << i;
			}
		}
	}

	wr32(E1000_LEDCTL, ledctl_blink);

	return 0;
}
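
/**
 *  igb_led_off - Turn LED off
 *  @hw: pointer to the HW structure
 *
 *  Turn LED off.
 **/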
s32 igb_led_off(struct e1000_hw *hw)
{
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}
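
/**
 *  igb_disable_pcie_master - Disables PCI-express master access
 *  @hw: pointer to the HW structure
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests.  Returns 0 if successful, else -E1000_ERR_MASTER_REQUESTS_PENDING
 *  if the master disable bit has not caused the master requests to be
 *  disabled within the timeout.
 **/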
s32 igb_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;
	s32 ret_val = 0;

	if (hw->bus.type != e1000_bus_type_pci_express)
		goto out;

	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	wr32(E1000_CTRL, ctrl);

	while (timeout) {
		if (!(rd32(E1000_STATUS) &
		      E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg("Master requests are pending.\n");
		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
		goto out;
	}

out:
	return ret_val;
}
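
/**
 *  igb_validate_mdi_setting - Verify MDI/MDIx settings
 *  @hw: pointer to the HW structure
 *
 *  Verify that when not using auto-negotiation that MDI/MDIx is correctly
 *  set, which is forced to MDI mode only.
 **/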
s32 igb_validate_mdi_setting(struct e1000_hw *hw)
{
	s32 ret_val = 0;

	/* All MDI settings are supported on 82580 and newer. */
	if (hw->mac.type >= e1000_82580)
		goto out;

	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
		hw_dbg("Invalid MDI setting detected\n");
		hw->phy.mdix = 1;
		ret_val = -E1000_ERR_CONFIG;
		goto out;
	}

out:
	return ret_val;
}
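
/**
 *  igb_write_8bit_ctrl_reg - Write a 8bit CTRL register
 *  @hw: pointer to the HW structure
 *  @reg: 32bit register offset such as E1000_SCTL
 *  @offset: register offset to write to
 *  @data: data to write at register offset
 *
 *  Writes an address/data control type register.  There are several of
 *  these and they all have the format address << 8 | data and bit 31 is
 *  polled for completion.
 **/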
s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
			    u32 offset, u8 data)
{
	u32 i, regvalue = 0;
	s32 ret_val = 0;

	/* Set up the address and data */
	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
	wr32(reg, regvalue);

	/* Poll the ready bit to see if the operation completed */
	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
		udelay(5);
		regvalue = rd32(reg);
		if (regvalue & E1000_GEN_CTL_READY)
			break;
	}
	if (!(regvalue & E1000_GEN_CTL_READY)) {
		hw_dbg("Reg %08x did not indicate ready\n", reg);
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}
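
/**
 *  igb_enable_mng_pass_thru - Enable processing of ARP's
 *  @hw: pointer to the HW structure
 *
 *  Verifies the hardware needs to leave the interface enabled so that
 *  frames can be directed to and from the management interface.
 **/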
bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
{
	u32 manc;
	u32 fwsm, factps;
	bool ret_val = false;

	if (!hw->mac.asf_firmware_present)
		goto out;

	manc = rd32(E1000_MANC);

	if (!(manc & E1000_MANC_RCV_TCO_EN))
		goto out;

	if (hw->mac.arc_subsystem_valid) {
		fwsm = rd32(E1000_FWSM);
		factps = rd32(E1000_FACTPS);

		if (!(factps & E1000_FACTPS_MNGCG) &&
		    ((fwsm & E1000_FWSM_MODE_MASK) ==
		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
			ret_val = true;
			goto out;
		}
	} else {
		if ((manc & E1000_MANC_SMBUS_EN) &&
		    !(manc & E1000_MANC_ASF_EN)) {
			ret_val = true;
			goto out;
		}
	}

out:
	return ret_val;
}