1 /* Intel(R) Gigabit Ethernet Linux driver
2 * Copyright(c) 2007-2015 Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 *
16 * The full GNU General Public License is included in this distribution in
17 * the file called "COPYING".
18 *
19 * Contact Information:
20 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
21 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
22 */
23
24 /* e1000_82575
25 * e1000_82576
26 */
27
28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
30 #include <linux/types.h>
31 #include <linux/if_ether.h>
32 #include <linux/i2c.h>
33
34 #include "e1000_mac.h"
35 #include "e1000_82575.h"
36 #include "e1000_i210.h"
37
38 static s32 igb_get_invariants_82575(struct e1000_hw *);
39 static s32 igb_acquire_phy_82575(struct e1000_hw *);
40 static void igb_release_phy_82575(struct e1000_hw *);
41 static s32 igb_acquire_nvm_82575(struct e1000_hw *);
42 static void igb_release_nvm_82575(struct e1000_hw *);
43 static s32 igb_check_for_link_82575(struct e1000_hw *);
44 static s32 igb_get_cfg_done_82575(struct e1000_hw *);
45 static s32 igb_init_hw_82575(struct e1000_hw *);
46 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
47 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
48 static s32 igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
49 static s32 igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
50 static s32 igb_reset_hw_82575(struct e1000_hw *);
51 static s32 igb_reset_hw_82580(struct e1000_hw *);
52 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
53 static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
54 static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
55 static s32 igb_setup_copper_link_82575(struct e1000_hw *);
56 static s32 igb_setup_serdes_link_82575(struct e1000_hw *);
57 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
58 static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
59 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
60 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
61 u16 *);
62 static s32 igb_get_phy_id_82575(struct e1000_hw *);
63 static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
64 static bool igb_sgmii_active_82575(struct e1000_hw *);
65 static s32 igb_reset_init_script_82575(struct e1000_hw *);
66 static s32 igb_read_mac_addr_82575(struct e1000_hw *);
67 static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw);
68 static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw);
69 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
70 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
71 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
72 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
/* 82580 Rx packet buffer size table: maps the RXPBS register encoding
 * (used as an index) to a packet buffer size value.
 */
static const u16 e1000_82580_rxpbs_table[] = {
	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
75
76 /**
77 * igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
78 * @hw: pointer to the HW structure
79 *
80 * Called to determine if the I2C pins are being used for I2C or as an
81 * external MDIO interface since the two options are mutually exclusive.
82 **/
igb_sgmii_uses_mdio_82575(struct e1000_hw * hw)83 static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
84 {
85 u32 reg = 0;
86 bool ext_mdio = false;
87
88 switch (hw->mac.type) {
89 case e1000_82575:
90 case e1000_82576:
91 reg = rd32(E1000_MDIC);
92 ext_mdio = !!(reg & E1000_MDIC_DEST);
93 break;
94 case e1000_82580:
95 case e1000_i350:
96 case e1000_i354:
97 case e1000_i210:
98 case e1000_i211:
99 reg = rd32(E1000_MDICNFG);
100 ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
101 break;
102 default:
103 break;
104 }
105 return ext_mdio;
106 }
107
108 /**
109 * igb_check_for_link_media_swap - Check which M88E1112 interface linked
110 * @hw: pointer to the HW structure
111 *
112 * Poll the M88E1112 interfaces to see which interface achieved link.
113 */
igb_check_for_link_media_swap(struct e1000_hw * hw)114 static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
115 {
116 struct e1000_phy_info *phy = &hw->phy;
117 s32 ret_val;
118 u16 data;
119 u8 port = 0;
120
121 /* Check the copper medium. */
122 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
123 if (ret_val)
124 return ret_val;
125
126 ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
127 if (ret_val)
128 return ret_val;
129
130 if (data & E1000_M88E1112_STATUS_LINK)
131 port = E1000_MEDIA_PORT_COPPER;
132
133 /* Check the other medium. */
134 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
135 if (ret_val)
136 return ret_val;
137
138 ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
139 if (ret_val)
140 return ret_val;
141
142
143 if (data & E1000_M88E1112_STATUS_LINK)
144 port = E1000_MEDIA_PORT_OTHER;
145
146 /* Determine if a swap needs to happen. */
147 if (port && (hw->dev_spec._82575.media_port != port)) {
148 hw->dev_spec._82575.media_port = port;
149 hw->dev_spec._82575.media_changed = true;
150 }
151
152 if (port == E1000_MEDIA_PORT_COPPER) {
153 /* reset page to 0 */
154 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
155 if (ret_val)
156 return ret_val;
157 igb_check_for_link_82575(hw);
158 } else {
159 igb_check_for_link_82575(hw);
160 /* reset page to 0 */
161 ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
162 if (ret_val)
163 return ret_val;
164 }
165
166 return 0;
167 }
168
169 /**
170 * igb_init_phy_params_82575 - Init PHY func ptrs.
171 * @hw: pointer to the HW structure
172 **/
/**
 *  igb_init_phy_params_82575 - Init PHY func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Configures PHY function pointers and access routines based on the MAC
 *  type and the detected PHY ID.  Returns 0 on success, -E1000_ERR_PHY
 *  when the PHY ID is not recognized, or an error from register access.
 **/
static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u32 ctrl_ext;

	/* Non-copper media have no manageable PHY; nothing else to set up */
	if (hw->phy.media_type != e1000_media_type_copper) {
		phy->type = e1000_phy_none;
		goto out;
	}

	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
	phy->reset_delay_us = 100;

	ctrl_ext = rd32(E1000_CTRL_EXT);

	/* SGMII-attached PHYs use the I2C-based reset path; enable the
	 * I2C interface only in that case.
	 */
	if (igb_sgmii_active_82575(hw)) {
		phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
		ctrl_ext |= E1000_CTRL_I2C_ENA;
	} else {
		phy->ops.reset = igb_phy_hw_reset;
		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
	}

	wr32(E1000_CTRL_EXT, ctrl_ext);
	igb_reset_mdicnfg_82580(hw);

	/* Choose register accessors: I2C for SGMII without external MDIO,
	 * otherwise the MAC-specific MDIO routines.
	 */
	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
		phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
		phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
	} else {
		switch (hw->mac.type) {
		case e1000_82580:
		case e1000_i350:
		case e1000_i354:
			phy->ops.read_reg = igb_read_phy_reg_82580;
			phy->ops.write_reg = igb_write_phy_reg_82580;
			break;
		case e1000_i210:
		case e1000_i211:
			phy->ops.read_reg = igb_read_phy_reg_gs40g;
			phy->ops.write_reg = igb_write_phy_reg_gs40g;
			break;
		default:
			phy->ops.read_reg = igb_read_phy_reg_igp;
			phy->ops.write_reg = igb_write_phy_reg_igp;
		}
	}

	/* set lan id */
	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
			E1000_STATUS_FUNC_SHIFT;

	/* Set phy->phy_addr and phy->id. */
	ret_val = igb_get_phy_id_82575(hw);
	if (ret_val)
		return ret_val;

	/* Verify phy id and set remaining function pointers */
	switch (phy->id) {
	case M88E1543_E_PHY_ID:
	case M88E1512_E_PHY_ID:
	case I347AT4_E_PHY_ID:
	case M88E1112_E_PHY_ID:
	case M88E1111_I_PHY_ID:
		phy->type = e1000_phy_m88;
		phy->ops.check_polarity = igb_check_polarity_m88;
		phy->ops.get_phy_info = igb_get_phy_info_m88;
		/* M88E1111 uses the older cable-length method */
		if (phy->id != M88E1111_I_PHY_ID)
			phy->ops.get_cable_length =
					 igb_get_cable_length_m88_gen2;
		else
			phy->ops.get_cable_length = igb_get_cable_length_m88;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		/* Check if this PHY is configured for media swap. */
		if (phy->id == M88E1112_E_PHY_ID) {
			u16 data;

			/* read the mode bits from page 2, MAC control reg 1 */
			ret_val = phy->ops.write_reg(hw,
						     E1000_M88E1112_PAGE_ADDR,
						     2);
			if (ret_val)
				goto out;

			ret_val = phy->ops.read_reg(hw,
						    E1000_M88E1112_MAC_CTRL_1,
						    &data);
			if (ret_val)
				goto out;

			data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
			       E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
			/* auto-media modes need the dual-interface link check */
			if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
			    data == E1000_M88E1112_AUTO_COPPER_BASEX)
				hw->mac.ops.check_for_link =
						igb_check_for_link_media_swap;
		}
		if (phy->id == M88E1512_E_PHY_ID) {
			ret_val = igb_initialize_M88E1512_phy(hw);
			if (ret_val)
				goto out;
		}
		break;
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->ops.get_phy_info = igb_get_phy_info_igp;
		phy->ops.get_cable_length = igb_get_cable_length_igp_2;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
		break;
	case I82580_I_PHY_ID:
	case I350_I_PHY_ID:
		phy->type = e1000_phy_82580;
		phy->ops.force_speed_duplex =
					 igb_phy_force_speed_duplex_82580;
		phy->ops.get_cable_length = igb_get_cable_length_82580;
		phy->ops.get_phy_info = igb_get_phy_info_82580;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
		break;
	case I210_I_PHY_ID:
		phy->type = e1000_phy_i210;
		phy->ops.check_polarity = igb_check_polarity_m88;
		phy->ops.get_phy_info = igb_get_phy_info_m88;
		phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
		break;
	default:
		/* unknown PHY ID - cannot drive this part */
		ret_val = -E1000_ERR_PHY;
		goto out;
	}

out:
	return ret_val;
}
311
312 /**
313 * igb_init_nvm_params_82575 - Init NVM func ptrs.
314 * @hw: pointer to the HW structure
315 **/
igb_init_nvm_params_82575(struct e1000_hw * hw)316 static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
317 {
318 struct e1000_nvm_info *nvm = &hw->nvm;
319 u32 eecd = rd32(E1000_EECD);
320 u16 size;
321
322 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
323 E1000_EECD_SIZE_EX_SHIFT);
324
325 /* Added to a constant, "size" becomes the left-shift value
326 * for setting word_size.
327 */
328 size += NVM_WORD_SIZE_BASE_SHIFT;
329
330 /* Just in case size is out of range, cap it to the largest
331 * EEPROM size supported
332 */
333 if (size > 15)
334 size = 15;
335
336 nvm->word_size = 1 << size;
337 nvm->opcode_bits = 8;
338 nvm->delay_usec = 1;
339
340 switch (nvm->override) {
341 case e1000_nvm_override_spi_large:
342 nvm->page_size = 32;
343 nvm->address_bits = 16;
344 break;
345 case e1000_nvm_override_spi_small:
346 nvm->page_size = 8;
347 nvm->address_bits = 8;
348 break;
349 default:
350 nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
351 nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
352 16 : 8;
353 break;
354 }
355 if (nvm->word_size == (1 << 15))
356 nvm->page_size = 128;
357
358 nvm->type = e1000_nvm_eeprom_spi;
359
360 /* NVM Function Pointers */
361 nvm->ops.acquire = igb_acquire_nvm_82575;
362 nvm->ops.release = igb_release_nvm_82575;
363 nvm->ops.write = igb_write_nvm_spi;
364 nvm->ops.validate = igb_validate_nvm_checksum;
365 nvm->ops.update = igb_update_nvm_checksum;
366 if (nvm->word_size < (1 << 15))
367 nvm->ops.read = igb_read_nvm_eerd;
368 else
369 nvm->ops.read = igb_read_nvm_spi;
370
371 /* override generic family function pointers for specific descendants */
372 switch (hw->mac.type) {
373 case e1000_82580:
374 nvm->ops.validate = igb_validate_nvm_checksum_82580;
375 nvm->ops.update = igb_update_nvm_checksum_82580;
376 break;
377 case e1000_i354:
378 case e1000_i350:
379 nvm->ops.validate = igb_validate_nvm_checksum_i350;
380 nvm->ops.update = igb_update_nvm_checksum_i350;
381 break;
382 default:
383 break;
384 }
385
386 return 0;
387 }
388
389 /**
390 * igb_init_mac_params_82575 - Init MAC func ptrs.
391 * @hw: pointer to the HW structure
392 **/
/**
 *  igb_init_mac_params_82575 - Init MAC func ptrs.
 *  @hw: pointer to the HW structure
 *
 *  Sets register counts, reset and semaphore function pointers, and
 *  family feature flags (EEE, media autosense).  Always returns 0.
 **/
static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;

	/* Set mta register count */
	mac->mta_reg_count = 128;
	/* Set rar entry count - the receive-address register file size
	 * differs per MAC generation.
	 */
	switch (mac->type) {
	case e1000_82576:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
		break;
	case e1000_82580:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
		break;
	case e1000_i350:
	case e1000_i354:
		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
		break;
	default:
		mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
		break;
	}
	/* reset - 82580 and later use a different reset sequence */
	if (mac->type >= e1000_82580)
		mac->ops.reset_hw = igb_reset_hw_82580;
	else
		mac->ops.reset_hw = igb_reset_hw_82575;

	/* i210 and later have their own SW/FW semaphore routines */
	if (mac->type >= e1000_i210) {
		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
		mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;

	} else {
		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
		mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
	}

	/* Set if part includes ASF firmware */
	mac->asf_firmware_present = true;
	/* Set if manageability features are enabled. */
	mac->arc_subsystem_valid =
		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
			? true : false;
	/* enable EEE on i350 parts and later parts */
	if (mac->type >= e1000_i350)
		dev_spec->eee_disable = false;
	else
		dev_spec->eee_disable = true;
	/* Allow a single clear of the SW semaphore on I210 and newer */
	if (mac->type >= e1000_i210)
		dev_spec->clear_semaphore_once = true;
	/* physical interface link setup */
	mac->ops.setup_physical_interface =
		(hw->phy.media_type == e1000_media_type_copper)
			? igb_setup_copper_link_82575
			: igb_setup_serdes_link_82575;

	/* media autosense (MAS) is only supported on some 82580 parts */
	if (mac->type == e1000_82580) {
		switch (hw->device_id) {
		/* feature not supported on these id's */
		case E1000_DEV_ID_DH89XXCC_SGMII:
		case E1000_DEV_ID_DH89XXCC_SERDES:
		case E1000_DEV_ID_DH89XXCC_BACKPLANE:
		case E1000_DEV_ID_DH89XXCC_SFP:
			break;
		default:
			hw->dev_spec._82575.mas_capable = true;
			break;
		}
	}
	return 0;
}
466
467 /**
468 * igb_set_sfp_media_type_82575 - derives SFP module media type.
469 * @hw: pointer to the HW structure
470 *
471 * The media type is chosen based on SFP module.
472 * compatibility flags retrieved from SFP ID EEPROM.
473 **/
igb_set_sfp_media_type_82575(struct e1000_hw * hw)474 static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
475 {
476 s32 ret_val = E1000_ERR_CONFIG;
477 u32 ctrl_ext = 0;
478 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
479 struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
480 u8 tranceiver_type = 0;
481 s32 timeout = 3;
482
483 /* Turn I2C interface ON and power on sfp cage */
484 ctrl_ext = rd32(E1000_CTRL_EXT);
485 ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
486 wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
487
488 wrfl();
489
490 /* Read SFP module data */
491 while (timeout) {
492 ret_val = igb_read_sfp_data_byte(hw,
493 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
494 &tranceiver_type);
495 if (ret_val == 0)
496 break;
497 msleep(100);
498 timeout--;
499 }
500 if (ret_val != 0)
501 goto out;
502
503 ret_val = igb_read_sfp_data_byte(hw,
504 E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
505 (u8 *)eth_flags);
506 if (ret_val != 0)
507 goto out;
508
509 /* Check if there is some SFP module plugged and powered */
510 if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
511 (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
512 dev_spec->module_plugged = true;
513 if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
514 hw->phy.media_type = e1000_media_type_internal_serdes;
515 } else if (eth_flags->e100_base_fx) {
516 dev_spec->sgmii_active = true;
517 hw->phy.media_type = e1000_media_type_internal_serdes;
518 } else if (eth_flags->e1000_base_t) {
519 dev_spec->sgmii_active = true;
520 hw->phy.media_type = e1000_media_type_copper;
521 } else {
522 hw->phy.media_type = e1000_media_type_unknown;
523 hw_dbg("PHY module has not been recognized\n");
524 goto out;
525 }
526 } else {
527 hw->phy.media_type = e1000_media_type_unknown;
528 }
529 ret_val = 0;
530 out:
531 /* Restore I2C interface setting */
532 wr32(E1000_CTRL_EXT, ctrl_ext);
533 return ret_val;
534 }
535
/* Identify the MAC generation from the PCI device ID, derive the media
 * type from the CTRL_EXT link mode (and SFP EEPROM where applicable),
 * then initialize MAC, NVM, mailbox, and PHY parameters.
 */
static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
	s32 ret_val;
	u32 ctrl_ext = 0;
	u32 link_mode = 0;

	/* Map device ID to MAC type */
	switch (hw->device_id) {
	case E1000_DEV_ID_82575EB_COPPER:
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		mac->type = e1000_82575;
		break;
	case E1000_DEV_ID_82576:
	case E1000_DEV_ID_82576_NS:
	case E1000_DEV_ID_82576_NS_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
	case E1000_DEV_ID_82576_SERDES_QUAD:
		mac->type = e1000_82576;
		break;
	case E1000_DEV_ID_82580_COPPER:
	case E1000_DEV_ID_82580_FIBER:
	case E1000_DEV_ID_82580_QUAD_FIBER:
	case E1000_DEV_ID_82580_SERDES:
	case E1000_DEV_ID_82580_SGMII:
	case E1000_DEV_ID_82580_COPPER_DUAL:
	case E1000_DEV_ID_DH89XXCC_SGMII:
	case E1000_DEV_ID_DH89XXCC_SERDES:
	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
	case E1000_DEV_ID_DH89XXCC_SFP:
		mac->type = e1000_82580;
		break;
	case E1000_DEV_ID_I350_COPPER:
	case E1000_DEV_ID_I350_FIBER:
	case E1000_DEV_ID_I350_SERDES:
	case E1000_DEV_ID_I350_SGMII:
		mac->type = e1000_i350;
		break;
	case E1000_DEV_ID_I210_COPPER:
	case E1000_DEV_ID_I210_FIBER:
	case E1000_DEV_ID_I210_SERDES:
	case E1000_DEV_ID_I210_SGMII:
	case E1000_DEV_ID_I210_COPPER_FLASHLESS:
	case E1000_DEV_ID_I210_SERDES_FLASHLESS:
		mac->type = e1000_i210;
		break;
	case E1000_DEV_ID_I211_COPPER:
		mac->type = e1000_i211;
		break;
	case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
	case E1000_DEV_ID_I354_SGMII:
	case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
		mac->type = e1000_i354;
		break;
	default:
		return -E1000_ERR_MAC_INIT;
	}

	/* Set media type */
	/* The 82575 uses bits 22:23 for link mode. The mode can be changed
	 * based on the EEPROM. We cannot rely upon device ID. There
	 * is no distinguishable difference between fiber and internal
	 * SerDes mode on the 82575. There can be an external PHY attached
	 * on the SGMII interface. For this, we'll set sgmii_active to true.
	 */
	hw->phy.media_type = e1000_media_type_copper;
	dev_spec->sgmii_active = false;
	dev_spec->module_plugged = false;

	ctrl_ext = rd32(E1000_CTRL_EXT);

	link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
	switch (link_mode) {
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		hw->phy.media_type = e1000_media_type_internal_serdes;
		break;
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* Get phy control interface type set (MDIO vs. I2C)*/
		if (igb_sgmii_uses_mdio_82575(hw)) {
			hw->phy.media_type = e1000_media_type_copper;
			dev_spec->sgmii_active = true;
			break;
		}
		/* fall through for I2C based SGMII */
	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
		/* read media type from SFP EEPROM */
		ret_val = igb_set_sfp_media_type_82575(hw);
		if ((ret_val != 0) ||
		    (hw->phy.media_type == e1000_media_type_unknown)) {
			/* If media type was not identified then return media
			 * type defined by the CTRL_EXT settings.
			 */
			hw->phy.media_type = e1000_media_type_internal_serdes;

			if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
				hw->phy.media_type = e1000_media_type_copper;
				dev_spec->sgmii_active = true;
			}

			break;
		}

		/* do not change link mode for 100BaseFX */
		if (dev_spec->eth_flags.e100_base_fx)
			break;

		/* change current link mode setting to match the SFP module */
		ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;

		if (hw->phy.media_type == e1000_media_type_copper)
			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
		else
			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;

		wr32(E1000_CTRL_EXT, ctrl_ext);

		break;
	default:
		break;
	}

	/* mac initialization and operations */
	ret_val = igb_init_mac_params_82575(hw);
	if (ret_val)
		goto out;

	/* NVM initialization - i210/i211 replace the generic setup */
	ret_val = igb_init_nvm_params_82575(hw);
	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		ret_val = igb_init_nvm_params_i210(hw);
		break;
	default:
		break;
	}

	if (ret_val)
		goto out;

	/* if part supports SR-IOV then initialize mailbox parameters */
	switch (mac->type) {
	case e1000_82576:
	case e1000_i350:
		igb_init_mbx_params_pf(hw);
		break;
	default:
		break;
	}

	/* setup PHY parameters */
	ret_val = igb_init_phy_params_82575(hw);

out:
	return ret_val;
}
696
697 /**
698 * igb_acquire_phy_82575 - Acquire rights to access PHY
699 * @hw: pointer to the HW structure
700 *
701 * Acquire access rights to the correct PHY. This is a
702 * function pointer entry point called by the api module.
703 **/
igb_acquire_phy_82575(struct e1000_hw * hw)704 static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
705 {
706 u16 mask = E1000_SWFW_PHY0_SM;
707
708 if (hw->bus.func == E1000_FUNC_1)
709 mask = E1000_SWFW_PHY1_SM;
710 else if (hw->bus.func == E1000_FUNC_2)
711 mask = E1000_SWFW_PHY2_SM;
712 else if (hw->bus.func == E1000_FUNC_3)
713 mask = E1000_SWFW_PHY3_SM;
714
715 return hw->mac.ops.acquire_swfw_sync(hw, mask);
716 }
717
718 /**
719 * igb_release_phy_82575 - Release rights to access PHY
720 * @hw: pointer to the HW structure
721 *
722 * A wrapper to release access rights to the correct PHY. This is a
723 * function pointer entry point called by the api module.
724 **/
igb_release_phy_82575(struct e1000_hw * hw)725 static void igb_release_phy_82575(struct e1000_hw *hw)
726 {
727 u16 mask = E1000_SWFW_PHY0_SM;
728
729 if (hw->bus.func == E1000_FUNC_1)
730 mask = E1000_SWFW_PHY1_SM;
731 else if (hw->bus.func == E1000_FUNC_2)
732 mask = E1000_SWFW_PHY2_SM;
733 else if (hw->bus.func == E1000_FUNC_3)
734 mask = E1000_SWFW_PHY3_SM;
735
736 hw->mac.ops.release_swfw_sync(hw, mask);
737 }
738
739 /**
740 * igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
741 * @hw: pointer to the HW structure
742 * @offset: register offset to be read
743 * @data: pointer to the read data
744 *
745 * Reads the PHY register at offset using the serial gigabit media independent
746 * interface and stores the retrieved information in data.
747 **/
igb_read_phy_reg_sgmii_82575(struct e1000_hw * hw,u32 offset,u16 * data)748 static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
749 u16 *data)
750 {
751 s32 ret_val = -E1000_ERR_PARAM;
752
753 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
754 hw_dbg("PHY Address %u is out of range\n", offset);
755 goto out;
756 }
757
758 ret_val = hw->phy.ops.acquire(hw);
759 if (ret_val)
760 goto out;
761
762 ret_val = igb_read_phy_reg_i2c(hw, offset, data);
763
764 hw->phy.ops.release(hw);
765
766 out:
767 return ret_val;
768 }
769
770 /**
771 * igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
772 * @hw: pointer to the HW structure
773 * @offset: register offset to write to
774 * @data: data to write at register offset
775 *
776 * Writes the data to PHY register at the offset using the serial gigabit
777 * media independent interface.
778 **/
igb_write_phy_reg_sgmii_82575(struct e1000_hw * hw,u32 offset,u16 data)779 static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
780 u16 data)
781 {
782 s32 ret_val = -E1000_ERR_PARAM;
783
784
785 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
786 hw_dbg("PHY Address %d is out of range\n", offset);
787 goto out;
788 }
789
790 ret_val = hw->phy.ops.acquire(hw);
791 if (ret_val)
792 goto out;
793
794 ret_val = igb_write_phy_reg_i2c(hw, offset, data);
795
796 hw->phy.ops.release(hw);
797
798 out:
799 return ret_val;
800 }
801
802 /**
803 * igb_get_phy_id_82575 - Retrieve PHY addr and id
804 * @hw: pointer to the HW structure
805 *
806 * Retrieves the PHY address and ID for both PHY's which do and do not use
807 * sgmi interface.
808 **/
/**
 *  igb_get_phy_id_82575 - Retrieve PHY addr and id
 *  @hw: pointer to the HW structure
 *
 *  Retrieves the PHY address and ID for both PHY's which do and do not use
 *  sgmi interface.
 **/
static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_id;
	u32 ctrl_ext;
	u32 mdic;

	/* Extra read required for some PHY's on i354 */
	if (hw->mac.type == e1000_i354)
		igb_get_phy_id(hw);

	/* For SGMII PHYs, we try the list of possible addresses until
	 * we find one that works.  For non-SGMII PHYs
	 * (e.g. integrated copper PHYs), an address of 1 should
	 * work.  The result of this function should mean phy->phy_addr
	 * and phy->id are set correctly.
	 */
	if (!(igb_sgmii_active_82575(hw))) {
		phy->addr = 1;
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	if (igb_sgmii_uses_mdio_82575(hw)) {
		/* external MDIO: the PHY address is programmed into
		 * MDIC (82575/82576) or MDICNFG (later MACs).
		 */
		switch (hw->mac.type) {
		case e1000_82575:
		case e1000_82576:
			mdic = rd32(E1000_MDIC);
			mdic &= E1000_MDIC_PHY_MASK;
			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
			break;
		case e1000_82580:
		case e1000_i350:
		case e1000_i354:
		case e1000_i210:
		case e1000_i211:
			mdic = rd32(E1000_MDICNFG);
			mdic &= E1000_MDICNFG_PHY_MASK;
			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
			break;
		default:
			ret_val = -E1000_ERR_PHY;
			goto out;
		}
		ret_val = igb_get_phy_id(hw);
		goto out;
	}

	/* Power on sgmii phy if it is disabled */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
	wrfl();
	msleep(300);

	/* The address field in the I2CCMD register is 3 bits and 0 is invalid.
	 * Therefore, we need to test 1-7
	 */
	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
		if (ret_val == 0) {
			hw_dbg("Vendor ID 0x%08X read at address %u\n",
			       phy_id, phy->addr);
			/* At the time of this writing, The M88 part is
			 * the only supported SGMII PHY product.
			 */
			if (phy_id == M88_VENDOR)
				break;
		} else {
			hw_dbg("PHY address %u was unreadable\n", phy->addr);
		}
	}

	/* A valid PHY type couldn't be found. */
	if (phy->addr == 8) {
		phy->addr = 0;
		ret_val = -E1000_ERR_PHY;
		goto out;
	} else {
		ret_val = igb_get_phy_id(hw);
	}

	/* restore previous sfp cage power state */
	wr32(E1000_CTRL_EXT, ctrl_ext);

out:
	return ret_val;
}
897
898 /**
899 * igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
900 * @hw: pointer to the HW structure
901 *
902 * Resets the PHY using the serial gigabit media independent interface.
903 **/
igb_phy_hw_reset_sgmii_82575(struct e1000_hw * hw)904 static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
905 {
906 struct e1000_phy_info *phy = &hw->phy;
907 s32 ret_val;
908
909 /* This isn't a true "hard" reset, but is the only reset
910 * available to us at this time.
911 */
912
913 hw_dbg("Soft resetting SGMII attached PHY...\n");
914
915 /* SFP documentation requires the following to configure the SPF module
916 * to work on SGMII. No further documentation is given.
917 */
918 ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
919 if (ret_val)
920 goto out;
921
922 ret_val = igb_phy_sw_reset(hw);
923 if (ret_val)
924 goto out;
925
926 if (phy->id == M88E1512_E_PHY_ID)
927 ret_val = igb_initialize_M88E1512_phy(hw);
928 out:
929 return ret_val;
930 }
931
932 /**
933 * igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
934 * @hw: pointer to the HW structure
935 * @active: true to enable LPLU, false to disable
936 *
937 * Sets the LPLU D0 state according to the active flag. When
938 * activating LPLU this function also disables smart speed
939 * and vice versa. LPLU will not be activated unless the
940 * device autonegotiation advertisement meets standards of
941 * either 10 or 10/100 or 10/100/1000 at all duplexes.
942 * This is a function pointer entry point only called by
943 * PHY setup routines.
944 **/
igb_set_d0_lplu_state_82575(struct e1000_hw * hw,bool active)945 static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
946 {
947 struct e1000_phy_info *phy = &hw->phy;
948 s32 ret_val;
949 u16 data;
950
951 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
952 if (ret_val)
953 goto out;
954
955 if (active) {
956 data |= IGP02E1000_PM_D0_LPLU;
957 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
958 data);
959 if (ret_val)
960 goto out;
961
962 /* When LPLU is enabled, we should disable SmartSpeed */
963 ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
964 &data);
965 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
966 ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
967 data);
968 if (ret_val)
969 goto out;
970 } else {
971 data &= ~IGP02E1000_PM_D0_LPLU;
972 ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
973 data);
974 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
975 * during Dx states where the power conservation is most
976 * important. During driver activity we should enable
977 * SmartSpeed, so performance is maintained.
978 */
979 if (phy->smart_speed == e1000_smart_speed_on) {
980 ret_val = phy->ops.read_reg(hw,
981 IGP01E1000_PHY_PORT_CONFIG, &data);
982 if (ret_val)
983 goto out;
984
985 data |= IGP01E1000_PSCFR_SMART_SPEED;
986 ret_val = phy->ops.write_reg(hw,
987 IGP01E1000_PHY_PORT_CONFIG, data);
988 if (ret_val)
989 goto out;
990 } else if (phy->smart_speed == e1000_smart_speed_off) {
991 ret_val = phy->ops.read_reg(hw,
992 IGP01E1000_PHY_PORT_CONFIG, &data);
993 if (ret_val)
994 goto out;
995
996 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
997 ret_val = phy->ops.write_reg(hw,
998 IGP01E1000_PHY_PORT_CONFIG, data);
999 if (ret_val)
1000 goto out;
1001 }
1002 }
1003
1004 out:
1005 return ret_val;
1006 }
1007
1008 /**
1009 * igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
1010 * @hw: pointer to the HW structure
1011 * @active: true to enable LPLU, false to disable
1012 *
1013 * Sets the LPLU D0 state according to the active flag. When
1014 * activating LPLU this function also disables smart speed
1015 * and vice versa. LPLU will not be activated unless the
1016 * device autonegotiation advertisement meets standards of
1017 * either 10 or 10/100 or 10/100/1000 at all duplexes.
1018 * This is a function pointer entry point only called by
1019 * PHY setup routines.
1020 **/
igb_set_d0_lplu_state_82580(struct e1000_hw * hw,bool active)1021 static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
1022 {
1023 struct e1000_phy_info *phy = &hw->phy;
1024 u16 data;
1025
1026 data = rd32(E1000_82580_PHY_POWER_MGMT);
1027
1028 if (active) {
1029 data |= E1000_82580_PM_D0_LPLU;
1030
1031 /* When LPLU is enabled, we should disable SmartSpeed */
1032 data &= ~E1000_82580_PM_SPD;
1033 } else {
1034 data &= ~E1000_82580_PM_D0_LPLU;
1035
1036 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
1037 * during Dx states where the power conservation is most
1038 * important. During driver activity we should enable
1039 * SmartSpeed, so performance is maintained.
1040 */
1041 if (phy->smart_speed == e1000_smart_speed_on)
1042 data |= E1000_82580_PM_SPD;
1043 else if (phy->smart_speed == e1000_smart_speed_off)
1044 data &= ~E1000_82580_PM_SPD; }
1045
1046 wr32(E1000_82580_PHY_POWER_MGMT, data);
1047 return 0;
1048 }
1049
1050 /**
1051 * igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
1052 * @hw: pointer to the HW structure
1053 * @active: boolean used to enable/disable lplu
1054 *
1055 * Success returns 0, Failure returns 1
1056 *
1057 * The low power link up (lplu) state is set to the power management level D3
1058 * and SmartSpeed is disabled when active is true, else clear lplu for D3
1059 * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU
1060 * is used during Dx states where the power conservation is most important.
1061 * During driver activity, SmartSpeed should be enabled so performance is
1062 * maintained.
1063 **/
igb_set_d3_lplu_state_82580(struct e1000_hw * hw,bool active)1064 static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
1065 {
1066 struct e1000_phy_info *phy = &hw->phy;
1067 u16 data;
1068
1069 data = rd32(E1000_82580_PHY_POWER_MGMT);
1070
1071 if (!active) {
1072 data &= ~E1000_82580_PM_D3_LPLU;
1073 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used
1074 * during Dx states where the power conservation is most
1075 * important. During driver activity we should enable
1076 * SmartSpeed, so performance is maintained.
1077 */
1078 if (phy->smart_speed == e1000_smart_speed_on)
1079 data |= E1000_82580_PM_SPD;
1080 else if (phy->smart_speed == e1000_smart_speed_off)
1081 data &= ~E1000_82580_PM_SPD;
1082 } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
1083 (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
1084 (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
1085 data |= E1000_82580_PM_D3_LPLU;
1086 /* When LPLU is enabled, we should disable SmartSpeed */
1087 data &= ~E1000_82580_PM_SPD;
1088 }
1089
1090 wr32(E1000_82580_PHY_POWER_MGMT, data);
1091 return 0;
1092 }
1093
1094 /**
1095 * igb_acquire_nvm_82575 - Request for access to EEPROM
1096 * @hw: pointer to the HW structure
1097 *
1098 * Acquire the necessary semaphores for exclusive access to the EEPROM.
1099 * Set the EEPROM access request bit and wait for EEPROM access grant bit.
1100 * Return successful if access grant bit set, else clear the request for
1101 * EEPROM access and return -E1000_ERR_NVM (-1).
1102 **/
igb_acquire_nvm_82575(struct e1000_hw * hw)1103 static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
1104 {
1105 s32 ret_val;
1106
1107 ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
1108 if (ret_val)
1109 goto out;
1110
1111 ret_val = igb_acquire_nvm(hw);
1112
1113 if (ret_val)
1114 hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
1115
1116 out:
1117 return ret_val;
1118 }
1119
1120 /**
1121 * igb_release_nvm_82575 - Release exclusive access to EEPROM
1122 * @hw: pointer to the HW structure
1123 *
1124 * Stop any current commands to the EEPROM and clear the EEPROM request bit,
1125 * then release the semaphores acquired.
1126 **/
static void igb_release_nvm_82575(struct e1000_hw *hw)
{
	/* Release in reverse acquire order: drop the NVM request bit first,
	 * then the SW/FW EEPROM semaphore taken in igb_acquire_nvm_82575().
	 */
	igb_release_nvm(hw);
	hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
}
1132
1133 /**
1134 * igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
1135 * @hw: pointer to the HW structure
1136 * @mask: specifies which semaphore to acquire
1137 *
1138 * Acquire the SW/FW semaphore to access the PHY or NVM. The mask
1139 * will also specify which port we're acquiring the lock for.
1140 **/
igb_acquire_swfw_sync_82575(struct e1000_hw * hw,u16 mask)1141 static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
1142 {
1143 u32 swfw_sync;
1144 u32 swmask = mask;
1145 u32 fwmask = mask << 16;
1146 s32 ret_val = 0;
1147 s32 i = 0, timeout = 200;
1148
1149 while (i < timeout) {
1150 if (igb_get_hw_semaphore(hw)) {
1151 ret_val = -E1000_ERR_SWFW_SYNC;
1152 goto out;
1153 }
1154
1155 swfw_sync = rd32(E1000_SW_FW_SYNC);
1156 if (!(swfw_sync & (fwmask | swmask)))
1157 break;
1158
1159 /* Firmware currently using resource (fwmask)
1160 * or other software thread using resource (swmask)
1161 */
1162 igb_put_hw_semaphore(hw);
1163 mdelay(5);
1164 i++;
1165 }
1166
1167 if (i == timeout) {
1168 hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
1169 ret_val = -E1000_ERR_SWFW_SYNC;
1170 goto out;
1171 }
1172
1173 swfw_sync |= swmask;
1174 wr32(E1000_SW_FW_SYNC, swfw_sync);
1175
1176 igb_put_hw_semaphore(hw);
1177
1178 out:
1179 return ret_val;
1180 }
1181
1182 /**
1183 * igb_release_swfw_sync_82575 - Release SW/FW semaphore
1184 * @hw: pointer to the HW structure
1185 * @mask: specifies which semaphore to acquire
1186 *
1187 * Release the SW/FW semaphore used to access the PHY or NVM. The mask
1188 * will also specify which port we're releasing the lock for.
1189 **/
static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
	u32 swfw_sync;

	/* Release cannot fail, so spin until the HW semaphore is ours
	 * rather than propagating an error.
	 */
	while (igb_get_hw_semaphore(hw) != 0)
		; /* Empty */

	/* Clear our ownership bits in SW_FW_SYNC, then drop the HW
	 * semaphore acquired above.
	 */
	swfw_sync = rd32(E1000_SW_FW_SYNC);
	swfw_sync &= ~mask;
	wr32(E1000_SW_FW_SYNC, swfw_sync);

	igb_put_hw_semaphore(hw);
}
1203
1204 /**
1205 * igb_get_cfg_done_82575 - Read config done bit
1206 * @hw: pointer to the HW structure
1207 *
1208 * Read the management control register for the config done bit for
1209 * completion status. NOTE: silicon which is EEPROM-less will fail trying
1210 * to read the config done bit, so an error is *ONLY* logged and returns
1211 * 0. If we were to return with error, EEPROM-less silicon
1212 * would not be able to be reset or change link.
1213 **/
igb_get_cfg_done_82575(struct e1000_hw * hw)1214 static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
1215 {
1216 s32 timeout = PHY_CFG_TIMEOUT;
1217 u32 mask = E1000_NVM_CFG_DONE_PORT_0;
1218
1219 if (hw->bus.func == 1)
1220 mask = E1000_NVM_CFG_DONE_PORT_1;
1221 else if (hw->bus.func == E1000_FUNC_2)
1222 mask = E1000_NVM_CFG_DONE_PORT_2;
1223 else if (hw->bus.func == E1000_FUNC_3)
1224 mask = E1000_NVM_CFG_DONE_PORT_3;
1225
1226 while (timeout) {
1227 if (rd32(E1000_EEMNGCTL) & mask)
1228 break;
1229 usleep_range(1000, 2000);
1230 timeout--;
1231 }
1232 if (!timeout)
1233 hw_dbg("MNG configuration cycle has not completed.\n");
1234
1235 /* If EEPROM is not marked present, init the PHY manually */
1236 if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
1237 (hw->phy.type == e1000_phy_igp_3))
1238 igb_phy_init_script_igp3(hw);
1239
1240 return 0;
1241 }
1242
1243 /**
1244 * igb_get_link_up_info_82575 - Get link speed/duplex info
1245 * @hw: pointer to the HW structure
1246 * @speed: stores the current speed
1247 * @duplex: stores the current duplex
1248 *
1249 * This is a wrapper function, if using the serial gigabit media independent
1250 * interface, use PCS to retrieve the link speed and duplex information.
1251 * Otherwise, use the generic function to get the link speed and duplex info.
1252 **/
igb_get_link_up_info_82575(struct e1000_hw * hw,u16 * speed,u16 * duplex)1253 static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
1254 u16 *duplex)
1255 {
1256 s32 ret_val;
1257
1258 if (hw->phy.media_type != e1000_media_type_copper)
1259 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
1260 duplex);
1261 else
1262 ret_val = igb_get_speed_and_duplex_copper(hw, speed,
1263 duplex);
1264
1265 return ret_val;
1266 }
1267
1268 /**
1269 * igb_check_for_link_82575 - Check for link
1270 * @hw: pointer to the HW structure
1271 *
1272 * If sgmii is enabled, then use the pcs register to determine link, otherwise
1273 * use the generic interface for determining link.
1274 **/
static s32 igb_check_for_link_82575(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 speed, duplex;

	if (hw->phy.media_type != e1000_media_type_copper) {
		/* Called for its side effect of refreshing
		 * mac.serdes_has_link; the speed/duplex results are
		 * discarded here.
		 */
		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
							     &duplex);
		/* Use this flag to determine if link needs to be checked or
		 * not.  If we have link clear the flag so that we do not
		 * continue to check for link.
		 */
		hw->mac.get_link_status = !hw->mac.serdes_has_link;

		/* Configure Flow Control now that Auto-Neg has completed.
		 * First, we need to restore the desired flow control
		 * settings because we may have had to re-autoneg with a
		 * different link partner.
		 */
		ret_val = igb_config_fc_after_link_up(hw);
		if (ret_val)
			hw_dbg("Error configuring flow control\n");
	} else {
		ret_val = igb_check_for_copper_link(hw);
	}

	return ret_val;
}
1303
1304 /**
1305 * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
1306 * @hw: pointer to the HW structure
1307 **/
igb_power_up_serdes_link_82575(struct e1000_hw * hw)1308 void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
1309 {
1310 u32 reg;
1311
1312
1313 if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
1314 !igb_sgmii_active_82575(hw))
1315 return;
1316
1317 /* Enable PCS to turn on link */
1318 reg = rd32(E1000_PCS_CFG0);
1319 reg |= E1000_PCS_CFG_PCS_EN;
1320 wr32(E1000_PCS_CFG0, reg);
1321
1322 /* Power up the laser */
1323 reg = rd32(E1000_CTRL_EXT);
1324 reg &= ~E1000_CTRL_EXT_SDP3_DATA;
1325 wr32(E1000_CTRL_EXT, reg);
1326
1327 /* flush the write to verify completion */
1328 wrfl();
1329 usleep_range(1000, 2000);
1330 }
1331
1332 /**
1333 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
1334 * @hw: pointer to the HW structure
1335 * @speed: stores the current speed
1336 * @duplex: stores the current duplex
1337 *
1338 * Using the physical coding sub-layer (PCS), retrieve the current speed and
1339 * duplex, then store the values in the pointers provided.
1340 **/
igb_get_pcs_speed_and_duplex_82575(struct e1000_hw * hw,u16 * speed,u16 * duplex)1341 static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
1342 u16 *duplex)
1343 {
1344 struct e1000_mac_info *mac = &hw->mac;
1345 u32 pcs, status;
1346
1347 /* Set up defaults for the return values of this function */
1348 mac->serdes_has_link = false;
1349 *speed = 0;
1350 *duplex = 0;
1351
1352 /* Read the PCS Status register for link state. For non-copper mode,
1353 * the status register is not accurate. The PCS status register is
1354 * used instead.
1355 */
1356 pcs = rd32(E1000_PCS_LSTAT);
1357
1358 /* The link up bit determines when link is up on autoneg. The sync ok
1359 * gets set once both sides sync up and agree upon link. Stable link
1360 * can be determined by checking for both link up and link sync ok
1361 */
1362 if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
1363 mac->serdes_has_link = true;
1364
1365 /* Detect and store PCS speed */
1366 if (pcs & E1000_PCS_LSTS_SPEED_1000)
1367 *speed = SPEED_1000;
1368 else if (pcs & E1000_PCS_LSTS_SPEED_100)
1369 *speed = SPEED_100;
1370 else
1371 *speed = SPEED_10;
1372
1373 /* Detect and store PCS duplex */
1374 if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
1375 *duplex = FULL_DUPLEX;
1376 else
1377 *duplex = HALF_DUPLEX;
1378
1379 /* Check if it is an I354 2.5Gb backplane connection. */
1380 if (mac->type == e1000_i354) {
1381 status = rd32(E1000_STATUS);
1382 if ((status & E1000_STATUS_2P5_SKU) &&
1383 !(status & E1000_STATUS_2P5_SKU_OVER)) {
1384 *speed = SPEED_2500;
1385 *duplex = FULL_DUPLEX;
1386 hw_dbg("2500 Mbs, ");
1387 hw_dbg("Full Duplex\n");
1388 }
1389 }
1390
1391 }
1392
1393 return 0;
1394 }
1395
1396 /**
1397 * igb_shutdown_serdes_link_82575 - Remove link during power down
1398 * @hw: pointer to the HW structure
1399 *
1400 * In the case of fiber serdes, shut down optics and PCS on driver unload
1401 * when management pass thru is not enabled.
1402 **/
void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
	u32 reg;

	/* NOTE(review): this guard is asymmetric with
	 * igb_power_up_serdes_link_82575(), which returns early when the
	 * link is neither internal serdes NOR SGMII
	 * (!igb_sgmii_active_82575).  Here the SGMII test is not negated,
	 * so an SGMII-active non-serdes port returns without shutting
	 * down - confirm this asymmetry is intentional.
	 */
	if (hw->phy.media_type != e1000_media_type_internal_serdes &&
	    igb_sgmii_active_82575(hw))
		return;

	/* Only drop the link when management pass thru is not enabled;
	 * otherwise the management controller still needs the port.
	 */
	if (!igb_enable_mng_pass_thru(hw)) {
		/* Disable PCS to turn off link */
		reg = rd32(E1000_PCS_CFG0);
		reg &= ~E1000_PCS_CFG_PCS_EN;
		wr32(E1000_PCS_CFG0, reg);

		/* shutdown the laser */
		reg = rd32(E1000_CTRL_EXT);
		reg |= E1000_CTRL_EXT_SDP3_DATA;
		wr32(E1000_CTRL_EXT, reg);

		/* flush the write to verify completion */
		wrfl();
		usleep_range(1000, 2000);
	}
}
1427
1428 /**
1429 * igb_reset_hw_82575 - Reset hardware
1430 * @hw: pointer to the HW structure
1431 *
1432 * This resets the hardware into a known state. This is a
1433 * function pointer entry point called by the api module.
1434 **/
static s32 igb_reset_hw_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	/* set the completion timeout for interface */
	ret_val = igb_set_pcie_completion_timeout(hw);
	if (ret_val)
		hw_dbg("PCI-E Set completion timeout has failed.\n");

	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);

	/* Quiesce RX/TX before the reset: disable receives, let pending
	 * transmits complete (PSP), flush the writes, then wait.
	 */
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	usleep_range(10000, 20000);

	ctrl = rd32(E1000_CTRL);

	hw_dbg("Issuing a global reset to MAC\n");
	wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* If EEPROM is not present, run manual init scripts */
	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
		igb_reset_init_script_82575(hw);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	rd32(E1000_ICR);

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	return ret_val;
}
1488
1489 /**
1490 * igb_init_hw_82575 - Initialize hardware
1491 * @hw: pointer to the HW structure
1492 *
1493 * This inits the hardware readying it for operation.
1494 **/
static s32 igb_init_hw_82575(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;
	s32 ret_val;
	u16 i, rar_count = mac->rar_entry_count;

	/* Flashless i210/i211 parts need the PLL workaround applied
	 * before anything else; a failure here is fatal.
	 */
	if ((hw->mac.type >= e1000_i210) &&
	    !(igb_get_flash_presence_i210(hw))) {
		ret_val = igb_pll_workaround_i210(hw);
		if (ret_val)
			return ret_val;
	}

	/* Initialize identification LED */
	ret_val = igb_id_led_init(hw);
	if (ret_val) {
		hw_dbg("Error initializing identification LED\n");
		/* This is not fatal and we should not stop init due to this */
	}

	/* Disabling VLAN filtering */
	hw_dbg("Initializing the IEEE VLAN\n");
	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
		igb_clear_vfta_i350(hw);
	else
		igb_clear_vfta(hw);

	/* Setup the receive address */
	igb_init_rx_addrs(hw, rar_count);

	/* Zero out the Multicast HASH table */
	hw_dbg("Zeroing the MTA\n");
	for (i = 0; i < mac->mta_reg_count; i++)
		array_wr32(E1000_MTA, i, 0);

	/* Zero out the Unicast HASH table */
	hw_dbg("Zeroing the UTA\n");
	for (i = 0; i < mac->uta_reg_count; i++)
		array_wr32(E1000_UTA, i, 0);

	/* Setup link and flow control */
	ret_val = igb_setup_link(hw);

	/* Clear all of the statistics registers (clear on read).  It is
	 * important that we do this after we have tried to establish link
	 * because the symbol error count will increment wildly if there
	 * is no link.
	 */
	igb_clear_hw_cntrs_82575(hw);
	return ret_val;
}
1546
1547 /**
1548 * igb_setup_copper_link_82575 - Configure copper link settings
1549 * @hw: pointer to the HW structure
1550 *
1551 * Configures the link for auto-neg or forced speed and duplex. Then we check
1552 * for link, once link is established calls to configure collision distance
1553 * and flow control are called.
1554 **/
static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 ret_val;
	u32 phpm_reg;

	/* Set Link Up and let the PHY resolve speed/duplex instead of
	 * forcing them at the MAC.
	 */
	ctrl = rd32(E1000_CTRL);
	ctrl |= E1000_CTRL_SLU;
	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
	wr32(E1000_CTRL, ctrl);

	/* Clear Go Link Disconnect bit on supported devices */
	switch (hw->mac.type) {
	case e1000_82580:
	case e1000_i350:
	case e1000_i210:
	case e1000_i211:
		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
		break;
	default:
		break;
	}

	/* SGMII copper links still need the serdes/PCS side configured. */
	ret_val = igb_setup_serdes_link_82575(hw);
	if (ret_val)
		goto out;

	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
		/* allow time for SFP cage time to power up phy */
		msleep(300);

		ret_val = hw->phy.ops.reset(hw);
		if (ret_val) {
			hw_dbg("Error resetting the PHY.\n");
			goto out;
		}
	}
	/* Dispatch to the PHY-model specific copper link setup routine. */
	switch (hw->phy.type) {
	case e1000_phy_i210:
	case e1000_phy_m88:
		switch (hw->phy.id) {
		case I347AT4_E_PHY_ID:
		case M88E1112_E_PHY_ID:
		case M88E1543_E_PHY_ID:
		case M88E1512_E_PHY_ID:
		case I210_I_PHY_ID:
			ret_val = igb_copper_link_setup_m88_gen2(hw);
			break;
		default:
			ret_val = igb_copper_link_setup_m88(hw);
			break;
		}
		break;
	case e1000_phy_igp_3:
		ret_val = igb_copper_link_setup_igp(hw);
		break;
	case e1000_phy_82580:
		ret_val = igb_copper_link_setup_82580(hw);
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	if (ret_val)
		goto out;

	/* Generic copper link setup: waits for link and configures
	 * collision distance and flow control.
	 */
	ret_val = igb_setup_copper_link(hw);
out:
	return ret_val;
}
1628
1629 /**
1630 * igb_setup_serdes_link_82575 - Setup link for serdes
1631 * @hw: pointer to the HW structure
1632 *
1633 * Configure the physical coding sub-layer (PCS) link. The PCS link is
1634 * used on copper connections where the serialized gigabit media independent
1635 * interface (sgmii), or serdes fiber is being used. Configures the link
1636 * for auto-negotiation or forces speed/duplex.
1637 **/
static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
	u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
	bool pcs_autoneg;
	s32 ret_val = 0;
	u16 data;

	/* Only fiber/serdes and SGMII links need PCS configuration. */
	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
	    !igb_sgmii_active_82575(hw))
		return ret_val;


	/* On the 82575, SerDes loopback mode persists until it is
	 * explicitly turned off or a power cycle is performed. A read to
	 * the register does not indicate its status. Therefore, we ensure
	 * loopback mode is disabled during initialization.
	 */
	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);

	/* power on the sfp cage if present and turn on I2C */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
	ctrl_ext |= E1000_CTRL_I2C_ENA;
	wr32(E1000_CTRL_EXT, ctrl_ext);

	ctrl_reg = rd32(E1000_CTRL);
	ctrl_reg |= E1000_CTRL_SLU;

	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
		/* set both sw defined pins */
		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;

		/* Set switch control to serdes energy detect */
		reg = rd32(E1000_CONNSW);
		reg |= E1000_CONNSW_ENRGSRC;
		wr32(E1000_CONNSW, reg);
	}

	reg = rd32(E1000_PCS_LCTL);

	/* default pcs_autoneg to the same setting as mac autoneg */
	pcs_autoneg = hw->mac.autoneg;

	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
	case E1000_CTRL_EXT_LINK_MODE_SGMII:
		/* sgmii mode lets the phy handle forcing speed/duplex */
		pcs_autoneg = true;
		/* autoneg time out should be disabled for SGMII mode */
		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
		break;
	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
		/* disable PCS autoneg and support parallel detect only */
		pcs_autoneg = false;
		/* fall through - KX shares the default-case MAC forcing */
	default:
		if (hw->mac.type == e1000_82575 ||
		    hw->mac.type == e1000_82576) {
			/* An NVM compatibility-word bit can force PCS
			 * autoneg off on these two parts.
			 */
			ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
			if (ret_val) {
				/* NOTE(review): KERN_DEBUG prefixed inside
				 * hw_dbg() likely emits a stray loglevel
				 * string - confirm against the hw_dbg
				 * definition.
				 */
				hw_dbg(KERN_DEBUG "NVM Read Error\n\n");
				return ret_val;
			}

			if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
				pcs_autoneg = false;
		}

		/* non-SGMII modes only supports a speed of 1000/Full for the
		 * link so it is best to just force the MAC and let the pcs
		 * link either autoneg or be forced to 1000/Full
		 */
		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
			E1000_CTRL_FD | E1000_CTRL_FRCDPX;

		/* set speed of 1000/Full if speed/duplex is forced */
		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
		break;
	}

	wr32(E1000_CTRL, ctrl_reg);

	/* New SerDes mode allows for forcing speed or autonegotiating speed
	 * at 1gb. Autoneg should be default set by most drivers. This is the
	 * mode that will be compatible with older link partners and switches.
	 * However, both are supported by the hardware and some drivers/tools.
	 */
	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
		 E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);

	if (pcs_autoneg) {
		/* Set PCS register for autoneg */
		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */

		/* Disable force flow control for autoneg */
		reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;

		/* Configure flow control advertisement for autoneg */
		anadv_reg = rd32(E1000_PCS_ANADV);
		anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
		switch (hw->fc.requested_mode) {
		case e1000_fc_full:
		case e1000_fc_rx_pause:
			anadv_reg |= E1000_TXCW_ASM_DIR;
			anadv_reg |= E1000_TXCW_PAUSE;
			break;
		case e1000_fc_tx_pause:
			anadv_reg |= E1000_TXCW_ASM_DIR;
			break;
		default:
			break;
		}
		wr32(E1000_PCS_ANADV, anadv_reg);

		hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
	} else {
		/* Set PCS register for forced link */
		reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */

		/* Force flow control for forced link */
		reg |= E1000_PCS_LCTL_FORCE_FCTRL;

		hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
	}

	wr32(E1000_PCS_LCTL, reg);

	/* Forced links that are not SGMII also need MAC-level flow
	 * control forced to match the PCS setting.
	 */
	if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
		igb_force_mac_fc(hw);

	return ret_val;
}
1769
1770 /**
1771 * igb_sgmii_active_82575 - Return sgmii state
1772 * @hw: pointer to the HW structure
1773 *
1774 * 82575 silicon has a serialized gigabit media independent interface (sgmii)
1775 * which can be enabled for use in the embedded applications. Simply
1776 * return the current state of the sgmii interface.
1777 **/
igb_sgmii_active_82575(struct e1000_hw * hw)1778 static bool igb_sgmii_active_82575(struct e1000_hw *hw)
1779 {
1780 struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
1781 return dev_spec->sgmii_active;
1782 }
1783
1784 /**
1785 * igb_reset_init_script_82575 - Inits HW defaults after reset
1786 * @hw: pointer to the HW structure
1787 *
1788 * Inits recommended HW defaults after a reset when there is no EEPROM
1789 * detected. This is only for the 82575.
1790 **/
igb_reset_init_script_82575(struct e1000_hw * hw)1791 static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
1792 {
1793 if (hw->mac.type == e1000_82575) {
1794 hw_dbg("Running reset init script for 82575\n");
1795 /* SerDes configuration via SERDESCTRL */
1796 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
1797 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
1798 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
1799 igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
1800
1801 /* CCM configuration via CCMCTL register */
1802 igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
1803 igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
1804
1805 /* PCIe lanes configuration */
1806 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
1807 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
1808 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
1809 igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
1810
1811 /* PCIe PLL Configuration */
1812 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
1813 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
1814 igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
1815 }
1816
1817 return 0;
1818 }
1819
1820 /**
1821 * igb_read_mac_addr_82575 - Read device MAC address
1822 * @hw: pointer to the HW structure
1823 **/
igb_read_mac_addr_82575(struct e1000_hw * hw)1824 static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1825 {
1826 s32 ret_val = 0;
1827
1828 /* If there's an alternate MAC address place it in RAR0
1829 * so that it will override the Si installed default perm
1830 * address.
1831 */
1832 ret_val = igb_check_alt_mac_addr(hw);
1833 if (ret_val)
1834 goto out;
1835
1836 ret_val = igb_read_mac_addr(hw);
1837
1838 out:
1839 return ret_val;
1840 }
1841
1842 /**
1843 * igb_power_down_phy_copper_82575 - Remove link during PHY power down
1844 * @hw: pointer to the HW structure
1845 *
1846 * In the case of a PHY power down to save power, or to turn off link during a
1847 * driver unload, or wake on lan is not enabled, remove the link.
1848 **/
void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
{
	/* If the management interface is not enabled, then power down */
	/* Keep the PHY up if either manageability pass thru is active or
	 * a reset block indicates firmware still owns the PHY.
	 */
	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
		igb_power_down_phy_copper(hw);
}
1855
1856 /**
1857 * igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
1858 * @hw: pointer to the HW structure
1859 *
1860 * Clears the hardware counters by reading the counter registers.
1861 **/
static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
{
	/* These statistics registers are clear-on-read: reading them (and
	 * discarding the result) resets each counter to zero.
	 */
	igb_clear_hw_cntrs_base(hw);

	/* RX/TX packet-size bin counters */
	rd32(E1000_PRC64);
	rd32(E1000_PRC127);
	rd32(E1000_PRC255);
	rd32(E1000_PRC511);
	rd32(E1000_PRC1023);
	rd32(E1000_PRC1522);
	rd32(E1000_PTC64);
	rd32(E1000_PTC127);
	rd32(E1000_PTC255);
	rd32(E1000_PTC511);
	rd32(E1000_PTC1023);
	rd32(E1000_PTC1522);

	/* Error counters */
	rd32(E1000_ALGNERRC);
	rd32(E1000_RXERRC);
	rd32(E1000_TNCRS);
	rd32(E1000_CEXTERR);
	rd32(E1000_TSCTC);
	rd32(E1000_TSCTFC);

	/* Management packet counters */
	rd32(E1000_MGTPRC);
	rd32(E1000_MGTPDC);
	rd32(E1000_MGTPTC);

	rd32(E1000_IAC);
	rd32(E1000_ICRXOC);

	/* Interrupt cause counters */
	rd32(E1000_ICRXPTC);
	rd32(E1000_ICRXATC);
	rd32(E1000_ICTXPTC);
	rd32(E1000_ICTXATC);
	rd32(E1000_ICTXQEC);
	rd32(E1000_ICTXQMTC);
	rd32(E1000_ICRXDMTC);

	/* Host/circuit-breaker and host-good counters */
	rd32(E1000_CBTMPC);
	rd32(E1000_HTDPMC);
	rd32(E1000_CBRMPC);
	rd32(E1000_RPTHC);
	rd32(E1000_HGPTC);
	rd32(E1000_HTCBDPC);
	rd32(E1000_HGORCL);
	rd32(E1000_HGORCH);
	rd32(E1000_HGOTCL);
	rd32(E1000_HGOTCH);
	rd32(E1000_LENERRS);

	/* This register should not be read in copper configurations */
	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
	    igb_sgmii_active_82575(hw))
		rd32(E1000_SCVPC);
}
1918
1919 /**
1920 * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
1921 * @hw: pointer to the HW structure
1922 *
1923 * After rx enable if manageability is enabled then there is likely some
1924 * bad data at the start of the fifo and possibly in the DMA fifo. This
1925 * function clears the fifos and flushes any packets that came in as rx was
1926 * being enabled.
1927 **/
void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
{
	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
	int i, ms_wait;

	/* disable IPv6 options as per hardware errata */
	rfctl = rd32(E1000_RFCTL);
	rfctl |= E1000_RFCTL_IPV6_EX_DIS;
	wr32(E1000_RFCTL, rfctl);

	/* The flush itself is only needed on 82575 parts with the
	 * manageability receive path (RCV_TCO) enabled.
	 */
	if (hw->mac.type != e1000_82575 ||
	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
		return;

	/* Disable all RX queues */
	for (i = 0; i < 4; i++) {
		rxdctl[i] = rd32(E1000_RXDCTL(i));
		wr32(E1000_RXDCTL(i),
		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
	}
	/* Poll all queues to verify they have shut down */
	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
		usleep_range(1000, 2000);
		rx_enabled = 0;
		for (i = 0; i < 4; i++)
			rx_enabled |= rd32(E1000_RXDCTL(i));
		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
			break;
	}

	if (ms_wait == 10)
		hw_dbg("Queue disable timed out after 10ms\n");

	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
	 * incoming packets are rejected. Set enable and wait 2ms so that
	 * any packet that was coming in as RCTL.EN was set is flushed
	 */
	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);

	rlpml = rd32(E1000_RLPML);
	wr32(E1000_RLPML, 0);

	rctl = rd32(E1000_RCTL);
	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
	temp_rctl |= E1000_RCTL_LPE;

	wr32(E1000_RCTL, temp_rctl);
	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
	wrfl();
	usleep_range(2000, 3000);

	/* Enable RX queues that were previously enabled and restore our
	 * previous state
	 */
	for (i = 0; i < 4; i++)
		wr32(E1000_RXDCTL(i), rxdctl[i]);
	wr32(E1000_RCTL, rctl);
	wrfl();

	wr32(E1000_RLPML, rlpml);
	wr32(E1000_RFCTL, rfctl);

	/* Flush receive errors generated by workaround */
	rd32(E1000_ROC);
	rd32(E1000_RNBC);
	rd32(E1000_MPC);
}
1995
1996 /**
1997 * igb_set_pcie_completion_timeout - set pci-e completion timeout
1998 * @hw: pointer to the HW structure
1999 *
2000 * The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
2001 * however the hardware default for these parts is 500us to 1ms which is less
2002 * than the 10ms recommended by the pci-e spec. To address this we need to
2003 * increase the value to either 10ms to 200ms for capability version 1 config,
2004 * or 16ms to 55ms for version 2.
2005 **/
static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
{
	u32 gcr = rd32(E1000_GCR);
	s32 ret_val = 0;
	u16 pcie_devctl2;

	/* only take action if timeout value is defaulted to 0 */
	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
		goto out;

	/* if capabilities version is type 1 we can write the
	 * timeout of 10ms to 200ms through the GCR register
	 */
	if (!(gcr & E1000_GCR_CAP_VER2)) {
		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
		goto out;
	}

	/* for version 2 capabilities we need to write the config space
	 * directly in order to set the completion timeout value for
	 * 16ms to 55ms
	 */
	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					&pcie_devctl2);
	if (ret_val)
		goto out;

	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;

	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
					 &pcie_devctl2);
out:
	/* disable completion timeout resend */
	/* This GCR write runs on every path (including errors), so resend
	 * is always disabled and any timeout set above takes effect.
	 */
	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;

	wr32(E1000_GCR, gcr);
	return ret_val;
}
2044
/**
 * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
 * @hw: pointer to the hardware struct
 * @enable: state to enter, either enabled or disabled
 * @pf: Physical Function pool - do not set anti-spoofing for the PF
 *
 * enables/disables L2 switch anti-spoofing functionality.
 **/
void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
{
	u32 reg_val, reg_offset;

	/* The switch-control register moved between MAC generations;
	 * parts without an L2 switch simply return.
	 */
	switch (hw->mac.type) {
	case e1000_82576:
		reg_offset = E1000_DTXSWC;
		break;
	case e1000_i350:
	case e1000_i354:
		reg_offset = E1000_TXSWC;
		break;
	default:
		return;
	}

	reg_val = rd32(reg_offset);
	if (enable) {
		reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
			   E1000_DTXSWC_VLAN_SPOOF_MASK);
		/* The PF can spoof - it has to in order to
		 * support emulation mode NICs
		 *
		 * The XOR clears exactly the PF's MAC and VLAN anti-spoof
		 * bits that the mask above just set (the VLAN bit for a pool
		 * sits MAX_NUM_VFS positions above its MAC bit).
		 */
		reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
	} else {
		reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
			   E1000_DTXSWC_VLAN_SPOOF_MASK);
	}
	wr32(reg_offset, reg_val);
}
2083
2084 /**
2085 * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
2086 * @hw: pointer to the hardware struct
2087 * @enable: state to enter, either enabled or disabled
2088 *
2089 * enables/disables L2 switch loopback functionality.
2090 **/
igb_vmdq_set_loopback_pf(struct e1000_hw * hw,bool enable)2091 void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
2092 {
2093 u32 dtxswc;
2094
2095 switch (hw->mac.type) {
2096 case e1000_82576:
2097 dtxswc = rd32(E1000_DTXSWC);
2098 if (enable)
2099 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2100 else
2101 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2102 wr32(E1000_DTXSWC, dtxswc);
2103 break;
2104 case e1000_i354:
2105 case e1000_i350:
2106 dtxswc = rd32(E1000_TXSWC);
2107 if (enable)
2108 dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2109 else
2110 dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
2111 wr32(E1000_TXSWC, dtxswc);
2112 break;
2113 default:
2114 /* Currently no other hardware supports loopback */
2115 break;
2116 }
2117
2118 }
2119
2120 /**
2121 * igb_vmdq_set_replication_pf - enable or disable vmdq replication
2122 * @hw: pointer to the hardware struct
2123 * @enable: state to enter, either enabled or disabled
2124 *
2125 * enables/disables replication of packets across multiple pools.
2126 **/
igb_vmdq_set_replication_pf(struct e1000_hw * hw,bool enable)2127 void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
2128 {
2129 u32 vt_ctl = rd32(E1000_VT_CTL);
2130
2131 if (enable)
2132 vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
2133 else
2134 vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
2135
2136 wr32(E1000_VT_CTL, vt_ctl);
2137 }
2138
2139 /**
2140 * igb_read_phy_reg_82580 - Read 82580 MDI control register
2141 * @hw: pointer to the HW structure
2142 * @offset: register offset to be read
2143 * @data: pointer to the read data
2144 *
2145 * Reads the MDI control register in the PHY at offset and stores the
2146 * information read to data.
2147 **/
igb_read_phy_reg_82580(struct e1000_hw * hw,u32 offset,u16 * data)2148 static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
2149 {
2150 s32 ret_val;
2151
2152 ret_val = hw->phy.ops.acquire(hw);
2153 if (ret_val)
2154 goto out;
2155
2156 ret_val = igb_read_phy_reg_mdic(hw, offset, data);
2157
2158 hw->phy.ops.release(hw);
2159
2160 out:
2161 return ret_val;
2162 }
2163
2164 /**
2165 * igb_write_phy_reg_82580 - Write 82580 MDI control register
2166 * @hw: pointer to the HW structure
2167 * @offset: register offset to write to
2168 * @data: data to write to register at offset
2169 *
2170 * Writes data to MDI control register in the PHY at offset.
2171 **/
igb_write_phy_reg_82580(struct e1000_hw * hw,u32 offset,u16 data)2172 static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
2173 {
2174 s32 ret_val;
2175
2176
2177 ret_val = hw->phy.ops.acquire(hw);
2178 if (ret_val)
2179 goto out;
2180
2181 ret_val = igb_write_phy_reg_mdic(hw, offset, data);
2182
2183 hw->phy.ops.release(hw);
2184
2185 out:
2186 return ret_val;
2187 }
2188
/**
 * igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
 * @hw: pointer to the HW structure
 *
 * This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
 * the values found in the EEPROM.  This addresses an issue in which these
 * bits are not restored from EEPROM after reset.
 *
 * Only applies to an 82580 operating in SGMII mode; all other
 * configurations return 0 without touching the register.
 **/
static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u32 mdicnfg;
	u16 nvm_data = 0;

	if (hw->mac.type != e1000_82580)
		goto out;
	if (!igb_sgmii_active_82575(hw))
		goto out;

	/* Read this port's INIT_CONTROL3 word from the NVM. */
	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				   &nvm_data);
	if (ret_val) {
		hw_dbg("NVM Read Error\n");
		goto out;
	}

	/* NOTE(review): the two bits are only ever set here, never cleared;
	 * this appears to rely on the post-reset default leaving them clear.
	 */
	mdicnfg = rd32(E1000_MDICNFG);
	if (nvm_data & NVM_WORD24_EXT_MDIO)
		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
	if (nvm_data & NVM_WORD24_COM_MDIO)
		mdicnfg |= E1000_MDICNFG_COM_MDIO;
	wr32(E1000_MDICNFG, mdicnfg);
out:
	return ret_val;
}
2225
/**
 * igb_reset_hw_82580 - Reset hardware
 * @hw: pointer to the HW structure
 *
 * This resets function or entire device (all ports, etc.)
 * to a known state.
 *
 * A global (all-function) reset is attempted only when dev_spec requested it,
 * the hardware is not an 82580 (errata), the SW/FW mailbox semaphore can be
 * taken, and no reset is already in progress; otherwise a per-function reset
 * is issued.  Returns the result of the post-reset alternate-MAC-address
 * check, or an earlier error.
 **/
static s32 igb_reset_hw_82580(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	/* BH SW mailbox bit in SW_FW_SYNC */
	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
	u32 ctrl;
	bool global_device_reset = hw->dev_spec._82575.global_device_reset;

	/* Consume the one-shot request flag. */
	hw->dev_spec._82575.global_device_reset = false;

	/* due to hw errata, global device reset doesn't always
	 * work on 82580
	 */
	if (hw->mac.type == e1000_82580)
		global_device_reset = false;

	/* Get current control state. */
	ctrl = rd32(E1000_CTRL);

	/* Prevent the PCI-E bus from sticking if there is no TLP connection
	 * on the last TLP read/write transaction when MAC is reset.
	 */
	ret_val = igb_disable_pcie_master(hw);
	if (ret_val)
		hw_dbg("PCI-E Master disable polling has failed.\n");

	/* Quiesce the device: mask interrupts, stop RX, let TX drain. */
	hw_dbg("Masking off all interrupts\n");
	wr32(E1000_IMC, 0xffffffff);
	wr32(E1000_RCTL, 0);
	wr32(E1000_TCTL, E1000_TCTL_PSP);
	wrfl();

	usleep_range(10000, 11000);

	/* Determine whether or not a global dev reset is requested */
	if (global_device_reset &&
	    hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
		global_device_reset = false;

	if (global_device_reset &&
	    !(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
		ctrl |= E1000_CTRL_DEV_RST;
	else
		ctrl |= E1000_CTRL_RST;

	wr32(E1000_CTRL, ctrl);
	wrfl();

	/* Add delay to insure DEV_RST has time to complete */
	if (global_device_reset)
		usleep_range(5000, 6000);

	ret_val = igb_get_auto_rd_done(hw);
	if (ret_val) {
		/* When auto config read does not complete, do not
		 * return with an error. This can happen in situations
		 * where there is no eeprom and prevents getting link.
		 */
		hw_dbg("Auto Read Done did not complete\n");
	}

	/* clear global device reset status bit */
	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);

	/* Clear any pending interrupt events. */
	wr32(E1000_IMC, 0xffffffff);
	rd32(E1000_ICR);

	ret_val = igb_reset_mdicnfg_82580(hw);
	if (ret_val)
		hw_dbg("Could not reset MDICNFG based on EEPROM\n");

	/* Install any alternate MAC address into RAR0 */
	ret_val = igb_check_alt_mac_addr(hw);

	/* Release semaphore */
	if (global_device_reset)
		hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);

	return ret_val;
}
2314
2315 /**
2316 * igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
2317 * @data: data received by reading RXPBS register
2318 *
2319 * The 82580 uses a table based approach for packet buffer allocation sizes.
2320 * This function converts the retrieved value into the correct table value
2321 * 0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
2322 * 0x0 36 72 144 1 2 4 8 16
2323 * 0x8 35 70 140 rsv rsv rsv rsv rsv
2324 */
igb_rxpbs_adjust_82580(u32 data)2325 u16 igb_rxpbs_adjust_82580(u32 data)
2326 {
2327 u16 ret_val = 0;
2328
2329 if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
2330 ret_val = e1000_82580_rxpbs_table[data];
2331
2332 return ret_val;
2333 }
2334
2335 /**
2336 * igb_validate_nvm_checksum_with_offset - Validate EEPROM
2337 * checksum
2338 * @hw: pointer to the HW structure
2339 * @offset: offset in words of the checksum protected region
2340 *
2341 * Calculates the EEPROM checksum by reading/adding each word of the EEPROM
2342 * and then verifies that the sum of the EEPROM is equal to 0xBABA.
2343 **/
igb_validate_nvm_checksum_with_offset(struct e1000_hw * hw,u16 offset)2344 static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
2345 u16 offset)
2346 {
2347 s32 ret_val = 0;
2348 u16 checksum = 0;
2349 u16 i, nvm_data;
2350
2351 for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
2352 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2353 if (ret_val) {
2354 hw_dbg("NVM Read Error\n");
2355 goto out;
2356 }
2357 checksum += nvm_data;
2358 }
2359
2360 if (checksum != (u16) NVM_SUM) {
2361 hw_dbg("NVM Checksum Invalid\n");
2362 ret_val = -E1000_ERR_NVM;
2363 goto out;
2364 }
2365
2366 out:
2367 return ret_val;
2368 }
2369
2370 /**
2371 * igb_update_nvm_checksum_with_offset - Update EEPROM
2372 * checksum
2373 * @hw: pointer to the HW structure
2374 * @offset: offset in words of the checksum protected region
2375 *
2376 * Updates the EEPROM checksum by reading/adding each word of the EEPROM
2377 * up to the checksum. Then calculates the EEPROM checksum and writes the
2378 * value to the EEPROM.
2379 **/
igb_update_nvm_checksum_with_offset(struct e1000_hw * hw,u16 offset)2380 static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
2381 {
2382 s32 ret_val;
2383 u16 checksum = 0;
2384 u16 i, nvm_data;
2385
2386 for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
2387 ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
2388 if (ret_val) {
2389 hw_dbg("NVM Read Error while updating checksum.\n");
2390 goto out;
2391 }
2392 checksum += nvm_data;
2393 }
2394 checksum = (u16) NVM_SUM - checksum;
2395 ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
2396 &checksum);
2397 if (ret_val)
2398 hw_dbg("NVM Write Error while updating checksum.\n");
2399
2400 out:
2401 return ret_val;
2402 }
2403
2404 /**
2405 * igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
2406 * @hw: pointer to the HW structure
2407 *
2408 * Calculates the EEPROM section checksum by reading/adding each word of
2409 * the EEPROM and then verifies that the sum of the EEPROM is
2410 * equal to 0xBABA.
2411 **/
igb_validate_nvm_checksum_82580(struct e1000_hw * hw)2412 static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
2413 {
2414 s32 ret_val = 0;
2415 u16 eeprom_regions_count = 1;
2416 u16 j, nvm_data;
2417 u16 nvm_offset;
2418
2419 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2420 if (ret_val) {
2421 hw_dbg("NVM Read Error\n");
2422 goto out;
2423 }
2424
2425 if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
2426 /* if checksums compatibility bit is set validate checksums
2427 * for all 4 ports.
2428 */
2429 eeprom_regions_count = 4;
2430 }
2431
2432 for (j = 0; j < eeprom_regions_count; j++) {
2433 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2434 ret_val = igb_validate_nvm_checksum_with_offset(hw,
2435 nvm_offset);
2436 if (ret_val != 0)
2437 goto out;
2438 }
2439
2440 out:
2441 return ret_val;
2442 }
2443
2444 /**
2445 * igb_update_nvm_checksum_82580 - Update EEPROM checksum
2446 * @hw: pointer to the HW structure
2447 *
2448 * Updates the EEPROM section checksums for all 4 ports by reading/adding
2449 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
2450 * checksum and writes the value to the EEPROM.
2451 **/
igb_update_nvm_checksum_82580(struct e1000_hw * hw)2452 static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
2453 {
2454 s32 ret_val;
2455 u16 j, nvm_data;
2456 u16 nvm_offset;
2457
2458 ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
2459 if (ret_val) {
2460 hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
2461 goto out;
2462 }
2463
2464 if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
2465 /* set compatibility bit to validate checksums appropriately */
2466 nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
2467 ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
2468 &nvm_data);
2469 if (ret_val) {
2470 hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
2471 goto out;
2472 }
2473 }
2474
2475 for (j = 0; j < 4; j++) {
2476 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2477 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2478 if (ret_val)
2479 goto out;
2480 }
2481
2482 out:
2483 return ret_val;
2484 }
2485
2486 /**
2487 * igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
2488 * @hw: pointer to the HW structure
2489 *
2490 * Calculates the EEPROM section checksum by reading/adding each word of
2491 * the EEPROM and then verifies that the sum of the EEPROM is
2492 * equal to 0xBABA.
2493 **/
igb_validate_nvm_checksum_i350(struct e1000_hw * hw)2494 static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
2495 {
2496 s32 ret_val = 0;
2497 u16 j;
2498 u16 nvm_offset;
2499
2500 for (j = 0; j < 4; j++) {
2501 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2502 ret_val = igb_validate_nvm_checksum_with_offset(hw,
2503 nvm_offset);
2504 if (ret_val != 0)
2505 goto out;
2506 }
2507
2508 out:
2509 return ret_val;
2510 }
2511
2512 /**
2513 * igb_update_nvm_checksum_i350 - Update EEPROM checksum
2514 * @hw: pointer to the HW structure
2515 *
2516 * Updates the EEPROM section checksums for all 4 ports by reading/adding
2517 * each word of the EEPROM up to the checksum. Then calculates the EEPROM
2518 * checksum and writes the value to the EEPROM.
2519 **/
igb_update_nvm_checksum_i350(struct e1000_hw * hw)2520 static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
2521 {
2522 s32 ret_val = 0;
2523 u16 j;
2524 u16 nvm_offset;
2525
2526 for (j = 0; j < 4; j++) {
2527 nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
2528 ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
2529 if (ret_val != 0)
2530 goto out;
2531 }
2532
2533 out:
2534 return ret_val;
2535 }
2536
/**
 * __igb_access_emi_reg - Read/write EMI register
 * @hw: pointer to the HW structure
 * @address: EMI address to program
 * @data: pointer to value to read/write from/to the EMI address
 * @read: boolean flag to indicate read or write
 *
 * Programs the EMI address register via the PHY, then either reads the EMI
 * data register into @data or writes *@data to it.  Returns 0 on success,
 * otherwise the PHY register access error.
 **/
static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
				u16 *data, bool read)
{
	s32 ret_val = 0;

	ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
	if (ret_val)
		return ret_val;

	if (read)
		ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
	else
		ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);

	return ret_val;
}
2560
/**
 * igb_read_emi_reg - Read Extended Management Interface register
 * @hw: pointer to the HW structure
 * @addr: EMI address to program
 * @data: value to be read from the EMI address
 *
 * Thin wrapper around __igb_access_emi_reg() in read mode.
 * Returns 0 on success, otherwise the PHY register access error.
 **/
s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
{
	return __igb_access_emi_reg(hw, addr, data, true);
}
2571
/**
 * igb_set_eee_i350 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 * @adv1G: boolean flag enabling 1G EEE advertisement
 * @adv100M: boolean flag enabling 100M EEE advertisement
 *
 * Enable/disable EEE based on setting in dev_spec structure.
 *
 * Always returns 0; non-copper media and pre-i350 MACs are silently
 * skipped.
 **/
s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
{
	u32 ipcnfg, eeer;

	if ((hw->mac.type < e1000_i350) ||
	    (hw->phy.media_type != e1000_media_type_copper))
		goto out;
	ipcnfg = rd32(E1000_IPCNFG);
	eeer = rd32(E1000_EEER);

	/* enable or disable per user setting */
	if (!(hw->dev_spec._82575.eee_disable)) {
		u32 eee_su = rd32(E1000_EEE_SU);

		/* Advertise only the speeds the caller asked for. */
		if (adv100M)
			ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
		else
			ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;

		if (adv1G)
			ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
		else
			ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;

		eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
			E1000_EEER_LPI_FC);

		/* This bit should not be set in normal operation. */
		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
			hw_dbg("LPI Clock Stop Bit should not be set!\n");

	} else {
		/* EEE disabled: clear all advertisement and LPI bits. */
		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
			E1000_IPCNFG_EEE_100M_AN);
		eeer &= ~(E1000_EEER_TX_LPI_EN |
			E1000_EEER_RX_LPI_EN |
			E1000_EEER_LPI_FC);
	}
	wr32(E1000_IPCNFG, ipcnfg);
	wr32(E1000_EEER, eeer);
	/* Read back to post the writes. */
	rd32(E1000_IPCNFG);
	rd32(E1000_EEER);
out:

	return 0;
}
2627
/**
 * igb_set_eee_i354 - Enable/disable EEE support
 * @hw: pointer to the HW structure
 * @adv1G: boolean flag enabling 1G EEE advertisement
 * @adv100M: boolean flag enabling 100M EEE advertisement
 *
 * Enable/disable EEE legacy mode based on setting in dev_spec structure.
 *
 * Only applies to copper links with a Marvell 88E1543 or 88E1512 PHY;
 * other configurations return 0 without touching the PHY.
 **/
s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val = 0;
	u16 phy_data;

	if ((hw->phy.media_type != e1000_media_type_copper) ||
	    ((phy->id != M88E1543_E_PHY_ID) &&
	     (phy->id != M88E1512_E_PHY_ID)))
		goto out;

	if (!hw->dev_spec._82575.eee_disable) {
		/* Switch to PHY page 18. */
		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
		if (ret_val)
			goto out;

		/* Set the master/slave EEE control bit. */
		ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
					    &phy_data);
		if (ret_val)
			goto out;

		phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
					     phy_data);
		if (ret_val)
			goto out;

		/* Return the PHY to page 0. */
		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
		if (ret_val)
			goto out;

		/* Turn on EEE advertisement. */
		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					     E1000_EEE_ADV_DEV_I354,
					     &phy_data);
		if (ret_val)
			goto out;

		if (adv100M)
			phy_data |= E1000_EEE_ADV_100_SUPPORTED;
		else
			phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;

		if (adv1G)
			phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
		else
			phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;

		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					      E1000_EEE_ADV_DEV_I354,
					      phy_data);
	} else {
		/* Turn off EEE advertisement. */
		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					     E1000_EEE_ADV_DEV_I354,
					     &phy_data);
		if (ret_val)
			goto out;

		phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
			      E1000_EEE_ADV_1000_SUPPORTED);
		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
					      E1000_EEE_ADV_DEV_I354,
					      phy_data);
	}

out:
	return ret_val;
}
2708
2709 /**
2710 * igb_get_eee_status_i354 - Get EEE status
2711 * @hw: pointer to the HW structure
2712 * @status: EEE status
2713 *
2714 * Get EEE status by guessing based on whether Tx or Rx LPI indications have
2715 * been received.
2716 **/
igb_get_eee_status_i354(struct e1000_hw * hw,bool * status)2717 s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
2718 {
2719 struct e1000_phy_info *phy = &hw->phy;
2720 s32 ret_val = 0;
2721 u16 phy_data;
2722
2723 /* Check if EEE is supported on this device. */
2724 if ((hw->phy.media_type != e1000_media_type_copper) ||
2725 ((phy->id != M88E1543_E_PHY_ID) &&
2726 (phy->id != M88E1512_E_PHY_ID)))
2727 goto out;
2728
2729 ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
2730 E1000_PCS_STATUS_DEV_I354,
2731 &phy_data);
2732 if (ret_val)
2733 goto out;
2734
2735 *status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
2736 E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
2737
2738 out:
2739 return ret_val;
2740 }
2741
/* EMC chip register addresses for reading the current temperature,
 * indexed by the sensor index found in the NVM ETS records.
 */
static const u8 e1000_emc_temp_data[4] = {
	E1000_EMC_INTERNAL_DATA,
	E1000_EMC_DIODE1_DATA,
	E1000_EMC_DIODE2_DATA,
	E1000_EMC_DIODE3_DATA
};
/* EMC chip register addresses for programming the thermal limit,
 * indexed the same way as e1000_emc_temp_data above.
 */
static const u8 e1000_emc_therm_limit[4] = {
	E1000_EMC_INTERNAL_THERM_LIMIT,
	E1000_EMC_DIODE1_THERM_LIMIT,
	E1000_EMC_DIODE2_THERM_LIMIT,
	E1000_EMC_DIODE3_THERM_LIMIT
};
2754
2755 #ifdef CONFIG_IGB_HWMON
/**
 * igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
 * @hw: pointer to hardware structure
 *
 * Updates the temperatures in mac.thermal_sensor_data
 *
 * Only implemented for i350 function 0; other parts return
 * E1000_NOT_IMPLEMENTED.
 **/
static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{
	u16 ets_offset;		/* NVM word pointing at the ETS section */
	u16 ets_cfg;		/* ETS configuration word */
	u16 ets_sensor;		/* per-sensor ETS record */
	u8 num_sensors;
	u8 sensor_index;
	u8 sensor_location;
	u8 i;
	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
		return E1000_NOT_IMPLEMENTED;

	/* Sensor 0 comes from the THMJT register (low byte). */
	data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);

	/* Return the internal sensor only if ETS is unsupported */
	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
		return 0;

	/* Only the EMC sensor type is handled here. */
	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
	    != NVM_ETS_TYPE_EMC)
		return E1000_NOT_IMPLEMENTED;

	/* Clamp to the size of the sensor[] array. */
	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
	if (num_sensors > E1000_MAX_SENSORS)
		num_sensors = E1000_MAX_SENSORS;

	/* External sensors (1..n) are read over I2C from the EMC chip;
	 * a location of 0 marks an unpopulated NVM record.
	 */
	for (i = 1; i < num_sensors; i++) {
		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
				NVM_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
				   NVM_ETS_DATA_LOC_SHIFT);

		if (sensor_location != 0)
			hw->phy.ops.read_i2c_byte(hw,
					e1000_emc_temp_data[sensor_index],
					E1000_I2C_THERMAL_SENSOR_ADDR,
					&data->sensor[i].temp);
	}
	return 0;
}
2807
/**
 * igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
 * @hw: pointer to hardware structure
 *
 * Sets the thermal sensor thresholds according to the NVM map
 * and save off the threshold and location values into mac.thermal_sensor_data
 *
 * Only implemented for i350 function 0; other parts return
 * E1000_NOT_IMPLEMENTED.
 **/
static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{
	u16 ets_offset;		/* NVM word pointing at the ETS section */
	u16 ets_cfg;		/* ETS configuration word */
	u16 ets_sensor;		/* per-sensor ETS record */
	u8 low_thresh_delta;	/* caution-to-max-operating margin */
	u8 num_sensors;
	u8 sensor_index;
	u8 sensor_location;
	u8 therm_limit;
	u8 i;
	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;

	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
		return E1000_NOT_IMPLEMENTED;

	memset(data, 0, sizeof(struct e1000_thermal_sensor_data));

	/* Sensor 0 thresholds come from the MAC's own registers. */
	data->sensor[0].location = 0x1;
	data->sensor[0].caution_thresh =
		(rd32(E1000_THHIGHTC) & 0xFF);
	data->sensor[0].max_op_thresh =
		(rd32(E1000_THLOWTC) & 0xFF);

	/* Return the internal sensor only if ETS is unsupported */
	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
		return 0;

	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
	    != NVM_ETS_TYPE_EMC)
		return E1000_NOT_IMPLEMENTED;

	low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
			    NVM_ETS_LTHRES_DELTA_SHIFT);
	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);

	/* NOTE(review): unlike the data-read path, num_sensors is not
	 * clamped here; a therm limit is programmed over I2C for every NVM
	 * record, but values are only stored for i < E1000_MAX_SENSORS.
	 */
	for (i = 1; i <= num_sensors; i++) {
		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
				NVM_ETS_DATA_INDEX_SHIFT);
		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
				   NVM_ETS_DATA_LOC_SHIFT);
		therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;

		hw->phy.ops.write_i2c_byte(hw,
			e1000_emc_therm_limit[sensor_index],
			E1000_I2C_THERMAL_SENSOR_ADDR,
			therm_limit);

		if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
			data->sensor[i].location = sensor_location;
			data->sensor[i].caution_thresh = therm_limit;
			data->sensor[i].max_op_thresh = therm_limit -
							low_thresh_delta;
		}
	}
	return 0;
}
2875
2876 #endif
/* MAC-layer operations shared by the 82575-family parts. */
static struct e1000_mac_operations e1000_mac_ops_82575 = {
	.init_hw              = igb_init_hw_82575,
	.check_for_link       = igb_check_for_link_82575,
	.rar_set              = igb_rar_set,
	.read_mac_addr        = igb_read_mac_addr_82575,
	.get_speed_and_duplex = igb_get_link_up_info_82575,
#ifdef CONFIG_IGB_HWMON
	.get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
	.init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
#endif
};

/* PHY access operations: semaphore handling and I2C byte access. */
static struct e1000_phy_operations e1000_phy_ops_82575 = {
	.acquire              = igb_acquire_phy_82575,
	.get_cfg_done         = igb_get_cfg_done_82575,
	.release              = igb_release_phy_82575,
	.write_i2c_byte       = igb_write_i2c_byte,
	.read_i2c_byte        = igb_read_i2c_byte,
};

/* NVM (EEPROM) access operations. */
static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
	.acquire              = igb_acquire_nvm_82575,
	.read                 = igb_read_nvm_eerd,
	.release              = igb_release_nvm_82575,
	.write                = igb_write_nvm_spi,
};

/* Top-level device description consumed by the igb core driver. */
const struct e1000_info e1000_82575_info = {
	.get_invariants = igb_get_invariants_82575,
	.mac_ops = &e1000_mac_ops_82575,
	.phy_ops = &e1000_phy_ops_82575,
	.nvm_ops = &e1000_nvm_ops_82575,
};
2910
2911