/* Intel PRO/1000 Linux driver
 * Copyright(c) 1999 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * Linux NICS <linux.nics@intel.com>
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */
21
/* 82562G 10/100 Network Connection
 * 82562G-2 10/100 Network Connection
 * 82562GT 10/100 Network Connection
 * 82562GT-2 10/100 Network Connection
 * 82562V 10/100 Network Connection
 * 82562V-2 10/100 Network Connection
 * 82566DC-2 Gigabit Network Connection
 * 82566DC Gigabit Network Connection
 * 82566DM-2 Gigabit Network Connection
 * 82566DM Gigabit Network Connection
 * 82566MC Gigabit Network Connection
 * 82566MM Gigabit Network Connection
 * 82567LM Gigabit Network Connection
 * 82567LF Gigabit Network Connection
 * 82567V Gigabit Network Connection
 * 82567LM-2 Gigabit Network Connection
 * 82567LF-2 Gigabit Network Connection
 * 82567V-2 Gigabit Network Connection
 * 82567LF-3 Gigabit Network Connection
 * 82567LM-3 Gigabit Network Connection
 * 82567LM-4 Gigabit Network Connection
 * 82577LM Gigabit Network Connection
 * 82577LC Gigabit Network Connection
 * 82578DM Gigabit Network Connection
 * 82578DC Gigabit Network Connection
 * 82579LM Gigabit Network Connection
 * 82579V Gigabit Network Connection
 * Ethernet Connection I217-LM
 * Ethernet Connection I217-V
 * Ethernet Connection I218-V
 * Ethernet Connection I218-LM
 * Ethernet Connection (2) I218-LM
 * Ethernet Connection (2) I218-V
 * Ethernet Connection (3) I218-LM
 * Ethernet Connection (3) I218-V
 */
58
59#include "e1000.h"
60
/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
	struct ich8_hsfsts {
		u16 flcdone:1;	/* bit 0 Flash Cycle Done */
		u16 flcerr:1;	/* bit 1 Flash Cycle Error */
		u16 dael:1;	/* bit 2 Direct Access error Log */
		u16 berasesz:2;	/* bit 4:3 Sector Erase Size */
		u16 flcinprog:1;	/* bit 5 flash cycle in Progress */
		u16 reserved1:2;	/* bit 7:6 Reserved */
		u16 reserved2:6;	/* bit 13:8 Reserved */
		u16 fldesvalid:1;	/* bit 14 Flash Descriptor Valid */
		u16 flockdn:1;	/* bit 15 Flash Config Lock-Down */
	} hsf_status;
	u16 regval;	/* whole register as a single 16-bit value */
};
77
/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
	struct ich8_hsflctl {
		u16 flcgo:1;	/* 0 Flash Cycle Go */
		u16 flcycle:2;	/* 2:1 Flash Cycle */
		u16 reserved:5;	/* 7:3 Reserved  */
		u16 fldbcount:2;	/* 9:8 Flash Data Byte Count */
		u16 flockdn:6;	/* 15:10 Reserved / Flash Config Lock-Down (bit 15) */
	} hsf_ctrl;
	u16 regval;	/* whole register as a single 16-bit value */
};
90
91/* ICH Flash Region Access Permissions */
92union ich8_hws_flash_regacc {
93	struct ich8_flracc {
94		u32 grra:8;	/* 0:7 GbE region Read Access */
95		u32 grwa:8;	/* 8:15 GbE region Write Access */
96		u32 gmrag:8;	/* 23:16 GbE Master Read Access Grant */
97		u32 gmwag:8;	/* 31:24 GbE Master Write Access Grant */
98	} hsf_flregacc;
99	u16 regval;
100};
101
/* ICH Flash Protected Region */
union ich8_flash_protected_range {
	struct ich8_pr {
		u32 base:13;	/* 0:12 Protected Range Base */
		u32 reserved1:2;	/* 13:14 Reserved */
		u32 rpe:1;	/* 15 Read Protection Enable */
		u32 limit:13;	/* 16:28 Protected Range Limit */
		u32 reserved2:2;	/* 29:30 Reserved */
		u32 wpe:1;	/* 31 Write Protection Enable */
	} range;
	u32 regval;	/* whole register as a single 32-bit value */
};
114
115static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
116static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
117static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
118static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
119						u32 offset, u8 byte);
120static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
121					 u8 *data);
122static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
123					 u16 *data);
124static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
125					 u8 size, u16 *data);
126static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
127					   u32 *data);
128static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw,
129					  u32 offset, u32 *data);
130static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw,
131					    u32 offset, u32 data);
132static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
133						 u32 offset, u32 dword);
134static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
135static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
136static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
137static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
138static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
139static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
140static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
141static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
142static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
143static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
144static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
145static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
146static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
147static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
148static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
149static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
150static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
151static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
152static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
153static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
154static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
155static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
156static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
157static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state);
158
/* Read a 16-bit register at byte offset @reg in the mapped flash BAR */
static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
{
	return readw(hw->flash_address + reg);
}
163
/* Read a 32-bit register at byte offset @reg in the mapped flash BAR */
static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
{
	return readl(hw->flash_address + reg);
}
168
/* Write a 16-bit value to byte offset @reg in the mapped flash BAR */
static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
{
	writew(val, hw->flash_address + reg);
}
173
/* Write a 32-bit value to byte offset @reg in the mapped flash BAR */
static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
{
	writel(val, hw->flash_address + reg);
}
178
/* Flash register accessors; these expect a local 'struct e1000_hw *hw'
 * to be in scope at the call site.
 */
#define er16flash(reg)		__er16flash(hw, (reg))
#define er32flash(reg)		__er32flash(hw, (reg))
#define ew16flash(reg, val)	__ew16flash(hw, (reg), (val))
#define ew32flash(reg, val)	__ew32flash(hw, (reg), (val))
183
/**
 *  e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
 *  @hw: pointer to the HW structure
 *
 *  Test access to the PHY registers by reading the PHY ID registers.  If
 *  the PHY ID is already known (e.g. resume path) compare it with known ID,
 *  otherwise assume the read PHY ID is correct if it is valid.
 *
 *  Returns true if the PHY responded with a usable ID, false otherwise.
 *
 *  Assumes the sw/fw/hw semaphore is already acquired.
 **/
static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
{
	u16 phy_reg = 0;
	u32 phy_id = 0;
	s32 ret_val = 0;
	u16 retry_count;
	u32 mac_reg = 0;

	/* Try up to twice to assemble the 32-bit PHY ID from the two
	 * 16-bit MII ID registers; an all-ones read means no response.
	 */
	for (retry_count = 0; retry_count < 2; retry_count++) {
		ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF))
			continue;
		phy_id = (u32)(phy_reg << 16);	/* upper half of the ID */

		ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg);
		if (ret_val || (phy_reg == 0xFFFF)) {
			phy_id = 0;	/* discard the partial ID and retry */
			continue;
		}
		phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
		break;
	}

	/* Known ID (resume path): accept only an exact match.
	 * Unknown ID: latch any valid (non-zero) ID that was read.
	 */
	if (hw->phy.id) {
		if (hw->phy.id == phy_id)
			goto out;
	} else if (phy_id) {
		hw->phy.id = phy_id;
		/* phy_reg still holds MII_PHYSID2 on this path */
		hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
		goto out;
	}

	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 * The slow-mode helper acquires the semaphore itself, so it must
	 * be released around the retry and re-acquired afterwards.
	 */
	if (hw->mac.type < e1000_pch_lpt) {
		hw->phy.ops.release(hw);
		ret_val = e1000_set_mdio_slow_mode_hv(hw);
		if (!ret_val)
			ret_val = e1000e_get_phy_id(hw);
		hw->phy.ops.acquire(hw);
	}

	if (ret_val)
		return false;
out:
	if ((hw->mac.type == e1000_pch_lpt) ||
	    (hw->mac.type == e1000_pch_spt)) {
		/* Unforce SMBus mode in PHY */
		e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
		phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
		e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);

		/* Unforce SMBus mode in MAC */
		mac_reg = er32(CTRL_EXT);
		mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
		ew32(CTRL_EXT, mac_reg);
	}

	return true;
}
255
/**
 *  e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value
 *  @hw: pointer to the HW structure
 *
 *  Toggling the LANPHYPC pin value fully power-cycles the PHY and is
 *  used to reset the PHY to a quiescent state when necessary.
 **/
static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
{
	u32 mac_reg;

	/* Set Phy Config Counter to 50msec */
	mac_reg = er32(FEXTNVM3);
	mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
	mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
	ew32(FEXTNVM3, mac_reg);

	/* Toggle LANPHYPC Value bit: take software control of the pin
	 * (OVERRIDE) and drive its value low ...
	 */
	mac_reg = er32(CTRL);
	mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
	mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
	ew32(CTRL, mac_reg);
	e1e_flush();
	usleep_range(10, 20);
	/* ... then release software control again */
	mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
	ew32(CTRL, mac_reg);
	e1e_flush();

	if (hw->mac.type < e1000_pch_lpt) {
		msleep(50);
	} else {
		u16 count = 20;

		/* Poll CTRL_EXT.LPCD (LANPHYPC cycle done), 5-10ms per
		 * iteration, up to 20 tries before giving up.
		 */
		do {
			usleep_range(5000, 10000);
		} while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--);

		msleep(30);
	}
}
296
/**
 *  e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
 *  @hw: pointer to the HW structure
 *
 *  Workarounds/flow necessary for PHY initialization during driver load
 *  and resume paths.
 *
 *  Returns 0 on success, -E1000_ERR_PHY when the PHY cannot be made
 *  accessible or access is blocked by the ME firmware, or an error code
 *  from the acquire/reset helpers.
 **/
static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
	/* NOTE(review): 'adapter' looks unused here, but the e_dbg()/e_err()
	 * logging macros presumably expand to references to a local
	 * 'adapter' -- confirm against e1000.h before removing it.
	 */
	struct e1000_adapter *adapter = hw->adapter;
	u32 mac_reg, fwsm = er32(FWSM);
	s32 ret_val;

	/* Gate automatic PHY configuration by hardware on managed and
	 * non-managed 82579 and newer adapters.
	 */
	e1000_gate_hw_phy_config_ich8lan(hw, true);

	/* It is not possible to be certain of the current state of ULP
	 * so forcibly disable it.
	 */
	hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
	e1000_disable_ulp_lpt_lp(hw, true);

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val) {
		e_dbg("Failed to initialize PHY flow\n");
		goto out;
	}

	/* The MAC-PHY interconnect may be in SMBus mode.  If the PHY is
	 * inaccessible and resetting the PHY is not blocked, toggle the
	 * LANPHYPC Value bit to force the interconnect to PCIe mode.
	 *
	 * The cases deliberately fall through: each newer MAC type tries
	 * progressively stronger recovery steps shared with the older ones.
	 */
	switch (hw->mac.type) {
	case e1000_pch_lpt:
	case e1000_pch_spt:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* Before toggling LANPHYPC, see if PHY is accessible by
		 * forcing MAC to SMBus mode first.
		 */
		mac_reg = er32(CTRL_EXT);
		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
		ew32(CTRL_EXT, mac_reg);

		/* Wait 50 milliseconds for MAC to finish any retries
		 * that it might be trying to perform from previous
		 * attempts to acknowledge any phy read requests.
		 */
		msleep(50);

		/* fall-through */
	case e1000_pch2lan:
		if (e1000_phy_is_accessible_pchlan(hw))
			break;

		/* fall-through */
	case e1000_pchlan:
		/* On pchlan with valid firmware, the toggle is unnecessary */
		if ((hw->mac.type == e1000_pchlan) &&
		    (fwsm & E1000_ICH_FWSM_FW_VALID))
			break;

		if (hw->phy.ops.check_reset_block(hw)) {
			e_dbg("Required LANPHYPC toggle blocked by ME\n");
			ret_val = -E1000_ERR_PHY;
			break;
		}

		/* Toggle LANPHYPC Value bit */
		e1000_toggle_lanphypc_pch_lpt(hw);
		if (hw->mac.type >= e1000_pch_lpt) {
			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			/* Toggling LANPHYPC brings the PHY out of SMBus mode
			 * so ensure that the MAC is also out of SMBus mode
			 */
			mac_reg = er32(CTRL_EXT);
			mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
			ew32(CTRL_EXT, mac_reg);

			if (e1000_phy_is_accessible_pchlan(hw))
				break;

			ret_val = -E1000_ERR_PHY;
		}
		break;
	default:
		break;
	}

	hw->phy.ops.release(hw);
	if (!ret_val) {

		/* Check to see if able to reset PHY.  Print error if not */
		if (hw->phy.ops.check_reset_block(hw)) {
			e_err("Reset blocked by ME\n");
			goto out;
		}

		/* Reset the PHY before any access to it.  Doing so, ensures
		 * that the PHY is in a known good state before we read/write
		 * PHY registers.  The generic reset is sufficient here,
		 * because we haven't determined the PHY type yet.
		 */
		ret_val = e1000e_phy_hw_reset_generic(hw);
		if (ret_val)
			goto out;

		/* On a successful reset, possibly need to wait for the PHY
		 * to quiesce to an accessible state before returning control
		 * to the calling function.  If the PHY does not quiesce, then
		 * return E1000E_BLK_PHY_RESET, as this is the condition that
		 *  the PHY is in.
		 */
		ret_val = hw->phy.ops.check_reset_block(hw);
		if (ret_val)
			e_err("ME blocked access to PHY after reset\n");
	}

out:
	/* Ungate automatic PHY configuration on non-managed 82579 */
	if ((hw->mac.type == e1000_pch2lan) &&
	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
		usleep_range(10000, 20000);
		e1000_gate_hw_phy_config_ich8lan(hw, false);
	}

	return ret_val;
}
429
/**
 *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 *
 *  Returns 0 on success, or a negative error code if the PHY ID cannot
 *  be determined or the PHY type is unsupported.
 **/
static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;

	phy->addr = 1;
	phy->reset_delay_us = 100;

	/* Wire up the HV (PCH-family) PHY register accessors */
	phy->ops.set_page = e1000_set_page_igp;
	phy->ops.read_reg = e1000_read_phy_reg_hv;
	phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked;
	phy->ops.read_reg_page = e1000_read_phy_reg_page_hv;
	phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
	phy->ops.write_reg = e1000_write_phy_reg_hv;
	phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked;
	phy->ops.write_reg_page = e1000_write_phy_reg_page_hv;
	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
	phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;

	/* The workarounds below may populate phy->id as a side effect */
	phy->id = e1000_phy_unknown;

	ret_val = e1000_init_phy_workarounds_pchlan(hw);
	if (ret_val)
		return ret_val;

	/* The 'default' label is intentionally first: older MACs try a
	 * normal PHY ID read and, if the ID is invalid, fall through to
	 * the mdio slow-mode retry shared with PCH2/LPT/SPT.
	 */
	if (phy->id == e1000_phy_unknown)
		switch (hw->mac.type) {
		default:
			ret_val = e1000e_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
				break;
			/* fall-through */
		case e1000_pch2lan:
		case e1000_pch_lpt:
		case e1000_pch_spt:
			/* In case the PHY needs to be in mdio slow mode,
			 * set slow mode and try to get the PHY id again.
			 */
			ret_val = e1000_set_mdio_slow_mode_hv(hw);
			if (ret_val)
				return ret_val;
			ret_val = e1000e_get_phy_id(hw);
			if (ret_val)
				return ret_val;
			break;
		}
	phy->type = e1000e_get_phy_type_from_id(phy->id);

	/* Hook up the type-specific operations now the PHY is identified */
	switch (phy->type) {
	case e1000_phy_82577:
	case e1000_phy_82579:
	case e1000_phy_i217:
		phy->ops.check_polarity = e1000_check_polarity_82577;
		phy->ops.force_speed_duplex =
		    e1000_phy_force_speed_duplex_82577;
		phy->ops.get_cable_length = e1000_get_cable_length_82577;
		phy->ops.get_info = e1000_get_phy_info_82577;
		phy->ops.commit = e1000e_phy_sw_reset;
		break;
	case e1000_phy_82578:
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
		phy->ops.get_cable_length = e1000e_get_cable_length_m88;
		phy->ops.get_info = e1000e_get_phy_info_m88;
		break;
	default:
		ret_val = -E1000_ERR_PHY;
		break;
	}

	return ret_val;
}
512
/**
 *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific PHY parameters and function pointers.
 *
 *  Returns 0 on success, or -E1000_ERR_PHY (or a helper's error code)
 *  when the PHY address/ID cannot be determined or is unsupported.
 **/
static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	s32 ret_val;
	u16 i = 0;

	phy->addr = 1;
	phy->reset_delay_us = 100;

	phy->ops.power_up = e1000_power_up_phy_copper;
	phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;

	/* We may need to do this twice - once for IGP and if that fails,
	 * we'll set BM func pointers and try again
	 */
	ret_val = e1000e_determine_phy_address(hw);
	if (ret_val) {
		phy->ops.write_reg = e1000e_write_phy_reg_bm;
		phy->ops.read_reg = e1000e_read_phy_reg_bm;
		ret_val = e1000e_determine_phy_address(hw);
		if (ret_val) {
			e_dbg("Cannot determine PHY addr. Erroring out\n");
			return ret_val;
		}
	}

	/* Poll for a recognizable PHY ID, up to 100 iterations of 1-2ms */
	phy->id = 0;
	while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
	       (i++ < 100)) {
		usleep_range(1000, 2000);
		ret_val = e1000e_get_phy_id(hw);
		if (ret_val)
			return ret_val;
	}

	/* Verify phy id and bind type-specific operations */
	switch (phy->id) {
	case IGP03E1000_E_PHY_ID:
		phy->type = e1000_phy_igp_3;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
		phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
		phy->ops.get_info = e1000e_get_phy_info_igp;
		phy->ops.check_polarity = e1000_check_polarity_igp;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
		break;
	case IFE_E_PHY_ID:
	case IFE_PLUS_E_PHY_ID:
	case IFE_C_E_PHY_ID:
		/* IFE is a 10/100-only PHY; do not advertise gigabit */
		phy->type = e1000_phy_ife;
		phy->autoneg_mask = E1000_ALL_NOT_GIG;
		phy->ops.get_info = e1000_get_phy_info_ife;
		phy->ops.check_polarity = e1000_check_polarity_ife;
		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
		break;
	case BME1000_E_PHY_ID:
		phy->type = e1000_phy_bm;
		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
		phy->ops.read_reg = e1000e_read_phy_reg_bm;
		phy->ops.write_reg = e1000e_write_phy_reg_bm;
		phy->ops.commit = e1000e_phy_sw_reset;
		phy->ops.get_info = e1000e_get_phy_info_m88;
		phy->ops.check_polarity = e1000_check_polarity_m88;
		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
		break;
	default:
		return -E1000_ERR_PHY;
	}

	return 0;
}
590
/**
 *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific NVM parameters and function
 *  pointers.
 *
 *  Returns 0 on success, or -E1000_ERR_CONFIG if the flash register
 *  space is required but not mapped.
 **/
static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 gfpreg, sector_base_addr, sector_end_addr;
	u16 i;
	u32 nvm_size;

	nvm->type = e1000_nvm_flash_sw;

	if (hw->mac.type == e1000_pch_spt) {
		/* in SPT, gfpreg doesn't exist. NVM size is taken from the
		 * STRAP register. This is because in SPT the GbE Flash region
		 * is no longer accessed through the flash registers. Instead,
		 * the mechanism has changed, and the Flash region access
		 * registers are now implemented in GbE memory space.
		 */
		nvm->flash_base_addr = 0;
		/* Strap bits 5:1 encode the NVM size in units of
		 * NVM_SIZE_MULTIPLIER bytes.
		 */
		nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1)
		    * NVM_SIZE_MULTIPLIER;
		/* Half the total size: the NVM holds two banks */
		nvm->flash_bank_size = nvm_size / 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
		/* Set the base address for flash register access */
		hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR;
	} else {
		/* Can't read flash registers if register set isn't mapped. */
		if (!hw->flash_address) {
			e_dbg("ERROR: Flash registers not mapped\n");
			return -E1000_ERR_CONFIG;
		}

		gfpreg = er32flash(ICH_FLASH_GFPREG);

		/* sector_X_addr is a "sector"-aligned address (4096 bytes)
		 * Add 1 to sector_end_addr since this sector is included in
		 * the overall size.
		 */
		sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
		sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;

		/* flash_base_addr is byte-aligned */
		nvm->flash_base_addr = sector_base_addr
		    << FLASH_SECTOR_ADDR_SHIFT;

		/* find total size of the NVM, then cut in half since the total
		 * size represents two separate NVM banks.
		 */
		nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
					<< FLASH_SECTOR_ADDR_SHIFT);
		nvm->flash_bank_size /= 2;
		/* Adjust to word count */
		nvm->flash_bank_size /= sizeof(u16);
	}

	nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;

	/* Clear shadow ram: mark every word unmodified/erased (0xFFFF) */
	for (i = 0; i < nvm->word_size; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

	return 0;
}
663
/**
 *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
 *  @hw: pointer to the HW structure
 *
 *  Initialize family-specific MAC parameters and function
 *  pointers.
 *
 *  Always returns 0.
 **/
static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	/* Set media type function pointer */
	hw->phy.media_type = e1000_media_type_copper;

	/* Set mta register count */
	mac->mta_reg_count = 32;
	/* Set rar entry count */
	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
	/* ICH8 reserves one RAR entry */
	if (mac->type == e1000_ich8lan)
		mac->rar_entry_count--;
	/* FWSM register */
	mac->has_fwsm = true;
	/* ARC subsystem not supported */
	mac->arc_subsystem_valid = false;
	/* Adaptive IFS supported */
	mac->adaptive_ifs = true;

	/* LED and other operations */
	switch (mac->type) {
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
		/* ID LED init */
		mac->ops.id_led_init = e1000e_id_led_init_generic;
		/* blink LED */
		mac->ops.blink_led = e1000e_blink_led_generic;
		/* setup LED */
		mac->ops.setup_led = e1000e_setup_led_generic;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_ich8lan;
		mac->ops.led_off = e1000_led_off_ich8lan;
		break;
	case e1000_pch2lan:
		/* PCH2 gets its own RAR count/setter, then shares the
		 * remaining PCH-family ops via fall-through.
		 */
		mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch2lan;
		/* fall-through */
	case e1000_pch_lpt:
	case e1000_pch_spt:
	case e1000_pchlan:
		/* check management mode */
		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
		/* ID LED init */
		mac->ops.id_led_init = e1000_id_led_init_pchlan;
		/* setup LED */
		mac->ops.setup_led = e1000_setup_led_pchlan;
		/* cleanup LED */
		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
		/* turn on/off LED */
		mac->ops.led_on = e1000_led_on_pchlan;
		mac->ops.led_off = e1000_led_off_pchlan;
		break;
	default:
		break;
	}

	/* LPT/SPT override the RAR handling and link setup set above */
	if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) {
		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
		mac->ops.rar_set = e1000_rar_set_pch_lpt;
		mac->ops.setup_physical_interface =
		    e1000_setup_copper_link_pch_lpt;
		mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
	}

	/* Enable PCS Lock-loss workaround for ICH8 */
	if (mac->type == e1000_ich8lan)
		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);

	return 0;
}
747
748/**
749 *  __e1000_access_emi_reg_locked - Read/write EMI register
750 *  @hw: pointer to the HW structure
751 *  @addr: EMI address to program
752 *  @data: pointer to value to read/write from/to the EMI address
753 *  @read: boolean flag to indicate read or write
754 *
755 *  This helper function assumes the SW/FW/HW Semaphore is already acquired.
756 **/
757static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address,
758					 u16 *data, bool read)
759{
760	s32 ret_val;
761
762	ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address);
763	if (ret_val)
764		return ret_val;
765
766	if (read)
767		ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data);
768	else
769		ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data);
770
771	return ret_val;
772}
773
/**
 *  e1000_read_emi_reg_locked - Read Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: pointer to the location that receives the value read from the
 *	EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
{
	return __e1000_access_emi_reg_locked(hw, addr, data, true);
}
786
/**
 *  e1000_write_emi_reg_locked - Write Extended Management Interface register
 *  @hw: pointer to the HW structure
 *  @addr: EMI address to program
 *  @data: value to be written to the EMI address
 *
 *  Assumes the SW/FW/HW Semaphore is already acquired.
 **/
s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
{
	return __e1000_access_emi_reg_locked(hw, addr, &data, false);
}
799
/**
 *  e1000_set_eee_pchlan - Enable/disable EEE support
 *  @hw: pointer to the HW structure
 *
 *  Enable/disable EEE based on setting in dev_spec structure, the duplex of
 *  the link and the EEE capabilities of the link partner.  The LPI Control
 *  register bits will remain set only if/when link is up.
 *
 *  EEE LPI must not be asserted earlier than one second after link is up.
 *  On 82579, EEE LPI should not be enabled until such time otherwise there
 *  can be link issues with some switches.  Other devices can have EEE LPI
 *  enabled immediately upon link up since they have a timer in hardware which
 *  prevents LPI from being asserted too early.
 *
 *  Returns 0 on success (or on unsupported PHY types, where EEE is simply
 *  not configured), otherwise an error from the PHY/EMI accessors.
 **/
s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
{
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	s32 ret_val;
	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;

	/* Select the PHY-specific EMI register addresses */
	switch (hw->phy.type) {
	case e1000_phy_82579:
		lpa = I82579_EEE_LP_ABILITY;
		pcs_status = I82579_EEE_PCS_STATUS;
		adv_addr = I82579_EEE_ADVERTISEMENT;
		break;
	case e1000_phy_i217:
		lpa = I217_EEE_LP_ABILITY;
		pcs_status = I217_EEE_PCS_STATUS;
		adv_addr = I217_EEE_ADVERTISEMENT;
		break;
	default:
		/* EEE not supported on this PHY; nothing to do */
		return 0;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl);
	if (ret_val)
		goto release;

	/* Clear bits that enable EEE in various speeds */
	lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK;

	/* Enable EEE if not disabled by user */
	if (!dev_spec->eee_disable) {
		/* Save off link partner's EEE ability */
		ret_val = e1000_read_emi_reg_locked(hw, lpa,
						    &dev_spec->eee_lp_ability);
		if (ret_val)
			goto release;

		/* Read EEE advertisement */
		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
		if (ret_val)
			goto release;

		/* Enable EEE only for speeds in which the link partner is
		 * EEE capable and for which we advertise EEE.
		 */
		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;

		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
			e1e_rphy_locked(hw, MII_LPA, &data);
			if (data & LPA_100FULL)
				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
			else
				/* EEE is not supported in 100Half, so ignore
				 * partner's EEE in 100 ability if full-duplex
				 * is not advertised.
				 */
				dev_spec->eee_lp_ability &=
				    ~I82579_EEE_100_SUPPORTED;
		}
	}

	if (hw->phy.type == e1000_phy_82579) {
		/* 82579 only: clear the 100Mbps LPI PLL-shut bit */
		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						    &data);
		if (ret_val)
			goto release;

		data &= ~I82579_LPI_100_PLL_SHUT;
		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
						     data);
	}

	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	if (ret_val)
		goto release;

	ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
release:
	hw->phy.ops.release(hw);

	return ret_val;
}
901
/**
 *  e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP
 *  @hw:   pointer to the HW structure
 *  @link: link up bool flag
 *
 *  When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
 *  preventing further DMA write requests.  Workaround the issue by disabling
 *  the de-assertion of the clock request when in 1Gpbs mode.
 *  Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
 *  speeds in order to avoid Tx hangs.
 *
 *  Returns 0 on success, otherwise an error from the PHY/KMRN accessors.
 **/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
	u32 fextnvm6 = er32(FEXTNVM6);
	u32 status = er32(STATUS);
	s32 ret_val = 0;
	u16 reg;

	if (link && (status & E1000_STATUS_SPEED_1000)) {
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val =
		    e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
						&reg);
		if (ret_val)
			goto release;

		/* Temporarily disable K1 while setting the PLL clock
		 * request bit, then restore the original K1 setting.
		 */
		ret_val =
		    e1000e_write_kmrn_reg_locked(hw,
						 E1000_KMRNCTRLSTA_K1_CONFIG,
						 reg &
						 ~E1000_KMRNCTRLSTA_K1_ENABLE);
		if (ret_val)
			goto release;

		usleep_range(10, 20);

		ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK);

		ret_val =
		    e1000e_write_kmrn_reg_locked(hw,
						 E1000_KMRNCTRLSTA_K1_CONFIG,
						 reg);
release:
		hw->phy.ops.release(hw);
	} else {
		/* clear FEXTNVM6 bit 8 on link down or 10/100 */
		fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;

		/* Tx-timeout adjustment only applies to PHY rev <= 5 with
		 * link at 10 (any duplex) or 100 half-duplex.
		 */
		if ((hw->phy.revision > 5) || !link ||
		    ((status & E1000_STATUS_SPEED_100) &&
		     (status & E1000_STATUS_FD)))
			goto update_fextnvm6;

		ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
		if (ret_val)
			return ret_val;

		/* Clear link status transmit timeout */
		reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;

		if (status & E1000_STATUS_SPEED_100) {
			/* Set inband Tx timeout to 5x10us for 100Half */
			reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Do not extend the K1 entry latency for 100Half */
			fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		} else {
			/* Set inband Tx timeout to 50x10us for 10Full/Half */
			reg |= 50 <<
			    I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;

			/* Extend the K1 entry latency for 10 Mbps */
			fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
		}

		ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
		if (ret_val)
			return ret_val;

update_fextnvm6:
		ew32(FEXTNVM6, fextnvm6);
	}

	return ret_val;
}
990
991/**
992 *  e1000_platform_pm_pch_lpt - Set platform power management values
993 *  @hw: pointer to the HW structure
994 *  @link: bool indicating link status
995 *
996 *  Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
997 *  GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
998 *  when link is up (which must not exceed the maximum latency supported
999 *  by the platform), otherwise specify there is no LTR requirement.
1000 *  Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
1001 *  latencies in the LTR Extended Capability Structure in the PCIe Extended
1002 *  Capability register set, on this device LTR is set by writing the
1003 *  equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
1004 *  set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
1005 *  message to the PMC.
1006 **/
1007static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
1008{
1009	u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
1010	    link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
1011	u16 lat_enc = 0;	/* latency encoded */
1012
1013	if (link) {
1014		u16 speed, duplex, scale = 0;
1015		u16 max_snoop, max_nosnoop;
1016		u16 max_ltr_enc;	/* max LTR latency encoded */
1017		s64 lat_ns;	/* latency (ns) */
1018		s64 value;
1019		u32 rxa;
1020
1021		if (!hw->adapter->max_frame_size) {
1022			e_dbg("max_frame_size not set.\n");
1023			return -E1000_ERR_CONFIG;
1024		}
1025
1026		hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
1027		if (!speed) {
1028			e_dbg("Speed not set.\n");
1029			return -E1000_ERR_CONFIG;
1030		}
1031
1032		/* Rx Packet Buffer Allocation size (KB) */
1033		rxa = er32(PBA) & E1000_PBA_RXA_MASK;
1034
1035		/* Determine the maximum latency tolerated by the device.
1036		 *
1037		 * Per the PCIe spec, the tolerated latencies are encoded as
1038		 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
1039		 * a 10-bit value (0-1023) to provide a range from 1 ns to
1040		 * 2^25*(2^10-1) ns.  The scale is encoded as 0=2^0ns,
1041		 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
1042		 */
1043		lat_ns = ((s64)rxa * 1024 -
1044			  (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000;
1045		if (lat_ns < 0)
1046			lat_ns = 0;
1047		else
1048			do_div(lat_ns, speed);
1049
1050		value = lat_ns;
1051		while (value > PCI_LTR_VALUE_MASK) {
1052			scale++;
1053			value = DIV_ROUND_UP(value, (1 << 5));
1054		}
1055		if (scale > E1000_LTRV_SCALE_MAX) {
1056			e_dbg("Invalid LTR latency scale %d\n", scale);
1057			return -E1000_ERR_CONFIG;
1058		}
1059		lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);
1060
1061		/* Determine the maximum latency tolerated by the platform */
1062		pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
1063				     &max_snoop);
1064		pci_read_config_word(hw->adapter->pdev,
1065				     E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
1066		max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
1067
1068		if (lat_enc > max_ltr_enc)
1069			lat_enc = max_ltr_enc;
1070	}
1071
1072	/* Set Snoop and No-Snoop latencies the same */
1073	reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
1074	ew32(LTRV, reg);
1075
1076	return 0;
1077}
1078
1079/**
1080 *  e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
1081 *  @hw: pointer to the HW structure
1082 *  @to_sx: boolean indicating a system power state transition to Sx
1083 *
1084 *  When link is down, configure ULP mode to significantly reduce the power
1085 *  to the PHY.  If on a Manageability Engine (ME) enabled system, tell the
1086 *  ME firmware to start the ULP configuration.  If not on an ME enabled
1087 *  system, configure the ULP mode by software.
1088 */
1089s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
1090{
1091	u32 mac_reg;
1092	s32 ret_val = 0;
1093	u16 phy_reg;
1094
1095	if ((hw->mac.type < e1000_pch_lpt) ||
1096	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1097	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1098	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1099	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1100	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on))
1101		return 0;
1102
1103	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1104		/* Request ME configure ULP mode in the PHY */
1105		mac_reg = er32(H2ME);
1106		mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS;
1107		ew32(H2ME, mac_reg);
1108
1109		goto out;
1110	}
1111
1112	if (!to_sx) {
1113		int i = 0;
1114
1115		/* Poll up to 5 seconds for Cable Disconnected indication */
1116		while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) {
1117			/* Bail if link is re-acquired */
1118			if (er32(STATUS) & E1000_STATUS_LU)
1119				return -E1000_ERR_PHY;
1120
1121			if (i++ == 100)
1122				break;
1123
1124			msleep(50);
1125		}
1126		e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n",
1127		      (er32(FEXT) &
1128		       E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50);
1129	}
1130
1131	ret_val = hw->phy.ops.acquire(hw);
1132	if (ret_val)
1133		goto out;
1134
1135	/* Si workaround for ULP entry flow on i127/rev6 h/w.  Enable
1136	 * LPLU and disable Gig speed when entering ULP
1137	 */
1138	if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) {
1139		ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS,
1140						       &phy_reg);
1141		if (ret_val)
1142			goto release;
1143		phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS;
1144		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS,
1145							phy_reg);
1146		if (ret_val)
1147			goto release;
1148	}
1149
1150	/* Force SMBus mode in PHY */
1151	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1152	if (ret_val)
1153		goto release;
1154	phy_reg |= CV_SMB_CTRL_FORCE_SMBUS;
1155	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1156
1157	/* Force SMBus mode in MAC */
1158	mac_reg = er32(CTRL_EXT);
1159	mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1160	ew32(CTRL_EXT, mac_reg);
1161
1162	/* Set Inband ULP Exit, Reset to SMBus mode and
1163	 * Disable SMBus Release on PERST# in PHY
1164	 */
1165	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1166	if (ret_val)
1167		goto release;
1168	phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS |
1169		    I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1170	if (to_sx) {
1171		if (er32(WUFC) & E1000_WUFC_LNKC)
1172			phy_reg |= I218_ULP_CONFIG1_WOL_HOST;
1173
1174		phy_reg |= I218_ULP_CONFIG1_STICKY_ULP;
1175	} else {
1176		phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT;
1177	}
1178	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1179
1180	/* Set Disable SMBus Release on PERST# in MAC */
1181	mac_reg = er32(FEXTNVM7);
1182	mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST;
1183	ew32(FEXTNVM7, mac_reg);
1184
1185	/* Commit ULP changes in PHY by starting auto ULP configuration */
1186	phy_reg |= I218_ULP_CONFIG1_START;
1187	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1188release:
1189	hw->phy.ops.release(hw);
1190out:
1191	if (ret_val)
1192		e_dbg("Error in ULP enable flow: %d\n", ret_val);
1193	else
1194		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on;
1195
1196	return ret_val;
1197}
1198
1199/**
1200 *  e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP
1201 *  @hw: pointer to the HW structure
1202 *  @force: boolean indicating whether or not to force disabling ULP
1203 *
1204 *  Un-configure ULP mode when link is up, the system is transitioned from
1205 *  Sx or the driver is unloaded.  If on a Manageability Engine (ME) enabled
1206 *  system, poll for an indication from ME that ULP has been un-configured.
1207 *  If not on an ME enabled system, un-configure the ULP mode by software.
1208 *
1209 *  During nominal operation, this function is called when link is acquired
1210 *  to disable ULP mode (force=false); otherwise, for example when unloading
1211 *  the driver or during Sx->S0 transitions, this is called with force=true
1212 *  to forcibly disable ULP.
1213 */
1214static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
1215{
1216	s32 ret_val = 0;
1217	u32 mac_reg;
1218	u16 phy_reg;
1219	int i = 0;
1220
1221	if ((hw->mac.type < e1000_pch_lpt) ||
1222	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) ||
1223	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) ||
1224	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) ||
1225	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) ||
1226	    (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off))
1227		return 0;
1228
1229	if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
1230		if (force) {
1231			/* Request ME un-configure ULP mode in the PHY */
1232			mac_reg = er32(H2ME);
1233			mac_reg &= ~E1000_H2ME_ULP;
1234			mac_reg |= E1000_H2ME_ENFORCE_SETTINGS;
1235			ew32(H2ME, mac_reg);
1236		}
1237
1238		/* Poll up to 100msec for ME to clear ULP_CFG_DONE */
1239		while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
1240			if (i++ == 10) {
1241				ret_val = -E1000_ERR_PHY;
1242				goto out;
1243			}
1244
1245			usleep_range(10000, 20000);
1246		}
1247		e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
1248
1249		if (force) {
1250			mac_reg = er32(H2ME);
1251			mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS;
1252			ew32(H2ME, mac_reg);
1253		} else {
1254			/* Clear H2ME.ULP after ME ULP configuration */
1255			mac_reg = er32(H2ME);
1256			mac_reg &= ~E1000_H2ME_ULP;
1257			ew32(H2ME, mac_reg);
1258		}
1259
1260		goto out;
1261	}
1262
1263	ret_val = hw->phy.ops.acquire(hw);
1264	if (ret_val)
1265		goto out;
1266
1267	if (force)
1268		/* Toggle LANPHYPC Value bit */
1269		e1000_toggle_lanphypc_pch_lpt(hw);
1270
1271	/* Unforce SMBus mode in PHY */
1272	ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg);
1273	if (ret_val) {
1274		/* The MAC might be in PCIe mode, so temporarily force to
1275		 * SMBus mode in order to access the PHY.
1276		 */
1277		mac_reg = er32(CTRL_EXT);
1278		mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
1279		ew32(CTRL_EXT, mac_reg);
1280
1281		msleep(50);
1282
1283		ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL,
1284						       &phy_reg);
1285		if (ret_val)
1286			goto release;
1287	}
1288	phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
1289	e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg);
1290
1291	/* Unforce SMBus mode in MAC */
1292	mac_reg = er32(CTRL_EXT);
1293	mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
1294	ew32(CTRL_EXT, mac_reg);
1295
1296	/* When ULP mode was previously entered, K1 was disabled by the
1297	 * hardware.  Re-Enable K1 in the PHY when exiting ULP.
1298	 */
1299	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg);
1300	if (ret_val)
1301		goto release;
1302	phy_reg |= HV_PM_CTRL_K1_ENABLE;
1303	e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg);
1304
1305	/* Clear ULP enabled configuration */
1306	ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg);
1307	if (ret_val)
1308		goto release;
1309	phy_reg &= ~(I218_ULP_CONFIG1_IND |
1310		     I218_ULP_CONFIG1_STICKY_ULP |
1311		     I218_ULP_CONFIG1_RESET_TO_SMBUS |
1312		     I218_ULP_CONFIG1_WOL_HOST |
1313		     I218_ULP_CONFIG1_INBAND_EXIT |
1314		     I218_ULP_CONFIG1_DISABLE_SMB_PERST);
1315	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1316
1317	/* Commit ULP changes by starting auto ULP configuration */
1318	phy_reg |= I218_ULP_CONFIG1_START;
1319	e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg);
1320
1321	/* Clear Disable SMBus Release on PERST# in MAC */
1322	mac_reg = er32(FEXTNVM7);
1323	mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST;
1324	ew32(FEXTNVM7, mac_reg);
1325
1326release:
1327	hw->phy.ops.release(hw);
1328	if (force) {
1329		e1000_phy_hw_reset(hw);
1330		msleep(50);
1331	}
1332out:
1333	if (ret_val)
1334		e_dbg("Error in ULP disable flow: %d\n", ret_val);
1335	else
1336		hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off;
1337
1338	return ret_val;
1339}
1340
1341/**
1342 *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
1343 *  @hw: pointer to the HW structure
1344 *
1345 *  Checks to see of the link status of the hardware has changed.  If a
1346 *  change in link status has been detected, then we read the PHY registers
1347 *  to get the current speed/duplex if link exists.
1348 **/
1349static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1350{
1351	struct e1000_mac_info *mac = &hw->mac;
1352	s32 ret_val, tipg_reg = 0;
1353	u16 emi_addr, emi_val = 0;
1354	bool link;
1355	u16 phy_reg;
1356
1357	/* We only want to go out to the PHY registers to see if Auto-Neg
1358	 * has completed and/or if our link status has changed.  The
1359	 * get_link_status flag is set upon receiving a Link Status
1360	 * Change or Rx Sequence Error interrupt.
1361	 */
1362	if (!mac->get_link_status)
1363		return 0;
1364
1365	/* First we want to see if the MII Status Register reports
1366	 * link.  If so, then we want to get the current speed/duplex
1367	 * of the PHY.
1368	 */
1369	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
1370	if (ret_val)
1371		return ret_val;
1372
1373	if (hw->mac.type == e1000_pchlan) {
1374		ret_val = e1000_k1_gig_workaround_hv(hw, link);
1375		if (ret_val)
1376			return ret_val;
1377	}
1378
1379	/* When connected at 10Mbps half-duplex, some parts are excessively
1380	 * aggressive resulting in many collisions. To avoid this, increase
1381	 * the IPG and reduce Rx latency in the PHY.
1382	 */
1383	if (((hw->mac.type == e1000_pch2lan) ||
1384	     (hw->mac.type == e1000_pch_lpt) ||
1385	     (hw->mac.type == e1000_pch_spt)) && link) {
1386		u32 reg;
1387
1388		reg = er32(STATUS);
1389		tipg_reg = er32(TIPG);
1390		tipg_reg &= ~E1000_TIPG_IPGT_MASK;
1391
1392		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
1393			tipg_reg |= 0xFF;
1394			/* Reduce Rx latency in analog PHY */
1395			emi_val = 0;
1396		} else {
1397
1398			/* Roll back the default values */
1399			tipg_reg |= 0x08;
1400			emi_val = 1;
1401		}
1402
1403		ew32(TIPG, tipg_reg);
1404
1405		ret_val = hw->phy.ops.acquire(hw);
1406		if (ret_val)
1407			return ret_val;
1408
1409		if (hw->mac.type == e1000_pch2lan)
1410			emi_addr = I82579_RX_CONFIG;
1411		else
1412			emi_addr = I217_RX_CONFIG;
1413		ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val);
1414
1415		hw->phy.ops.release(hw);
1416
1417		if (ret_val)
1418			return ret_val;
1419	}
1420
1421	/* Work-around I218 hang issue */
1422	if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
1423	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
1424	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
1425	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) ||
1426	    (hw->mac.type == e1000_pch_spt)) {
1427		ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1428		if (ret_val)
1429			return ret_val;
1430	}
1431	if ((hw->mac.type == e1000_pch_lpt) ||
1432	    (hw->mac.type == e1000_pch_spt)) {
1433		/* Set platform power management values for
1434		 * Latency Tolerance Reporting (LTR)
1435		 */
1436		ret_val = e1000_platform_pm_pch_lpt(hw, link);
1437		if (ret_val)
1438			return ret_val;
1439	}
1440
1441	/* Clear link partner's EEE ability */
1442	hw->dev_spec.ich8lan.eee_lp_ability = 0;
1443
1444	/* FEXTNVM6 K1-off workaround */
1445	if (hw->mac.type == e1000_pch_spt) {
1446		u32 pcieanacfg = er32(PCIEANACFG);
1447		u32 fextnvm6 = er32(FEXTNVM6);
1448
1449		if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
1450			fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
1451		else
1452			fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
1453
1454		ew32(FEXTNVM6, fextnvm6);
1455	}
1456
1457	if (!link)
1458		return 0;	/* No link detected */
1459
1460	mac->get_link_status = false;
1461
1462	switch (hw->mac.type) {
1463	case e1000_pch2lan:
1464		ret_val = e1000_k1_workaround_lv(hw);
1465		if (ret_val)
1466			return ret_val;
1467		/* fall-thru */
1468	case e1000_pchlan:
1469		if (hw->phy.type == e1000_phy_82578) {
1470			ret_val = e1000_link_stall_workaround_hv(hw);
1471			if (ret_val)
1472				return ret_val;
1473		}
1474
1475		/* Workaround for PCHx parts in half-duplex:
1476		 * Set the number of preambles removed from the packet
1477		 * when it is passed from the PHY to the MAC to prevent
1478		 * the MAC from misinterpreting the packet type.
1479		 */
1480		e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
1481		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
1482
1483		if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
1484			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
1485
1486		e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
1487		break;
1488	default:
1489		break;
1490	}
1491
1492	/* Check if there was DownShift, must be checked
1493	 * immediately after link-up
1494	 */
1495	e1000e_check_downshift(hw);
1496
1497	/* Enable/Disable EEE after link up */
1498	if (hw->phy.type > e1000_phy_82579) {
1499		ret_val = e1000_set_eee_pchlan(hw);
1500		if (ret_val)
1501			return ret_val;
1502	}
1503
1504	/* If we are forcing speed/duplex, then we simply return since
1505	 * we have already determined whether we have link or not.
1506	 */
1507	if (!mac->autoneg)
1508		return -E1000_ERR_CONFIG;
1509
1510	/* Auto-Neg is enabled.  Auto Speed Detection takes care
1511	 * of MAC speed/duplex configuration.  So we only need to
1512	 * configure Collision Distance in the MAC.
1513	 */
1514	mac->ops.config_collision_dist(hw);
1515
1516	/* Configure Flow Control now that Auto-Neg has completed.
1517	 * First, we need to restore the desired flow control
1518	 * settings because we may have had to re-autoneg with a
1519	 * different link partner.
1520	 */
1521	ret_val = e1000e_config_fc_after_link_up(hw);
1522	if (ret_val)
1523		e_dbg("Error configuring flow control\n");
1524
1525	return ret_val;
1526}
1527
1528static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
1529{
1530	struct e1000_hw *hw = &adapter->hw;
1531	s32 rc;
1532
1533	rc = e1000_init_mac_params_ich8lan(hw);
1534	if (rc)
1535		return rc;
1536
1537	rc = e1000_init_nvm_params_ich8lan(hw);
1538	if (rc)
1539		return rc;
1540
1541	switch (hw->mac.type) {
1542	case e1000_ich8lan:
1543	case e1000_ich9lan:
1544	case e1000_ich10lan:
1545		rc = e1000_init_phy_params_ich8lan(hw);
1546		break;
1547	case e1000_pchlan:
1548	case e1000_pch2lan:
1549	case e1000_pch_lpt:
1550	case e1000_pch_spt:
1551		rc = e1000_init_phy_params_pchlan(hw);
1552		break;
1553	default:
1554		break;
1555	}
1556	if (rc)
1557		return rc;
1558
1559	/* Disable Jumbo Frame support on parts with Intel 10/100 PHY or
1560	 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
1561	 */
1562	if ((adapter->hw.phy.type == e1000_phy_ife) ||
1563	    ((adapter->hw.mac.type >= e1000_pch2lan) &&
1564	     (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
1565		adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
1566		adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;
1567
1568		hw->mac.ops.blink_led = NULL;
1569	}
1570
1571	if ((adapter->hw.mac.type == e1000_ich8lan) &&
1572	    (adapter->hw.phy.type != e1000_phy_ife))
1573		adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
1574
1575	/* Enable workaround for 82579 w/ ME enabled */
1576	if ((adapter->hw.mac.type == e1000_pch2lan) &&
1577	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
1578		adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
1579
1580	return 0;
1581}
1582
/* File-scope mutex serializing NVM accesses; shared by the acquire/release
 * NVM ops below.
 */
static DEFINE_MUTEX(nvm_mutex);
1584
1585/**
1586 *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
1587 *  @hw: pointer to the HW structure
1588 *
1589 *  Acquires the mutex for performing NVM operations.
1590 **/
1591static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw)
1592{
1593	mutex_lock(&nvm_mutex);
1594
1595	return 0;
1596}
1597
1598/**
1599 *  e1000_release_nvm_ich8lan - Release NVM mutex
1600 *  @hw: pointer to the HW structure
1601 *
1602 *  Releases the mutex used while performing NVM operations.
1603 **/
1604static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw)
1605{
1606	mutex_unlock(&nvm_mutex);
1607}
1608
1609/**
1610 *  e1000_acquire_swflag_ich8lan - Acquire software control flag
1611 *  @hw: pointer to the HW structure
1612 *
1613 *  Acquires the software control flag for performing PHY and select
1614 *  MAC CSR accesses.
1615 **/
1616static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
1617{
1618	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
1619	s32 ret_val = 0;
1620
1621	if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
1622			     &hw->adapter->state)) {
1623		e_dbg("contention for Phy access\n");
1624		return -E1000_ERR_PHY;
1625	}
1626
1627	while (timeout) {
1628		extcnf_ctrl = er32(EXTCNF_CTRL);
1629		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
1630			break;
1631
1632		mdelay(1);
1633		timeout--;
1634	}
1635
1636	if (!timeout) {
1637		e_dbg("SW has already locked the resource.\n");
1638		ret_val = -E1000_ERR_CONFIG;
1639		goto out;
1640	}
1641
1642	timeout = SW_FLAG_TIMEOUT;
1643
1644	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
1645	ew32(EXTCNF_CTRL, extcnf_ctrl);
1646
1647	while (timeout) {
1648		extcnf_ctrl = er32(EXTCNF_CTRL);
1649		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
1650			break;
1651
1652		mdelay(1);
1653		timeout--;
1654	}
1655
1656	if (!timeout) {
1657		e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
1658		      er32(FWSM), extcnf_ctrl);
1659		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1660		ew32(EXTCNF_CTRL, extcnf_ctrl);
1661		ret_val = -E1000_ERR_CONFIG;
1662		goto out;
1663	}
1664
1665out:
1666	if (ret_val)
1667		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1668
1669	return ret_val;
1670}
1671
1672/**
1673 *  e1000_release_swflag_ich8lan - Release software control flag
1674 *  @hw: pointer to the HW structure
1675 *
1676 *  Releases the software control flag for performing PHY and select
1677 *  MAC CSR accesses.
1678 **/
1679static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
1680{
1681	u32 extcnf_ctrl;
1682
1683	extcnf_ctrl = er32(EXTCNF_CTRL);
1684
1685	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
1686		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
1687		ew32(EXTCNF_CTRL, extcnf_ctrl);
1688	} else {
1689		e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
1690	}
1691
1692	clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
1693}
1694
1695/**
1696 *  e1000_check_mng_mode_ich8lan - Checks management mode
1697 *  @hw: pointer to the HW structure
1698 *
1699 *  This checks if the adapter has any manageability enabled.
1700 *  This is a function pointer entry point only called by read/write
1701 *  routines for the PHY and NVM parts.
1702 **/
1703static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
1704{
1705	u32 fwsm;
1706
1707	fwsm = er32(FWSM);
1708	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1709		((fwsm & E1000_FWSM_MODE_MASK) ==
1710		 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1711}
1712
1713/**
1714 *  e1000_check_mng_mode_pchlan - Checks management mode
1715 *  @hw: pointer to the HW structure
1716 *
1717 *  This checks if the adapter has iAMT enabled.
1718 *  This is a function pointer entry point only called by read/write
1719 *  routines for the PHY and NVM parts.
1720 **/
1721static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
1722{
1723	u32 fwsm;
1724
1725	fwsm = er32(FWSM);
1726	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
1727	    (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
1728}
1729
1730/**
1731 *  e1000_rar_set_pch2lan - Set receive address register
1732 *  @hw: pointer to the HW structure
1733 *  @addr: pointer to the receive address
1734 *  @index: receive address array register
1735 *
1736 *  Sets the receive address array register at index to the address passed
1737 *  in by addr.  For 82579, RAR[0] is the base address register that is to
1738 *  contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1739 *  Use SHRA[0-3] in place of those reserved for ME.
1740 **/
1741static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1742{
1743	u32 rar_low, rar_high;
1744
1745	/* HW expects these in little endian so we reverse the byte order
1746	 * from network order (big endian) to little endian
1747	 */
1748	rar_low = ((u32)addr[0] |
1749		   ((u32)addr[1] << 8) |
1750		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1751
1752	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1753
1754	/* If MAC address zero, no need to set the AV bit */
1755	if (rar_low || rar_high)
1756		rar_high |= E1000_RAH_AV;
1757
1758	if (index == 0) {
1759		ew32(RAL(index), rar_low);
1760		e1e_flush();
1761		ew32(RAH(index), rar_high);
1762		e1e_flush();
1763		return 0;
1764	}
1765
1766	/* RAR[1-6] are owned by manageability.  Skip those and program the
1767	 * next address into the SHRA register array.
1768	 */
1769	if (index < (u32)(hw->mac.rar_entry_count)) {
1770		s32 ret_val;
1771
1772		ret_val = e1000_acquire_swflag_ich8lan(hw);
1773		if (ret_val)
1774			goto out;
1775
1776		ew32(SHRAL(index - 1), rar_low);
1777		e1e_flush();
1778		ew32(SHRAH(index - 1), rar_high);
1779		e1e_flush();
1780
1781		e1000_release_swflag_ich8lan(hw);
1782
1783		/* verify the register updates */
1784		if ((er32(SHRAL(index - 1)) == rar_low) &&
1785		    (er32(SHRAH(index - 1)) == rar_high))
1786			return 0;
1787
1788		e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1789		      (index - 1), er32(FWSM));
1790	}
1791
1792out:
1793	e_dbg("Failed to write receive address at index %d\n", index);
1794	return -E1000_ERR_CONFIG;
1795}
1796
1797/**
1798 *  e1000_rar_get_count_pch_lpt - Get the number of available SHRA
1799 *  @hw: pointer to the HW structure
1800 *
1801 *  Get the number of available receive registers that the Host can
1802 *  program. SHRA[0-10] are the shared receive address registers
1803 *  that are shared between the Host and manageability engine (ME).
1804 *  ME can reserve any number of addresses and the host needs to be
1805 *  able to tell how many available registers it has access to.
1806 **/
1807static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
1808{
1809	u32 wlock_mac;
1810	u32 num_entries;
1811
1812	wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1813	wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1814
1815	switch (wlock_mac) {
1816	case 0:
1817		/* All SHRA[0..10] and RAR[0] available */
1818		num_entries = hw->mac.rar_entry_count;
1819		break;
1820	case 1:
1821		/* Only RAR[0] available */
1822		num_entries = 1;
1823		break;
1824	default:
1825		/* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
1826		num_entries = wlock_mac + 1;
1827		break;
1828	}
1829
1830	return num_entries;
1831}
1832
1833/**
1834 *  e1000_rar_set_pch_lpt - Set receive address registers
1835 *  @hw: pointer to the HW structure
1836 *  @addr: pointer to the receive address
1837 *  @index: receive address array register
1838 *
1839 *  Sets the receive address register array at index to the address passed
1840 *  in by addr. For LPT, RAR[0] is the base address register that is to
1841 *  contain the MAC address. SHRA[0-10] are the shared receive address
1842 *  registers that are shared between the Host and manageability engine (ME).
1843 **/
1844static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1845{
1846	u32 rar_low, rar_high;
1847	u32 wlock_mac;
1848
1849	/* HW expects these in little endian so we reverse the byte order
1850	 * from network order (big endian) to little endian
1851	 */
1852	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
1853		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1854
1855	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1856
1857	/* If MAC address zero, no need to set the AV bit */
1858	if (rar_low || rar_high)
1859		rar_high |= E1000_RAH_AV;
1860
1861	if (index == 0) {
1862		ew32(RAL(index), rar_low);
1863		e1e_flush();
1864		ew32(RAH(index), rar_high);
1865		e1e_flush();
1866		return 0;
1867	}
1868
1869	/* The manageability engine (ME) can lock certain SHRAR registers that
1870	 * it is using - those registers are unavailable for use.
1871	 */
1872	if (index < hw->mac.rar_entry_count) {
1873		wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1874		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1875
1876		/* Check if all SHRAR registers are locked */
1877		if (wlock_mac == 1)
1878			goto out;
1879
1880		if ((wlock_mac == 0) || (index <= wlock_mac)) {
1881			s32 ret_val;
1882
1883			ret_val = e1000_acquire_swflag_ich8lan(hw);
1884
1885			if (ret_val)
1886				goto out;
1887
1888			ew32(SHRAL_PCH_LPT(index - 1), rar_low);
1889			e1e_flush();
1890			ew32(SHRAH_PCH_LPT(index - 1), rar_high);
1891			e1e_flush();
1892
1893			e1000_release_swflag_ich8lan(hw);
1894
1895			/* verify the register updates */
1896			if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1897			    (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
1898				return 0;
1899		}
1900	}
1901
1902out:
1903	e_dbg("Failed to write receive address at index %d\n", index);
1904	return -E1000_ERR_CONFIG;
1905}
1906
1907/**
1908 *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
1909 *  @hw: pointer to the HW structure
1910 *
1911 *  Checks if firmware is blocking the reset of the PHY.
1912 *  This is a function pointer entry point only called by
1913 *  reset routines.
1914 **/
1915static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
1916{
1917	bool blocked = false;
1918	int i = 0;
1919
1920	while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) &&
1921	       (i++ < 10))
1922		usleep_range(10000, 20000);
1923	return blocked ? E1000_BLK_PHY_RESET : 0;
1924}
1925
1926/**
1927 *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
1928 *  @hw: pointer to the HW structure
1929 *
1930 *  Assumes semaphore already acquired.
1931 *
1932 **/
1933static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1934{
1935	u16 phy_data;
1936	u32 strap = er32(STRAP);
1937	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1938	    E1000_STRAP_SMT_FREQ_SHIFT;
1939	s32 ret_val;
1940
1941	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
1942
1943	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
1944	if (ret_val)
1945		return ret_val;
1946
1947	phy_data &= ~HV_SMB_ADDR_MASK;
1948	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1949	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1950
1951	if (hw->phy.type == e1000_phy_i217) {
1952		/* Restore SMBus frequency */
1953		if (freq--) {
1954			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1955			phy_data |= (freq & (1 << 0)) <<
1956			    HV_SMB_ADDR_FREQ_LOW_SHIFT;
1957			phy_data |= (freq & (1 << 1)) <<
1958			    (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1959		} else {
1960			e_dbg("Unsupported SMB frequency in PHY\n");
1961		}
1962	}
1963
1964	return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1965}
1966
1967/**
1968 *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
1969 *  @hw:   pointer to the HW structure
1970 *
1971 *  SW should configure the LCD from the NVM extended configuration region
1972 *  as a workaround for certain parts.
1973 **/
static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
	s32 ret_val = 0;
	u16 word_addr, reg_data, reg_addr, phy_page = 0;

	/* Initialize the PHY from the NVM on ICH platforms.  This
	 * is needed due to an issue where the NVM configuration is
	 * not properly autoloaded after power transitions.
	 * Therefore, after each PHY reset, we will load the
	 * configuration data out of the NVM manually.
	 */
	switch (hw->mac.type) {
	case e1000_ich8lan:
		if (phy->type != e1000_phy_igp_3)
			return ret_val;

		/* Only the ICH8 AMT and IGP_C devices use the non-M SW
		 * config mask; other ICH8 parts fall through to the
		 * ICH8M mask below.
		 */
		if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
		    (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
			break;
		}
		/* Fall-thru */
	case e1000_pchlan:
	case e1000_pch2lan:
	case e1000_pch_lpt:
	case e1000_pch_spt:
		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
		break;
	default:
		return ret_val;
	}

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* FEXTNVM indicates whether SW (vs. HW) should do the LCD config */
	data = er32(FEXTNVM);
	if (!(data & sw_cfg_mask))
		goto release;

	/* Make sure HW does not configure LCD from PHY
	 * extended configuration before SW configuration
	 */
	data = er32(EXTCNF_CTRL);
	if ((hw->mac.type < e1000_pch2lan) &&
	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
		goto release;

	/* Nothing to do if the extended config region is empty */
	cnf_size = er32(EXTCNF_SIZE);
	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
	if (!cnf_size)
		goto release;

	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;

	if (((hw->mac.type == e1000_pchlan) &&
	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
	    (hw->mac.type > e1000_pchlan)) {
		/* HW configures the SMBus address and LEDs when the
		 * OEM and LCD Write Enable bits are set in the NVM.
		 * When both NVM bits are cleared, SW will configure
		 * them instead.
		 */
		ret_val = e1000_write_smbus_addr(hw);
		if (ret_val)
			goto release;

		/* Mirror the MAC's LED configuration into the PHY */
		data = er32(LEDCTL);
		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
							(u16)data);
		if (ret_val)
			goto release;
	}

	/* Configure LCD from extended configuration region. */

	/* cnf_base_addr is in DWORD */
	word_addr = (u16)(cnf_base_addr << 1);

	/* Each config entry is a (data word, address word) pair in NVM */
	for (i = 0; i < cnf_size; i++) {
		ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
		if (ret_val)
			goto release;

		ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
					 1, &reg_addr);
		if (ret_val)
			goto release;

		/* Save off the PHY page for future writes. */
		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
			phy_page = reg_data;
			continue;
		}

		reg_addr &= PHY_REG_MASK;
		reg_addr |= phy_page;

		ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
		if (ret_val)
			goto release;
	}

release:
	hw->phy.ops.release(hw);
	return ret_val;
}
2085
2086/**
2087 *  e1000_k1_gig_workaround_hv - K1 Si workaround
2088 *  @hw:   pointer to the HW structure
2089 *  @link: link up bool flag
2090 *
2091 *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
2092 *  from a lower speed.  This workaround disables K1 whenever link is at 1Gig
2093 *  If link is down, the function will restore the default K1 setting located
2094 *  in the NVM.
2095 **/
static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
{
	s32 ret_val = 0;
	u16 status_reg = 0;
	/* Default to the K1 setting autoloaded from the NVM */
	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;

	if (hw->mac.type != e1000_pchlan)
		return 0;

	/* Wrap the whole flow with the sw flag */
	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
	if (link) {
		if (hw->phy.type == e1000_phy_82578) {
			ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
						  &status_reg);
			if (ret_val)
				goto release;

			/* K1 must be off when a resolved link is up at 1G */
			status_reg &= (BM_CS_STATUS_LINK_UP |
				       BM_CS_STATUS_RESOLVED |
				       BM_CS_STATUS_SPEED_MASK);

			if (status_reg == (BM_CS_STATUS_LINK_UP |
					   BM_CS_STATUS_RESOLVED |
					   BM_CS_STATUS_SPEED_1000))
				k1_enable = false;
		}

		if (hw->phy.type == e1000_phy_82577) {
			ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
			if (ret_val)
				goto release;

			/* Same 1Gbps check via the 82577 status register */
			status_reg &= (HV_M_STATUS_LINK_UP |
				       HV_M_STATUS_AUTONEG_COMPLETE |
				       HV_M_STATUS_SPEED_MASK);

			if (status_reg == (HV_M_STATUS_LINK_UP |
					   HV_M_STATUS_AUTONEG_COMPLETE |
					   HV_M_STATUS_SPEED_1000))
				k1_enable = false;
		}

		/* Link stall fix for link up */
		ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
		if (ret_val)
			goto release;

	} else {
		/* Link stall fix for link down */
		ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
		if (ret_val)
			goto release;
	}

	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2162
2163/**
2164 *  e1000_configure_k1_ich8lan - Configure K1 power state
2165 *  @hw: pointer to the HW structure
 *  @k1_enable: K1 state to configure
2167 *
2168 *  Configure the K1 power state based on the provided parameter.
2169 *  Assumes semaphore already acquired.
2170 *
2171 *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
2172 **/
s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
{
	s32 ret_val;
	u32 ctrl_reg = 0;
	u32 ctrl_ext = 0;
	u32 reg = 0;
	u16 kmrn_reg = 0;

	/* Read-modify-write the K1 enable bit in the KMRN K1 config */
	ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					      &kmrn_reg);
	if (ret_val)
		return ret_val;

	if (k1_enable)
		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
	else
		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;

	ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
					       kmrn_reg);
	if (ret_val)
		return ret_val;

	usleep_range(20, 40);
	/* Save CTRL/CTRL_EXT so they can be restored after the temporary
	 * forced-speed/speed-bypass toggle below.
	 */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_reg = er32(CTRL);

	/* Force speed with the speed-select bits cleared */
	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
	reg |= E1000_CTRL_FRCSPD;
	ew32(CTRL, reg);

	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
	e1e_flush();
	usleep_range(20, 40);
	/* Restore the original CTRL/CTRL_EXT values */
	ew32(CTRL, ctrl_reg);
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
	usleep_range(20, 40);

	return 0;
}
2214
2215/**
2216 *  e1000_oem_bits_config_ich8lan - SW-based LCD Configuration
2217 *  @hw:       pointer to the HW structure
2218 *  @d0_state: boolean if entering d0 or d3 device state
2219 *
2220 *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
2221 *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
2222 *  in NVM determines whether HW should configure LPLU and Gbe Disable.
2223 **/
static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
{
	s32 ret_val = 0;
	u32 mac_reg;
	u16 oem_reg;

	/* OEM bits only exist on PCH (82577) and newer MACs */
	if (hw->mac.type < e1000_pchlan)
		return ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return ret_val;

	/* On PCH, HW owns the OEM bits while OEM write enable is set */
	if (hw->mac.type == e1000_pchlan) {
		mac_reg = er32(EXTCNF_CTRL);
		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
			goto release;
	}

	/* Nothing to do unless the NVM delegates the config to software */
	mac_reg = er32(FEXTNVM);
	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
		goto release;

	/* Derive the PHY OEM bits from the MAC's PHY_CTRL register */
	mac_reg = er32(PHY_CTRL);

	ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
	if (ret_val)
		goto release;

	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);

	if (d0_state) {
		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
			oem_reg |= HV_OEM_BITS_LPLU;
	} else {
		/* For non-D0 states, honor both the D0 and non-D0 bits */
		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
			       E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
			oem_reg |= HV_OEM_BITS_GBE_DIS;

		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
			       E1000_PHY_CTRL_NOND0A_LPLU))
			oem_reg |= HV_OEM_BITS_LPLU;
	}

	/* Set Restart auto-neg to activate the bits */
	if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
	    !hw->phy.ops.check_reset_block(hw))
		oem_reg |= HV_OEM_BITS_RESTART_AN;

	ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);

release:
	hw->phy.ops.release(hw);

	return ret_val;
}
2283
2284/**
2285 *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
2286 *  @hw:   pointer to the HW structure
2287 **/
2288static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
2289{
2290	s32 ret_val;
2291	u16 data;
2292
2293	ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
2294	if (ret_val)
2295		return ret_val;
2296
2297	data |= HV_KMRN_MDIO_SLOW;
2298
2299	ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
2300
2301	return ret_val;
2302}
2303
2304/**
2305 *  e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2306 *  done after every PHY reset.
2307 **/
2308static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2309{
2310	s32 ret_val = 0;
2311	u16 phy_data;
2312
2313	if (hw->mac.type != e1000_pchlan)
2314		return 0;
2315
2316	/* Set MDIO slow mode before any other MDIO access */
2317	if (hw->phy.type == e1000_phy_82577) {
2318		ret_val = e1000_set_mdio_slow_mode_hv(hw);
2319		if (ret_val)
2320			return ret_val;
2321	}
2322
2323	if (((hw->phy.type == e1000_phy_82577) &&
2324	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
2325	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
2326		/* Disable generation of early preamble */
2327		ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
2328		if (ret_val)
2329			return ret_val;
2330
2331		/* Preamble tuning for SSC */
2332		ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
2333		if (ret_val)
2334			return ret_val;
2335	}
2336
2337	if (hw->phy.type == e1000_phy_82578) {
2338		/* Return registers to default by doing a soft reset then
2339		 * writing 0x3140 to the control register.
2340		 */
2341		if (hw->phy.revision < 2) {
2342			e1000e_phy_sw_reset(hw);
2343			ret_val = e1e_wphy(hw, MII_BMCR, 0x3140);
2344		}
2345	}
2346
2347	/* Select page 0 */
2348	ret_val = hw->phy.ops.acquire(hw);
2349	if (ret_val)
2350		return ret_val;
2351
2352	hw->phy.addr = 1;
2353	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
2354	hw->phy.ops.release(hw);
2355	if (ret_val)
2356		return ret_val;
2357
2358	/* Configure the K1 Si workaround during phy reset assuming there is
2359	 * link so that it disables K1 if link is in 1Gbps.
2360	 */
2361	ret_val = e1000_k1_gig_workaround_hv(hw, true);
2362	if (ret_val)
2363		return ret_val;
2364
2365	/* Workaround for link disconnects on a busy hub in half duplex */
2366	ret_val = hw->phy.ops.acquire(hw);
2367	if (ret_val)
2368		return ret_val;
2369	ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
2370	if (ret_val)
2371		goto release;
2372	ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
2373	if (ret_val)
2374		goto release;
2375
2376	/* set MSE higher to enable link to stay up when noise is high */
2377	ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034);
2378release:
2379	hw->phy.ops.release(hw);
2380
2381	return ret_val;
2382}
2383
2384/**
2385 *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
2386 *  @hw:   pointer to the HW structure
2387 **/
void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
{
	u32 mac_reg;
	u16 i, phy_reg = 0;
	s32 ret_val;

	ret_val = hw->phy.ops.acquire(hw);
	if (ret_val)
		return;
	/* Enable access to the PHY wakeup registers (BM_RAR_*) */
	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
	if (ret_val)
		goto release;

	/* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */
	for (i = 0; i < (hw->mac.rar_entry_count); i++) {
		/* Low 32 bits of the receive address */
		mac_reg = er32(RAL(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
					   (u16)(mac_reg & 0xFFFF));
		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
					   (u16)((mac_reg >> 16) & 0xFFFF));

		/* High 16 bits plus the Address Valid flag */
		mac_reg = er32(RAH(i));
		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
					   (u16)(mac_reg & 0xFFFF));
		/* Only the AV bit is propagated to BM_RAR_CTRL */
		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
					   (u16)((mac_reg & E1000_RAH_AV)
						 >> 16));
	}

	/* Restore the original wakeup-register access state */
	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);

release:
	hw->phy.ops.release(hw);
}
2422
2423/**
2424 *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
2425 *  with 82579 PHY
2426 *  @hw: pointer to the HW structure
2427 *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
2428 **/
s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
{
	s32 ret_val = 0;
	u16 phy_reg, data;
	u32 mac_reg;
	u16 i;

	/* Workaround applies to 82579 (PCH2) and newer MACs only */
	if (hw->mac.type < e1000_pch2lan)
		return 0;

	/* disable Rx path while enabling/disabling workaround */
	e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
	if (ret_val)
		return ret_val;

	if (enable) {
		/* Write Rx addresses (rar_entry_count for RAL/H, and
		 * SHRAL/H) and initial CRC values to the MAC
		 */
		for (i = 0; i < hw->mac.rar_entry_count; i++) {
			u8 mac_addr[ETH_ALEN] = { 0 };
			u32 addr_high, addr_low;

			addr_high = er32(RAH(i));
			/* Skip entries not marked Address Valid */
			if (!(addr_high & E1000_RAH_AV))
				continue;
			addr_low = er32(RAL(i));
			mac_addr[0] = (addr_low & 0xFF);
			mac_addr[1] = ((addr_low >> 8) & 0xFF);
			mac_addr[2] = ((addr_low >> 16) & 0xFF);
			mac_addr[3] = ((addr_low >> 24) & 0xFF);
			mac_addr[4] = (addr_high & 0xFF);
			mac_addr[5] = ((addr_high >> 8) & 0xFF);

			/* Seed the per-entry register with the inverted
			 * little-endian CRC of the address
			 */
			ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
		}

		/* Write Rx addresses to the PHY */
		e1000_copy_rx_addrs_to_phy_ich8lan(hw);

		/* Enable jumbo frame workaround in the MAC */
		mac_reg = er32(FFLT_DBG);
		mac_reg &= ~(1 << 14);
		mac_reg |= (7 << 15);
		ew32(FFLT_DBG, mac_reg);

		/* Have the MAC strip the Ethernet CRC (SECRC) */
		mac_reg = er32(RCTL);
		mac_reg |= E1000_RCTL_SECRC;
		ew32(RCTL, mac_reg);

		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
					       &data);
		if (ret_val)
			return ret_val;
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data | (1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_HD_CTRL,
					       &data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Enable jumbo frame workaround in the PHY */
		e1e_rphy(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		data |= (0x37 << 5);
		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(769, 16), &data);
		data &= ~(1 << 13);
		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (E1000_TX_PTR_GAP << 2);
		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, HV_PM_CTRL, &data);
		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
		if (ret_val)
			return ret_val;
	} else {
		/* Write MAC register values back to h/w defaults */
		mac_reg = er32(FFLT_DBG);
		mac_reg &= ~(0xF << 14);
		ew32(FFLT_DBG, mac_reg);

		/* Stop stripping the Ethernet CRC */
		mac_reg = er32(RCTL);
		mac_reg &= ~E1000_RCTL_SECRC;
		ew32(RCTL, mac_reg);

		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
					       &data);
		if (ret_val)
			return ret_val;
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_CTRL_OFFSET,
						data & ~(1 << 0));
		if (ret_val)
			return ret_val;
		ret_val = e1000e_read_kmrn_reg(hw,
					       E1000_KMRNCTRLSTA_HD_CTRL,
					       &data);
		if (ret_val)
			return ret_val;
		data &= ~(0xF << 8);
		data |= (0xB << 8);
		ret_val = e1000e_write_kmrn_reg(hw,
						E1000_KMRNCTRLSTA_HD_CTRL,
						data);
		if (ret_val)
			return ret_val;

		/* Write PHY register values back to h/w defaults */
		e1e_rphy(hw, PHY_REG(769, 23), &data);
		data &= ~(0x7F << 5);
		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(769, 16), &data);
		data |= (1 << 13);
		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, PHY_REG(776, 20), &data);
		data &= ~(0x3FF << 2);
		data |= (0x8 << 2);
		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
		if (ret_val)
			return ret_val;
		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
		if (ret_val)
			return ret_val;
		e1e_rphy(hw, HV_PM_CTRL, &data);
		ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
		if (ret_val)
			return ret_val;
	}

	/* re-enable Rx path after enabling/disabling workaround */
	return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
}
2590
2591/**
2592 *  e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
2593 *  done after every PHY reset.
2594 **/
2595static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
2596{
2597	s32 ret_val = 0;
2598
2599	if (hw->mac.type != e1000_pch2lan)
2600		return 0;
2601
2602	/* Set MDIO slow mode before any other MDIO access */
2603	ret_val = e1000_set_mdio_slow_mode_hv(hw);
2604	if (ret_val)
2605		return ret_val;
2606
2607	ret_val = hw->phy.ops.acquire(hw);
2608	if (ret_val)
2609		return ret_val;
2610	/* set MSE higher to enable link to stay up when noise is high */
2611	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034);
2612	if (ret_val)
2613		goto release;
2614	/* drop link after 5 times MSE threshold was reached */
2615	ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005);
2616release:
2617	hw->phy.ops.release(hw);
2618
2619	return ret_val;
2620}
2621
2622/**
 *  e1000_k1_workaround_lv - K1 Si workaround
2624 *  @hw:   pointer to the HW structure
2625 *
2626 *  Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
2627 *  Disable K1 in 1000Mbps and 100Mbps
2628 **/
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 status_reg = 0;

	/* 82579 (PCH2) only */
	if (hw->mac.type != e1000_pch2lan)
		return 0;

	/* Set K1 beacon duration based on 10Mbs speed */
	ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
	if (ret_val)
		return ret_val;

	/* Only act once link is up and auto-negotiation has completed */
	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
		if (status_reg &
		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			u16 pm_phy_reg;

			/* LV 1G/100 Packet drop issue wa  */
			ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
			if (ret_val)
				return ret_val;
			/* Disable K1 at 1Gbps/100Mbps link speeds */
			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
			ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
			if (ret_val)
				return ret_val;
		} else {
			u32 mac_reg;

			/* At 10Mbps, set the K1 beacon duration to 16 usec */
			mac_reg = er32(FEXTNVM4);
			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
			ew32(FEXTNVM4, mac_reg);
		}
	}

	return ret_val;
}
2668
2669/**
2670 *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
2671 *  @hw:   pointer to the HW structure
2672 *  @gate: boolean set to true to gate, false to ungate
2673 *
2674 *  Gate/ungate the automatic PHY configuration via hardware; perform
2675 *  the configuration via software instead.
2676 **/
2677static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
2678{
2679	u32 extcnf_ctrl;
2680
2681	if (hw->mac.type < e1000_pch2lan)
2682		return;
2683
2684	extcnf_ctrl = er32(EXTCNF_CTRL);
2685
2686	if (gate)
2687		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2688	else
2689		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
2690
2691	ew32(EXTCNF_CTRL, extcnf_ctrl);
2692}
2693
2694/**
2695 *  e1000_lan_init_done_ich8lan - Check for PHY config completion
2696 *  @hw: pointer to the HW structure
2697 *
2698 *  Check the appropriate indication the MAC has finished configuring the
2699 *  PHY after a software reset.
2700 **/
2701static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
2702{
2703	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
2704
2705	/* Wait for basic configuration completes before proceeding */
2706	do {
2707		data = er32(STATUS);
2708		data &= E1000_STATUS_LAN_INIT_DONE;
2709		usleep_range(100, 200);
2710	} while ((!data) && --loop);
2711
2712	/* If basic configuration is incomplete before the above loop
2713	 * count reaches 0, loading the configuration from NVM will
2714	 * leave the PHY in a bad state possibly resulting in no link.
2715	 */
2716	if (loop == 0)
2717		e_dbg("LAN_INIT_DONE not set, increase timeout\n");
2718
2719	/* Clear the Init Done bit for the next init event */
2720	data = er32(STATUS);
2721	data &= ~E1000_STATUS_LAN_INIT_DONE;
2722	ew32(STATUS, data);
2723}
2724
2725/**
2726 *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
2727 *  @hw: pointer to the HW structure
2728 **/
static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
{
	s32 ret_val = 0;
	u16 reg;

	/* Nothing to do while firmware blocks PHY resets */
	if (hw->phy.ops.check_reset_block(hw))
		return 0;

	/* Allow time for h/w to get to quiescent state after reset */
	usleep_range(10000, 20000);

	/* Perform any necessary post-reset workarounds */
	switch (hw->mac.type) {
	case e1000_pchlan:
		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			return ret_val;
		break;
	case e1000_pch2lan:
		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
		if (ret_val)
			return ret_val;
		break;
	default:
		break;
	}

	/* Clear the host wakeup bit after lcd reset */
	if (hw->mac.type >= e1000_pchlan) {
		e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
		reg &= ~BM_WUC_HOST_WU_BIT;
		e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
	}

	/* Configure the LCD with the extended configuration region in NVM */
	ret_val = e1000_sw_lcd_config_ich8lan(hw);
	if (ret_val)
		return ret_val;

	/* Configure the LCD with the OEM bits in NVM */
	ret_val = e1000_oem_bits_config_ich8lan(hw, true);

	if (hw->mac.type == e1000_pch2lan) {
		/* Ungate automatic PHY configuration on non-managed 82579 */
		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
			usleep_range(10000, 20000);
			e1000_gate_hw_phy_config_ich8lan(hw, false);
		}

		/* Set EEE LPI Update Timer to 200usec */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;
		ret_val = e1000_write_emi_reg_locked(hw,
						     I82579_LPI_UPDATE_TIMER,
						     0x1387);
		hw->phy.ops.release(hw);
	}

	return ret_val;
}
2790
2791/**
2792 *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
2793 *  @hw: pointer to the HW structure
2794 *
2795 *  Resets the PHY
2796 *  This is a function pointer entry point called by drivers
2797 *  or other shared routines.
2798 **/
2799static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
2800{
2801	s32 ret_val = 0;
2802
2803	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
2804	if ((hw->mac.type == e1000_pch2lan) &&
2805	    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
2806		e1000_gate_hw_phy_config_ich8lan(hw, true);
2807
2808	ret_val = e1000e_phy_hw_reset_generic(hw);
2809	if (ret_val)
2810		return ret_val;
2811
2812	return e1000_post_phy_reset_ich8lan(hw);
2813}
2814
2815/**
2816 *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
2817 *  @hw: pointer to the HW structure
2818 *  @active: true to enable LPLU, false to disable
2819 *
2820 *  Sets the LPLU state according to the active flag.  For PCH, if OEM write
2821 *  bit are disabled in the NVM, writing the LPLU bits in the MAC will not set
2822 *  the phy speed. This function will manually set the LPLU bit and restart
2823 *  auto-neg as hw would do. D3 and D0 LPLU will call the same function
2824 *  since it configures the same bit.
2825 **/
2826static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
2827{
2828	s32 ret_val;
2829	u16 oem_reg;
2830
2831	ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
2832	if (ret_val)
2833		return ret_val;
2834
2835	if (active)
2836		oem_reg |= HV_OEM_BITS_LPLU;
2837	else
2838		oem_reg &= ~HV_OEM_BITS_LPLU;
2839
2840	if (!hw->phy.ops.check_reset_block(hw))
2841		oem_reg |= HV_OEM_BITS_RESTART_AN;
2842
2843	return e1e_wphy(hw, HV_OEM_BITS, oem_reg);
2844}
2845
2846/**
2847 *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
2848 *  @hw: pointer to the HW structure
2849 *  @active: true to enable LPLU, false to disable
2850 *
2851 *  Sets the LPLU D0 state according to the active flag.  When
2852 *  activating LPLU this function also disables smart speed
2853 *  and vice versa.  LPLU will not be activated unless the
2854 *  device autonegotiation advertisement meets standards of
2855 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2856 *  This is a function pointer entry point only called by
2857 *  PHY setup routines.
2858 **/
static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = 0;
	u16 data;

	/* No D0 LPLU handling for the IFE PHY */
	if (phy->type == e1000_phy_ife)
		return 0;

	phy_ctrl = er32(PHY_CTRL);

	if (active) {
		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		/* SmartSpeed adjustment below applies to igp_3 PHYs only */
		if (phy->type != e1000_phy_igp_3)
			return 0;

		/* Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		if (ret_val)
			return ret_val;
		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
		if (ret_val)
			return ret_val;
	} else {
		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		/* SmartSpeed adjustment below applies to igp_3 PHYs only */
		if (phy->type != e1000_phy_igp_3)
			return 0;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		}
	}

	return 0;
}
2931
2932/**
2933 *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
2934 *  @hw: pointer to the HW structure
2935 *  @active: true to enable LPLU, false to disable
2936 *
2937 *  Sets the LPLU D3 state according to the active flag.  When
2938 *  activating LPLU this function also disables smart speed
2939 *  and vice versa.  LPLU will not be activated unless the
2940 *  device autonegotiation advertisement meets standards of
2941 *  either 10 or 10/100 or 10/100/1000 at all duplexes.
2942 *  This is a function pointer entry point only called by
2943 *  PHY setup routines.
2944 **/
static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
{
	struct e1000_phy_info *phy = &hw->phy;
	u32 phy_ctrl;
	s32 ret_val = 0;
	u16 data;

	phy_ctrl = er32(PHY_CTRL);

	if (!active) {
		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		/* SmartSpeed adjustment below applies to igp_3 PHYs only */
		if (phy->type != e1000_phy_igp_3)
			return 0;

		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
		 * during Dx states where the power conservation is most
		 * important.  During driver activity we should enable
		 * SmartSpeed, so performance is maintained.
		 */
		if (phy->smart_speed == e1000_smart_speed_on) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data |= IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		} else if (phy->smart_speed == e1000_smart_speed_off) {
			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   &data);
			if (ret_val)
				return ret_val;

			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
					   data);
			if (ret_val)
				return ret_val;
		}
	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
		/* Only enable LPLU when the advertisement includes the
		 * lower speeds (see function header comment)
		 */
		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
		ew32(PHY_CTRL, phy_ctrl);

		/* SmartSpeed adjustment below applies to igp_3 PHYs only */
		if (phy->type != e1000_phy_igp_3)
			return 0;

		/* Call gig speed drop workaround on LPLU before accessing
		 * any PHY registers
		 */
		if (hw->mac.type == e1000_ich8lan)
			e1000e_gig_downshift_workaround_ich8lan(hw);

		/* When LPLU is enabled, we should disable SmartSpeed */
		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
		if (ret_val)
			return ret_val;

		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
	}

	return ret_val;
}
3015
3016/**
3017 *  e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1
3018 *  @hw: pointer to the HW structure
3019 *  @bank:  pointer to the variable that returns the active bank
3020 *
3021 *  Reads signature byte from the NVM using the flash access registers.
3022 *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
3023 **/
static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
{
	u32 eecd;
	struct e1000_nvm_info *nvm = &hw->nvm;
	/* Bank 1 starts one flash-bank-size (in bytes) past bank 0 */
	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
	/* Byte offset of the signature byte within a bank */
	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
	u8 sig_byte = 0;
	s32 ret_val;

	switch (hw->mac.type) {
		/* In SPT, read from the CTRL_EXT reg instead of
		 * accessing the sector valid bits from the nvm
		 */
	case e1000_pch_spt:
		/* NVMVS field: 0 and 1 mean no valid bank; otherwise the
		 * active bank is the field value minus 2.
		 */
		*bank = er32(CTRL_EXT)
		    & E1000_CTRL_EXT_NVMVS;
		if ((*bank == 0) || (*bank == 1)) {
			e_dbg("ERROR: No valid NVM bank present\n");
			return -E1000_ERR_NVM;
		} else {
			*bank = *bank - 2;
			return 0;
		}
		break;
	case e1000_ich8lan:
	case e1000_ich9lan:
		/* Prefer the SEC1VAL indication in EECD when it is valid */
		eecd = er32(EECD);
		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
		    E1000_EECD_SEC1VAL_VALID_MASK) {
			if (eecd & E1000_EECD_SEC1VAL)
				*bank = 1;
			else
				*bank = 0;

			return 0;
		}
		e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n");
		/* fall-thru */
	default:
		/* set bank to 0 in case flash read fails */
		*bank = 0;

		/* Check bank 0 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 0;
			return 0;
		}

		/* Check bank 1 */
		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
							bank1_offset,
							&sig_byte);
		if (ret_val)
			return ret_val;
		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
		    E1000_ICH_NVM_SIG_VALUE) {
			*bank = 1;
			return 0;
		}

		e_dbg("ERROR: No valid NVM bank present\n");
		return -E1000_ERR_NVM;
	}
}
3093
3094/**
3095 *  e1000_read_nvm_spt - NVM access for SPT
3096 *  @hw: pointer to the HW structure
3097 *  @offset: The offset (in bytes) of the word(s) to read.
3098 *  @words: Size of data to read in words.
3099 *  @data: pointer to the word(s) to read at offset.
3100 *
3101 *  Reads a word(s) from the NVM
3102 **/
3103static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words,
3104			      u16 *data)
3105{
3106	struct e1000_nvm_info *nvm = &hw->nvm;
3107	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3108	u32 act_offset;
3109	s32 ret_val = 0;
3110	u32 bank = 0;
3111	u32 dword = 0;
3112	u16 offset_to_read;
3113	u16 i;
3114
3115	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3116	    (words == 0)) {
3117		e_dbg("nvm parameter(s) out of bounds\n");
3118		ret_val = -E1000_ERR_NVM;
3119		goto out;
3120	}
3121
3122	nvm->ops.acquire(hw);
3123
3124	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3125	if (ret_val) {
3126		e_dbg("Could not detect valid bank, assuming bank 0\n");
3127		bank = 0;
3128	}
3129
3130	act_offset = (bank) ? nvm->flash_bank_size : 0;
3131	act_offset += offset;
3132
3133	ret_val = 0;
3134
3135	for (i = 0; i < words; i += 2) {
3136		if (words - i == 1) {
3137			if (dev_spec->shadow_ram[offset + i].modified) {
3138				data[i] =
3139				    dev_spec->shadow_ram[offset + i].value;
3140			} else {
3141				offset_to_read = act_offset + i -
3142				    ((act_offset + i) % 2);
3143				ret_val =
3144				  e1000_read_flash_dword_ich8lan(hw,
3145								 offset_to_read,
3146								 &dword);
3147				if (ret_val)
3148					break;
3149				if ((act_offset + i) % 2 == 0)
3150					data[i] = (u16)(dword & 0xFFFF);
3151				else
3152					data[i] = (u16)((dword >> 16) & 0xFFFF);
3153			}
3154		} else {
3155			offset_to_read = act_offset + i;
3156			if (!(dev_spec->shadow_ram[offset + i].modified) ||
3157			    !(dev_spec->shadow_ram[offset + i + 1].modified)) {
3158				ret_val =
3159				  e1000_read_flash_dword_ich8lan(hw,
3160								 offset_to_read,
3161								 &dword);
3162				if (ret_val)
3163					break;
3164			}
3165			if (dev_spec->shadow_ram[offset + i].modified)
3166				data[i] =
3167				    dev_spec->shadow_ram[offset + i].value;
3168			else
3169				data[i] = (u16)(dword & 0xFFFF);
3170			if (dev_spec->shadow_ram[offset + i].modified)
3171				data[i + 1] =
3172				    dev_spec->shadow_ram[offset + i + 1].value;
3173			else
3174				data[i + 1] = (u16)(dword >> 16 & 0xFFFF);
3175		}
3176	}
3177
3178	nvm->ops.release(hw);
3179
3180out:
3181	if (ret_val)
3182		e_dbg("NVM read error: %d\n", ret_val);
3183
3184	return ret_val;
3185}
3186
3187/**
3188 *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
3189 *  @hw: pointer to the HW structure
3190 *  @offset: The offset (in bytes) of the word(s) to read.
3191 *  @words: Size of data to read in words
3192 *  @data: Pointer to the word(s) to read at offset.
3193 *
3194 *  Reads a word(s) from the NVM using the flash access registers.
3195 **/
3196static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3197				  u16 *data)
3198{
3199	struct e1000_nvm_info *nvm = &hw->nvm;
3200	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3201	u32 act_offset;
3202	s32 ret_val = 0;
3203	u32 bank = 0;
3204	u16 i, word;
3205
3206	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3207	    (words == 0)) {
3208		e_dbg("nvm parameter(s) out of bounds\n");
3209		ret_val = -E1000_ERR_NVM;
3210		goto out;
3211	}
3212
3213	nvm->ops.acquire(hw);
3214
3215	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3216	if (ret_val) {
3217		e_dbg("Could not detect valid bank, assuming bank 0\n");
3218		bank = 0;
3219	}
3220
3221	act_offset = (bank) ? nvm->flash_bank_size : 0;
3222	act_offset += offset;
3223
3224	ret_val = 0;
3225	for (i = 0; i < words; i++) {
3226		if (dev_spec->shadow_ram[offset + i].modified) {
3227			data[i] = dev_spec->shadow_ram[offset + i].value;
3228		} else {
3229			ret_val = e1000_read_flash_word_ich8lan(hw,
3230								act_offset + i,
3231								&word);
3232			if (ret_val)
3233				break;
3234			data[i] = word;
3235		}
3236	}
3237
3238	nvm->ops.release(hw);
3239
3240out:
3241	if (ret_val)
3242		e_dbg("NVM read error: %d\n", ret_val);
3243
3244	return ret_val;
3245}
3246
/**
 *  e1000_flash_cycle_init_ich8lan - Initialize flash
 *  @hw: pointer to the HW structure
 *
 *  This function does initial flash setup so that a new read/write/erase cycle
 *  can be started.  Returns 0 when the controller is idle and ready for a
 *  new cycle, -E1000_ERR_NVM when the descriptor is invalid or a previous
 *  cycle never completes.
 **/
static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
{
	union ich8_hws_flash_status hsfsts;
	s32 ret_val = -E1000_ERR_NVM;

	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);

	/* Check if the flash descriptor is valid */
	if (!hsfsts.hsf_status.fldesvalid) {
		e_dbg("Flash descriptor invalid.  SW Sequencing must be used.\n");
		return -E1000_ERR_NVM;
	}

	/* Clear FCERR and DAEL in hw status by writing 1 */
	hsfsts.hsf_status.flcerr = 1;
	hsfsts.hsf_status.dael = 1;
	/* On SPT only 32-bit flash register accesses are supported, so the
	 * 16-bit status value is written through the 32-bit accessor.
	 */
	if (hw->mac.type == e1000_pch_spt)
		ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
	else
		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);

	/* Either we should have a hardware SPI cycle in progress
	 * bit to check against, in order to start a new cycle or
	 * FDONE bit should be changed in the hardware so that it
	 * is 1 after hardware reset, which can then be used as an
	 * indication whether a cycle is in progress or has been
	 * completed.
	 */

	if (!hsfsts.hsf_status.flcinprog) {
		/* There is no cycle running at present,
		 * so we can start a cycle.
		 * Begin by setting Flash Cycle Done.
		 */
		hsfsts.hsf_status.flcdone = 1;
		if (hw->mac.type == e1000_pch_spt)
			ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF);
		else
			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		ret_val = 0;
	} else {
		s32 i;

		/* Otherwise poll for sometime so the current
		 * cycle has a chance to end before giving up.
		 */
		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (!hsfsts.hsf_status.flcinprog) {
				ret_val = 0;
				break;
			}
			udelay(1);
		}
		if (!ret_val) {
			/* Successful in waiting for previous cycle to timeout,
			 * now set the Flash Cycle Done.
			 */
			hsfsts.hsf_status.flcdone = 1;
			if (hw->mac.type == e1000_pch_spt)
				ew32flash(ICH_FLASH_HSFSTS,
					  hsfsts.regval & 0xFFFF);
			else
				ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
		} else {
			e_dbg("Flash controller busy, cannot get access\n");
		}
	}

	return ret_val;
}
3325
/**
 *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
 *  @hw: pointer to the HW structure
 *  @timeout: maximum time to wait for completion
 *
 *  This function starts a flash cycle and waits for its completion.
 *  Returns 0 when the cycle completes without a flash cycle error,
 *  -E1000_ERR_NVM on error or timeout.
 **/
static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
{
	union ich8_hws_flash_ctrl hsflctl;
	union ich8_hws_flash_status hsfsts;
	u32 i = 0;

	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
	/* On SPT the control word is the upper 16 bits of the 32-bit
	 * HSFSTS register and only 32-bit accesses are supported.
	 */
	if (hw->mac.type == e1000_pch_spt)
		hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
	else
		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
	hsflctl.hsf_ctrl.flcgo = 1;

	if (hw->mac.type == e1000_pch_spt)
		ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
	else
		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

	/* wait till FDONE bit is set to 1 */
	do {
		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
		if (hsfsts.hsf_status.flcdone)
			break;
		udelay(1);
	} while (i++ < timeout);

	/* Done without a flash cycle error means success. */
	if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
		return 0;

	return -E1000_ERR_NVM;
}
3364
3365/**
3366 *  e1000_read_flash_dword_ich8lan - Read dword from flash
3367 *  @hw: pointer to the HW structure
3368 *  @offset: offset to data location
3369 *  @data: pointer to the location for storing the data
3370 *
3371 *  Reads the flash dword at offset into data.  Offset is converted
3372 *  to bytes before read.
3373 **/
3374static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset,
3375					  u32 *data)
3376{
3377	/* Must convert word offset into bytes. */
3378	offset <<= 1;
3379	return e1000_read_flash_data32_ich8lan(hw, offset, data);
3380}
3381
3382/**
3383 *  e1000_read_flash_word_ich8lan - Read word from flash
3384 *  @hw: pointer to the HW structure
3385 *  @offset: offset to data location
3386 *  @data: pointer to the location for storing the data
3387 *
3388 *  Reads the flash word at offset into data.  Offset is converted
3389 *  to bytes before read.
3390 **/
3391static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
3392					 u16 *data)
3393{
3394	/* Must convert offset into bytes. */
3395	offset <<= 1;
3396
3397	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
3398}
3399
3400/**
3401 *  e1000_read_flash_byte_ich8lan - Read byte from flash
3402 *  @hw: pointer to the HW structure
3403 *  @offset: The offset of the byte to read.
3404 *  @data: Pointer to a byte to store the value read.
3405 *
3406 *  Reads a single byte from the NVM using the flash access registers.
3407 **/
3408static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
3409					 u8 *data)
3410{
3411	s32 ret_val;
3412	u16 word = 0;
3413
3414	/* In SPT, only 32 bits access is supported,
3415	 * so this function should not be called.
3416	 */
3417	if (hw->mac.type == e1000_pch_spt)
3418		return -E1000_ERR_NVM;
3419	else
3420		ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
3421
3422	if (ret_val)
3423		return ret_val;
3424
3425	*data = (u8)word;
3426
3427	return 0;
3428}
3429
/**
 *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the byte or word to read.
 *  @size: Size of data to read, 1=byte 2=word
 *  @data: Pointer to the word to store the value read.
 *
 *  Reads a byte or word from the NVM using the flash access registers.
 *  The read is retried up to ICH_FLASH_CYCLE_REPEAT_COUNT times on a
 *  flash cycle error.  Returns 0 on success, -E1000_ERR_NVM otherwise.
 **/
static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
					 u8 size, u16 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	u32 flash_data = 0;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	/* Only 1- or 2-byte reads within the addressable range. */
	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
		return -E1000_ERR_NVM;

	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);

	do {
		udelay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val)
			break;

		/* Program the cycle: data byte count and READ opcode. */
		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = size - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);

		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

		ret_val =
		    e1000_flash_cycle_ich8lan(hw,
					      ICH_FLASH_READ_COMMAND_TIMEOUT);

		/* Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first msb to lsb
		 */
		if (!ret_val) {
			flash_data = er32flash(ICH_FLASH_FDATA0);
			if (size == 1)
				*data = (u8)(flash_data & 0x000000FF);
			else if (size == 2)
				*data = (u16)(flash_data & 0x0000FFFF);
			break;
		} else {
			/* If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (!hsfsts.hsf_status.flcdone) {
				e_dbg("Timeout error - flash cycle did not complete.\n");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
3505
/**
 *  e1000_read_flash_data32_ich8lan - Read dword from NVM
 *  @hw: pointer to the HW structure
 *  @offset: The offset (in bytes) of the dword to read.
 *  @data: Pointer to the dword to store the value read.
 *
 *  Reads a dword from the NVM using the flash access registers.
 *  SPT-only: other MAC types must use e1000_read_flash_data_ich8lan().
 *  The read is retried up to ICH_FLASH_CYCLE_REPEAT_COUNT times on a
 *  flash cycle error.
 **/

static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
					   u32 *data)
{
	union ich8_hws_flash_status hsfsts;
	union ich8_hws_flash_ctrl hsflctl;
	u32 flash_linear_addr;
	s32 ret_val = -E1000_ERR_NVM;
	u8 count = 0;

	if (offset > ICH_FLASH_LINEAR_ADDR_MASK ||
	    hw->mac.type != e1000_pch_spt)
		return -E1000_ERR_NVM;
	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
			     hw->nvm.flash_base_addr);

	do {
		udelay(1);
		/* Steps */
		ret_val = e1000_flash_cycle_init_ich8lan(hw);
		if (ret_val)
			break;
		/* In SPT, This register is in Lan memory space, not flash.
		 * Therefore, only 32 bit access is supported
		 */
		hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;

		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
		/* In SPT, This register is in Lan memory space, not flash.
		 * Therefore, only 32 bit access is supported
		 */
		ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16);
		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);

		ret_val =
		   e1000_flash_cycle_ich8lan(hw,
					     ICH_FLASH_READ_COMMAND_TIMEOUT);

		/* Check if FCERR is set to 1, if set to 1, clear it
		 * and try the whole sequence a few more times, else
		 * read in (shift in) the Flash Data0, the order is
		 * least significant byte first msb to lsb
		 */
		if (!ret_val) {
			*data = er32flash(ICH_FLASH_FDATA0);
			break;
		} else {
			/* If we've gotten here, then things are probably
			 * completely hosed, but if the error condition is
			 * detected, it won't hurt to give it another try...
			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
			 */
			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
			if (hsfsts.hsf_status.flcerr) {
				/* Repeat for some time before giving up. */
				continue;
			} else if (!hsfsts.hsf_status.flcdone) {
				e_dbg("Timeout error - flash cycle did not complete.\n");
				break;
			}
		}
	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);

	return ret_val;
}
3581
3582/**
3583 *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
3584 *  @hw: pointer to the HW structure
3585 *  @offset: The offset (in bytes) of the word(s) to write.
3586 *  @words: Size of data to write in words
3587 *  @data: Pointer to the word(s) to write at offset.
3588 *
3589 *  Writes a byte or word to the NVM using the flash access registers.
3590 **/
3591static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
3592				   u16 *data)
3593{
3594	struct e1000_nvm_info *nvm = &hw->nvm;
3595	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3596	u16 i;
3597
3598	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
3599	    (words == 0)) {
3600		e_dbg("nvm parameter(s) out of bounds\n");
3601		return -E1000_ERR_NVM;
3602	}
3603
3604	nvm->ops.acquire(hw);
3605
3606	for (i = 0; i < words; i++) {
3607		dev_spec->shadow_ram[offset + i].modified = true;
3608		dev_spec->shadow_ram[offset + i].value = data[i];
3609	}
3610
3611	nvm->ops.release(hw);
3612
3613	return 0;
3614}
3615
3616/**
3617 *  e1000_update_nvm_checksum_spt - Update the checksum for NVM
3618 *  @hw: pointer to the HW structure
3619 *
3620 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
3621 *  which writes the checksum to the shadow ram.  The changes in the shadow
3622 *  ram are then committed to the EEPROM by processing each bank at a time
3623 *  checking for the modified bit and writing only the pending changes.
3624 *  After a successful commit, the shadow ram is cleared and is ready for
3625 *  future writes.
3626 **/
3627static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw)
3628{
3629	struct e1000_nvm_info *nvm = &hw->nvm;
3630	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3631	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
3632	s32 ret_val;
3633	u32 dword = 0;
3634
3635	ret_val = e1000e_update_nvm_checksum_generic(hw);
3636	if (ret_val)
3637		goto out;
3638
3639	if (nvm->type != e1000_nvm_flash_sw)
3640		goto out;
3641
3642	nvm->ops.acquire(hw);
3643
3644	/* We're writing to the opposite bank so if we're on bank 1,
3645	 * write to bank 0 etc.  We also need to erase the segment that
3646	 * is going to be written
3647	 */
3648	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
3649	if (ret_val) {
3650		e_dbg("Could not detect valid bank, assuming bank 0\n");
3651		bank = 0;
3652	}
3653
3654	if (bank == 0) {
3655		new_bank_offset = nvm->flash_bank_size;
3656		old_bank_offset = 0;
3657		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
3658		if (ret_val)
3659			goto release;
3660	} else {
3661		old_bank_offset = nvm->flash_bank_size;
3662		new_bank_offset = 0;
3663		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
3664		if (ret_val)
3665			goto release;
3666	}
3667	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) {
3668		/* Determine whether to write the value stored
3669		 * in the other NVM bank or a modified value stored
3670		 * in the shadow RAM
3671		 */
3672		ret_val = e1000_read_flash_dword_ich8lan(hw,
3673							 i + old_bank_offset,
3674							 &dword);
3675
3676		if (dev_spec->shadow_ram[i].modified) {
3677			dword &= 0xffff0000;
3678			dword |= (dev_spec->shadow_ram[i].value & 0xffff);
3679		}
3680		if (dev_spec->shadow_ram[i + 1].modified) {
3681			dword &= 0x0000ffff;
3682			dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff)
3683				  << 16);
3684		}
3685		if (ret_val)
3686			break;
3687
3688		/* If the word is 0x13, then make sure the signature bits
3689		 * (15:14) are 11b until the commit has completed.
3690		 * This will allow us to write 10b which indicates the
3691		 * signature is valid.  We want to do this after the write
3692		 * has completed so that we don't mark the segment valid
3693		 * while the write is still in progress
3694		 */
3695		if (i == E1000_ICH_NVM_SIG_WORD - 1)
3696			dword |= E1000_ICH_NVM_SIG_MASK << 16;
3697
3698		/* Convert offset to bytes. */
3699		act_offset = (i + new_bank_offset) << 1;
3700
3701		usleep_range(100, 200);
3702
3703		/* Write the data to the new bank. Offset in words */
3704		act_offset = i + new_bank_offset;
3705		ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset,
3706								dword);
3707		if (ret_val)
3708			break;
3709	}
3710
3711	/* Don't bother writing the segment valid bits if sector
3712	 * programming failed.
3713	 */
3714	if (ret_val) {
3715		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
3716		e_dbg("Flash commit failed.\n");
3717		goto release;
3718	}
3719
3720	/* Finally validate the new segment by setting bit 15:14
3721	 * to 10b in word 0x13 , this can be done without an
3722	 * erase as well since these bits are 11 to start with
3723	 * and we need to change bit 14 to 0b
3724	 */
3725	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
3726
3727	/*offset in words but we read dword */
3728	--act_offset;
3729	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
3730
3731	if (ret_val)
3732		goto release;
3733
3734	dword &= 0xBFFFFFFF;
3735	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
3736
3737	if (ret_val)
3738		goto release;
3739
3740	/* And invalidate the previously valid segment by setting
3741	 * its signature word (0x13) high_byte to 0b. This can be
3742	 * done without an erase because flash erase sets all bits
3743	 * to 1's. We can write 1's to 0's without an erase
3744	 */
3745	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
3746
3747	/* offset in words but we read dword */
3748	act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1;
3749	ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword);
3750
3751	if (ret_val)
3752		goto release;
3753
3754	dword &= 0x00FFFFFF;
3755	ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword);
3756
3757	if (ret_val)
3758		goto release;
3759
3760	/* Great!  Everything worked, we can now clear the cached entries. */
3761	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
3762		dev_spec->shadow_ram[i].modified = false;
3763		dev_spec->shadow_ram[i].value = 0xFFFF;
3764	}
3765
3766release:
3767	nvm->ops.release(hw);
3768
3769	/* Reload the EEPROM, or else modifications will not appear
3770	 * until after the next adapter reset.
3771	 */
3772	if (!ret_val) {
3773		nvm->ops.reload(hw);
3774		usleep_range(10000, 20000);
3775	}
3776
3777out:
3778	if (ret_val)
3779		e_dbg("NVM update error: %d\n", ret_val);
3780
3781	return ret_val;
3782}
3783
/**
 *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
 *  @hw: pointer to the HW structure
 *
 *  The NVM checksum is updated by calling the generic update_nvm_checksum,
 *  which writes the checksum to the shadow ram.  The changes in the shadow
 *  ram are then committed to the EEPROM by processing each bank at a time
 *  checking for the modified bit and writing only the pending changes.
 *  After a successful commit, the shadow ram is cleared and is ready for
 *  future writes.
 **/
static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
	s32 ret_val;
	u16 data = 0;

	/* Refresh the checksum word in the shadow RAM first. */
	ret_val = e1000e_update_nvm_checksum_generic(hw);
	if (ret_val)
		goto out;

	/* Only software-managed flash NVM uses the commit path below. */
	if (nvm->type != e1000_nvm_flash_sw)
		goto out;

	nvm->ops.acquire(hw);

	/* We're writing to the opposite bank so if we're on bank 1,
	 * write to bank 0 etc.  We also need to erase the segment that
	 * is going to be written
	 */
	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
	if (ret_val) {
		e_dbg("Could not detect valid bank, assuming bank 0\n");
		bank = 0;
	}

	if (bank == 0) {
		new_bank_offset = nvm->flash_bank_size;
		old_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
		if (ret_val)
			goto release;
	} else {
		old_bank_offset = nvm->flash_bank_size;
		new_bank_offset = 0;
		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
		if (ret_val)
			goto release;
	}
	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
		/* Take the pending value from the shadow RAM, or copy the
		 * existing word over from the old bank.
		 */
		if (dev_spec->shadow_ram[i].modified) {
			data = dev_spec->shadow_ram[i].value;
		} else {
			ret_val = e1000_read_flash_word_ich8lan(hw, i +
								old_bank_offset,
								&data);
			if (ret_val)
				break;
		}

		/* If the word is 0x13, then make sure the signature bits
		 * (15:14) are 11b until the commit has completed.
		 * This will allow us to write 10b which indicates the
		 * signature is valid.  We want to do this after the write
		 * has completed so that we don't mark the segment valid
		 * while the write is still in progress
		 */
		if (i == E1000_ICH_NVM_SIG_WORD)
			data |= E1000_ICH_NVM_SIG_MASK;

		/* Convert offset to bytes. */
		act_offset = (i + new_bank_offset) << 1;

		usleep_range(100, 200);
		/* Write the bytes to the new bank. */
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset,
							       (u8)data);
		if (ret_val)
			break;

		usleep_range(100, 200);
		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
							       act_offset + 1,
							       (u8)(data >> 8));
		if (ret_val)
			break;
	}

	/* Don't bother writing the segment valid bits if sector
	 * programming failed.
	 */
	if (ret_val) {
		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
		e_dbg("Flash commit failed.\n");
		goto release;
	}

	/* Finally validate the new segment by setting bit 15:14
	 * to 10b in word 0x13 , this can be done without an
	 * erase as well since these bits are 11 to start with
	 * and we need to change bit 14 to 0b
	 */
	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
	if (ret_val)
		goto release;

	/* Clear bit 14; write only the high byte of the signature word. */
	data &= 0xBFFF;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
						       act_offset * 2 + 1,
						       (u8)(data >> 8));
	if (ret_val)
		goto release;

	/* And invalidate the previously valid segment by setting
	 * its signature word (0x13) high_byte to 0b. This can be
	 * done without an erase because flash erase sets all bits
	 * to 1's. We can write 1's to 0's without an erase
	 */
	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
	if (ret_val)
		goto release;

	/* Great!  Everything worked, we can now clear the cached entries. */
	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
		dev_spec->shadow_ram[i].modified = false;
		dev_spec->shadow_ram[i].value = 0xFFFF;
	}

release:
	nvm->ops.release(hw);

	/* Reload the EEPROM, or else modifications will not appear
	 * until after the next adapter reset.
	 */
	if (!ret_val) {
		nvm->ops.reload(hw);
		usleep_range(10000, 20000);
	}

out:
	if (ret_val)
		e_dbg("NVM update error: %d\n", ret_val);

	return ret_val;
}
3934
3935/**
3936 *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
3937 *  @hw: pointer to the HW structure
3938 *
3939 *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
3940 *  If the bit is 0, that the EEPROM had been modified, but the checksum was not
3941 *  calculated, in which case we need to calculate the checksum and set bit 6.
3942 **/
3943static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
3944{
3945	s32 ret_val;
3946	u16 data;
3947	u16 word;
3948	u16 valid_csum_mask;
3949
3950	/* Read NVM and check Invalid Image CSUM bit.  If this bit is 0,
3951	 * the checksum needs to be fixed.  This bit is an indication that
3952	 * the NVM was prepared by OEM software and did not calculate
3953	 * the checksum...a likely scenario.
3954	 */
3955	switch (hw->mac.type) {
3956	case e1000_pch_lpt:
3957	case e1000_pch_spt:
3958		word = NVM_COMPAT;
3959		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
3960		break;
3961	default:
3962		word = NVM_FUTURE_INIT_WORD1;
3963		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
3964		break;
3965	}
3966
3967	ret_val = e1000_read_nvm(hw, word, 1, &data);
3968	if (ret_val)
3969		return ret_val;
3970
3971	if (!(data & valid_csum_mask)) {
3972		data |= valid_csum_mask;
3973		ret_val = e1000_write_nvm(hw, word, 1, &data);
3974		if (ret_val)
3975			return ret_val;
3976		ret_val = e1000e_update_nvm_checksum(hw);
3977		if (ret_val)
3978			return ret_val;
3979	}
3980
3981	return e1000e_validate_nvm_checksum_generic(hw);
3982}
3983
/**
 *  e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
 *  @hw: pointer to the HW structure
 *
 *  To prevent malicious write/erase of the NVM, set it to be read-only
 *  so that the hardware ignores all write/erase cycles of the NVM via
 *  the flash control registers.  The shadow-ram copy of the NVM will
 *  still be updated, however any updates to this copy will not stick
 *  across driver reloads.
 *
 *  Note: once FLOCKDN is set the protection cannot be lifted without a
 *  hardware reset, so this is a one-way operation for the session.
 **/
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	union ich8_flash_protected_range pr0;
	union ich8_hws_flash_status hsfsts;
	u32 gfpreg;

	nvm->ops.acquire(hw);

	/* GFPREG supplies the base (low word) and limit (bits 16+) of the
	 * GbE flash region — presumably in sector units per the flash
	 * controller spec; TODO confirm against the PCH datasheet.
	 */
	gfpreg = er32flash(ICH_FLASH_GFPREG);

	/* Write-protect GbE Sector of NVM */
	pr0.regval = er32flash(ICH_FLASH_PR0);
	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
	pr0.range.wpe = true;
	ew32flash(ICH_FLASH_PR0, pr0.regval);

	/* Lock down a subset of GbE Flash Control Registers, e.g.
	 * PR0 to prevent the write-protection from being lifted.
	 * Once FLOCKDN is set, the registers protected by it cannot
	 * be written until FLOCKDN is cleared by a hardware reset.
	 */
	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
	hsfsts.hsf_status.flockdn = true;
	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);

	nvm->ops.release(hw);
}
4023
4024/**
4025 *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
4026 *  @hw: pointer to the HW structure
4027 *  @offset: The offset (in bytes) of the byte/word to read.
4028 *  @size: Size of data to read, 1=byte 2=word
4029 *  @data: The byte(s) to write to the NVM.
4030 *
4031 *  Writes one/two bytes to the NVM using the flash access registers.
4032 **/
4033static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
4034					  u8 size, u16 data)
4035{
4036	union ich8_hws_flash_status hsfsts;
4037	union ich8_hws_flash_ctrl hsflctl;
4038	u32 flash_linear_addr;
4039	u32 flash_data = 0;
4040	s32 ret_val;
4041	u8 count = 0;
4042
4043	if (hw->mac.type == e1000_pch_spt) {
4044		if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4045			return -E1000_ERR_NVM;
4046	} else {
4047		if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
4048			return -E1000_ERR_NVM;
4049	}
4050
4051	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4052			     hw->nvm.flash_base_addr);
4053
4054	do {
4055		udelay(1);
4056		/* Steps */
4057		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4058		if (ret_val)
4059			break;
4060		/* In SPT, This register is in Lan memory space, not
4061		 * flash.  Therefore, only 32 bit access is supported
4062		 */
4063		if (hw->mac.type == e1000_pch_spt)
4064			hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16;
4065		else
4066			hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4067
4068		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
4069		hsflctl.hsf_ctrl.fldbcount = size - 1;
4070		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4071		/* In SPT, This register is in Lan memory space,
4072		 * not flash.  Therefore, only 32 bit access is
4073		 * supported
4074		 */
4075		if (hw->mac.type == e1000_pch_spt)
4076			ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4077		else
4078			ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4079
4080		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4081
4082		if (size == 1)
4083			flash_data = (u32)data & 0x00FF;
4084		else
4085			flash_data = (u32)data;
4086
4087		ew32flash(ICH_FLASH_FDATA0, flash_data);
4088
4089		/* check if FCERR is set to 1 , if set to 1, clear it
4090		 * and try the whole sequence a few more times else done
4091		 */
4092		ret_val =
4093		    e1000_flash_cycle_ich8lan(hw,
4094					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4095		if (!ret_val)
4096			break;
4097
4098		/* If we're here, then things are most likely
4099		 * completely hosed, but if the error condition
4100		 * is detected, it won't hurt to give it another
4101		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4102		 */
4103		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4104		if (hsfsts.hsf_status.flcerr)
4105			/* Repeat for some time before giving up. */
4106			continue;
4107		if (!hsfsts.hsf_status.flcdone) {
4108			e_dbg("Timeout error - flash cycle did not complete.\n");
4109			break;
4110		}
4111	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4112
4113	return ret_val;
4114}
4115
4116/**
4117*  e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM
4118*  @hw: pointer to the HW structure
4119*  @offset: The offset (in bytes) of the dwords to read.
4120*  @data: The 4 bytes to write to the NVM.
4121*
4122*  Writes one/two/four bytes to the NVM using the flash access registers.
4123**/
4124static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset,
4125					    u32 data)
4126{
4127	union ich8_hws_flash_status hsfsts;
4128	union ich8_hws_flash_ctrl hsflctl;
4129	u32 flash_linear_addr;
4130	s32 ret_val;
4131	u8 count = 0;
4132
4133	if (hw->mac.type == e1000_pch_spt) {
4134		if (offset > ICH_FLASH_LINEAR_ADDR_MASK)
4135			return -E1000_ERR_NVM;
4136	}
4137	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
4138			     hw->nvm.flash_base_addr);
4139	do {
4140		udelay(1);
4141		/* Steps */
4142		ret_val = e1000_flash_cycle_init_ich8lan(hw);
4143		if (ret_val)
4144			break;
4145
4146		/* In SPT, This register is in Lan memory space, not
4147		 * flash.  Therefore, only 32 bit access is supported
4148		 */
4149		if (hw->mac.type == e1000_pch_spt)
4150			hsflctl.regval = er32flash(ICH_FLASH_HSFSTS)
4151			    >> 16;
4152		else
4153			hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4154
4155		hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1;
4156		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
4157
4158		/* In SPT, This register is in Lan memory space,
4159		 * not flash.  Therefore, only 32 bit access is
4160		 * supported
4161		 */
4162		if (hw->mac.type == e1000_pch_spt)
4163			ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16);
4164		else
4165			ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4166
4167		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4168
4169		ew32flash(ICH_FLASH_FDATA0, data);
4170
4171		/* check if FCERR is set to 1 , if set to 1, clear it
4172		 * and try the whole sequence a few more times else done
4173		 */
4174		ret_val =
4175		   e1000_flash_cycle_ich8lan(hw,
4176					     ICH_FLASH_WRITE_COMMAND_TIMEOUT);
4177
4178		if (!ret_val)
4179			break;
4180
4181		/* If we're here, then things are most likely
4182		 * completely hosed, but if the error condition
4183		 * is detected, it won't hurt to give it another
4184		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
4185		 */
4186		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4187
4188		if (hsfsts.hsf_status.flcerr)
4189			/* Repeat for some time before giving up. */
4190			continue;
4191		if (!hsfsts.hsf_status.flcdone) {
4192			e_dbg("Timeout error - flash cycle did not complete.\n");
4193			break;
4194		}
4195	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
4196
4197	return ret_val;
4198}
4199
4200/**
4201 *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
4202 *  @hw: pointer to the HW structure
4203 *  @offset: The index of the byte to read.
4204 *  @data: The byte to write to the NVM.
4205 *
4206 *  Writes a single byte to the NVM using the flash access registers.
4207 **/
4208static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
4209					  u8 data)
4210{
4211	u16 word = (u16)data;
4212
4213	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
4214}
4215
4216/**
4217*  e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM
4218*  @hw: pointer to the HW structure
4219*  @offset: The offset of the word to write.
4220*  @dword: The dword to write to the NVM.
4221*
4222*  Writes a single dword to the NVM using the flash access registers.
4223*  Goes through a retry algorithm before giving up.
4224**/
4225static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw,
4226						 u32 offset, u32 dword)
4227{
4228	s32 ret_val;
4229	u16 program_retries;
4230
4231	/* Must convert word offset into bytes. */
4232	offset <<= 1;
4233	ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4234
4235	if (!ret_val)
4236		return ret_val;
4237	for (program_retries = 0; program_retries < 100; program_retries++) {
4238		e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset);
4239		usleep_range(100, 200);
4240		ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword);
4241		if (!ret_val)
4242			break;
4243	}
4244	if (program_retries == 100)
4245		return -E1000_ERR_NVM;
4246
4247	return 0;
4248}
4249
4250/**
4251 *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
4252 *  @hw: pointer to the HW structure
4253 *  @offset: The offset of the byte to write.
4254 *  @byte: The byte to write to the NVM.
4255 *
4256 *  Writes a single byte to the NVM using the flash access registers.
4257 *  Goes through a retry algorithm before giving up.
4258 **/
4259static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
4260						u32 offset, u8 byte)
4261{
4262	s32 ret_val;
4263	u16 program_retries;
4264
4265	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4266	if (!ret_val)
4267		return ret_val;
4268
4269	for (program_retries = 0; program_retries < 100; program_retries++) {
4270		e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
4271		usleep_range(100, 200);
4272		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
4273		if (!ret_val)
4274			break;
4275	}
4276	if (program_retries == 100)
4277		return -E1000_ERR_NVM;
4278
4279	return 0;
4280}
4281
4282/**
4283 *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
4284 *  @hw: pointer to the HW structure
4285 *  @bank: 0 for first bank, 1 for second bank, etc.
4286 *
4287 *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
4288 *  bank N is 4096 * N + flash_reg_addr.
4289 **/
4290static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
4291{
4292	struct e1000_nvm_info *nvm = &hw->nvm;
4293	union ich8_hws_flash_status hsfsts;
4294	union ich8_hws_flash_ctrl hsflctl;
4295	u32 flash_linear_addr;
4296	/* bank size is in 16bit words - adjust to bytes */
4297	u32 flash_bank_size = nvm->flash_bank_size * 2;
4298	s32 ret_val;
4299	s32 count = 0;
4300	s32 j, iteration, sector_size;
4301
4302	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4303
4304	/* Determine HW Sector size: Read BERASE bits of hw flash status
4305	 * register
4306	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
4307	 *     consecutive sectors.  The start index for the nth Hw sector
4308	 *     can be calculated as = bank * 4096 + n * 256
4309	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
4310	 *     The start index for the nth Hw sector can be calculated
4311	 *     as = bank * 4096
4312	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
4313	 *     (ich9 only, otherwise error condition)
4314	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
4315	 */
4316	switch (hsfsts.hsf_status.berasesz) {
4317	case 0:
4318		/* Hw sector size 256 */
4319		sector_size = ICH_FLASH_SEG_SIZE_256;
4320		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
4321		break;
4322	case 1:
4323		sector_size = ICH_FLASH_SEG_SIZE_4K;
4324		iteration = 1;
4325		break;
4326	case 2:
4327		sector_size = ICH_FLASH_SEG_SIZE_8K;
4328		iteration = 1;
4329		break;
4330	case 3:
4331		sector_size = ICH_FLASH_SEG_SIZE_64K;
4332		iteration = 1;
4333		break;
4334	default:
4335		return -E1000_ERR_NVM;
4336	}
4337
4338	/* Start with the base address, then add the sector offset. */
4339	flash_linear_addr = hw->nvm.flash_base_addr;
4340	flash_linear_addr += (bank) ? flash_bank_size : 0;
4341
4342	for (j = 0; j < iteration; j++) {
4343		do {
4344			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
4345
4346			/* Steps */
4347			ret_val = e1000_flash_cycle_init_ich8lan(hw);
4348			if (ret_val)
4349				return ret_val;
4350
4351			/* Write a value 11 (block Erase) in Flash
4352			 * Cycle field in hw flash control
4353			 */
4354			if (hw->mac.type == e1000_pch_spt)
4355				hsflctl.regval =
4356				    er32flash(ICH_FLASH_HSFSTS) >> 16;
4357			else
4358				hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
4359
4360			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
4361			if (hw->mac.type == e1000_pch_spt)
4362				ew32flash(ICH_FLASH_HSFSTS,
4363					  hsflctl.regval << 16);
4364			else
4365				ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
4366
4367			/* Write the last 24 bits of an index within the
4368			 * block into Flash Linear address field in Flash
4369			 * Address.
4370			 */
4371			flash_linear_addr += (j * sector_size);
4372			ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
4373
4374			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
4375			if (!ret_val)
4376				break;
4377
4378			/* Check if FCERR is set to 1.  If 1,
4379			 * clear it and try the whole sequence
4380			 * a few more times else Done
4381			 */
4382			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
4383			if (hsfsts.hsf_status.flcerr)
4384				/* repeat for some time before giving up */
4385				continue;
4386			else if (!hsfsts.hsf_status.flcdone)
4387				return ret_val;
4388		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
4389	}
4390
4391	return 0;
4392}
4393
4394/**
4395 *  e1000_valid_led_default_ich8lan - Set the default LED settings
4396 *  @hw: pointer to the HW structure
4397 *  @data: Pointer to the LED settings
4398 *
4399 *  Reads the LED default settings from the NVM to data.  If the NVM LED
4400 *  settings is all 0's or F's, set the LED default to a valid LED default
4401 *  setting.
4402 **/
4403static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
4404{
4405	s32 ret_val;
4406
4407	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
4408	if (ret_val) {
4409		e_dbg("NVM Read Error\n");
4410		return ret_val;
4411	}
4412
4413	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
4414		*data = ID_LED_DEFAULT_ICH8LAN;
4415
4416	return 0;
4417}
4418
4419/**
4420 *  e1000_id_led_init_pchlan - store LED configurations
4421 *  @hw: pointer to the HW structure
4422 *
4423 *  PCH does not control LEDs via the LEDCTL register, rather it uses
4424 *  the PHY LED configuration register.
4425 *
4426 *  PCH also does not have an "always on" or "always off" mode which
4427 *  complicates the ID feature.  Instead of using the "on" mode to indicate
4428 *  in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
4429 *  use "link_up" mode.  The LEDs will still ID on request if there is no
4430 *  link based on logic in e1000_led_[on|off]_pchlan().
4431 **/
4432static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
4433{
4434	struct e1000_mac_info *mac = &hw->mac;
4435	s32 ret_val;
4436	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
4437	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
4438	u16 data, i, temp, shift;
4439
4440	/* Get default ID LED modes */
4441	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
4442	if (ret_val)
4443		return ret_val;
4444
4445	mac->ledctl_default = er32(LEDCTL);
4446	mac->ledctl_mode1 = mac->ledctl_default;
4447	mac->ledctl_mode2 = mac->ledctl_default;
4448
4449	for (i = 0; i < 4; i++) {
4450		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
4451		shift = (i * 5);
4452		switch (temp) {
4453		case ID_LED_ON1_DEF2:
4454		case ID_LED_ON1_ON2:
4455		case ID_LED_ON1_OFF2:
4456			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4457			mac->ledctl_mode1 |= (ledctl_on << shift);
4458			break;
4459		case ID_LED_OFF1_DEF2:
4460		case ID_LED_OFF1_ON2:
4461		case ID_LED_OFF1_OFF2:
4462			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
4463			mac->ledctl_mode1 |= (ledctl_off << shift);
4464			break;
4465		default:
4466			/* Do nothing */
4467			break;
4468		}
4469		switch (temp) {
4470		case ID_LED_DEF1_ON2:
4471		case ID_LED_ON1_ON2:
4472		case ID_LED_OFF1_ON2:
4473			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4474			mac->ledctl_mode2 |= (ledctl_on << shift);
4475			break;
4476		case ID_LED_DEF1_OFF2:
4477		case ID_LED_ON1_OFF2:
4478		case ID_LED_OFF1_OFF2:
4479			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
4480			mac->ledctl_mode2 |= (ledctl_off << shift);
4481			break;
4482		default:
4483			/* Do nothing */
4484			break;
4485		}
4486	}
4487
4488	return 0;
4489}
4490
4491/**
4492 *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
4493 *  @hw: pointer to the HW structure
4494 *
4495 *  ICH8 use the PCI Express bus, but does not contain a PCI Express Capability
4496 *  register, so the the bus width is hard coded.
4497 **/
4498static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
4499{
4500	struct e1000_bus_info *bus = &hw->bus;
4501	s32 ret_val;
4502
4503	ret_val = e1000e_get_bus_info_pcie(hw);
4504
4505	/* ICH devices are "PCI Express"-ish.  They have
4506	 * a configuration space, but do not contain
4507	 * PCI Express Capability registers, so bus width
4508	 * must be hardcoded.
4509	 */
4510	if (bus->width == e1000_bus_width_unknown)
4511		bus->width = e1000_bus_width_pcie_x1;
4512
4513	return ret_val;
4514}
4515
4516/**
4517 *  e1000_reset_hw_ich8lan - Reset the hardware
4518 *  @hw: pointer to the HW structure
4519 *
4520 *  Does a full reset of the hardware which includes a reset of the PHY and
4521 *  MAC.
4522 **/
4523static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
4524{
4525	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
4526	u16 kum_cfg;
4527	u32 ctrl, reg;
4528	s32 ret_val;
4529
4530	/* Prevent the PCI-E bus from sticking if there is no TLP connection
4531	 * on the last TLP read/write transaction when MAC is reset.
4532	 */
4533	ret_val = e1000e_disable_pcie_master(hw);
4534	if (ret_val)
4535		e_dbg("PCI-E Master disable polling has failed.\n");
4536
4537	e_dbg("Masking off all interrupts\n");
4538	ew32(IMC, 0xffffffff);
4539
4540	/* Disable the Transmit and Receive units.  Then delay to allow
4541	 * any pending transactions to complete before we hit the MAC
4542	 * with the global reset.
4543	 */
4544	ew32(RCTL, 0);
4545	ew32(TCTL, E1000_TCTL_PSP);
4546	e1e_flush();
4547
4548	usleep_range(10000, 20000);
4549
4550	/* Workaround for ICH8 bit corruption issue in FIFO memory */
4551	if (hw->mac.type == e1000_ich8lan) {
4552		/* Set Tx and Rx buffer allocation to 8k apiece. */
4553		ew32(PBA, E1000_PBA_8K);
4554		/* Set Packet Buffer Size to 16k. */
4555		ew32(PBS, E1000_PBS_16K);
4556	}
4557
4558	if (hw->mac.type == e1000_pchlan) {
4559		/* Save the NVM K1 bit setting */
4560		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
4561		if (ret_val)
4562			return ret_val;
4563
4564		if (kum_cfg & E1000_NVM_K1_ENABLE)
4565			dev_spec->nvm_k1_enabled = true;
4566		else
4567			dev_spec->nvm_k1_enabled = false;
4568	}
4569
4570	ctrl = er32(CTRL);
4571
4572	if (!hw->phy.ops.check_reset_block(hw)) {
4573		/* Full-chip reset requires MAC and PHY reset at the same
4574		 * time to make sure the interface between MAC and the
4575		 * external PHY is reset.
4576		 */
4577		ctrl |= E1000_CTRL_PHY_RST;
4578
4579		/* Gate automatic PHY configuration by hardware on
4580		 * non-managed 82579
4581		 */
4582		if ((hw->mac.type == e1000_pch2lan) &&
4583		    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
4584			e1000_gate_hw_phy_config_ich8lan(hw, true);
4585	}
4586	ret_val = e1000_acquire_swflag_ich8lan(hw);
4587	e_dbg("Issuing a global reset to ich8lan\n");
4588	ew32(CTRL, (ctrl | E1000_CTRL_RST));
4589	/* cannot issue a flush here because it hangs the hardware */
4590	msleep(20);
4591
4592	/* Set Phy Config Counter to 50msec */
4593	if (hw->mac.type == e1000_pch2lan) {
4594		reg = er32(FEXTNVM3);
4595		reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
4596		reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
4597		ew32(FEXTNVM3, reg);
4598	}
4599
4600	if (!ret_val)
4601		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
4602
4603	if (ctrl & E1000_CTRL_PHY_RST) {
4604		ret_val = hw->phy.ops.get_cfg_done(hw);
4605		if (ret_val)
4606			return ret_val;
4607
4608		ret_val = e1000_post_phy_reset_ich8lan(hw);
4609		if (ret_val)
4610			return ret_val;
4611	}
4612
4613	/* For PCH, this write will make sure that any noise
4614	 * will be detected as a CRC error and be dropped rather than show up
4615	 * as a bad packet to the DMA engine.
4616	 */
4617	if (hw->mac.type == e1000_pchlan)
4618		ew32(CRC_OFFSET, 0x65656565);
4619
4620	ew32(IMC, 0xffffffff);
4621	er32(ICR);
4622
4623	reg = er32(KABGTXD);
4624	reg |= E1000_KABGTXD_BGSQLBIAS;
4625	ew32(KABGTXD, reg);
4626
4627	return 0;
4628}
4629
4630/**
4631 *  e1000_init_hw_ich8lan - Initialize the hardware
4632 *  @hw: pointer to the HW structure
4633 *
4634 *  Prepares the hardware for transmit and receive by doing the following:
4635 *   - initialize hardware bits
4636 *   - initialize LED identification
4637 *   - setup receive address registers
4638 *   - setup flow control
4639 *   - setup transmit descriptors
4640 *   - clear statistics
4641 **/
4642static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
4643{
4644	struct e1000_mac_info *mac = &hw->mac;
4645	u32 ctrl_ext, txdctl, snoop;
4646	s32 ret_val;
4647	u16 i;
4648
4649	e1000_initialize_hw_bits_ich8lan(hw);
4650
4651	/* Initialize identification LED */
4652	ret_val = mac->ops.id_led_init(hw);
4653	/* An error is not fatal and we should not stop init due to this */
4654	if (ret_val)
4655		e_dbg("Error initializing identification LED\n");
4656
4657	/* Setup the receive address. */
4658	e1000e_init_rx_addrs(hw, mac->rar_entry_count);
4659
4660	/* Zero out the Multicast HASH table */
4661	e_dbg("Zeroing the MTA\n");
4662	for (i = 0; i < mac->mta_reg_count; i++)
4663		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
4664
4665	/* The 82578 Rx buffer will stall if wakeup is enabled in host and
4666	 * the ME.  Disable wakeup by clearing the host wakeup bit.
4667	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
4668	 */
4669	if (hw->phy.type == e1000_phy_82578) {
4670		e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
4671		i &= ~BM_WUC_HOST_WU_BIT;
4672		e1e_wphy(hw, BM_PORT_GEN_CFG, i);
4673		ret_val = e1000_phy_hw_reset_ich8lan(hw);
4674		if (ret_val)
4675			return ret_val;
4676	}
4677
4678	/* Setup link and flow control */
4679	ret_val = mac->ops.setup_link(hw);
4680
4681	/* Set the transmit descriptor write-back policy for both queues */
4682	txdctl = er32(TXDCTL(0));
4683	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4684		  E1000_TXDCTL_FULL_TX_DESC_WB);
4685	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4686		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4687	ew32(TXDCTL(0), txdctl);
4688	txdctl = er32(TXDCTL(1));
4689	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
4690		  E1000_TXDCTL_FULL_TX_DESC_WB);
4691	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
4692		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
4693	ew32(TXDCTL(1), txdctl);
4694
4695	/* ICH8 has opposite polarity of no_snoop bits.
4696	 * By default, we should use snoop behavior.
4697	 */
4698	if (mac->type == e1000_ich8lan)
4699		snoop = PCIE_ICH8_SNOOP_ALL;
4700	else
4701		snoop = (u32)~(PCIE_NO_SNOOP_ALL);
4702	e1000e_set_pcie_no_snoop(hw, snoop);
4703
4704	ctrl_ext = er32(CTRL_EXT);
4705	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
4706	ew32(CTRL_EXT, ctrl_ext);
4707
4708	/* Clear all of the statistics registers (clear on read).  It is
4709	 * important that we do this after we have tried to establish link
4710	 * because the symbol error count will increment wildly if there
4711	 * is no link.
4712	 */
4713	e1000_clear_hw_cntrs_ich8lan(hw);
4714
4715	return ret_val;
4716}
4717
4718/**
4719 *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
4720 *  @hw: pointer to the HW structure
4721 *
4722 *  Sets/Clears required hardware bits necessary for correctly setting up the
4723 *  hardware for transmit and receive.
4724 **/
4725static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
4726{
4727	u32 reg;
4728
4729	/* Extended Device Control */
4730	reg = er32(CTRL_EXT);
4731	reg |= (1 << 22);
4732	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
4733	if (hw->mac.type >= e1000_pchlan)
4734		reg |= E1000_CTRL_EXT_PHYPDEN;
4735	ew32(CTRL_EXT, reg);
4736
4737	/* Transmit Descriptor Control 0 */
4738	reg = er32(TXDCTL(0));
4739	reg |= (1 << 22);
4740	ew32(TXDCTL(0), reg);
4741
4742	/* Transmit Descriptor Control 1 */
4743	reg = er32(TXDCTL(1));
4744	reg |= (1 << 22);
4745	ew32(TXDCTL(1), reg);
4746
4747	/* Transmit Arbitration Control 0 */
4748	reg = er32(TARC(0));
4749	if (hw->mac.type == e1000_ich8lan)
4750		reg |= (1 << 28) | (1 << 29);
4751	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
4752	ew32(TARC(0), reg);
4753
4754	/* Transmit Arbitration Control 1 */
4755	reg = er32(TARC(1));
4756	if (er32(TCTL) & E1000_TCTL_MULR)
4757		reg &= ~(1 << 28);
4758	else
4759		reg |= (1 << 28);
4760	reg |= (1 << 24) | (1 << 26) | (1 << 30);
4761	ew32(TARC(1), reg);
4762
4763	/* Device Status */
4764	if (hw->mac.type == e1000_ich8lan) {
4765		reg = er32(STATUS);
4766		reg &= ~(1 << 31);
4767		ew32(STATUS, reg);
4768	}
4769
4770	/* work-around descriptor data corruption issue during nfs v2 udp
4771	 * traffic, just disable the nfs filtering capability
4772	 */
4773	reg = er32(RFCTL);
4774	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
4775
4776	/* Disable IPv6 extension header parsing because some malformed
4777	 * IPv6 headers can hang the Rx.
4778	 */
4779	if (hw->mac.type == e1000_ich8lan)
4780		reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
4781	ew32(RFCTL, reg);
4782
4783	/* Enable ECC on Lynxpoint */
4784	if ((hw->mac.type == e1000_pch_lpt) ||
4785	    (hw->mac.type == e1000_pch_spt)) {
4786		reg = er32(PBECCSTS);
4787		reg |= E1000_PBECCSTS_ECC_ENABLE;
4788		ew32(PBECCSTS, reg);
4789
4790		reg = er32(CTRL);
4791		reg |= E1000_CTRL_MEHE;
4792		ew32(CTRL, reg);
4793	}
4794}
4795
4796/**
4797 *  e1000_setup_link_ich8lan - Setup flow control and link settings
4798 *  @hw: pointer to the HW structure
4799 *
4800 *  Determines which flow control settings to use, then configures flow
4801 *  control.  Calls the appropriate media-specific link configuration
4802 *  function.  Assuming the adapter has a valid link partner, a valid link
4803 *  should be established.  Assumes the hardware has previously been reset
4804 *  and the transmitter and receiver are not enabled.
4805 **/
4806static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
4807{
4808	s32 ret_val;
4809
4810	if (hw->phy.ops.check_reset_block(hw))
4811		return 0;
4812
4813	/* ICH parts do not have a word in the NVM to determine
4814	 * the default flow control setting, so we explicitly
4815	 * set it to full.
4816	 */
4817	if (hw->fc.requested_mode == e1000_fc_default) {
4818		/* Workaround h/w hang when Tx flow control enabled */
4819		if (hw->mac.type == e1000_pchlan)
4820			hw->fc.requested_mode = e1000_fc_rx_pause;
4821		else
4822			hw->fc.requested_mode = e1000_fc_full;
4823	}
4824
4825	/* Save off the requested flow control mode for use later.  Depending
4826	 * on the link partner's capabilities, we may or may not use this mode.
4827	 */
4828	hw->fc.current_mode = hw->fc.requested_mode;
4829
4830	e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
4831
4832	/* Continue to configure the copper link. */
4833	ret_val = hw->mac.ops.setup_physical_interface(hw);
4834	if (ret_val)
4835		return ret_val;
4836
4837	ew32(FCTTV, hw->fc.pause_time);
4838	if ((hw->phy.type == e1000_phy_82578) ||
4839	    (hw->phy.type == e1000_phy_82579) ||
4840	    (hw->phy.type == e1000_phy_i217) ||
4841	    (hw->phy.type == e1000_phy_82577)) {
4842		ew32(FCRTV_PCH, hw->fc.refresh_time);
4843
4844		ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
4845				   hw->fc.pause_time);
4846		if (ret_val)
4847			return ret_val;
4848	}
4849
4850	return e1000e_set_fc_watermarks(hw);
4851}
4852
4853/**
4854 *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
4855 *  @hw: pointer to the HW structure
4856 *
4857 *  Configures the kumeran interface to the PHY to wait the appropriate time
4858 *  when polling the PHY, then call the generic setup_copper_link to finish
4859 *  configuring the copper link.
4860 **/
4861static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
4862{
4863	u32 ctrl;
4864	s32 ret_val;
4865	u16 reg_data;
4866
4867	ctrl = er32(CTRL);
4868	ctrl |= E1000_CTRL_SLU;
4869	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4870	ew32(CTRL, ctrl);
4871
4872	/* Set the mac to wait the maximum time between each iteration
4873	 * and increase the max iterations when polling the phy;
4874	 * this fixes erroneous timeouts at 10Mbps.
4875	 */
4876	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
4877	if (ret_val)
4878		return ret_val;
4879	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
4880				       &reg_data);
4881	if (ret_val)
4882		return ret_val;
4883	reg_data |= 0x3F;
4884	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
4885					reg_data);
4886	if (ret_val)
4887		return ret_val;
4888
4889	switch (hw->phy.type) {
4890	case e1000_phy_igp_3:
4891		ret_val = e1000e_copper_link_setup_igp(hw);
4892		if (ret_val)
4893			return ret_val;
4894		break;
4895	case e1000_phy_bm:
4896	case e1000_phy_82578:
4897		ret_val = e1000e_copper_link_setup_m88(hw);
4898		if (ret_val)
4899			return ret_val;
4900		break;
4901	case e1000_phy_82577:
4902	case e1000_phy_82579:
4903		ret_val = e1000_copper_link_setup_82577(hw);
4904		if (ret_val)
4905			return ret_val;
4906		break;
4907	case e1000_phy_ife:
4908		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
4909		if (ret_val)
4910			return ret_val;
4911
4912		reg_data &= ~IFE_PMC_AUTO_MDIX;
4913
4914		switch (hw->phy.mdix) {
4915		case 1:
4916			reg_data &= ~IFE_PMC_FORCE_MDIX;
4917			break;
4918		case 2:
4919			reg_data |= IFE_PMC_FORCE_MDIX;
4920			break;
4921		case 0:
4922		default:
4923			reg_data |= IFE_PMC_AUTO_MDIX;
4924			break;
4925		}
4926		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
4927		if (ret_val)
4928			return ret_val;
4929		break;
4930	default:
4931		break;
4932	}
4933
4934	return e1000e_setup_copper_link(hw);
4935}
4936
4937/**
4938 *  e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
4939 *  @hw: pointer to the HW structure
4940 *
4941 *  Calls the PHY specific link setup function and then calls the
4942 *  generic setup_copper_link to finish configuring the link for
4943 *  Lynxpoint PCH devices
4944 **/
4945static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
4946{
4947	u32 ctrl;
4948	s32 ret_val;
4949
4950	ctrl = er32(CTRL);
4951	ctrl |= E1000_CTRL_SLU;
4952	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
4953	ew32(CTRL, ctrl);
4954
4955	ret_val = e1000_copper_link_setup_82577(hw);
4956	if (ret_val)
4957		return ret_val;
4958
4959	return e1000e_setup_copper_link(hw);
4960}
4961
4962/**
4963 *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
4964 *  @hw: pointer to the HW structure
4965 *  @speed: pointer to store current link speed
4966 *  @duplex: pointer to store the current link duplex
4967 *
4968 *  Calls the generic get_speed_and_duplex to retrieve the current link
4969 *  information and then calls the Kumeran lock loss workaround for links at
4970 *  gigabit speeds.
4971 **/
4972static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
4973					  u16 *duplex)
4974{
4975	s32 ret_val;
4976
4977	ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
4978	if (ret_val)
4979		return ret_val;
4980
4981	if ((hw->mac.type == e1000_ich8lan) &&
4982	    (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
4983		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
4984	}
4985
4986	return ret_val;
4987}
4988
4989/**
4990 *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
4991 *  @hw: pointer to the HW structure
4992 *
4993 *  Work-around for 82566 Kumeran PCS lock loss:
4994 *  On link status change (i.e. PCI reset, speed change) and link is up and
4995 *  speed is gigabit-
4996 *    0) if workaround is optionally disabled do nothing
4997 *    1) wait 1ms for Kumeran link to come up
4998 *    2) check Kumeran Diagnostic register PCS lock loss bit
4999 *    3) if not set the link is locked (all is good), otherwise...
5000 *    4) reset the PHY
5001 *    5) repeat up to 10 times
5002 *  Note: this is only called for IGP3 copper when speed is 1gb.
5003 **/
5004static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
5005{
5006	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5007	u32 phy_ctrl;
5008	s32 ret_val;
5009	u16 i, data;
5010	bool link;
5011
5012	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
5013		return 0;
5014
5015	/* Make sure link is up before proceeding.  If not just return.
5016	 * Attempting this while link is negotiating fouled up link
5017	 * stability
5018	 */
5019	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
5020	if (!link)
5021		return 0;
5022
5023	for (i = 0; i < 10; i++) {
5024		/* read once to clear */
5025		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
5026		if (ret_val)
5027			return ret_val;
5028		/* and again to get new status */
5029		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
5030		if (ret_val)
5031			return ret_val;
5032
5033		/* check for PCS lock */
5034		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
5035			return 0;
5036
5037		/* Issue PHY reset */
5038		e1000_phy_hw_reset(hw);
5039		mdelay(5);
5040	}
5041	/* Disable GigE link negotiation */
5042	phy_ctrl = er32(PHY_CTRL);
5043	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
5044		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5045	ew32(PHY_CTRL, phy_ctrl);
5046
5047	/* Call gig speed drop workaround on Gig disable before accessing
5048	 * any PHY registers
5049	 */
5050	e1000e_gig_downshift_workaround_ich8lan(hw);
5051
5052	/* unable to acquire PCS lock */
5053	return -E1000_ERR_PHY;
5054}
5055
5056/**
5057 *  e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
5058 *  @hw: pointer to the HW structure
5059 *  @state: boolean value used to set the current Kumeran workaround state
5060 *
5061 *  If ICH8, set the current Kumeran workaround state (enabled - true
5062 *  /disabled - false).
5063 **/
5064void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
5065						  bool state)
5066{
5067	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5068
5069	if (hw->mac.type != e1000_ich8lan) {
5070		e_dbg("Workaround applies to ICH8 only.\n");
5071		return;
5072	}
5073
5074	dev_spec->kmrn_lock_loss_workaround_enabled = state;
5075}
5076
5077/**
5078 *  e1000_ipg3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
5079 *  @hw: pointer to the HW structure
5080 *
5081 *  Workaround for 82566 power-down on D3 entry:
5082 *    1) disable gigabit link
5083 *    2) write VR power-down enable
5084 *    3) read it back
5085 *  Continue if successful, else issue LCD reset and repeat
5086 **/
5087void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
5088{
5089	u32 reg;
5090	u16 data;
5091	u8 retry = 0;
5092
5093	if (hw->phy.type != e1000_phy_igp_3)
5094		return;
5095
5096	/* Try the workaround twice (if needed) */
5097	do {
5098		/* Disable link */
5099		reg = er32(PHY_CTRL);
5100		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
5101			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
5102		ew32(PHY_CTRL, reg);
5103
5104		/* Call gig speed drop workaround on Gig disable before
5105		 * accessing any PHY registers
5106		 */
5107		if (hw->mac.type == e1000_ich8lan)
5108			e1000e_gig_downshift_workaround_ich8lan(hw);
5109
5110		/* Write VR power-down enable */
5111		e1e_rphy(hw, IGP3_VR_CTRL, &data);
5112		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5113		e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
5114
5115		/* Read it back and test */
5116		e1e_rphy(hw, IGP3_VR_CTRL, &data);
5117		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
5118		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
5119			break;
5120
5121		/* Issue PHY reset and repeat at most one more time */
5122		reg = er32(CTRL);
5123		ew32(CTRL, reg | E1000_CTRL_PHY_RST);
5124		retry++;
5125	} while (retry);
5126}
5127
5128/**
5129 *  e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
5130 *  @hw: pointer to the HW structure
5131 *
5132 *  Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
5133 *  LPLU, Gig disable, MDIC PHY reset):
5134 *    1) Set Kumeran Near-end loopback
5135 *    2) Clear Kumeran Near-end loopback
5136 *  Should only be called for ICH8[m] devices with any 1G Phy.
5137 **/
5138void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
5139{
5140	s32 ret_val;
5141	u16 reg_data;
5142
5143	if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
5144		return;
5145
5146	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5147				       &reg_data);
5148	if (ret_val)
5149		return;
5150	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
5151	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
5152					reg_data);
5153	if (ret_val)
5154		return;
5155	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
5156	e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
5157}
5158
5159/**
5160 *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
5161 *  @hw: pointer to the HW structure
5162 *
5163 *  During S0 to Sx transition, it is possible the link remains at gig
5164 *  instead of negotiating to a lower speed.  Before going to Sx, set
5165 *  'Gig Disable' to force link speed negotiation to a lower speed based on
5166 *  the LPLU setting in the NVM or custom setting.  For PCH and newer parts,
5167 *  the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
5168 *  needs to be written.
5169 *  Parts that support (and are linked to a partner which support) EEE in
5170 *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
5171 *  than 10Mbps w/o EEE.
5172 **/
5173void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
5174{
5175	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
5176	u32 phy_ctrl;
5177	s32 ret_val;
5178
5179	phy_ctrl = er32(PHY_CTRL);
5180	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
5181
5182	if (hw->phy.type == e1000_phy_i217) {
5183		u16 phy_reg, device_id = hw->adapter->pdev->device;
5184
5185		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
5186		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
5187		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
5188		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
5189		    (hw->mac.type == e1000_pch_spt)) {
5190			u32 fextnvm6 = er32(FEXTNVM6);
5191
5192			ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
5193		}
5194
5195		ret_val = hw->phy.ops.acquire(hw);
5196		if (ret_val)
5197			goto out;
5198
5199		if (!dev_spec->eee_disable) {
5200			u16 eee_advert;
5201
5202			ret_val =
5203			    e1000_read_emi_reg_locked(hw,
5204						      I217_EEE_ADVERTISEMENT,
5205						      &eee_advert);
5206			if (ret_val)
5207				goto release;
5208
5209			/* Disable LPLU if both link partners support 100BaseT
5210			 * EEE and 100Full is advertised on both ends of the
5211			 * link, and enable Auto Enable LPI since there will
5212			 * be no driver to enable LPI while in Sx.
5213			 */
5214			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
5215			    (dev_spec->eee_lp_ability &
5216			     I82579_EEE_100_SUPPORTED) &&
5217			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
5218				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
5219					      E1000_PHY_CTRL_NOND0A_LPLU);
5220
5221				/* Set Auto Enable LPI after link up */
5222				e1e_rphy_locked(hw,
5223						I217_LPI_GPIO_CTRL, &phy_reg);
5224				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5225				e1e_wphy_locked(hw,
5226						I217_LPI_GPIO_CTRL, phy_reg);
5227			}
5228		}
5229
5230		/* For i217 Intel Rapid Start Technology support,
5231		 * when the system is going into Sx and no manageability engine
5232		 * is present, the driver must configure proxy to reset only on
5233		 * power good.  LPI (Low Power Idle) state must also reset only
5234		 * on power good, as well as the MTA (Multicast table array).
5235		 * The SMBus release must also be disabled on LCD reset.
5236		 */
5237		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
5238			/* Enable proxy to reset only on power good. */
5239			e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
5240			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
5241			e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
5242
5243			/* Set bit enable LPI (EEE) to reset only on
5244			 * power good.
5245			 */
5246			e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
5247			phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
5248			e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
5249
5250			/* Disable the SMB release on LCD reset. */
5251			e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
5252			phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
5253			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
5254		}
5255
5256		/* Enable MTA to reset for Intel Rapid Start Technology
5257		 * Support
5258		 */
5259		e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
5260		phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
5261		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
5262
5263release:
5264		hw->phy.ops.release(hw);
5265	}
5266out:
5267	ew32(PHY_CTRL, phy_ctrl);
5268
5269	if (hw->mac.type == e1000_ich8lan)
5270		e1000e_gig_downshift_workaround_ich8lan(hw);
5271
5272	if (hw->mac.type >= e1000_pchlan) {
5273		e1000_oem_bits_config_ich8lan(hw, false);
5274
5275		/* Reset PHY to activate OEM bits on 82577/8 */
5276		if (hw->mac.type == e1000_pchlan)
5277			e1000e_phy_hw_reset_generic(hw);
5278
5279		ret_val = hw->phy.ops.acquire(hw);
5280		if (ret_val)
5281			return;
5282		e1000_write_smbus_addr(hw);
5283		hw->phy.ops.release(hw);
5284	}
5285}
5286
5287/**
5288 *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
5289 *  @hw: pointer to the HW structure
5290 *
5291 *  During Sx to S0 transitions on non-managed devices or managed devices
5292 *  on which PHY resets are not blocked, if the PHY registers cannot be
5293 *  accessed properly by the s/w toggle the LANPHYPC value to power cycle
5294 *  the PHY.
5295 *  On i217, setup Intel Rapid Start Technology.
5296 **/
5297void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
5298{
5299	s32 ret_val;
5300
5301	if (hw->mac.type < e1000_pch2lan)
5302		return;
5303
5304	ret_val = e1000_init_phy_workarounds_pchlan(hw);
5305	if (ret_val) {
5306		e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
5307		return;
5308	}
5309
5310	/* For i217 Intel Rapid Start Technology support when the system
5311	 * is transitioning from Sx and no manageability engine is present
5312	 * configure SMBus to restore on reset, disable proxy, and enable
5313	 * the reset on MTA (Multicast table array).
5314	 */
5315	if (hw->phy.type == e1000_phy_i217) {
5316		u16 phy_reg;
5317
5318		ret_val = hw->phy.ops.acquire(hw);
5319		if (ret_val) {
5320			e_dbg("Failed to setup iRST\n");
5321			return;
5322		}
5323
5324		/* Clear Auto Enable LPI after link up */
5325		e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
5326		phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
5327		e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
5328
5329		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
5330			/* Restore clear on SMB if no manageability engine
5331			 * is present
5332			 */
5333			ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
5334			if (ret_val)
5335				goto release;
5336			phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
5337			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
5338
5339			/* Disable Proxy */
5340			e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
5341		}
5342		/* Enable reset on MTA */
5343		ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
5344		if (ret_val)
5345			goto release;
5346		phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
5347		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
5348release:
5349		if (ret_val)
5350			e_dbg("Error %d in resume workarounds\n", ret_val);
5351		hw->phy.ops.release(hw);
5352	}
5353}
5354
5355/**
5356 *  e1000_cleanup_led_ich8lan - Restore the default LED operation
5357 *  @hw: pointer to the HW structure
5358 *
5359 *  Return the LED back to the default configuration.
5360 **/
5361static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
5362{
5363	if (hw->phy.type == e1000_phy_ife)
5364		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
5365
5366	ew32(LEDCTL, hw->mac.ledctl_default);
5367	return 0;
5368}
5369
5370/**
5371 *  e1000_led_on_ich8lan - Turn LEDs on
5372 *  @hw: pointer to the HW structure
5373 *
5374 *  Turn on the LEDs.
5375 **/
5376static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
5377{
5378	if (hw->phy.type == e1000_phy_ife)
5379		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5380				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
5381
5382	ew32(LEDCTL, hw->mac.ledctl_mode2);
5383	return 0;
5384}
5385
5386/**
5387 *  e1000_led_off_ich8lan - Turn LEDs off
5388 *  @hw: pointer to the HW structure
5389 *
5390 *  Turn off the LEDs.
5391 **/
5392static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
5393{
5394	if (hw->phy.type == e1000_phy_ife)
5395		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
5396				(IFE_PSCL_PROBE_MODE |
5397				 IFE_PSCL_PROBE_LEDS_OFF));
5398
5399	ew32(LEDCTL, hw->mac.ledctl_mode1);
5400	return 0;
5401}
5402
5403/**
5404 *  e1000_setup_led_pchlan - Configures SW controllable LED
5405 *  @hw: pointer to the HW structure
5406 *
5407 *  This prepares the SW controllable LED for use.
5408 **/
5409static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
5410{
5411	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
5412}
5413
5414/**
5415 *  e1000_cleanup_led_pchlan - Restore the default LED operation
5416 *  @hw: pointer to the HW structure
5417 *
5418 *  Return the LED back to the default configuration.
5419 **/
5420static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
5421{
5422	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
5423}
5424
5425/**
5426 *  e1000_led_on_pchlan - Turn LEDs on
5427 *  @hw: pointer to the HW structure
5428 *
5429 *  Turn on the LEDs.
5430 **/
5431static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
5432{
5433	u16 data = (u16)hw->mac.ledctl_mode2;
5434	u32 i, led;
5435
5436	/* If no link, then turn LED on by setting the invert bit
5437	 * for each LED that's mode is "link_up" in ledctl_mode2.
5438	 */
5439	if (!(er32(STATUS) & E1000_STATUS_LU)) {
5440		for (i = 0; i < 3; i++) {
5441			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5442			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5443			    E1000_LEDCTL_MODE_LINK_UP)
5444				continue;
5445			if (led & E1000_PHY_LED0_IVRT)
5446				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5447			else
5448				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5449		}
5450	}
5451
5452	return e1e_wphy(hw, HV_LED_CONFIG, data);
5453}
5454
5455/**
5456 *  e1000_led_off_pchlan - Turn LEDs off
5457 *  @hw: pointer to the HW structure
5458 *
5459 *  Turn off the LEDs.
5460 **/
5461static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
5462{
5463	u16 data = (u16)hw->mac.ledctl_mode1;
5464	u32 i, led;
5465
5466	/* If no link, then turn LED off by clearing the invert bit
5467	 * for each LED that's mode is "link_up" in ledctl_mode1.
5468	 */
5469	if (!(er32(STATUS) & E1000_STATUS_LU)) {
5470		for (i = 0; i < 3; i++) {
5471			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
5472			if ((led & E1000_PHY_LED0_MODE_MASK) !=
5473			    E1000_LEDCTL_MODE_LINK_UP)
5474				continue;
5475			if (led & E1000_PHY_LED0_IVRT)
5476				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
5477			else
5478				data |= (E1000_PHY_LED0_IVRT << (i * 5));
5479		}
5480	}
5481
5482	return e1e_wphy(hw, HV_LED_CONFIG, data);
5483}
5484
5485/**
5486 *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
5487 *  @hw: pointer to the HW structure
5488 *
5489 *  Read appropriate register for the config done bit for completion status
5490 *  and configure the PHY through s/w for EEPROM-less parts.
5491 *
5492 *  NOTE: some silicon which is EEPROM-less will fail trying to read the
5493 *  config done bit, so only an error is logged and continues.  If we were
5494 *  to return with error, EEPROM-less silicon would not be able to be reset
5495 *  or change link.
5496 **/
5497static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
5498{
5499	s32 ret_val = 0;
5500	u32 bank = 0;
5501	u32 status;
5502
5503	e1000e_get_cfg_done_generic(hw);
5504
5505	/* Wait for indication from h/w that it has completed basic config */
5506	if (hw->mac.type >= e1000_ich10lan) {
5507		e1000_lan_init_done_ich8lan(hw);
5508	} else {
5509		ret_val = e1000e_get_auto_rd_done(hw);
5510		if (ret_val) {
5511			/* When auto config read does not complete, do not
5512			 * return with an error. This can happen in situations
5513			 * where there is no eeprom and prevents getting link.
5514			 */
5515			e_dbg("Auto Read Done did not complete\n");
5516			ret_val = 0;
5517		}
5518	}
5519
5520	/* Clear PHY Reset Asserted bit */
5521	status = er32(STATUS);
5522	if (status & E1000_STATUS_PHYRA)
5523		ew32(STATUS, status & ~E1000_STATUS_PHYRA);
5524	else
5525		e_dbg("PHY Reset Asserted not set - needs delay\n");
5526
5527	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
5528	if (hw->mac.type <= e1000_ich9lan) {
5529		if (!(er32(EECD) & E1000_EECD_PRES) &&
5530		    (hw->phy.type == e1000_phy_igp_3)) {
5531			e1000e_phy_init_script_igp3(hw);
5532		}
5533	} else {
5534		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
5535			/* Maybe we should do a basic PHY config */
5536			e_dbg("EEPROM not present\n");
5537			ret_val = -E1000_ERR_CONFIG;
5538		}
5539	}
5540
5541	return ret_val;
5542}
5543
5544/**
5545 * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
5546 * @hw: pointer to the HW structure
5547 *
5548 * In the case of a PHY power down to save power, or to turn off link during a
5549 * driver unload, or wake on lan is not enabled, remove the link.
5550 **/
5551static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
5552{
5553	/* If the management interface is not enabled, then power down */
5554	if (!(hw->mac.ops.check_mng_mode(hw) ||
5555	      hw->phy.ops.check_reset_block(hw)))
5556		e1000_power_down_phy_copper(hw);
5557}
5558
5559/**
5560 *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
5561 *  @hw: pointer to the HW structure
5562 *
5563 *  Clears hardware counters specific to the silicon family and calls
5564 *  clear_hw_cntrs_generic to clear all general purpose counters.
5565 **/
5566static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
5567{
5568	u16 phy_data;
5569	s32 ret_val;
5570
5571	e1000e_clear_hw_cntrs_base(hw);
5572
5573	er32(ALGNERRC);
5574	er32(RXERRC);
5575	er32(TNCRS);
5576	er32(CEXTERR);
5577	er32(TSCTC);
5578	er32(TSCTFC);
5579
5580	er32(MGTPRC);
5581	er32(MGTPDC);
5582	er32(MGTPTC);
5583
5584	er32(IAC);
5585	er32(ICRXOC);
5586
5587	/* Clear PHY statistics registers */
5588	if ((hw->phy.type == e1000_phy_82578) ||
5589	    (hw->phy.type == e1000_phy_82579) ||
5590	    (hw->phy.type == e1000_phy_i217) ||
5591	    (hw->phy.type == e1000_phy_82577)) {
5592		ret_val = hw->phy.ops.acquire(hw);
5593		if (ret_val)
5594			return;
5595		ret_val = hw->phy.ops.set_page(hw,
5596					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
5597		if (ret_val)
5598			goto release;
5599		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
5600		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
5601		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
5602		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
5603		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
5604		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
5605		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
5606		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
5607		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
5608		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
5609		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
5610		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
5611		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
5612		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
5613release:
5614		hw->phy.ops.release(hw);
5615	}
5616}
5617
/* MAC operations shared by all ICH/PCH parts; entries marked "dependent on
 * mac type" are filled in at runtime per device.
 */
static const struct e1000_mac_operations ich8_mac_ops = {
	/* check_mng_mode dependent on mac type */
	.check_for_link		= e1000_check_for_copper_link_ich8lan,
	/* cleanup_led dependent on mac type */
	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
	.get_bus_info		= e1000_get_bus_info_ich8lan,
	.set_lan_id		= e1000_set_lan_id_single_port,
	.get_link_up_info	= e1000_get_link_up_info_ich8lan,
	/* led_on dependent on mac type */
	/* led_off dependent on mac type */
	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
	.reset_hw		= e1000_reset_hw_ich8lan,
	.init_hw		= e1000_init_hw_ich8lan,
	.setup_link		= e1000_setup_link_ich8lan,
	.setup_physical_interface = e1000_setup_copper_link_ich8lan,
	/* id_led_init dependent on mac type */
	.config_collision_dist	= e1000e_config_collision_dist_generic,
	.rar_set		= e1000e_rar_set_generic,
	.rar_get_count		= e1000e_rar_get_count_generic,
};
5638
/* PHY operations shared by all ICH/PCH parts; access is serialized via the
 * SW flag acquire/release pair.
 */
static const struct e1000_phy_operations ich8_phy_ops = {
	.acquire		= e1000_acquire_swflag_ich8lan,
	.check_reset_block	= e1000_check_reset_block_ich8lan,
	.commit			= NULL,
	.get_cfg_done		= e1000_get_cfg_done_ich8lan,
	.get_cable_length	= e1000e_get_cable_length_igp_2,
	.read_reg		= e1000e_read_phy_reg_igp,
	.release		= e1000_release_swflag_ich8lan,
	.reset			= e1000_phy_hw_reset_ich8lan,
	.set_d0_lplu_state	= e1000_set_d0_lplu_state_ich8lan,
	.set_d3_lplu_state	= e1000_set_d3_lplu_state_ich8lan,
	.write_reg		= e1000e_write_phy_reg_igp,
};
5652
/* NVM (flash) operations for ICH8 through PCH-LPT parts */
static const struct e1000_nvm_operations ich8_nvm_ops = {
	.acquire		= e1000_acquire_nvm_ich8lan,
	.read			= e1000_read_nvm_ich8lan,
	.release		= e1000_release_nvm_ich8lan,
	.reload			= e1000e_reload_nvm_generic,
	.update			= e1000_update_nvm_checksum_ich8lan,
	.valid_led_default	= e1000_valid_led_default_ich8lan,
	.validate		= e1000_validate_nvm_checksum_ich8lan,
	.write			= e1000_write_nvm_ich8lan,
};
5663
/* NVM operations for SPT (pch_spt) parts: read/update use SPT-specific
 * routines; the rest is shared with ich8_nvm_ops.
 */
static const struct e1000_nvm_operations spt_nvm_ops = {
	.acquire		= e1000_acquire_nvm_ich8lan,
	.release		= e1000_release_nvm_ich8lan,
	.read			= e1000_read_nvm_spt,
	.update			= e1000_update_nvm_checksum_spt,
	.reload			= e1000e_reload_nvm_generic,
	.valid_led_default	= e1000_valid_led_default_ich8lan,
	.validate		= e1000_validate_nvm_checksum_ich8lan,
	.write			= e1000_write_nvm_ich8lan,
};
5674
/* Device parameters for ICH8 (no jumbo frame support) */
const struct e1000_info e1000_ich8_info = {
	.mac			= e1000_ich8lan,
	.flags			= FLAG_HAS_WOL
				  | FLAG_IS_ICH
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 8,
	.max_hw_frame_size	= VLAN_ETH_FRAME_LEN + ETH_FCS_LEN,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5690
/* Device parameters for ICH9 (adds jumbo frame support over ICH8) */
const struct e1000_info e1000_ich9_info = {
	.mac			= e1000_ich9lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 18,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5707
/* Device parameters for ICH10 (same feature set as ICH9) */
const struct e1000_info e1000_ich10_info = {
	.mac			= e1000_ich10lan,
	.flags			= FLAG_HAS_JUMBO_FRAMES
				  | FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_APME_IN_WUC,
	.pba			= 18,
	.max_hw_frame_size	= DEFAULT_JUMBO,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5724
/* Device parameters for PCH (82577/82578); pause-time disabled per errata */
const struct e1000_info e1000_pch_info = {
	.mac			= e1000_pchlan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS,
	.pba			= 26,
	.max_hw_frame_size	= 4096,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5743
/* Device parameters for PCH2 (82579); adds HW timestamping and EEE */
const struct e1000_info e1000_pch2_info = {
	.mac			= e1000_pch2lan,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_HW_TIMESTAMP
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
	.pba			= 26,
	.max_hw_frame_size	= 9022,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5763
/* Device parameters for PCH-LPT (I217/I218) */
const struct e1000_info e1000_pch_lpt_info = {
	.mac			= e1000_pch_lpt,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_HW_TIMESTAMP
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
	.pba			= 26,
	.max_hw_frame_size	= 9022,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &ich8_nvm_ops,
};
5783
/* Device parameters for PCH-SPT; uses SPT-specific NVM ops */
const struct e1000_info e1000_pch_spt_info = {
	.mac			= e1000_pch_spt,
	.flags			= FLAG_IS_ICH
				  | FLAG_HAS_WOL
				  | FLAG_HAS_HW_TIMESTAMP
				  | FLAG_HAS_CTRLEXT_ON_LOAD
				  | FLAG_HAS_AMT
				  | FLAG_HAS_FLASH
				  | FLAG_HAS_JUMBO_FRAMES
				  | FLAG_APME_IN_WUC,
	.flags2			= FLAG2_HAS_PHY_STATS
				  | FLAG2_HAS_EEE,
	.pba			= 26,
	.max_hw_frame_size	= 9022,
	.get_variants		= e1000_get_variants_ich8lan,
	.mac_ops		= &ich8_mac_ops,
	.phy_ops		= &ich8_phy_ops,
	.nvm_ops		= &spt_nvm_ops,
};
5803