1/*
2 * tg3.c: Broadcom Tigon3 ethernet driver.
3 *
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2014 Broadcom Corporation.
8 *
9 * Firmware is:
10 *	Derived from proprietary unpublished source code,
11 *	Copyright (C) 2000-2003 Broadcom Corporation.
12 *
13 *	Permission is hereby granted for the distribution of this firmware
14 *	data in hexadecimal or equivalent format, provided this copyright
15 *	notice is accompanying it.
16 */
17
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/stringify.h>
22#include <linux/kernel.h>
23#include <linux/types.h>
24#include <linux/compiler.h>
25#include <linux/slab.h>
26#include <linux/delay.h>
27#include <linux/in.h>
28#include <linux/interrupt.h>
29#include <linux/ioport.h>
30#include <linux/pci.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/ethtool.h>
35#include <linux/mdio.h>
36#include <linux/mii.h>
37#include <linux/phy.h>
38#include <linux/brcmphy.h>
39#include <linux/if.h>
40#include <linux/if_vlan.h>
41#include <linux/ip.h>
42#include <linux/tcp.h>
43#include <linux/workqueue.h>
44#include <linux/prefetch.h>
45#include <linux/dma-mapping.h>
46#include <linux/firmware.h>
47#include <linux/ssb/ssb_driver_gige.h>
48#include <linux/hwmon.h>
49#include <linux/hwmon-sysfs.h>
50
51#include <net/checksum.h>
52#include <net/ip.h>
53
54#include <linux/io.h>
55#include <asm/byteorder.h>
56#include <linux/uaccess.h>
57
58#include <uapi/linux/net_tstamp.h>
59#include <linux/ptp_clock_kernel.h>
60
61#ifdef CONFIG_SPARC
62#include <asm/idprom.h>
63#include <asm/prom.h>
64#endif
65
66#define BAR_0	0
67#define BAR_2	2
68
69#include "tg3.h"
70
71/* Functions & macros to verify TG3_FLAGS types */
72
73static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
74{
75	return test_bit(flag, bits);
76}
77
78static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
79{
80	set_bit(flag, bits);
81}
82
83static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
84{
85	clear_bit(flag, bits);
86}
87
88#define tg3_flag(tp, flag)				\
89	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90#define tg3_flag_set(tp, flag)				\
91	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92#define tg3_flag_clear(tp, flag)			\
93	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
94
95#define DRV_MODULE_NAME		"tg3"
96#define TG3_MAJ_NUM			3
97#define TG3_MIN_NUM			137
98#define DRV_MODULE_VERSION	\
99	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100#define DRV_MODULE_RELDATE	"May 11, 2014"
101
102#define RESET_KIND_SHUTDOWN	0
103#define RESET_KIND_INIT		1
104#define RESET_KIND_SUSPEND	2
105
106#define TG3_DEF_RX_MODE		0
107#define TG3_DEF_TX_MODE		0
108#define TG3_DEF_MSG_ENABLE	  \
109	(NETIF_MSG_DRV		| \
110	 NETIF_MSG_PROBE	| \
111	 NETIF_MSG_LINK		| \
112	 NETIF_MSG_TIMER	| \
113	 NETIF_MSG_IFDOWN	| \
114	 NETIF_MSG_IFUP		| \
115	 NETIF_MSG_RX_ERR	| \
116	 NETIF_MSG_TX_ERR)
117
118#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
119
120/* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
122 */
123
124#define TG3_TX_TIMEOUT			(5 * HZ)
125
126/* hardware minimum and maximum for a single frame's data payload */
127#define TG3_MIN_MTU			60
128#define TG3_MAX_MTU(tp)	\
129	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
130
131/* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
134 */
135#define TG3_RX_STD_RING_SIZE(tp) \
136	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138#define TG3_DEF_RX_RING_PENDING		200
139#define TG3_RX_JMB_RING_SIZE(tp) \
140	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142#define TG3_DEF_RX_JUMBO_RING_PENDING	100
143
144/* Do not place this n-ring entries value into the tp struct itself,
145 * we really want to expose these constants to GCC so that modulo et
146 * al.  operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions.  Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)'.
149 */
150
151#define TG3_TX_RING_SIZE		512
152#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
153
154#define TG3_RX_STD_RING_BYTES(tp) \
155	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156#define TG3_RX_JMB_RING_BYTES(tp) \
157	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158#define TG3_RX_RCB_RING_BYTES(tp) \
159	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
161				 TG3_TX_RING_SIZE)
162#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
163
164#define TG3_DMA_BYTE_ENAB		64
165
166#define TG3_RX_STD_DMA_SZ		1536
167#define TG3_RX_JMB_DMA_SZ		9046
168
169#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
170
171#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
173
174#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
176
177#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
179
180/* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode.  The driver
182 * works around this bug by double copying the packet.  This workaround
183 * is built into the normal double copy length check for efficiency.
184 *
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient.  For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path.  Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
190 */
191#define TG3_RX_COPY_THRESHOLD		256
192#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
194#else
195	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
196#endif
197
198#if (NET_IP_ALIGN != 0)
199#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
200#else
201#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
202#endif
203
204/* minimum number of free TX descriptors required to wake up TX process */
205#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
206#define TG3_TX_BD_DMA_MAX_2K		2048
207#define TG3_TX_BD_DMA_MAX_4K		4096
208
209#define TG3_RAW_IP_ALIGN 2
210
211#define TG3_MAX_UCAST_ADDR(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 3)
212#define TG3_UCAST_ADDR_IDX(tp) (tg3_flag((tp), ENABLE_ASF) ? 2 : 1)
213
214#define TG3_FW_UPDATE_TIMEOUT_SEC	5
215#define TG3_FW_UPDATE_FREQ_SEC		(TG3_FW_UPDATE_TIMEOUT_SEC / 2)
216
217#define FIRMWARE_TG3		"tigon/tg3.bin"
218#define FIRMWARE_TG357766	"tigon/tg357766.bin"
219#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
220#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
221
222static char version[] =
223	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
224
225MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
226MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
227MODULE_LICENSE("GPL");
228MODULE_VERSION(DRV_MODULE_VERSION);
229MODULE_FIRMWARE(FIRMWARE_TG3);
230MODULE_FIRMWARE(FIRMWARE_TG3TSO);
231MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
232
233static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
234module_param(tg3_debug, int, 0);
235MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
236
237#define TG3_DRV_DATA_FLAG_10_100_ONLY	0x0001
238#define TG3_DRV_DATA_FLAG_5705_10_100	0x0002
239
240static const struct pci_device_id tg3_pci_tbl[] = {
241	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
242	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
243	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
244	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
245	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
246	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
247	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
248	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
249	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
250	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
251	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
252	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
253	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
254	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
255	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
256	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
257	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
258	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
259	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
260	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261			TG3_DRV_DATA_FLAG_5705_10_100},
262	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
263	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
264			TG3_DRV_DATA_FLAG_5705_10_100},
265	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
266	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
267	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
268			TG3_DRV_DATA_FLAG_5705_10_100},
269	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
270	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
271	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
272	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
273	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
274	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
275	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
276	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
277	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
278	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
279	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
280	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
281	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
282	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
283	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
284	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
285	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
286	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
287	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
288	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
289	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
290			PCI_VENDOR_ID_LENOVO,
291			TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
292	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
294	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
295	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
296	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
297	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
298	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
299	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
300	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
301	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
302	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
303	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
304	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
305	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
306	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
307	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
308	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
309	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
310	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
311	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
312	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
313	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
314	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
316	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317	{PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
318			PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
319	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
320	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
321	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
322	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
323	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
324	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
325	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
326	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
327	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
328	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
329	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
330	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
331	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
332	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
333	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
334	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
335	 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
336	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
337	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
338	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
339	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
340	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
341	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
342	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
343	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57764)},
344	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57767)},
345	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57787)},
346	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57782)},
347	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57786)},
348	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
349	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
350	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
351	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
352	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
353	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
354	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
355	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
356	{}
357};
358
359MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
360
361static const struct {
362	const char string[ETH_GSTRING_LEN];
363} ethtool_stats_keys[] = {
364	{ "rx_octets" },
365	{ "rx_fragments" },
366	{ "rx_ucast_packets" },
367	{ "rx_mcast_packets" },
368	{ "rx_bcast_packets" },
369	{ "rx_fcs_errors" },
370	{ "rx_align_errors" },
371	{ "rx_xon_pause_rcvd" },
372	{ "rx_xoff_pause_rcvd" },
373	{ "rx_mac_ctrl_rcvd" },
374	{ "rx_xoff_entered" },
375	{ "rx_frame_too_long_errors" },
376	{ "rx_jabbers" },
377	{ "rx_undersize_packets" },
378	{ "rx_in_length_errors" },
379	{ "rx_out_length_errors" },
380	{ "rx_64_or_less_octet_packets" },
381	{ "rx_65_to_127_octet_packets" },
382	{ "rx_128_to_255_octet_packets" },
383	{ "rx_256_to_511_octet_packets" },
384	{ "rx_512_to_1023_octet_packets" },
385	{ "rx_1024_to_1522_octet_packets" },
386	{ "rx_1523_to_2047_octet_packets" },
387	{ "rx_2048_to_4095_octet_packets" },
388	{ "rx_4096_to_8191_octet_packets" },
389	{ "rx_8192_to_9022_octet_packets" },
390
391	{ "tx_octets" },
392	{ "tx_collisions" },
393
394	{ "tx_xon_sent" },
395	{ "tx_xoff_sent" },
396	{ "tx_flow_control" },
397	{ "tx_mac_errors" },
398	{ "tx_single_collisions" },
399	{ "tx_mult_collisions" },
400	{ "tx_deferred" },
401	{ "tx_excessive_collisions" },
402	{ "tx_late_collisions" },
403	{ "tx_collide_2times" },
404	{ "tx_collide_3times" },
405	{ "tx_collide_4times" },
406	{ "tx_collide_5times" },
407	{ "tx_collide_6times" },
408	{ "tx_collide_7times" },
409	{ "tx_collide_8times" },
410	{ "tx_collide_9times" },
411	{ "tx_collide_10times" },
412	{ "tx_collide_11times" },
413	{ "tx_collide_12times" },
414	{ "tx_collide_13times" },
415	{ "tx_collide_14times" },
416	{ "tx_collide_15times" },
417	{ "tx_ucast_packets" },
418	{ "tx_mcast_packets" },
419	{ "tx_bcast_packets" },
420	{ "tx_carrier_sense_errors" },
421	{ "tx_discards" },
422	{ "tx_errors" },
423
424	{ "dma_writeq_full" },
425	{ "dma_write_prioq_full" },
426	{ "rxbds_empty" },
427	{ "rx_discards" },
428	{ "rx_errors" },
429	{ "rx_threshold_hit" },
430
431	{ "dma_readq_full" },
432	{ "dma_read_prioq_full" },
433	{ "tx_comp_queue_full" },
434
435	{ "ring_set_send_prod_index" },
436	{ "ring_status_update" },
437	{ "nic_irqs" },
438	{ "nic_avoided_irqs" },
439	{ "nic_tx_threshold_hit" },
440
441	{ "mbuf_lwm_thresh_hit" },
442};
443
444#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
445#define TG3_NVRAM_TEST		0
446#define TG3_LINK_TEST		1
447#define TG3_REGISTER_TEST	2
448#define TG3_MEMORY_TEST		3
449#define TG3_MAC_LOOPB_TEST	4
450#define TG3_PHY_LOOPB_TEST	5
451#define TG3_EXT_LOOPB_TEST	6
452#define TG3_INTERRUPT_TEST	7
453
454
455static const struct {
456	const char string[ETH_GSTRING_LEN];
457} ethtool_test_keys[] = {
458	[TG3_NVRAM_TEST]	= { "nvram test        (online) " },
459	[TG3_LINK_TEST]		= { "link test         (online) " },
460	[TG3_REGISTER_TEST]	= { "register test     (offline)" },
461	[TG3_MEMORY_TEST]	= { "memory test       (offline)" },
462	[TG3_MAC_LOOPB_TEST]	= { "mac loopback test (offline)" },
463	[TG3_PHY_LOOPB_TEST]	= { "phy loopback test (offline)" },
464	[TG3_EXT_LOOPB_TEST]	= { "ext loopback test (offline)" },
465	[TG3_INTERRUPT_TEST]	= { "interrupt test    (offline)" },
466};
467
468#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
469
470
471static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
472{
473	writel(val, tp->regs + off);
474}
475
476static u32 tg3_read32(struct tg3 *tp, u32 off)
477{
478	return readl(tp->regs + off);
479}
480
481static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
482{
483	writel(val, tp->aperegs + off);
484}
485
486static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
487{
488	return readl(tp->aperegs + off);
489}
490
491static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
492{
493	unsigned long flags;
494
495	spin_lock_irqsave(&tp->indirect_lock, flags);
496	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
497	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
498	spin_unlock_irqrestore(&tp->indirect_lock, flags);
499}
500
501static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
502{
503	writel(val, tp->regs + off);
504	readl(tp->regs + off);
505}
506
507static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
508{
509	unsigned long flags;
510	u32 val;
511
512	spin_lock_irqsave(&tp->indirect_lock, flags);
513	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
514	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
515	spin_unlock_irqrestore(&tp->indirect_lock, flags);
516	return val;
517}
518
519static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
520{
521	unsigned long flags;
522
523	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
524		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
525				       TG3_64BIT_REG_LOW, val);
526		return;
527	}
528	if (off == TG3_RX_STD_PROD_IDX_REG) {
529		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
530				       TG3_64BIT_REG_LOW, val);
531		return;
532	}
533
534	spin_lock_irqsave(&tp->indirect_lock, flags);
535	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
536	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
537	spin_unlock_irqrestore(&tp->indirect_lock, flags);
538
539	/* In indirect mode when disabling interrupts, we also need
540	 * to clear the interrupt bit in the GRC local ctrl register.
541	 */
542	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
543	    (val == 0x1)) {
544		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
545				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
546	}
547}
548
549static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
550{
551	unsigned long flags;
552	u32 val;
553
554	spin_lock_irqsave(&tp->indirect_lock, flags);
555	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
556	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
557	spin_unlock_irqrestore(&tp->indirect_lock, flags);
558	return val;
559}
560
561/* usec_wait specifies the wait time in usec when writing to certain registers
562 * where it is unsafe to read back the register without some delay.
563 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
564 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
565 */
566static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
567{
568	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
569		/* Non-posted methods */
570		tp->write32(tp, off, val);
571	else {
572		/* Posted method */
573		tg3_write32(tp, off, val);
574		if (usec_wait)
575			udelay(usec_wait);
576		tp->read32(tp, off);
577	}
578	/* Wait again after the read for the posted method to guarantee that
579	 * the wait time is met.
580	 */
581	if (usec_wait)
582		udelay(usec_wait);
583}
584
585static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
586{
587	tp->write32_mbox(tp, off, val);
588	if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
589	    (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
590	     !tg3_flag(tp, ICH_WORKAROUND)))
591		tp->read32_mbox(tp, off);
592}
593
594static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
595{
596	void __iomem *mbox = tp->regs + off;
597	writel(val, mbox);
598	if (tg3_flag(tp, TXD_MBOX_HWBUG))
599		writel(val, mbox);
600	if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
601	    tg3_flag(tp, FLUSH_POSTED_WRITES))
602		readl(mbox);
603}
604
605static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
606{
607	return readl(tp->regs + off + GRCMBOX_BASE);
608}
609
610static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
611{
612	writel(val, tp->regs + off + GRCMBOX_BASE);
613}
614
615#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
616#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
617#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
618#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
619#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
620
621#define tw32(reg, val)			tp->write32(tp, reg, val)
622#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
623#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
624#define tr32(reg)			tp->read32(tp, reg)
625
626static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
627{
628	unsigned long flags;
629
630	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
631	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
632		return;
633
634	spin_lock_irqsave(&tp->indirect_lock, flags);
635	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
636		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
637		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
638
639		/* Always leave this as zero. */
640		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
641	} else {
642		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
643		tw32_f(TG3PCI_MEM_WIN_DATA, val);
644
645		/* Always leave this as zero. */
646		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
647	}
648	spin_unlock_irqrestore(&tp->indirect_lock, flags);
649}
650
651static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
652{
653	unsigned long flags;
654
655	if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
656	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
657		*val = 0;
658		return;
659	}
660
661	spin_lock_irqsave(&tp->indirect_lock, flags);
662	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
663		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
664		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
665
666		/* Always leave this as zero. */
667		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
668	} else {
669		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
670		*val = tr32(TG3PCI_MEM_WIN_DATA);
671
672		/* Always leave this as zero. */
673		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
674	}
675	spin_unlock_irqrestore(&tp->indirect_lock, flags);
676}
677
678static void tg3_ape_lock_init(struct tg3 *tp)
679{
680	int i;
681	u32 regbase, bit;
682
683	if (tg3_asic_rev(tp) == ASIC_REV_5761)
684		regbase = TG3_APE_LOCK_GRANT;
685	else
686		regbase = TG3_APE_PER_LOCK_GRANT;
687
688	/* Make sure the driver hasn't any stale locks. */
689	for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
690		switch (i) {
691		case TG3_APE_LOCK_PHY0:
692		case TG3_APE_LOCK_PHY1:
693		case TG3_APE_LOCK_PHY2:
694		case TG3_APE_LOCK_PHY3:
695			bit = APE_LOCK_GRANT_DRIVER;
696			break;
697		default:
698			if (!tp->pci_fn)
699				bit = APE_LOCK_GRANT_DRIVER;
700			else
701				bit = 1 << tp->pci_fn;
702		}
703		tg3_ape_write32(tp, regbase + 4 * i, bit);
704	}
705
706}
707
708static int tg3_ape_lock(struct tg3 *tp, int locknum)
709{
710	int i, off;
711	int ret = 0;
712	u32 status, req, gnt, bit;
713
714	if (!tg3_flag(tp, ENABLE_APE))
715		return 0;
716
717	switch (locknum) {
718	case TG3_APE_LOCK_GPIO:
719		if (tg3_asic_rev(tp) == ASIC_REV_5761)
720			return 0;
721	case TG3_APE_LOCK_GRC:
722	case TG3_APE_LOCK_MEM:
723		if (!tp->pci_fn)
724			bit = APE_LOCK_REQ_DRIVER;
725		else
726			bit = 1 << tp->pci_fn;
727		break;
728	case TG3_APE_LOCK_PHY0:
729	case TG3_APE_LOCK_PHY1:
730	case TG3_APE_LOCK_PHY2:
731	case TG3_APE_LOCK_PHY3:
732		bit = APE_LOCK_REQ_DRIVER;
733		break;
734	default:
735		return -EINVAL;
736	}
737
738	if (tg3_asic_rev(tp) == ASIC_REV_5761) {
739		req = TG3_APE_LOCK_REQ;
740		gnt = TG3_APE_LOCK_GRANT;
741	} else {
742		req = TG3_APE_PER_LOCK_REQ;
743		gnt = TG3_APE_PER_LOCK_GRANT;
744	}
745
746	off = 4 * locknum;
747
748	tg3_ape_write32(tp, req + off, bit);
749
750	/* Wait for up to 1 millisecond to acquire lock. */
751	for (i = 0; i < 100; i++) {
752		status = tg3_ape_read32(tp, gnt + off);
753		if (status == bit)
754			break;
755		if (pci_channel_offline(tp->pdev))
756			break;
757
758		udelay(10);
759	}
760
761	if (status != bit) {
762		/* Revoke the lock request. */
763		tg3_ape_write32(tp, gnt + off, bit);
764		ret = -EBUSY;
765	}
766
767	return ret;
768}
769
770static void tg3_ape_unlock(struct tg3 *tp, int locknum)
771{
772	u32 gnt, bit;
773
774	if (!tg3_flag(tp, ENABLE_APE))
775		return;
776
777	switch (locknum) {
778	case TG3_APE_LOCK_GPIO:
779		if (tg3_asic_rev(tp) == ASIC_REV_5761)
780			return;
781	case TG3_APE_LOCK_GRC:
782	case TG3_APE_LOCK_MEM:
783		if (!tp->pci_fn)
784			bit = APE_LOCK_GRANT_DRIVER;
785		else
786			bit = 1 << tp->pci_fn;
787		break;
788	case TG3_APE_LOCK_PHY0:
789	case TG3_APE_LOCK_PHY1:
790	case TG3_APE_LOCK_PHY2:
791	case TG3_APE_LOCK_PHY3:
792		bit = APE_LOCK_GRANT_DRIVER;
793		break;
794	default:
795		return;
796	}
797
798	if (tg3_asic_rev(tp) == ASIC_REV_5761)
799		gnt = TG3_APE_LOCK_GRANT;
800	else
801		gnt = TG3_APE_PER_LOCK_GRANT;
802
803	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
804}
805
806static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
807{
808	u32 apedata;
809
810	while (timeout_us) {
811		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
812			return -EBUSY;
813
814		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
815		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
816			break;
817
818		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
819
820		udelay(10);
821		timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
822	}
823
824	return timeout_us ? 0 : -EBUSY;
825}
826
827static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
828{
829	u32 i, apedata;
830
831	for (i = 0; i < timeout_us / 10; i++) {
832		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
833
834		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
835			break;
836
837		udelay(10);
838	}
839
840	return i == timeout_us / 10;
841}
842
843static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
844				   u32 len)
845{
846	int err;
847	u32 i, bufoff, msgoff, maxlen, apedata;
848
849	if (!tg3_flag(tp, APE_HAS_NCSI))
850		return 0;
851
852	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
853	if (apedata != APE_SEG_SIG_MAGIC)
854		return -ENODEV;
855
856	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
857	if (!(apedata & APE_FW_STATUS_READY))
858		return -EAGAIN;
859
860	bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
861		 TG3_APE_SHMEM_BASE;
862	msgoff = bufoff + 2 * sizeof(u32);
863	maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
864
865	while (len) {
866		u32 length;
867
868		/* Cap xfer sizes to scratchpad limits. */
869		length = (len > maxlen) ? maxlen : len;
870		len -= length;
871
872		apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
873		if (!(apedata & APE_FW_STATUS_READY))
874			return -EAGAIN;
875
876		/* Wait for up to 1 msec for APE to service previous event. */
877		err = tg3_ape_event_lock(tp, 1000);
878		if (err)
879			return err;
880
881		apedata = APE_EVENT_STATUS_DRIVER_EVNT |
882			  APE_EVENT_STATUS_SCRTCHPD_READ |
883			  APE_EVENT_STATUS_EVENT_PENDING;
884		tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
885
886		tg3_ape_write32(tp, bufoff, base_off);
887		tg3_ape_write32(tp, bufoff + sizeof(u32), length);
888
889		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
890		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
891
892		base_off += length;
893
894		if (tg3_ape_wait_for_event(tp, 30000))
895			return -EAGAIN;
896
897		for (i = 0; length; i += 4, length -= 4) {
898			u32 val = tg3_ape_read32(tp, msgoff + i);
899			memcpy(data, &val, sizeof(u32));
900			data++;
901		}
902	}
903
904	return 0;
905}
906
907static int tg3_ape_send_event(struct tg3 *tp, u32 event)
908{
909	int err;
910	u32 apedata;
911
912	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
913	if (apedata != APE_SEG_SIG_MAGIC)
914		return -EAGAIN;
915
916	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
917	if (!(apedata & APE_FW_STATUS_READY))
918		return -EAGAIN;
919
920	/* Wait for up to 1 millisecond for APE to service previous event. */
921	err = tg3_ape_event_lock(tp, 1000);
922	if (err)
923		return err;
924
925	tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
926			event | APE_EVENT_STATUS_EVENT_PENDING);
927
928	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
929	tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
930
931	return 0;
932}
933
934static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
935{
936	u32 event;
937	u32 apedata;
938
939	if (!tg3_flag(tp, ENABLE_APE))
940		return;
941
942	switch (kind) {
943	case RESET_KIND_INIT:
944		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
945				APE_HOST_SEG_SIG_MAGIC);
946		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
947				APE_HOST_SEG_LEN_MAGIC);
948		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
949		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
950		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
951			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
952		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
953				APE_HOST_BEHAV_NO_PHYLOCK);
954		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
955				    TG3_APE_HOST_DRVR_STATE_START);
956
957		event = APE_EVENT_STATUS_STATE_START;
958		break;
959	case RESET_KIND_SHUTDOWN:
960		/* With the interface we are currently using,
961		 * APE does not track driver state.  Wiping
962		 * out the HOST SEGMENT SIGNATURE forces
963		 * the APE to assume OS absent status.
964		 */
965		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
966
967		if (device_may_wakeup(&tp->pdev->dev) &&
968		    tg3_flag(tp, WOL_ENABLE)) {
969			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
970					    TG3_APE_HOST_WOL_SPEED_AUTO);
971			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
972		} else
973			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
974
975		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
976
977		event = APE_EVENT_STATUS_STATE_UNLOAD;
978		break;
979	default:
980		return;
981	}
982
983	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
984
985	tg3_ape_send_event(tp, event);
986}
987
988static void tg3_disable_ints(struct tg3 *tp)
989{
990	int i;
991
992	tw32(TG3PCI_MISC_HOST_CTRL,
993	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
994	for (i = 0; i < tp->irq_max; i++)
995		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
996}
997
998static void tg3_enable_ints(struct tg3 *tp)
999{
1000	int i;
1001
1002	tp->irq_sync = 0;
1003	wmb();
1004
1005	tw32(TG3PCI_MISC_HOST_CTRL,
1006	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1007
1008	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1009	for (i = 0; i < tp->irq_cnt; i++) {
1010		struct tg3_napi *tnapi = &tp->napi[i];
1011
1012		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1013		if (tg3_flag(tp, 1SHOT_MSI))
1014			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1015
1016		tp->coal_now |= tnapi->coal_now;
1017	}
1018
1019	/* Force an initial interrupt */
1020	if (!tg3_flag(tp, TAGGED_STATUS) &&
1021	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1022		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1023	else
1024		tw32(HOSTCC_MODE, tp->coal_now);
1025
1026	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1027}
1028
1029static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1030{
1031	struct tg3 *tp = tnapi->tp;
1032	struct tg3_hw_status *sblk = tnapi->hw_status;
1033	unsigned int work_exists = 0;
1034
1035	/* check for phy events */
1036	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1037		if (sblk->status & SD_STATUS_LINK_CHG)
1038			work_exists = 1;
1039	}
1040
1041	/* check for TX work to do */
1042	if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1043		work_exists = 1;
1044
1045	/* check for RX work to do */
1046	if (tnapi->rx_rcb_prod_idx &&
1047	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1048		work_exists = 1;
1049
1050	return work_exists;
1051}
1052
1053/* tg3_int_reenable
1054 *  similar to tg3_enable_ints, but it accurately determines whether there
1055 *  is new work pending and can return without flushing the PIO write
1056 *  which reenables interrupts
1057 */
1058static void tg3_int_reenable(struct tg3_napi *tnapi)
1059{
1060	struct tg3 *tp = tnapi->tp;
1061
1062	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1063	mmiowb();
1064
1065	/* When doing tagged status, this work check is unnecessary.
1066	 * The last_tag we write above tells the chip which piece of
1067	 * work we've completed.
1068	 */
1069	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1070		tw32(HOSTCC_MODE, tp->coalesce_mode |
1071		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
1072}
1073
1074static void tg3_switch_clocks(struct tg3 *tp)
1075{
1076	u32 clock_ctrl;
1077	u32 orig_clock_ctrl;
1078
1079	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1080		return;
1081
1082	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1083
1084	orig_clock_ctrl = clock_ctrl;
1085	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1086		       CLOCK_CTRL_CLKRUN_OENABLE |
1087		       0x1f);
1088	tp->pci_clock_ctrl = clock_ctrl;
1089
1090	if (tg3_flag(tp, 5705_PLUS)) {
1091		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1092			tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1094		}
1095	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1096		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1097			    clock_ctrl |
1098			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1099			    40);
1100		tw32_wait_f(TG3PCI_CLOCK_CTRL,
1101			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
1102			    40);
1103	}
1104	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1105}
1106
1107#define PHY_BUSY_LOOPS	5000
1108
1109static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1110			 u32 *val)
1111{
1112	u32 frame_val;
1113	unsigned int loops;
1114	int ret;
1115
1116	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1117		tw32_f(MAC_MI_MODE,
1118		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1119		udelay(80);
1120	}
1121
1122	tg3_ape_lock(tp, tp->phy_ape_lock);
1123
1124	*val = 0x0;
1125
1126	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1127		      MI_COM_PHY_ADDR_MASK);
1128	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1129		      MI_COM_REG_ADDR_MASK);
1130	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1131
1132	tw32_f(MAC_MI_COM, frame_val);
1133
1134	loops = PHY_BUSY_LOOPS;
1135	while (loops != 0) {
1136		udelay(10);
1137		frame_val = tr32(MAC_MI_COM);
1138
1139		if ((frame_val & MI_COM_BUSY) == 0) {
1140			udelay(5);
1141			frame_val = tr32(MAC_MI_COM);
1142			break;
1143		}
1144		loops -= 1;
1145	}
1146
1147	ret = -EBUSY;
1148	if (loops != 0) {
1149		*val = frame_val & MI_COM_DATA_MASK;
1150		ret = 0;
1151	}
1152
1153	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1154		tw32_f(MAC_MI_MODE, tp->mi_mode);
1155		udelay(80);
1156	}
1157
1158	tg3_ape_unlock(tp, tp->phy_ape_lock);
1159
1160	return ret;
1161}
1162
1163static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1164{
1165	return __tg3_readphy(tp, tp->phy_addr, reg, val);
1166}
1167
1168static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1169			  u32 val)
1170{
1171	u32 frame_val;
1172	unsigned int loops;
1173	int ret;
1174
1175	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1176	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1177		return 0;
1178
1179	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1180		tw32_f(MAC_MI_MODE,
1181		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1182		udelay(80);
1183	}
1184
1185	tg3_ape_lock(tp, tp->phy_ape_lock);
1186
1187	frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1188		      MI_COM_PHY_ADDR_MASK);
1189	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1190		      MI_COM_REG_ADDR_MASK);
1191	frame_val |= (val & MI_COM_DATA_MASK);
1192	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1193
1194	tw32_f(MAC_MI_COM, frame_val);
1195
1196	loops = PHY_BUSY_LOOPS;
1197	while (loops != 0) {
1198		udelay(10);
1199		frame_val = tr32(MAC_MI_COM);
1200		if ((frame_val & MI_COM_BUSY) == 0) {
1201			udelay(5);
1202			frame_val = tr32(MAC_MI_COM);
1203			break;
1204		}
1205		loops -= 1;
1206	}
1207
1208	ret = -EBUSY;
1209	if (loops != 0)
1210		ret = 0;
1211
1212	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1213		tw32_f(MAC_MI_MODE, tp->mi_mode);
1214		udelay(80);
1215	}
1216
1217	tg3_ape_unlock(tp, tp->phy_ape_lock);
1218
1219	return ret;
1220}
1221
1222static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1223{
1224	return __tg3_writephy(tp, tp->phy_addr, reg, val);
1225}
1226
1227static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1228{
1229	int err;
1230
1231	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1232	if (err)
1233		goto done;
1234
1235	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1236	if (err)
1237		goto done;
1238
1239	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1240			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1241	if (err)
1242		goto done;
1243
1244	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1245
1246done:
1247	return err;
1248}
1249
1250static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1251{
1252	int err;
1253
1254	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1255	if (err)
1256		goto done;
1257
1258	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1259	if (err)
1260		goto done;
1261
1262	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1263			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1264	if (err)
1265		goto done;
1266
1267	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1268
1269done:
1270	return err;
1271}
1272
1273static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1274{
1275	int err;
1276
1277	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1278	if (!err)
1279		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1280
1281	return err;
1282}
1283
1284static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1285{
1286	int err;
1287
1288	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1289	if (!err)
1290		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1291
1292	return err;
1293}
1294
1295static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1296{
1297	int err;
1298
1299	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1300			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1301			   MII_TG3_AUXCTL_SHDWSEL_MISC);
1302	if (!err)
1303		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1304
1305	return err;
1306}
1307
1308static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1309{
1310	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1311		set |= MII_TG3_AUXCTL_MISC_WREN;
1312
1313	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1314}
1315
1316static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1317{
1318	u32 val;
1319	int err;
1320
1321	err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1322
1323	if (err)
1324		return err;
1325
1326	if (enable)
1327		val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1328	else
1329		val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1330
1331	err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1332				   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1333
1334	return err;
1335}
1336
1337static int tg3_phy_shdw_write(struct tg3 *tp, int reg, u32 val)
1338{
1339	return tg3_writephy(tp, MII_TG3_MISC_SHDW,
1340			    reg | val | MII_TG3_MISC_SHDW_WREN);
1341}
1342
1343static int tg3_bmcr_reset(struct tg3 *tp)
1344{
1345	u32 phy_control;
1346	int limit, err;
1347
1348	/* OK, reset it, and poll the BMCR_RESET bit until it
1349	 * clears or we time out.
1350	 */
1351	phy_control = BMCR_RESET;
1352	err = tg3_writephy(tp, MII_BMCR, phy_control);
1353	if (err != 0)
1354		return -EBUSY;
1355
1356	limit = 5000;
1357	while (limit--) {
1358		err = tg3_readphy(tp, MII_BMCR, &phy_control);
1359		if (err != 0)
1360			return -EBUSY;
1361
1362		if ((phy_control & BMCR_RESET) == 0) {
1363			udelay(40);
1364			break;
1365		}
1366		udelay(10);
1367	}
1368	if (limit < 0)
1369		return -EBUSY;
1370
1371	return 0;
1372}
1373
1374static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1375{
1376	struct tg3 *tp = bp->priv;
1377	u32 val;
1378
1379	spin_lock_bh(&tp->lock);
1380
1381	if (__tg3_readphy(tp, mii_id, reg, &val))
1382		val = -EIO;
1383
1384	spin_unlock_bh(&tp->lock);
1385
1386	return val;
1387}
1388
1389static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1390{
1391	struct tg3 *tp = bp->priv;
1392	u32 ret = 0;
1393
1394	spin_lock_bh(&tp->lock);
1395
1396	if (__tg3_writephy(tp, mii_id, reg, val))
1397		ret = -EIO;
1398
1399	spin_unlock_bh(&tp->lock);
1400
1401	return ret;
1402}
1403
1404static void tg3_mdio_config_5785(struct tg3 *tp)
1405{
1406	u32 val;
1407	struct phy_device *phydev;
1408
1409	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
1410	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1411	case PHY_ID_BCM50610:
1412	case PHY_ID_BCM50610M:
1413		val = MAC_PHYCFG2_50610_LED_MODES;
1414		break;
1415	case PHY_ID_BCMAC131:
1416		val = MAC_PHYCFG2_AC131_LED_MODES;
1417		break;
1418	case PHY_ID_RTL8211C:
1419		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1420		break;
1421	case PHY_ID_RTL8201E:
1422		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1423		break;
1424	default:
1425		return;
1426	}
1427
1428	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1429		tw32(MAC_PHYCFG2, val);
1430
1431		val = tr32(MAC_PHYCFG1);
1432		val &= ~(MAC_PHYCFG1_RGMII_INT |
1433			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1434		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1435		tw32(MAC_PHYCFG1, val);
1436
1437		return;
1438	}
1439
1440	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1441		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1442		       MAC_PHYCFG2_FMODE_MASK_MASK |
1443		       MAC_PHYCFG2_GMODE_MASK_MASK |
1444		       MAC_PHYCFG2_ACT_MASK_MASK   |
1445		       MAC_PHYCFG2_QUAL_MASK_MASK |
1446		       MAC_PHYCFG2_INBAND_ENABLE;
1447
1448	tw32(MAC_PHYCFG2, val);
1449
1450	val = tr32(MAC_PHYCFG1);
1451	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1452		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1453	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1454		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1455			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1456		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1457			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1458	}
1459	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1460	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1461	tw32(MAC_PHYCFG1, val);
1462
1463	val = tr32(MAC_EXT_RGMII_MODE);
1464	val &= ~(MAC_RGMII_MODE_RX_INT_B |
1465		 MAC_RGMII_MODE_RX_QUALITY |
1466		 MAC_RGMII_MODE_RX_ACTIVITY |
1467		 MAC_RGMII_MODE_RX_ENG_DET |
1468		 MAC_RGMII_MODE_TX_ENABLE |
1469		 MAC_RGMII_MODE_TX_LOWPWR |
1470		 MAC_RGMII_MODE_TX_RESET);
1471	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1472		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1473			val |= MAC_RGMII_MODE_RX_INT_B |
1474			       MAC_RGMII_MODE_RX_QUALITY |
1475			       MAC_RGMII_MODE_RX_ACTIVITY |
1476			       MAC_RGMII_MODE_RX_ENG_DET;
1477		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1478			val |= MAC_RGMII_MODE_TX_ENABLE |
1479			       MAC_RGMII_MODE_TX_LOWPWR |
1480			       MAC_RGMII_MODE_TX_RESET;
1481	}
1482	tw32(MAC_EXT_RGMII_MODE, val);
1483}
1484
1485static void tg3_mdio_start(struct tg3 *tp)
1486{
1487	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1488	tw32_f(MAC_MI_MODE, tp->mi_mode);
1489	udelay(80);
1490
1491	if (tg3_flag(tp, MDIOBUS_INITED) &&
1492	    tg3_asic_rev(tp) == ASIC_REV_5785)
1493		tg3_mdio_config_5785(tp);
1494}
1495
1496static int tg3_mdio_init(struct tg3 *tp)
1497{
1498	int i;
1499	u32 reg;
1500	struct phy_device *phydev;
1501
1502	if (tg3_flag(tp, 5717_PLUS)) {
1503		u32 is_serdes;
1504
1505		tp->phy_addr = tp->pci_fn + 1;
1506
1507		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1508			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1509		else
1510			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1511				    TG3_CPMU_PHY_STRAP_IS_SERDES;
1512		if (is_serdes)
1513			tp->phy_addr += 7;
1514	} else if (tg3_flag(tp, IS_SSB_CORE) && tg3_flag(tp, ROBOSWITCH)) {
1515		int addr;
1516
1517		addr = ssb_gige_get_phyaddr(tp->pdev);
1518		if (addr < 0)
1519			return addr;
1520		tp->phy_addr = addr;
1521	} else
1522		tp->phy_addr = TG3_PHY_MII_ADDR;
1523
1524	tg3_mdio_start(tp);
1525
1526	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1527		return 0;
1528
1529	tp->mdio_bus = mdiobus_alloc();
1530	if (tp->mdio_bus == NULL)
1531		return -ENOMEM;
1532
1533	tp->mdio_bus->name     = "tg3 mdio bus";
1534	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1535		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1536	tp->mdio_bus->priv     = tp;
1537	tp->mdio_bus->parent   = &tp->pdev->dev;
1538	tp->mdio_bus->read     = &tg3_mdio_read;
1539	tp->mdio_bus->write    = &tg3_mdio_write;
1540	tp->mdio_bus->phy_mask = ~(1 << tp->phy_addr);
1541	tp->mdio_bus->irq      = &tp->mdio_irq[0];
1542
1543	for (i = 0; i < PHY_MAX_ADDR; i++)
1544		tp->mdio_bus->irq[i] = PHY_POLL;
1545
1546	/* The bus registration will look for all the PHYs on the mdio bus.
1547	 * Unfortunately, it does not ensure the PHY is powered up before
1548	 * accessing the PHY ID registers.  A chip reset is the
1549	 * quickest way to bring the device back to an operational state..
1550	 */
1551	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1552		tg3_bmcr_reset(tp);
1553
1554	i = mdiobus_register(tp->mdio_bus);
1555	if (i) {
1556		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1557		mdiobus_free(tp->mdio_bus);
1558		return i;
1559	}
1560
1561	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
1562
1563	if (!phydev || !phydev->drv) {
1564		dev_warn(&tp->pdev->dev, "No PHY devices\n");
1565		mdiobus_unregister(tp->mdio_bus);
1566		mdiobus_free(tp->mdio_bus);
1567		return -ENODEV;
1568	}
1569
1570	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1571	case PHY_ID_BCM57780:
1572		phydev->interface = PHY_INTERFACE_MODE_GMII;
1573		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1574		break;
1575	case PHY_ID_BCM50610:
1576	case PHY_ID_BCM50610M:
1577		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1578				     PHY_BRCM_RX_REFCLK_UNUSED |
1579				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
1580				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
1581		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1582			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1583		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1584			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1585		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1586			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1587		/* fallthru */
1588	case PHY_ID_RTL8211C:
1589		phydev->interface = PHY_INTERFACE_MODE_RGMII;
1590		break;
1591	case PHY_ID_RTL8201E:
1592	case PHY_ID_BCMAC131:
1593		phydev->interface = PHY_INTERFACE_MODE_MII;
1594		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1595		tp->phy_flags |= TG3_PHYFLG_IS_FET;
1596		break;
1597	}
1598
1599	tg3_flag_set(tp, MDIOBUS_INITED);
1600
1601	if (tg3_asic_rev(tp) == ASIC_REV_5785)
1602		tg3_mdio_config_5785(tp);
1603
1604	return 0;
1605}
1606
1607static void tg3_mdio_fini(struct tg3 *tp)
1608{
1609	if (tg3_flag(tp, MDIOBUS_INITED)) {
1610		tg3_flag_clear(tp, MDIOBUS_INITED);
1611		mdiobus_unregister(tp->mdio_bus);
1612		mdiobus_free(tp->mdio_bus);
1613	}
1614}
1615
1616/* tp->lock is held. */
1617static inline void tg3_generate_fw_event(struct tg3 *tp)
1618{
1619	u32 val;
1620
1621	val = tr32(GRC_RX_CPU_EVENT);
1622	val |= GRC_RX_CPU_DRIVER_EVENT;
1623	tw32_f(GRC_RX_CPU_EVENT, val);
1624
1625	tp->last_event_jiffies = jiffies;
1626}
1627
1628#define TG3_FW_EVENT_TIMEOUT_USEC 2500
1629
1630/* tp->lock is held. */
1631static void tg3_wait_for_event_ack(struct tg3 *tp)
1632{
1633	int i;
1634	unsigned int delay_cnt;
1635	long time_remain;
1636
1637	/* If enough time has passed, no wait is necessary. */
1638	time_remain = (long)(tp->last_event_jiffies + 1 +
1639		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1640		      (long)jiffies;
1641	if (time_remain < 0)
1642		return;
1643
1644	/* Check if we can shorten the wait time. */
1645	delay_cnt = jiffies_to_usecs(time_remain);
1646	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1647		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1648	delay_cnt = (delay_cnt >> 3) + 1;
1649
1650	for (i = 0; i < delay_cnt; i++) {
1651		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1652			break;
1653		if (pci_channel_offline(tp->pdev))
1654			break;
1655
1656		udelay(8);
1657	}
1658}
1659
1660/* tp->lock is held. */
1661static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1662{
1663	u32 reg, val;
1664
1665	val = 0;
1666	if (!tg3_readphy(tp, MII_BMCR, &reg))
1667		val = reg << 16;
1668	if (!tg3_readphy(tp, MII_BMSR, &reg))
1669		val |= (reg & 0xffff);
1670	*data++ = val;
1671
1672	val = 0;
1673	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1674		val = reg << 16;
1675	if (!tg3_readphy(tp, MII_LPA, &reg))
1676		val |= (reg & 0xffff);
1677	*data++ = val;
1678
1679	val = 0;
1680	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1681		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1682			val = reg << 16;
1683		if (!tg3_readphy(tp, MII_STAT1000, &reg))
1684			val |= (reg & 0xffff);
1685	}
1686	*data++ = val;
1687
1688	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1689		val = reg << 16;
1690	else
1691		val = 0;
1692	*data++ = val;
1693}
1694
1695/* tp->lock is held. */
1696static void tg3_ump_link_report(struct tg3 *tp)
1697{
1698	u32 data[4];
1699
1700	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1701		return;
1702
1703	tg3_phy_gather_ump_data(tp, data);
1704
1705	tg3_wait_for_event_ack(tp);
1706
1707	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1708	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1709	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1710	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1711	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1712	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1713
1714	tg3_generate_fw_event(tp);
1715}
1716
1717/* tp->lock is held. */
1718static void tg3_stop_fw(struct tg3 *tp)
1719{
1720	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1721		/* Wait for RX cpu to ACK the previous event. */
1722		tg3_wait_for_event_ack(tp);
1723
1724		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1725
1726		tg3_generate_fw_event(tp);
1727
1728		/* Wait for RX cpu to ACK this event. */
1729		tg3_wait_for_event_ack(tp);
1730	}
1731}
1732
1733/* tp->lock is held. */
1734static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1735{
1736	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1737		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1738
1739	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1740		switch (kind) {
1741		case RESET_KIND_INIT:
1742			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1743				      DRV_STATE_START);
1744			break;
1745
1746		case RESET_KIND_SHUTDOWN:
1747			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1748				      DRV_STATE_UNLOAD);
1749			break;
1750
1751		case RESET_KIND_SUSPEND:
1752			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1753				      DRV_STATE_SUSPEND);
1754			break;
1755
1756		default:
1757			break;
1758		}
1759	}
1760}
1761
1762/* tp->lock is held. */
1763static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1764{
1765	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1766		switch (kind) {
1767		case RESET_KIND_INIT:
1768			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1769				      DRV_STATE_START_DONE);
1770			break;
1771
1772		case RESET_KIND_SHUTDOWN:
1773			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1774				      DRV_STATE_UNLOAD_DONE);
1775			break;
1776
1777		default:
1778			break;
1779		}
1780	}
1781}
1782
1783/* tp->lock is held. */
1784static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1785{
1786	if (tg3_flag(tp, ENABLE_ASF)) {
1787		switch (kind) {
1788		case RESET_KIND_INIT:
1789			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1790				      DRV_STATE_START);
1791			break;
1792
1793		case RESET_KIND_SHUTDOWN:
1794			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1795				      DRV_STATE_UNLOAD);
1796			break;
1797
1798		case RESET_KIND_SUSPEND:
1799			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1800				      DRV_STATE_SUSPEND);
1801			break;
1802
1803		default:
1804			break;
1805		}
1806	}
1807}
1808
1809static int tg3_poll_fw(struct tg3 *tp)
1810{
1811	int i;
1812	u32 val;
1813
1814	if (tg3_flag(tp, NO_FWARE_REPORTED))
1815		return 0;
1816
1817	if (tg3_flag(tp, IS_SSB_CORE)) {
1818		/* We don't use firmware. */
1819		return 0;
1820	}
1821
1822	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1823		/* Wait up to 20ms for init done. */
1824		for (i = 0; i < 200; i++) {
1825			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1826				return 0;
1827			if (pci_channel_offline(tp->pdev))
1828				return -ENODEV;
1829
1830			udelay(100);
1831		}
1832		return -ENODEV;
1833	}
1834
1835	/* Wait for firmware initialization to complete. */
1836	for (i = 0; i < 100000; i++) {
1837		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1838		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1839			break;
1840		if (pci_channel_offline(tp->pdev)) {
1841			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1842				tg3_flag_set(tp, NO_FWARE_REPORTED);
1843				netdev_info(tp->dev, "No firmware running\n");
1844			}
1845
1846			break;
1847		}
1848
1849		udelay(10);
1850	}
1851
1852	/* Chip might not be fitted with firmware.  Some Sun onboard
1853	 * parts are configured like that.  So don't signal the timeout
1854	 * of the above loop as an error, but do report the lack of
1855	 * running firmware once.
1856	 */
1857	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1858		tg3_flag_set(tp, NO_FWARE_REPORTED);
1859
1860		netdev_info(tp->dev, "No firmware running\n");
1861	}
1862
1863	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1864		/* The 57765 A0 needs a little more
1865		 * time to do some important work.
1866		 */
1867		mdelay(10);
1868	}
1869
1870	return 0;
1871}
1872
1873static void tg3_link_report(struct tg3 *tp)
1874{
1875	if (!netif_carrier_ok(tp->dev)) {
1876		netif_info(tp, link, tp->dev, "Link is down\n");
1877		tg3_ump_link_report(tp);
1878	} else if (netif_msg_link(tp)) {
1879		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1880			    (tp->link_config.active_speed == SPEED_1000 ?
1881			     1000 :
1882			     (tp->link_config.active_speed == SPEED_100 ?
1883			      100 : 10)),
1884			    (tp->link_config.active_duplex == DUPLEX_FULL ?
1885			     "full" : "half"));
1886
1887		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1888			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1889			    "on" : "off",
1890			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1891			    "on" : "off");
1892
1893		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1894			netdev_info(tp->dev, "EEE is %s\n",
1895				    tp->setlpicnt ? "enabled" : "disabled");
1896
1897		tg3_ump_link_report(tp);
1898	}
1899
1900	tp->link_up = netif_carrier_ok(tp->dev);
1901}
1902
1903static u32 tg3_decode_flowctrl_1000T(u32 adv)
1904{
1905	u32 flowctrl = 0;
1906
1907	if (adv & ADVERTISE_PAUSE_CAP) {
1908		flowctrl |= FLOW_CTRL_RX;
1909		if (!(adv & ADVERTISE_PAUSE_ASYM))
1910			flowctrl |= FLOW_CTRL_TX;
1911	} else if (adv & ADVERTISE_PAUSE_ASYM)
1912		flowctrl |= FLOW_CTRL_TX;
1913
1914	return flowctrl;
1915}
1916
1917static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1918{
1919	u16 miireg;
1920
1921	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1922		miireg = ADVERTISE_1000XPAUSE;
1923	else if (flow_ctrl & FLOW_CTRL_TX)
1924		miireg = ADVERTISE_1000XPSE_ASYM;
1925	else if (flow_ctrl & FLOW_CTRL_RX)
1926		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1927	else
1928		miireg = 0;
1929
1930	return miireg;
1931}
1932
1933static u32 tg3_decode_flowctrl_1000X(u32 adv)
1934{
1935	u32 flowctrl = 0;
1936
1937	if (adv & ADVERTISE_1000XPAUSE) {
1938		flowctrl |= FLOW_CTRL_RX;
1939		if (!(adv & ADVERTISE_1000XPSE_ASYM))
1940			flowctrl |= FLOW_CTRL_TX;
1941	} else if (adv & ADVERTISE_1000XPSE_ASYM)
1942		flowctrl |= FLOW_CTRL_TX;
1943
1944	return flowctrl;
1945}
1946
1947static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1948{
1949	u8 cap = 0;
1950
1951	if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1952		cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1953	} else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1954		if (lcladv & ADVERTISE_1000XPAUSE)
1955			cap = FLOW_CTRL_RX;
1956		if (rmtadv & ADVERTISE_1000XPAUSE)
1957			cap = FLOW_CTRL_TX;
1958	}
1959
1960	return cap;
1961}
1962
1963static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1964{
1965	u8 autoneg;
1966	u8 flowctrl = 0;
1967	u32 old_rx_mode = tp->rx_mode;
1968	u32 old_tx_mode = tp->tx_mode;
1969
1970	if (tg3_flag(tp, USE_PHYLIB))
1971		autoneg = tp->mdio_bus->phy_map[tp->phy_addr]->autoneg;
1972	else
1973		autoneg = tp->link_config.autoneg;
1974
1975	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1976		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1977			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1978		else
1979			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1980	} else
1981		flowctrl = tp->link_config.flowctrl;
1982
1983	tp->link_config.active_flowctrl = flowctrl;
1984
1985	if (flowctrl & FLOW_CTRL_RX)
1986		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1987	else
1988		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1989
1990	if (old_rx_mode != tp->rx_mode)
1991		tw32_f(MAC_RX_MODE, tp->rx_mode);
1992
1993	if (flowctrl & FLOW_CTRL_TX)
1994		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1995	else
1996		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1997
1998	if (old_tx_mode != tp->tx_mode)
1999		tw32_f(MAC_TX_MODE, tp->tx_mode);
2000}
2001
2002static void tg3_adjust_link(struct net_device *dev)
2003{
2004	u8 oldflowctrl, linkmesg = 0;
2005	u32 mac_mode, lcl_adv, rmt_adv;
2006	struct tg3 *tp = netdev_priv(dev);
2007	struct phy_device *phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2008
2009	spin_lock_bh(&tp->lock);
2010
2011	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2012				    MAC_MODE_HALF_DUPLEX);
2013
2014	oldflowctrl = tp->link_config.active_flowctrl;
2015
2016	if (phydev->link) {
2017		lcl_adv = 0;
2018		rmt_adv = 0;
2019
2020		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2021			mac_mode |= MAC_MODE_PORT_MODE_MII;
2022		else if (phydev->speed == SPEED_1000 ||
2023			 tg3_asic_rev(tp) != ASIC_REV_5785)
2024			mac_mode |= MAC_MODE_PORT_MODE_GMII;
2025		else
2026			mac_mode |= MAC_MODE_PORT_MODE_MII;
2027
2028		if (phydev->duplex == DUPLEX_HALF)
2029			mac_mode |= MAC_MODE_HALF_DUPLEX;
2030		else {
2031			lcl_adv = mii_advertise_flowctrl(
2032				  tp->link_config.flowctrl);
2033
2034			if (phydev->pause)
2035				rmt_adv = LPA_PAUSE_CAP;
2036			if (phydev->asym_pause)
2037				rmt_adv |= LPA_PAUSE_ASYM;
2038		}
2039
2040		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2041	} else
2042		mac_mode |= MAC_MODE_PORT_MODE_GMII;
2043
2044	if (mac_mode != tp->mac_mode) {
2045		tp->mac_mode = mac_mode;
2046		tw32_f(MAC_MODE, tp->mac_mode);
2047		udelay(40);
2048	}
2049
2050	if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2051		if (phydev->speed == SPEED_10)
2052			tw32(MAC_MI_STAT,
2053			     MAC_MI_STAT_10MBPS_MODE |
2054			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2055		else
2056			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2057	}
2058
2059	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2060		tw32(MAC_TX_LENGTHS,
2061		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2062		      (6 << TX_LENGTHS_IPG_SHIFT) |
2063		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2064	else
2065		tw32(MAC_TX_LENGTHS,
2066		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2067		      (6 << TX_LENGTHS_IPG_SHIFT) |
2068		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2069
2070	if (phydev->link != tp->old_link ||
2071	    phydev->speed != tp->link_config.active_speed ||
2072	    phydev->duplex != tp->link_config.active_duplex ||
2073	    oldflowctrl != tp->link_config.active_flowctrl)
2074		linkmesg = 1;
2075
2076	tp->old_link = phydev->link;
2077	tp->link_config.active_speed = phydev->speed;
2078	tp->link_config.active_duplex = phydev->duplex;
2079
2080	spin_unlock_bh(&tp->lock);
2081
2082	if (linkmesg)
2083		tg3_link_report(tp);
2084}
2085
2086static int tg3_phy_init(struct tg3 *tp)
2087{
2088	struct phy_device *phydev;
2089
2090	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2091		return 0;
2092
2093	/* Bring the PHY back to a known state. */
2094	tg3_bmcr_reset(tp);
2095
2096	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2097
2098	/* Attach the MAC to the PHY. */
2099	phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2100			     tg3_adjust_link, phydev->interface);
2101	if (IS_ERR(phydev)) {
2102		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2103		return PTR_ERR(phydev);
2104	}
2105
2106	/* Mask with MAC supported features. */
2107	switch (phydev->interface) {
2108	case PHY_INTERFACE_MODE_GMII:
2109	case PHY_INTERFACE_MODE_RGMII:
2110		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2111			phydev->supported &= (PHY_GBIT_FEATURES |
2112					      SUPPORTED_Pause |
2113					      SUPPORTED_Asym_Pause);
2114			break;
2115		}
2116		/* fallthru */
2117	case PHY_INTERFACE_MODE_MII:
2118		phydev->supported &= (PHY_BASIC_FEATURES |
2119				      SUPPORTED_Pause |
2120				      SUPPORTED_Asym_Pause);
2121		break;
2122	default:
2123		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2124		return -EINVAL;
2125	}
2126
2127	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2128
2129	phydev->advertising = phydev->supported;
2130
2131	return 0;
2132}
2133
2134static void tg3_phy_start(struct tg3 *tp)
2135{
2136	struct phy_device *phydev;
2137
2138	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2139		return;
2140
2141	phydev = tp->mdio_bus->phy_map[tp->phy_addr];
2142
2143	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2144		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2145		phydev->speed = tp->link_config.speed;
2146		phydev->duplex = tp->link_config.duplex;
2147		phydev->autoneg = tp->link_config.autoneg;
2148		phydev->advertising = tp->link_config.advertising;
2149	}
2150
2151	phy_start(phydev);
2152
2153	phy_start_aneg(phydev);
2154}
2155
2156static void tg3_phy_stop(struct tg3 *tp)
2157{
2158	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2159		return;
2160
2161	phy_stop(tp->mdio_bus->phy_map[tp->phy_addr]);
2162}
2163
2164static void tg3_phy_fini(struct tg3 *tp)
2165{
2166	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2167		phy_disconnect(tp->mdio_bus->phy_map[tp->phy_addr]);
2168		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2169	}
2170}
2171
2172static int tg3_phy_set_extloopbk(struct tg3 *tp)
2173{
2174	int err;
2175	u32 val;
2176
2177	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2178		return 0;
2179
2180	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2181		/* Cannot do read-modify-write on 5401 */
2182		err = tg3_phy_auxctl_write(tp,
2183					   MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2184					   MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2185					   0x4c20);
2186		goto done;
2187	}
2188
2189	err = tg3_phy_auxctl_read(tp,
2190				  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2191	if (err)
2192		return err;
2193
2194	val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2195	err = tg3_phy_auxctl_write(tp,
2196				   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2197
2198done:
2199	return err;
2200}
2201
2202static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2203{
2204	u32 phytest;
2205
2206	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2207		u32 phy;
2208
2209		tg3_writephy(tp, MII_TG3_FET_TEST,
2210			     phytest | MII_TG3_FET_SHADOW_EN);
2211		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2212			if (enable)
2213				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2214			else
2215				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2216			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2217		}
2218		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2219	}
2220}
2221
2222static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2223{
2224	u32 reg;
2225
2226	if (!tg3_flag(tp, 5705_PLUS) ||
2227	    (tg3_flag(tp, 5717_PLUS) &&
2228	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2229		return;
2230
2231	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2232		tg3_phy_fet_toggle_apd(tp, enable);
2233		return;
2234	}
2235
2236	reg = MII_TG3_MISC_SHDW_SCR5_LPED |
2237	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2238	      MII_TG3_MISC_SHDW_SCR5_SDTL |
2239	      MII_TG3_MISC_SHDW_SCR5_C125OE;
2240	if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2241		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2242
2243	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_SCR5_SEL, reg);
2244
2245
2246	reg = MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2247	if (enable)
2248		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2249
2250	tg3_phy_shdw_write(tp, MII_TG3_MISC_SHDW_APD_SEL, reg);
2251}
2252
2253static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2254{
2255	u32 phy;
2256
2257	if (!tg3_flag(tp, 5705_PLUS) ||
2258	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2259		return;
2260
2261	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2262		u32 ephy;
2263
2264		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2265			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2266
2267			tg3_writephy(tp, MII_TG3_FET_TEST,
2268				     ephy | MII_TG3_FET_SHADOW_EN);
2269			if (!tg3_readphy(tp, reg, &phy)) {
2270				if (enable)
2271					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2272				else
2273					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2274				tg3_writephy(tp, reg, phy);
2275			}
2276			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2277		}
2278	} else {
2279		int ret;
2280
2281		ret = tg3_phy_auxctl_read(tp,
2282					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2283		if (!ret) {
2284			if (enable)
2285				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2286			else
2287				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2288			tg3_phy_auxctl_write(tp,
2289					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2290		}
2291	}
2292}
2293
2294static void tg3_phy_set_wirespeed(struct tg3 *tp)
2295{
2296	int ret;
2297	u32 val;
2298
2299	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2300		return;
2301
2302	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2303	if (!ret)
2304		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2305				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2306}
2307
2308static void tg3_phy_apply_otp(struct tg3 *tp)
2309{
2310	u32 otp, phy;
2311
2312	if (!tp->phy_otp)
2313		return;
2314
2315	otp = tp->phy_otp;
2316
2317	if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2318		return;
2319
2320	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2321	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2322	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2323
2324	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2325	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2326	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2327
2328	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2329	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2330	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2331
2332	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2333	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2334
2335	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2336	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2337
2338	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2339	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2340	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2341
2342	tg3_phy_toggle_auxctl_smdsp(tp, false);
2343}
2344
2345static void tg3_eee_pull_config(struct tg3 *tp, struct ethtool_eee *eee)
2346{
2347	u32 val;
2348	struct ethtool_eee *dest = &tp->eee;
2349
2350	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2351		return;
2352
2353	if (eee)
2354		dest = eee;
2355
2356	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, TG3_CL45_D7_EEERES_STAT, &val))
2357		return;
2358
2359	/* Pull eee_active */
2360	if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2361	    val == TG3_CL45_D7_EEERES_STAT_LP_100TX) {
2362		dest->eee_active = 1;
2363	} else
2364		dest->eee_active = 0;
2365
2366	/* Pull lp advertised settings */
2367	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE, &val))
2368		return;
2369	dest->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2370
2371	/* Pull advertised and eee_enabled settings */
2372	if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
2373		return;
2374	dest->eee_enabled = !!val;
2375	dest->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
2376
2377	/* Pull tx_lpi_enabled */
2378	val = tr32(TG3_CPMU_EEE_MODE);
2379	dest->tx_lpi_enabled = !!(val & TG3_CPMU_EEEMD_LPI_IN_TX);
2380
2381	/* Pull lpi timer value */
2382	dest->tx_lpi_timer = tr32(TG3_CPMU_EEE_DBTMR1) & 0xffff;
2383}
2384
2385static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2386{
2387	u32 val;
2388
2389	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2390		return;
2391
2392	tp->setlpicnt = 0;
2393
2394	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2395	    current_link_up &&
2396	    tp->link_config.active_duplex == DUPLEX_FULL &&
2397	    (tp->link_config.active_speed == SPEED_100 ||
2398	     tp->link_config.active_speed == SPEED_1000)) {
2399		u32 eeectl;
2400
2401		if (tp->link_config.active_speed == SPEED_1000)
2402			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2403		else
2404			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2405
2406		tw32(TG3_CPMU_EEE_CTRL, eeectl);
2407
2408		tg3_eee_pull_config(tp, NULL);
2409		if (tp->eee.eee_active)
2410			tp->setlpicnt = 2;
2411	}
2412
2413	if (!tp->setlpicnt) {
2414		if (current_link_up &&
2415		   !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2416			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2417			tg3_phy_toggle_auxctl_smdsp(tp, false);
2418		}
2419
2420		val = tr32(TG3_CPMU_EEE_MODE);
2421		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2422	}
2423}
2424
2425static void tg3_phy_eee_enable(struct tg3 *tp)
2426{
2427	u32 val;
2428
2429	if (tp->link_config.active_speed == SPEED_1000 &&
2430	    (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2431	     tg3_asic_rev(tp) == ASIC_REV_5719 ||
2432	     tg3_flag(tp, 57765_CLASS)) &&
2433	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2434		val = MII_TG3_DSP_TAP26_ALNOKO |
2435		      MII_TG3_DSP_TAP26_RMRXSTO;
2436		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2437		tg3_phy_toggle_auxctl_smdsp(tp, false);
2438	}
2439
2440	val = tr32(TG3_CPMU_EEE_MODE);
2441	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2442}
2443
2444static int tg3_wait_macro_done(struct tg3 *tp)
2445{
2446	int limit = 100;
2447
2448	while (limit--) {
2449		u32 tmp32;
2450
2451		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2452			if ((tmp32 & 0x1000) == 0)
2453				break;
2454		}
2455	}
2456	if (limit < 0)
2457		return -EBUSY;
2458
2459	return 0;
2460}
2461
2462static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2463{
2464	static const u32 test_pat[4][6] = {
2465	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2466	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2467	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2468	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2469	};
2470	int chan;
2471
2472	for (chan = 0; chan < 4; chan++) {
2473		int i;
2474
2475		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2476			     (chan * 0x2000) | 0x0200);
2477		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2478
2479		for (i = 0; i < 6; i++)
2480			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2481				     test_pat[chan][i]);
2482
2483		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2484		if (tg3_wait_macro_done(tp)) {
2485			*resetp = 1;
2486			return -EBUSY;
2487		}
2488
2489		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2490			     (chan * 0x2000) | 0x0200);
2491		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2492		if (tg3_wait_macro_done(tp)) {
2493			*resetp = 1;
2494			return -EBUSY;
2495		}
2496
2497		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2498		if (tg3_wait_macro_done(tp)) {
2499			*resetp = 1;
2500			return -EBUSY;
2501		}
2502
2503		for (i = 0; i < 6; i += 2) {
2504			u32 low, high;
2505
2506			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2507			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2508			    tg3_wait_macro_done(tp)) {
2509				*resetp = 1;
2510				return -EBUSY;
2511			}
2512			low &= 0x7fff;
2513			high &= 0x000f;
2514			if (low != test_pat[chan][i] ||
2515			    high != test_pat[chan][i+1]) {
2516				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2517				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2518				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2519
2520				return -EBUSY;
2521			}
2522		}
2523	}
2524
2525	return 0;
2526}
2527
2528static int tg3_phy_reset_chanpat(struct tg3 *tp)
2529{
2530	int chan;
2531
2532	for (chan = 0; chan < 4; chan++) {
2533		int i;
2534
2535		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2536			     (chan * 0x2000) | 0x0200);
2537		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2538		for (i = 0; i < 6; i++)
2539			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2540		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2541		if (tg3_wait_macro_done(tp))
2542			return -EBUSY;
2543	}
2544
2545	return 0;
2546}
2547
2548static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2549{
2550	u32 reg32, phy9_orig;
2551	int retries, do_phy_reset, err;
2552
2553	retries = 10;
2554	do_phy_reset = 1;
2555	do {
2556		if (do_phy_reset) {
2557			err = tg3_bmcr_reset(tp);
2558			if (err)
2559				return err;
2560			do_phy_reset = 0;
2561		}
2562
2563		/* Disable transmitter and interrupt.  */
2564		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2565			continue;
2566
2567		reg32 |= 0x3000;
2568		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2569
2570		/* Set full-duplex, 1000 mbps.  */
2571		tg3_writephy(tp, MII_BMCR,
2572			     BMCR_FULLDPLX | BMCR_SPEED1000);
2573
2574		/* Set to master mode.  */
2575		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2576			continue;
2577
2578		tg3_writephy(tp, MII_CTRL1000,
2579			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2580
2581		err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2582		if (err)
2583			return err;
2584
2585		/* Block the PHY control access.  */
2586		tg3_phydsp_write(tp, 0x8005, 0x0800);
2587
2588		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2589		if (!err)
2590			break;
2591	} while (--retries);
2592
2593	err = tg3_phy_reset_chanpat(tp);
2594	if (err)
2595		return err;
2596
2597	tg3_phydsp_write(tp, 0x8005, 0x0000);
2598
2599	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2600	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2601
2602	tg3_phy_toggle_auxctl_smdsp(tp, false);
2603
2604	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2605
2606	err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
2607	if (err)
2608		return err;
2609
2610	reg32 &= ~0x3000;
2611	tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2612
2613	return 0;
2614}
2615
2616static void tg3_carrier_off(struct tg3 *tp)
2617{
2618	netif_carrier_off(tp->dev);
2619	tp->link_up = false;
2620}
2621
2622static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2623{
2624	if (tg3_flag(tp, ENABLE_ASF))
2625		netdev_warn(tp->dev,
2626			    "Management side-band traffic will be interrupted during phy settings change\n");
2627}
2628
2629/* This will reset the tigon3 PHY if there is no valid
2630 * link unless the FORCE argument is non-zero.
2631 */
2632static int tg3_phy_reset(struct tg3 *tp)
2633{
2634	u32 val, cpmuctrl;
2635	int err;
2636
2637	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2638		val = tr32(GRC_MISC_CFG);
2639		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2640		udelay(40);
2641	}
2642	err  = tg3_readphy(tp, MII_BMSR, &val);
2643	err |= tg3_readphy(tp, MII_BMSR, &val);
2644	if (err != 0)
2645		return -EBUSY;
2646
2647	if (netif_running(tp->dev) && tp->link_up) {
2648		netif_carrier_off(tp->dev);
2649		tg3_link_report(tp);
2650	}
2651
2652	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2653	    tg3_asic_rev(tp) == ASIC_REV_5704 ||
2654	    tg3_asic_rev(tp) == ASIC_REV_5705) {
2655		err = tg3_phy_reset_5703_4_5(tp);
2656		if (err)
2657			return err;
2658		goto out;
2659	}
2660
2661	cpmuctrl = 0;
2662	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2663	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2664		cpmuctrl = tr32(TG3_CPMU_CTRL);
2665		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2666			tw32(TG3_CPMU_CTRL,
2667			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2668	}
2669
2670	err = tg3_bmcr_reset(tp);
2671	if (err)
2672		return err;
2673
2674	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2675		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2676		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2677
2678		tw32(TG3_CPMU_CTRL, cpmuctrl);
2679	}
2680
2681	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2682	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2683		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2684		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2685		    CPMU_LSPD_1000MB_MACCLK_12_5) {
2686			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2687			udelay(40);
2688			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2689		}
2690	}
2691
2692	if (tg3_flag(tp, 5717_PLUS) &&
2693	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2694		return 0;
2695
2696	tg3_phy_apply_otp(tp);
2697
2698	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2699		tg3_phy_toggle_apd(tp, true);
2700	else
2701		tg3_phy_toggle_apd(tp, false);
2702
2703out:
2704	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2705	    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2706		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2707		tg3_phydsp_write(tp, 0x000a, 0x0323);
2708		tg3_phy_toggle_auxctl_smdsp(tp, false);
2709	}
2710
2711	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2712		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2713		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2714	}
2715
2716	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2717		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2718			tg3_phydsp_write(tp, 0x000a, 0x310b);
2719			tg3_phydsp_write(tp, 0x201f, 0x9506);
2720			tg3_phydsp_write(tp, 0x401f, 0x14e2);
2721			tg3_phy_toggle_auxctl_smdsp(tp, false);
2722		}
2723	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2724		if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2725			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2726			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2727				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2728				tg3_writephy(tp, MII_TG3_TEST1,
2729					     MII_TG3_TEST1_TRIM_EN | 0x4);
2730			} else
2731				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2732
2733			tg3_phy_toggle_auxctl_smdsp(tp, false);
2734		}
2735	}
2736
2737	/* Set Extended packet length bit (bit 14) on all chips that */
2738	/* support jumbo frames */
2739	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2740		/* Cannot do read-modify-write on 5401 */
2741		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2742	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2743		/* Set bit 14 with read-modify-write to preserve other bits */
2744		err = tg3_phy_auxctl_read(tp,
2745					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2746		if (!err)
2747			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2748					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2749	}
2750
2751	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
2752	 * jumbo frames transmission.
2753	 */
2754	if (tg3_flag(tp, JUMBO_CAPABLE)) {
2755		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2756			tg3_writephy(tp, MII_TG3_EXT_CTRL,
2757				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2758	}
2759
2760	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2761		/* adjust output voltage */
2762		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2763	}
2764
2765	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2766		tg3_phydsp_write(tp, 0xffb, 0x4000);
2767
2768	tg3_phy_toggle_automdix(tp, true);
2769	tg3_phy_set_wirespeed(tp);
2770	return 0;
2771}
2772
2773#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
2774#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
2775#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
2776					  TG3_GPIO_MSG_NEED_VAUX)
2777#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2778	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2779	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2780	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2781	 (TG3_GPIO_MSG_DRVR_PRES << 12))
2782
2783#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2784	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2785	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2786	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2787	 (TG3_GPIO_MSG_NEED_VAUX << 12))
2788
2789static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2790{
2791	u32 status, shift;
2792
2793	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2794	    tg3_asic_rev(tp) == ASIC_REV_5719)
2795		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2796	else
2797		status = tr32(TG3_CPMU_DRV_STATUS);
2798
2799	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2800	status &= ~(TG3_GPIO_MSG_MASK << shift);
2801	status |= (newstat << shift);
2802
2803	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2804	    tg3_asic_rev(tp) == ASIC_REV_5719)
2805		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2806	else
2807		tw32(TG3_CPMU_DRV_STATUS, status);
2808
2809	return status >> TG3_APE_GPIO_MSG_SHIFT;
2810}
2811
2812static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2813{
2814	if (!tg3_flag(tp, IS_NIC))
2815		return 0;
2816
2817	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2818	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2819	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2820		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2821			return -EIO;
2822
2823		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2824
2825		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2826			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2827
2828		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2829	} else {
2830		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2831			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2832	}
2833
2834	return 0;
2835}
2836
2837static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2838{
2839	u32 grc_local_ctrl;
2840
2841	if (!tg3_flag(tp, IS_NIC) ||
2842	    tg3_asic_rev(tp) == ASIC_REV_5700 ||
2843	    tg3_asic_rev(tp) == ASIC_REV_5701)
2844		return;
2845
2846	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2847
2848	tw32_wait_f(GRC_LOCAL_CTRL,
2849		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2850		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852	tw32_wait_f(GRC_LOCAL_CTRL,
2853		    grc_local_ctrl,
2854		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2855
2856	tw32_wait_f(GRC_LOCAL_CTRL,
2857		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2858		    TG3_GRC_LCLCTL_PWRSW_DELAY);
2859}
2860
2861static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2862{
2863	if (!tg3_flag(tp, IS_NIC))
2864		return;
2865
2866	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2867	    tg3_asic_rev(tp) == ASIC_REV_5701) {
2868		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2869			    (GRC_LCLCTRL_GPIO_OE0 |
2870			     GRC_LCLCTRL_GPIO_OE1 |
2871			     GRC_LCLCTRL_GPIO_OE2 |
2872			     GRC_LCLCTRL_GPIO_OUTPUT0 |
2873			     GRC_LCLCTRL_GPIO_OUTPUT1),
2874			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2875	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2876		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2877		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2878		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2879				     GRC_LCLCTRL_GPIO_OE1 |
2880				     GRC_LCLCTRL_GPIO_OE2 |
2881				     GRC_LCLCTRL_GPIO_OUTPUT0 |
2882				     GRC_LCLCTRL_GPIO_OUTPUT1 |
2883				     tp->grc_local_ctrl;
2884		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2885			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2886
2887		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2888		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2889			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2890
2891		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2892		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2893			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2894	} else {
2895		u32 no_gpio2;
2896		u32 grc_local_ctrl = 0;
2897
2898		/* Workaround to prevent overdrawing Amps. */
2899		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2900			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2901			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2902				    grc_local_ctrl,
2903				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2904		}
2905
2906		/* On 5753 and variants, GPIO2 cannot be used. */
2907		no_gpio2 = tp->nic_sram_data_cfg &
2908			   NIC_SRAM_DATA_CFG_NO_GPIO2;
2909
2910		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2911				  GRC_LCLCTRL_GPIO_OE1 |
2912				  GRC_LCLCTRL_GPIO_OE2 |
2913				  GRC_LCLCTRL_GPIO_OUTPUT1 |
2914				  GRC_LCLCTRL_GPIO_OUTPUT2;
2915		if (no_gpio2) {
2916			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2917					    GRC_LCLCTRL_GPIO_OUTPUT2);
2918		}
2919		tw32_wait_f(GRC_LOCAL_CTRL,
2920			    tp->grc_local_ctrl | grc_local_ctrl,
2921			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2922
2923		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2924
2925		tw32_wait_f(GRC_LOCAL_CTRL,
2926			    tp->grc_local_ctrl | grc_local_ctrl,
2927			    TG3_GRC_LCLCTL_PWRSW_DELAY);
2928
2929		if (!no_gpio2) {
2930			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2931			tw32_wait_f(GRC_LOCAL_CTRL,
2932				    tp->grc_local_ctrl | grc_local_ctrl,
2933				    TG3_GRC_LCLCTL_PWRSW_DELAY);
2934		}
2935	}
2936}
2937
2938static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2939{
2940	u32 msg = 0;
2941
2942	/* Serialize power state transitions */
2943	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2944		return;
2945
2946	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2947		msg = TG3_GPIO_MSG_NEED_VAUX;
2948
2949	msg = tg3_set_function_status(tp, msg);
2950
2951	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2952		goto done;
2953
2954	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2955		tg3_pwrsrc_switch_to_vaux(tp);
2956	else
2957		tg3_pwrsrc_die_with_vmain(tp);
2958
2959done:
2960	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2961}
2962
2963static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2964{
2965	bool need_vaux = false;
2966
2967	/* The GPIOs do something completely different on 57765. */
2968	if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2969		return;
2970
2971	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2972	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
2973	    tg3_asic_rev(tp) == ASIC_REV_5720) {
2974		tg3_frob_aux_power_5717(tp, include_wol ?
2975					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2976		return;
2977	}
2978
2979	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2980		struct net_device *dev_peer;
2981
2982		dev_peer = pci_get_drvdata(tp->pdev_peer);
2983
2984		/* remove_one() may have been run on the peer. */
2985		if (dev_peer) {
2986			struct tg3 *tp_peer = netdev_priv(dev_peer);
2987
2988			if (tg3_flag(tp_peer, INIT_COMPLETE))
2989				return;
2990
2991			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2992			    tg3_flag(tp_peer, ENABLE_ASF))
2993				need_vaux = true;
2994		}
2995	}
2996
2997	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2998	    tg3_flag(tp, ENABLE_ASF))
2999		need_vaux = true;
3000
3001	if (need_vaux)
3002		tg3_pwrsrc_switch_to_vaux(tp);
3003	else
3004		tg3_pwrsrc_die_with_vmain(tp);
3005}
3006
3007static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
3008{
3009	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
3010		return 1;
3011	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
3012		if (speed != SPEED_10)
3013			return 1;
3014	} else if (speed == SPEED_10)
3015		return 1;
3016
3017	return 0;
3018}
3019
3020static bool tg3_phy_power_bug(struct tg3 *tp)
3021{
3022	switch (tg3_asic_rev(tp)) {
3023	case ASIC_REV_5700:
3024	case ASIC_REV_5704:
3025		return true;
3026	case ASIC_REV_5780:
3027		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3028			return true;
3029		return false;
3030	case ASIC_REV_5717:
3031		if (!tp->pci_fn)
3032			return true;
3033		return false;
3034	case ASIC_REV_5719:
3035	case ASIC_REV_5720:
3036		if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3037		    !tp->pci_fn)
3038			return true;
3039		return false;
3040	}
3041
3042	return false;
3043}
3044
3045static bool tg3_phy_led_bug(struct tg3 *tp)
3046{
3047	switch (tg3_asic_rev(tp)) {
3048	case ASIC_REV_5719:
3049	case ASIC_REV_5720:
3050		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3051		    !tp->pci_fn)
3052			return true;
3053		return false;
3054	}
3055
3056	return false;
3057}
3058
3059static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3060{
3061	u32 val;
3062
3063	if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3064		return;
3065
3066	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3067		if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3068			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3069			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3070
3071			sg_dig_ctrl |=
3072				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3073			tw32(SG_DIG_CTRL, sg_dig_ctrl);
3074			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3075		}
3076		return;
3077	}
3078
3079	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3080		tg3_bmcr_reset(tp);
3081		val = tr32(GRC_MISC_CFG);
3082		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3083		udelay(40);
3084		return;
3085	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3086		u32 phytest;
3087		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3088			u32 phy;
3089
3090			tg3_writephy(tp, MII_ADVERTISE, 0);
3091			tg3_writephy(tp, MII_BMCR,
3092				     BMCR_ANENABLE | BMCR_ANRESTART);
3093
3094			tg3_writephy(tp, MII_TG3_FET_TEST,
3095				     phytest | MII_TG3_FET_SHADOW_EN);
3096			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3097				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3098				tg3_writephy(tp,
3099					     MII_TG3_FET_SHDW_AUXMODE4,
3100					     phy);
3101			}
3102			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3103		}
3104		return;
3105	} else if (do_low_power) {
3106		if (!tg3_phy_led_bug(tp))
3107			tg3_writephy(tp, MII_TG3_EXT_CTRL,
3108				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3109
3110		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3111		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3112		      MII_TG3_AUXCTL_PCTL_VREG_11V;
3113		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3114	}
3115
3116	/* The PHY should not be powered down on some chips because
3117	 * of bugs.
3118	 */
3119	if (tg3_phy_power_bug(tp))
3120		return;
3121
3122	if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3123	    tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3124		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3125		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3126		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3127		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3128	}
3129
3130	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3131}
3132
3133/* tp->lock is held. */
3134static int tg3_nvram_lock(struct tg3 *tp)
3135{
3136	if (tg3_flag(tp, NVRAM)) {
3137		int i;
3138
3139		if (tp->nvram_lock_cnt == 0) {
3140			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3141			for (i = 0; i < 8000; i++) {
3142				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3143					break;
3144				udelay(20);
3145			}
3146			if (i == 8000) {
3147				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3148				return -ENODEV;
3149			}
3150		}
3151		tp->nvram_lock_cnt++;
3152	}
3153	return 0;
3154}
3155
3156/* tp->lock is held. */
3157static void tg3_nvram_unlock(struct tg3 *tp)
3158{
3159	if (tg3_flag(tp, NVRAM)) {
3160		if (tp->nvram_lock_cnt > 0)
3161			tp->nvram_lock_cnt--;
3162		if (tp->nvram_lock_cnt == 0)
3163			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3164	}
3165}
3166
3167/* tp->lock is held. */
3168static void tg3_enable_nvram_access(struct tg3 *tp)
3169{
3170	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3171		u32 nvaccess = tr32(NVRAM_ACCESS);
3172
3173		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3174	}
3175}
3176
3177/* tp->lock is held. */
3178static void tg3_disable_nvram_access(struct tg3 *tp)
3179{
3180	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3181		u32 nvaccess = tr32(NVRAM_ACCESS);
3182
3183		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3184	}
3185}
3186
3187static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3188					u32 offset, u32 *val)
3189{
3190	u32 tmp;
3191	int i;
3192
3193	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3194		return -EINVAL;
3195
3196	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3197					EEPROM_ADDR_DEVID_MASK |
3198					EEPROM_ADDR_READ);
3199	tw32(GRC_EEPROM_ADDR,
3200	     tmp |
3201	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
3202	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3203	      EEPROM_ADDR_ADDR_MASK) |
3204	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
3205
3206	for (i = 0; i < 1000; i++) {
3207		tmp = tr32(GRC_EEPROM_ADDR);
3208
3209		if (tmp & EEPROM_ADDR_COMPLETE)
3210			break;
3211		msleep(1);
3212	}
3213	if (!(tmp & EEPROM_ADDR_COMPLETE))
3214		return -EBUSY;
3215
3216	tmp = tr32(GRC_EEPROM_DATA);
3217
3218	/*
3219	 * The data will always be opposite the native endian
3220	 * format.  Perform a blind byteswap to compensate.
3221	 */
3222	*val = swab32(tmp);
3223
3224	return 0;
3225}
3226
3227#define NVRAM_CMD_TIMEOUT 5000
3228
3229static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3230{
3231	int i;
3232
3233	tw32(NVRAM_CMD, nvram_cmd);
3234	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3235		usleep_range(10, 40);
3236		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3237			udelay(10);
3238			break;
3239		}
3240	}
3241
3242	if (i == NVRAM_CMD_TIMEOUT)
3243		return -EBUSY;
3244
3245	return 0;
3246}
3247
3248static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3249{
3250	if (tg3_flag(tp, NVRAM) &&
3251	    tg3_flag(tp, NVRAM_BUFFERED) &&
3252	    tg3_flag(tp, FLASH) &&
3253	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3254	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3255
3256		addr = ((addr / tp->nvram_pagesize) <<
3257			ATMEL_AT45DB0X1B_PAGE_POS) +
3258		       (addr % tp->nvram_pagesize);
3259
3260	return addr;
3261}
3262
3263static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3264{
3265	if (tg3_flag(tp, NVRAM) &&
3266	    tg3_flag(tp, NVRAM_BUFFERED) &&
3267	    tg3_flag(tp, FLASH) &&
3268	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3269	    (tp->nvram_jedecnum == JEDEC_ATMEL))
3270
3271		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3272			tp->nvram_pagesize) +
3273		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3274
3275	return addr;
3276}
3277
3278/* NOTE: Data read in from NVRAM is byteswapped according to
3279 * the byteswapping settings for all other register accesses.
3280 * tg3 devices are BE devices, so on a BE machine, the data
3281 * returned will be exactly as it is seen in NVRAM.  On a LE
3282 * machine, the 32-bit value will be byteswapped.
3283 */
3284static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3285{
3286	int ret;
3287
3288	if (!tg3_flag(tp, NVRAM))
3289		return tg3_nvram_read_using_eeprom(tp, offset, val);
3290
3291	offset = tg3_nvram_phys_addr(tp, offset);
3292
3293	if (offset > NVRAM_ADDR_MSK)
3294		return -EINVAL;
3295
3296	ret = tg3_nvram_lock(tp);
3297	if (ret)
3298		return ret;
3299
3300	tg3_enable_nvram_access(tp);
3301
3302	tw32(NVRAM_ADDR, offset);
3303	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3304		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3305
3306	if (ret == 0)
3307		*val = tr32(NVRAM_RDDATA);
3308
3309	tg3_disable_nvram_access(tp);
3310
3311	tg3_nvram_unlock(tp);
3312
3313	return ret;
3314}
3315
3316/* Ensures NVRAM data is in bytestream format. */
3317static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3318{
3319	u32 v;
3320	int res = tg3_nvram_read(tp, offset, &v);
3321	if (!res)
3322		*val = cpu_to_be32(v);
3323	return res;
3324}
3325
3326static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3327				    u32 offset, u32 len, u8 *buf)
3328{
3329	int i, j, rc = 0;
3330	u32 val;
3331
3332	for (i = 0; i < len; i += 4) {
3333		u32 addr;
3334		__be32 data;
3335
3336		addr = offset + i;
3337
3338		memcpy(&data, buf + i, 4);
3339
3340		/*
3341		 * The SEEPROM interface expects the data to always be opposite
3342		 * the native endian format.  We accomplish this by reversing
3343		 * all the operations that would have been performed on the
3344		 * data from a call to tg3_nvram_read_be32().
3345		 */
3346		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3347
3348		val = tr32(GRC_EEPROM_ADDR);
3349		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3350
3351		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3352			EEPROM_ADDR_READ);
3353		tw32(GRC_EEPROM_ADDR, val |
3354			(0 << EEPROM_ADDR_DEVID_SHIFT) |
3355			(addr & EEPROM_ADDR_ADDR_MASK) |
3356			EEPROM_ADDR_START |
3357			EEPROM_ADDR_WRITE);
3358
3359		for (j = 0; j < 1000; j++) {
3360			val = tr32(GRC_EEPROM_ADDR);
3361
3362			if (val & EEPROM_ADDR_COMPLETE)
3363				break;
3364			msleep(1);
3365		}
3366		if (!(val & EEPROM_ADDR_COMPLETE)) {
3367			rc = -EBUSY;
3368			break;
3369		}
3370	}
3371
3372	return rc;
3373}
3374
3375/* offset and length are dword aligned */
3376static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3377		u8 *buf)
3378{
3379	int ret = 0;
3380	u32 pagesize = tp->nvram_pagesize;
3381	u32 pagemask = pagesize - 1;
3382	u32 nvram_cmd;
3383	u8 *tmp;
3384
3385	tmp = kmalloc(pagesize, GFP_KERNEL);
3386	if (tmp == NULL)
3387		return -ENOMEM;
3388
3389	while (len) {
3390		int j;
3391		u32 phy_addr, page_off, size;
3392
3393		phy_addr = offset & ~pagemask;
3394
3395		for (j = 0; j < pagesize; j += 4) {
3396			ret = tg3_nvram_read_be32(tp, phy_addr + j,
3397						  (__be32 *) (tmp + j));
3398			if (ret)
3399				break;
3400		}
3401		if (ret)
3402			break;
3403
3404		page_off = offset & pagemask;
3405		size = pagesize;
3406		if (len < size)
3407			size = len;
3408
3409		len -= size;
3410
3411		memcpy(tmp + page_off, buf, size);
3412
3413		offset = offset + (pagesize - page_off);
3414
3415		tg3_enable_nvram_access(tp);
3416
3417		/*
3418		 * Before we can erase the flash page, we need
3419		 * to issue a special "write enable" command.
3420		 */
3421		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3422
3423		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3424			break;
3425
3426		/* Erase the target page */
3427		tw32(NVRAM_ADDR, phy_addr);
3428
3429		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3430			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3431
3432		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3433			break;
3434
3435		/* Issue another write enable to start the write. */
3436		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3437
3438		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3439			break;
3440
3441		for (j = 0; j < pagesize; j += 4) {
3442			__be32 data;
3443
3444			data = *((__be32 *) (tmp + j));
3445
3446			tw32(NVRAM_WRDATA, be32_to_cpu(data));
3447
3448			tw32(NVRAM_ADDR, phy_addr + j);
3449
3450			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3451				NVRAM_CMD_WR;
3452
3453			if (j == 0)
3454				nvram_cmd |= NVRAM_CMD_FIRST;
3455			else if (j == (pagesize - 4))
3456				nvram_cmd |= NVRAM_CMD_LAST;
3457
3458			ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3459			if (ret)
3460				break;
3461		}
3462		if (ret)
3463			break;
3464	}
3465
3466	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3467	tg3_nvram_exec_cmd(tp, nvram_cmd);
3468
3469	kfree(tmp);
3470
3471	return ret;
3472}
3473
3474/* offset and length are dword aligned */
3475static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3476		u8 *buf)
3477{
3478	int i, ret = 0;
3479
3480	for (i = 0; i < len; i += 4, offset += 4) {
3481		u32 page_off, phy_addr, nvram_cmd;
3482		__be32 data;
3483
3484		memcpy(&data, buf + i, 4);
3485		tw32(NVRAM_WRDATA, be32_to_cpu(data));
3486
3487		page_off = offset % tp->nvram_pagesize;
3488
3489		phy_addr = tg3_nvram_phys_addr(tp, offset);
3490
3491		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3492
3493		if (page_off == 0 || i == 0)
3494			nvram_cmd |= NVRAM_CMD_FIRST;
3495		if (page_off == (tp->nvram_pagesize - 4))
3496			nvram_cmd |= NVRAM_CMD_LAST;
3497
3498		if (i == (len - 4))
3499			nvram_cmd |= NVRAM_CMD_LAST;
3500
3501		if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3502		    !tg3_flag(tp, FLASH) ||
3503		    !tg3_flag(tp, 57765_PLUS))
3504			tw32(NVRAM_ADDR, phy_addr);
3505
3506		if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3507		    !tg3_flag(tp, 5755_PLUS) &&
3508		    (tp->nvram_jedecnum == JEDEC_ST) &&
3509		    (nvram_cmd & NVRAM_CMD_FIRST)) {
3510			u32 cmd;
3511
3512			cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3513			ret = tg3_nvram_exec_cmd(tp, cmd);
3514			if (ret)
3515				break;
3516		}
3517		if (!tg3_flag(tp, FLASH)) {
3518			/* We always do complete word writes to eeprom. */
3519			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3520		}
3521
3522		ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3523		if (ret)
3524			break;
3525	}
3526	return ret;
3527}
3528
3529/* offset and length are dword aligned */
3530static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3531{
3532	int ret;
3533
3534	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3535		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3536		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
3537		udelay(40);
3538	}
3539
3540	if (!tg3_flag(tp, NVRAM)) {
3541		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3542	} else {
3543		u32 grc_mode;
3544
3545		ret = tg3_nvram_lock(tp);
3546		if (ret)
3547			return ret;
3548
3549		tg3_enable_nvram_access(tp);
3550		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3551			tw32(NVRAM_WRITE1, 0x406);
3552
3553		grc_mode = tr32(GRC_MODE);
3554		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3555
3556		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3557			ret = tg3_nvram_write_block_buffered(tp, offset, len,
3558				buf);
3559		} else {
3560			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3561				buf);
3562		}
3563
3564		grc_mode = tr32(GRC_MODE);
3565		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3566
3567		tg3_disable_nvram_access(tp);
3568		tg3_nvram_unlock(tp);
3569	}
3570
3571	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3572		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3573		udelay(40);
3574	}
3575
3576	return ret;
3577}
3578
3579#define RX_CPU_SCRATCH_BASE	0x30000
3580#define RX_CPU_SCRATCH_SIZE	0x04000
3581#define TX_CPU_SCRATCH_BASE	0x34000
3582#define TX_CPU_SCRATCH_SIZE	0x04000
3583
3584/* tp->lock is held. */
3585static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3586{
3587	int i;
3588	const int iters = 10000;
3589
3590	for (i = 0; i < iters; i++) {
3591		tw32(cpu_base + CPU_STATE, 0xffffffff);
3592		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3593		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3594			break;
3595		if (pci_channel_offline(tp->pdev))
3596			return -EBUSY;
3597	}
3598
3599	return (i == iters) ? -EBUSY : 0;
3600}
3601
3602/* tp->lock is held. */
3603static int tg3_rxcpu_pause(struct tg3 *tp)
3604{
3605	int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3606
3607	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3608	tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3609	udelay(10);
3610
3611	return rc;
3612}
3613
3614/* tp->lock is held. */
3615static int tg3_txcpu_pause(struct tg3 *tp)
3616{
3617	return tg3_pause_cpu(tp, TX_CPU_BASE);
3618}
3619
3620/* tp->lock is held. */
3621static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3622{
3623	tw32(cpu_base + CPU_STATE, 0xffffffff);
3624	tw32_f(cpu_base + CPU_MODE,  0x00000000);
3625}
3626
3627/* tp->lock is held. */
3628static void tg3_rxcpu_resume(struct tg3 *tp)
3629{
3630	tg3_resume_cpu(tp, RX_CPU_BASE);
3631}
3632
3633/* tp->lock is held. */
3634static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3635{
3636	int rc;
3637
3638	BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3639
3640	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3641		u32 val = tr32(GRC_VCPU_EXT_CTRL);
3642
3643		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3644		return 0;
3645	}
3646	if (cpu_base == RX_CPU_BASE) {
3647		rc = tg3_rxcpu_pause(tp);
3648	} else {
3649		/*
3650		 * There is only an Rx CPU for the 5750 derivative in the
3651		 * BCM4785.
3652		 */
3653		if (tg3_flag(tp, IS_SSB_CORE))
3654			return 0;
3655
3656		rc = tg3_txcpu_pause(tp);
3657	}
3658
3659	if (rc) {
3660		netdev_err(tp->dev, "%s timed out, %s CPU\n",
3661			   __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3662		return -ENODEV;
3663	}
3664
3665	/* Clear firmware's nvram arbitration. */
3666	if (tg3_flag(tp, NVRAM))
3667		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3668	return 0;
3669}
3670
3671static int tg3_fw_data_len(struct tg3 *tp,
3672			   const struct tg3_firmware_hdr *fw_hdr)
3673{
3674	int fw_len;
3675
3676	/* Non fragmented firmware have one firmware header followed by a
3677	 * contiguous chunk of data to be written. The length field in that
3678	 * header is not the length of data to be written but the complete
3679	 * length of the bss. The data length is determined based on
3680	 * tp->fw->size minus headers.
3681	 *
3682	 * Fragmented firmware have a main header followed by multiple
3683	 * fragments. Each fragment is identical to non fragmented firmware
3684	 * with a firmware header followed by a contiguous chunk of data. In
3685	 * the main header, the length field is unused and set to 0xffffffff.
3686	 * In each fragment header the length is the entire size of that
3687	 * fragment i.e. fragment data + header length. Data length is
3688	 * therefore length field in the header minus TG3_FW_HDR_LEN.
3689	 */
3690	if (tp->fw_len == 0xffffffff)
3691		fw_len = be32_to_cpu(fw_hdr->len);
3692	else
3693		fw_len = tp->fw->size;
3694
3695	return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3696}
3697
3698/* tp->lock is held. */
3699static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3700				 u32 cpu_scratch_base, int cpu_scratch_size,
3701				 const struct tg3_firmware_hdr *fw_hdr)
3702{
3703	int err, i;
3704	void (*write_op)(struct tg3 *, u32, u32);
3705	int total_len = tp->fw->size;
3706
3707	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3708		netdev_err(tp->dev,
3709			   "%s: Trying to load TX cpu firmware which is 5705\n",
3710			   __func__);
3711		return -EINVAL;
3712	}
3713
3714	if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3715		write_op = tg3_write_mem;
3716	else
3717		write_op = tg3_write_indirect_reg32;
3718
3719	if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3720		/* It is possible that bootcode is still loading at this point.
3721		 * Get the nvram lock first before halting the cpu.
3722		 */
3723		int lock_err = tg3_nvram_lock(tp);
3724		err = tg3_halt_cpu(tp, cpu_base);
3725		if (!lock_err)
3726			tg3_nvram_unlock(tp);
3727		if (err)
3728			goto out;
3729
3730		for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3731			write_op(tp, cpu_scratch_base + i, 0);
3732		tw32(cpu_base + CPU_STATE, 0xffffffff);
3733		tw32(cpu_base + CPU_MODE,
3734		     tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3735	} else {
3736		/* Subtract additional main header for fragmented firmware and
3737		 * advance to the first fragment
3738		 */
3739		total_len -= TG3_FW_HDR_LEN;
3740		fw_hdr++;
3741	}
3742
3743	do {
3744		u32 *fw_data = (u32 *)(fw_hdr + 1);
3745		for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3746			write_op(tp, cpu_scratch_base +
3747				     (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3748				     (i * sizeof(u32)),
3749				 be32_to_cpu(fw_data[i]));
3750
3751		total_len -= be32_to_cpu(fw_hdr->len);
3752
3753		/* Advance to next fragment */
3754		fw_hdr = (struct tg3_firmware_hdr *)
3755			 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3756	} while (total_len > 0);
3757
3758	err = 0;
3759
3760out:
3761	return err;
3762}
3763
3764/* tp->lock is held. */
3765static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3766{
3767	int i;
3768	const int iters = 5;
3769
3770	tw32(cpu_base + CPU_STATE, 0xffffffff);
3771	tw32_f(cpu_base + CPU_PC, pc);
3772
3773	for (i = 0; i < iters; i++) {
3774		if (tr32(cpu_base + CPU_PC) == pc)
3775			break;
3776		tw32(cpu_base + CPU_STATE, 0xffffffff);
3777		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3778		tw32_f(cpu_base + CPU_PC, pc);
3779		udelay(1000);
3780	}
3781
3782	return (i == iters) ? -EBUSY : 0;
3783}
3784
3785/* tp->lock is held. */
3786static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3787{
3788	const struct tg3_firmware_hdr *fw_hdr;
3789	int err;
3790
3791	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3792
3793	/* Firmware blob starts with version numbers, followed by
3794	   start address and length. We are setting complete length.
3795	   length = end_address_of_bss - start_address_of_text.
3796	   Remainder is the blob to be loaded contiguously
3797	   from start address. */
3798
3799	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3800				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3801				    fw_hdr);
3802	if (err)
3803		return err;
3804
3805	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3806				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3807				    fw_hdr);
3808	if (err)
3809		return err;
3810
3811	/* Now startup only the RX cpu. */
3812	err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3813				       be32_to_cpu(fw_hdr->base_addr));
3814	if (err) {
3815		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3816			   "should be %08x\n", __func__,
3817			   tr32(RX_CPU_BASE + CPU_PC),
3818				be32_to_cpu(fw_hdr->base_addr));
3819		return -ENODEV;
3820	}
3821
3822	tg3_rxcpu_resume(tp);
3823
3824	return 0;
3825}
3826
3827static int tg3_validate_rxcpu_state(struct tg3 *tp)
3828{
3829	const int iters = 1000;
3830	int i;
3831	u32 val;
3832
3833	/* Wait for boot code to complete initialization and enter service
3834	 * loop. It is then safe to download service patches
3835	 */
3836	for (i = 0; i < iters; i++) {
3837		if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3838			break;
3839
3840		udelay(10);
3841	}
3842
3843	if (i == iters) {
3844		netdev_err(tp->dev, "Boot code not ready for service patches\n");
3845		return -EBUSY;
3846	}
3847
3848	val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3849	if (val & 0xff) {
3850		netdev_warn(tp->dev,
3851			    "Other patches exist. Not downloading EEE patch\n");
3852		return -EEXIST;
3853	}
3854
3855	return 0;
3856}
3857
3858/* tp->lock is held. */
3859static void tg3_load_57766_firmware(struct tg3 *tp)
3860{
3861	struct tg3_firmware_hdr *fw_hdr;
3862
3863	if (!tg3_flag(tp, NO_NVRAM))
3864		return;
3865
3866	if (tg3_validate_rxcpu_state(tp))
3867		return;
3868
3869	if (!tp->fw)
3870		return;
3871
3872	/* This firmware blob has a different format than older firmware
3873	 * releases as given below. The main difference is we have fragmented
3874	 * data to be written to non-contiguous locations.
3875	 *
3876	 * In the beginning we have a firmware header identical to other
3877	 * firmware which consists of version, base addr and length. The length
3878	 * here is unused and set to 0xffffffff.
3879	 *
3880	 * This is followed by a series of firmware fragments which are
3881	 * individually identical to previous firmware. i.e. they have the
3882	 * firmware header and followed by data for that fragment. The version
3883	 * field of the individual fragment header is unused.
3884	 */
3885
3886	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3887	if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3888		return;
3889
3890	if (tg3_rxcpu_pause(tp))
3891		return;
3892
3893	/* tg3_load_firmware_cpu() will always succeed for the 57766 */
3894	tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3895
3896	tg3_rxcpu_resume(tp);
3897}
3898
3899/* tp->lock is held. */
3900static int tg3_load_tso_firmware(struct tg3 *tp)
3901{
3902	const struct tg3_firmware_hdr *fw_hdr;
3903	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3904	int err;
3905
3906	if (!tg3_flag(tp, FW_TSO))
3907		return 0;
3908
3909	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3910
3911	/* Firmware blob starts with version numbers, followed by
3912	   start address and length. We are setting complete length.
3913	   length = end_address_of_bss - start_address_of_text.
3914	   Remainder is the blob to be loaded contiguously
3915	   from start address. */
3916
3917	cpu_scratch_size = tp->fw_len;
3918
3919	if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3920		cpu_base = RX_CPU_BASE;
3921		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3922	} else {
3923		cpu_base = TX_CPU_BASE;
3924		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3925		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3926	}
3927
3928	err = tg3_load_firmware_cpu(tp, cpu_base,
3929				    cpu_scratch_base, cpu_scratch_size,
3930				    fw_hdr);
3931	if (err)
3932		return err;
3933
3934	/* Now startup the cpu. */
3935	err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3936				       be32_to_cpu(fw_hdr->base_addr));
3937	if (err) {
3938		netdev_err(tp->dev,
3939			   "%s fails to set CPU PC, is %08x should be %08x\n",
3940			   __func__, tr32(cpu_base + CPU_PC),
3941			   be32_to_cpu(fw_hdr->base_addr));
3942		return -ENODEV;
3943	}
3944
3945	tg3_resume_cpu(tp, cpu_base);
3946	return 0;
3947}
3948
3949/* tp->lock is held. */
3950static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
3951{
3952	u32 addr_high, addr_low;
3953
3954	addr_high = ((mac_addr[0] << 8) | mac_addr[1]);
3955	addr_low = ((mac_addr[2] << 24) | (mac_addr[3] << 16) |
3956		    (mac_addr[4] <<  8) | mac_addr[5]);
3957
3958	if (index < 4) {
3959		tw32(MAC_ADDR_0_HIGH + (index * 8), addr_high);
3960		tw32(MAC_ADDR_0_LOW + (index * 8), addr_low);
3961	} else {
3962		index -= 4;
3963		tw32(MAC_EXTADDR_0_HIGH + (index * 8), addr_high);
3964		tw32(MAC_EXTADDR_0_LOW + (index * 8), addr_low);
3965	}
3966}
3967
3968/* tp->lock is held. */
3969static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3970{
3971	u32 addr_high;
3972	int i;
3973
3974	for (i = 0; i < 4; i++) {
3975		if (i == 1 && skip_mac_1)
3976			continue;
3977		__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3978	}
3979
3980	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3981	    tg3_asic_rev(tp) == ASIC_REV_5704) {
3982		for (i = 4; i < 16; i++)
3983			__tg3_set_one_mac_addr(tp, tp->dev->dev_addr, i);
3984	}
3985
3986	addr_high = (tp->dev->dev_addr[0] +
3987		     tp->dev->dev_addr[1] +
3988		     tp->dev->dev_addr[2] +
3989		     tp->dev->dev_addr[3] +
3990		     tp->dev->dev_addr[4] +
3991		     tp->dev->dev_addr[5]) &
3992		TX_BACKOFF_SEED_MASK;
3993	tw32(MAC_TX_BACKOFF_SEED, addr_high);
3994}
3995
3996static void tg3_enable_register_access(struct tg3 *tp)
3997{
3998	/*
3999	 * Make sure register accesses (indirect or otherwise) will function
4000	 * correctly.
4001	 */
4002	pci_write_config_dword(tp->pdev,
4003			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
4004}
4005
4006static int tg3_power_up(struct tg3 *tp)
4007{
4008	int err;
4009
4010	tg3_enable_register_access(tp);
4011
4012	err = pci_set_power_state(tp->pdev, PCI_D0);
4013	if (!err) {
4014		/* Switch out of Vaux if it is a NIC */
4015		tg3_pwrsrc_switch_to_vmain(tp);
4016	} else {
4017		netdev_err(tp->dev, "Transition to D0 failed\n");
4018	}
4019
4020	return err;
4021}
4022
4023static int tg3_setup_phy(struct tg3 *, bool);
4024
4025static int tg3_power_down_prepare(struct tg3 *tp)
4026{
4027	u32 misc_host_ctrl;
4028	bool device_should_wake, do_low_power;
4029
4030	tg3_enable_register_access(tp);
4031
4032	/* Restore the CLKREQ setting. */
4033	if (tg3_flag(tp, CLKREQ_BUG))
4034		pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4035					 PCI_EXP_LNKCTL_CLKREQ_EN);
4036
4037	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
4038	tw32(TG3PCI_MISC_HOST_CTRL,
4039	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
4040
4041	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
4042			     tg3_flag(tp, WOL_ENABLE);
4043
4044	if (tg3_flag(tp, USE_PHYLIB)) {
4045		do_low_power = false;
4046		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
4047		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4048			struct phy_device *phydev;
4049			u32 phyid, advertising;
4050
4051			phydev = tp->mdio_bus->phy_map[tp->phy_addr];
4052
4053			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4054
4055			tp->link_config.speed = phydev->speed;
4056			tp->link_config.duplex = phydev->duplex;
4057			tp->link_config.autoneg = phydev->autoneg;
4058			tp->link_config.advertising = phydev->advertising;
4059
4060			advertising = ADVERTISED_TP |
4061				      ADVERTISED_Pause |
4062				      ADVERTISED_Autoneg |
4063				      ADVERTISED_10baseT_Half;
4064
4065			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4066				if (tg3_flag(tp, WOL_SPEED_100MB))
4067					advertising |=
4068						ADVERTISED_100baseT_Half |
4069						ADVERTISED_100baseT_Full |
4070						ADVERTISED_10baseT_Full;
4071				else
4072					advertising |= ADVERTISED_10baseT_Full;
4073			}
4074
4075			phydev->advertising = advertising;
4076
4077			phy_start_aneg(phydev);
4078
4079			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4080			if (phyid != PHY_ID_BCMAC131) {
4081				phyid &= PHY_BCM_OUI_MASK;
4082				if (phyid == PHY_BCM_OUI_1 ||
4083				    phyid == PHY_BCM_OUI_2 ||
4084				    phyid == PHY_BCM_OUI_3)
4085					do_low_power = true;
4086			}
4087		}
4088	} else {
4089		do_low_power = true;
4090
4091		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4092			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4093
4094		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4095			tg3_setup_phy(tp, false);
4096	}
4097
4098	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4099		u32 val;
4100
4101		val = tr32(GRC_VCPU_EXT_CTRL);
4102		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4103	} else if (!tg3_flag(tp, ENABLE_ASF)) {
4104		int i;
4105		u32 val;
4106
4107		for (i = 0; i < 200; i++) {
4108			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4109			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4110				break;
4111			msleep(1);
4112		}
4113	}
4114	if (tg3_flag(tp, WOL_CAP))
4115		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4116						     WOL_DRV_STATE_SHUTDOWN |
4117						     WOL_DRV_WOL |
4118						     WOL_SET_MAGIC_PKT);
4119
4120	if (device_should_wake) {
4121		u32 mac_mode;
4122
4123		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4124			if (do_low_power &&
4125			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4126				tg3_phy_auxctl_write(tp,
4127					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4128					       MII_TG3_AUXCTL_PCTL_WOL_EN |
4129					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4130					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4131				udelay(40);
4132			}
4133
4134			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4135				mac_mode = MAC_MODE_PORT_MODE_GMII;
4136			else if (tp->phy_flags &
4137				 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4138				if (tp->link_config.active_speed == SPEED_1000)
4139					mac_mode = MAC_MODE_PORT_MODE_GMII;
4140				else
4141					mac_mode = MAC_MODE_PORT_MODE_MII;
4142			} else
4143				mac_mode = MAC_MODE_PORT_MODE_MII;
4144
4145			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4146			if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4147				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4148					     SPEED_100 : SPEED_10;
4149				if (tg3_5700_link_polarity(tp, speed))
4150					mac_mode |= MAC_MODE_LINK_POLARITY;
4151				else
4152					mac_mode &= ~MAC_MODE_LINK_POLARITY;
4153			}
4154		} else {
4155			mac_mode = MAC_MODE_PORT_MODE_TBI;
4156		}
4157
4158		if (!tg3_flag(tp, 5750_PLUS))
4159			tw32(MAC_LED_CTRL, tp->led_ctrl);
4160
4161		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4162		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4163		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4164			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4165
4166		if (tg3_flag(tp, ENABLE_APE))
4167			mac_mode |= MAC_MODE_APE_TX_EN |
4168				    MAC_MODE_APE_RX_EN |
4169				    MAC_MODE_TDE_ENABLE;
4170
4171		tw32_f(MAC_MODE, mac_mode);
4172		udelay(100);
4173
4174		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4175		udelay(10);
4176	}
4177
4178	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4179	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4180	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
4181		u32 base_val;
4182
4183		base_val = tp->pci_clock_ctrl;
4184		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4185			     CLOCK_CTRL_TXCLK_DISABLE);
4186
4187		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4188			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
4189	} else if (tg3_flag(tp, 5780_CLASS) ||
4190		   tg3_flag(tp, CPMU_PRESENT) ||
4191		   tg3_asic_rev(tp) == ASIC_REV_5906) {
4192		/* do nothing */
4193	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4194		u32 newbits1, newbits2;
4195
4196		if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4197		    tg3_asic_rev(tp) == ASIC_REV_5701) {
4198			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4199				    CLOCK_CTRL_TXCLK_DISABLE |
4200				    CLOCK_CTRL_ALTCLK);
4201			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4202		} else if (tg3_flag(tp, 5705_PLUS)) {
4203			newbits1 = CLOCK_CTRL_625_CORE;
4204			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4205		} else {
4206			newbits1 = CLOCK_CTRL_ALTCLK;
4207			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4208		}
4209
4210		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4211			    40);
4212
4213		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4214			    40);
4215
4216		if (!tg3_flag(tp, 5705_PLUS)) {
4217			u32 newbits3;
4218
4219			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4220			    tg3_asic_rev(tp) == ASIC_REV_5701) {
4221				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4222					    CLOCK_CTRL_TXCLK_DISABLE |
4223					    CLOCK_CTRL_44MHZ_CORE);
4224			} else {
4225				newbits3 = CLOCK_CTRL_44MHZ_CORE;
4226			}
4227
4228			tw32_wait_f(TG3PCI_CLOCK_CTRL,
4229				    tp->pci_clock_ctrl | newbits3, 40);
4230		}
4231	}
4232
4233	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4234		tg3_power_down_phy(tp, do_low_power);
4235
4236	tg3_frob_aux_power(tp, true);
4237
4238	/* Workaround for unstable PLL clock */
4239	if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4240	    ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4241	     (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4242		u32 val = tr32(0x7d00);
4243
4244		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4245		tw32(0x7d00, val);
4246		if (!tg3_flag(tp, ENABLE_ASF)) {
4247			int err;
4248
4249			err = tg3_nvram_lock(tp);
4250			tg3_halt_cpu(tp, RX_CPU_BASE);
4251			if (!err)
4252				tg3_nvram_unlock(tp);
4253		}
4254	}
4255
4256	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4257
4258	tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
4259
4260	return 0;
4261}
4262
4263static void tg3_power_down(struct tg3 *tp)
4264{
4265	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4266	pci_set_power_state(tp->pdev, PCI_D3hot);
4267}
4268
4269static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4270{
4271	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4272	case MII_TG3_AUX_STAT_10HALF:
4273		*speed = SPEED_10;
4274		*duplex = DUPLEX_HALF;
4275		break;
4276
4277	case MII_TG3_AUX_STAT_10FULL:
4278		*speed = SPEED_10;
4279		*duplex = DUPLEX_FULL;
4280		break;
4281
4282	case MII_TG3_AUX_STAT_100HALF:
4283		*speed = SPEED_100;
4284		*duplex = DUPLEX_HALF;
4285		break;
4286
4287	case MII_TG3_AUX_STAT_100FULL:
4288		*speed = SPEED_100;
4289		*duplex = DUPLEX_FULL;
4290		break;
4291
4292	case MII_TG3_AUX_STAT_1000HALF:
4293		*speed = SPEED_1000;
4294		*duplex = DUPLEX_HALF;
4295		break;
4296
4297	case MII_TG3_AUX_STAT_1000FULL:
4298		*speed = SPEED_1000;
4299		*duplex = DUPLEX_FULL;
4300		break;
4301
4302	default:
4303		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4304			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4305				 SPEED_10;
4306			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4307				  DUPLEX_HALF;
4308			break;
4309		}
4310		*speed = SPEED_UNKNOWN;
4311		*duplex = DUPLEX_UNKNOWN;
4312		break;
4313	}
4314}
4315
4316static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4317{
4318	int err = 0;
4319	u32 val, new_adv;
4320
4321	new_adv = ADVERTISE_CSMA;
4322	new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4323	new_adv |= mii_advertise_flowctrl(flowctrl);
4324
4325	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4326	if (err)
4327		goto done;
4328
4329	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4330		new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4331
4332		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4333		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4334			new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4335
4336		err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4337		if (err)
4338			goto done;
4339	}
4340
4341	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4342		goto done;
4343
4344	tw32(TG3_CPMU_EEE_MODE,
4345	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4346
4347	err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4348	if (!err) {
4349		u32 err2;
4350
4351		val = 0;
4352		/* Advertise 100-BaseTX EEE ability */
4353		if (advertise & ADVERTISED_100baseT_Full)
4354			val |= MDIO_AN_EEE_ADV_100TX;
4355		/* Advertise 1000-BaseT EEE ability */
4356		if (advertise & ADVERTISED_1000baseT_Full)
4357			val |= MDIO_AN_EEE_ADV_1000T;
4358
4359		if (!tp->eee.eee_enabled) {
4360			val = 0;
4361			tp->eee.advertised = 0;
4362		} else {
4363			tp->eee.advertised = advertise &
4364					     (ADVERTISED_100baseT_Full |
4365					      ADVERTISED_1000baseT_Full);
4366		}
4367
4368		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4369		if (err)
4370			val = 0;
4371
4372		switch (tg3_asic_rev(tp)) {
4373		case ASIC_REV_5717:
4374		case ASIC_REV_57765:
4375		case ASIC_REV_57766:
4376		case ASIC_REV_5719:
4377			/* If we advertised any eee advertisements above... */
4378			if (val)
4379				val = MII_TG3_DSP_TAP26_ALNOKO |
4380				      MII_TG3_DSP_TAP26_RMRXSTO |
4381				      MII_TG3_DSP_TAP26_OPCSINPT;
4382			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4383			/* Fall through */
4384		case ASIC_REV_5720:
4385		case ASIC_REV_5762:
4386			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4387				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4388						 MII_TG3_DSP_CH34TP2_HIBW01);
4389		}
4390
4391		err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4392		if (!err)
4393			err = err2;
4394	}
4395
4396done:
4397	return err;
4398}
4399
4400static void tg3_phy_copper_begin(struct tg3 *tp)
4401{
4402	if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4403	    (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4404		u32 adv, fc;
4405
4406		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4407		    !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4408			adv = ADVERTISED_10baseT_Half |
4409			      ADVERTISED_10baseT_Full;
4410			if (tg3_flag(tp, WOL_SPEED_100MB))
4411				adv |= ADVERTISED_100baseT_Half |
4412				       ADVERTISED_100baseT_Full;
4413			if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK) {
4414				if (!(tp->phy_flags &
4415				      TG3_PHYFLG_DISABLE_1G_HD_ADV))
4416					adv |= ADVERTISED_1000baseT_Half;
4417				adv |= ADVERTISED_1000baseT_Full;
4418			}
4419
4420			fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4421		} else {
4422			adv = tp->link_config.advertising;
4423			if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4424				adv &= ~(ADVERTISED_1000baseT_Half |
4425					 ADVERTISED_1000baseT_Full);
4426
4427			fc = tp->link_config.flowctrl;
4428		}
4429
4430		tg3_phy_autoneg_cfg(tp, adv, fc);
4431
4432		if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4433		    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4434			/* Normally during power down we want to autonegotiate
4435			 * the lowest possible speed for WOL. However, to avoid
4436			 * link flap, we leave it untouched.
4437			 */
4438			return;
4439		}
4440
4441		tg3_writephy(tp, MII_BMCR,
4442			     BMCR_ANENABLE | BMCR_ANRESTART);
4443	} else {
4444		int i;
4445		u32 bmcr, orig_bmcr;
4446
4447		tp->link_config.active_speed = tp->link_config.speed;
4448		tp->link_config.active_duplex = tp->link_config.duplex;
4449
4450		if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4451			/* With autoneg disabled, 5715 only links up when the
4452			 * advertisement register has the configured speed
4453			 * enabled.
4454			 */
4455			tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4456		}
4457
4458		bmcr = 0;
4459		switch (tp->link_config.speed) {
4460		default:
4461		case SPEED_10:
4462			break;
4463
4464		case SPEED_100:
4465			bmcr |= BMCR_SPEED100;
4466			break;
4467
4468		case SPEED_1000:
4469			bmcr |= BMCR_SPEED1000;
4470			break;
4471		}
4472
4473		if (tp->link_config.duplex == DUPLEX_FULL)
4474			bmcr |= BMCR_FULLDPLX;
4475
4476		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4477		    (bmcr != orig_bmcr)) {
4478			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4479			for (i = 0; i < 1500; i++) {
4480				u32 tmp;
4481
4482				udelay(10);
4483				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4484				    tg3_readphy(tp, MII_BMSR, &tmp))
4485					continue;
4486				if (!(tmp & BMSR_LSTATUS)) {
4487					udelay(40);
4488					break;
4489				}
4490			}
4491			tg3_writephy(tp, MII_BMCR, bmcr);
4492			udelay(40);
4493		}
4494	}
4495}
4496
4497static int tg3_phy_pull_config(struct tg3 *tp)
4498{
4499	int err;
4500	u32 val;
4501
4502	err = tg3_readphy(tp, MII_BMCR, &val);
4503	if (err)
4504		goto done;
4505
4506	if (!(val & BMCR_ANENABLE)) {
4507		tp->link_config.autoneg = AUTONEG_DISABLE;
4508		tp->link_config.advertising = 0;
4509		tg3_flag_clear(tp, PAUSE_AUTONEG);
4510
4511		err = -EIO;
4512
4513		switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4514		case 0:
4515			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4516				goto done;
4517
4518			tp->link_config.speed = SPEED_10;
4519			break;
4520		case BMCR_SPEED100:
4521			if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4522				goto done;
4523
4524			tp->link_config.speed = SPEED_100;
4525			break;
4526		case BMCR_SPEED1000:
4527			if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4528				tp->link_config.speed = SPEED_1000;
4529				break;
4530			}
4531			/* Fall through */
4532		default:
4533			goto done;
4534		}
4535
4536		if (val & BMCR_FULLDPLX)
4537			tp->link_config.duplex = DUPLEX_FULL;
4538		else
4539			tp->link_config.duplex = DUPLEX_HALF;
4540
4541		tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4542
4543		err = 0;
4544		goto done;
4545	}
4546
4547	tp->link_config.autoneg = AUTONEG_ENABLE;
4548	tp->link_config.advertising = ADVERTISED_Autoneg;
4549	tg3_flag_set(tp, PAUSE_AUTONEG);
4550
4551	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4552		u32 adv;
4553
4554		err = tg3_readphy(tp, MII_ADVERTISE, &val);
4555		if (err)
4556			goto done;
4557
4558		adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4559		tp->link_config.advertising |= adv | ADVERTISED_TP;
4560
4561		tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4562	} else {
4563		tp->link_config.advertising |= ADVERTISED_FIBRE;
4564	}
4565
4566	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4567		u32 adv;
4568
4569		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4570			err = tg3_readphy(tp, MII_CTRL1000, &val);
4571			if (err)
4572				goto done;
4573
4574			adv = mii_ctrl1000_to_ethtool_adv_t(val);
4575		} else {
4576			err = tg3_readphy(tp, MII_ADVERTISE, &val);
4577			if (err)
4578				goto done;
4579
4580			adv = tg3_decode_flowctrl_1000X(val);
4581			tp->link_config.flowctrl = adv;
4582
4583			val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4584			adv = mii_adv_to_ethtool_adv_x(val);
4585		}
4586
4587		tp->link_config.advertising |= adv;
4588	}
4589
4590done:
4591	return err;
4592}
4593
4594static int tg3_init_5401phy_dsp(struct tg3 *tp)
4595{
4596	int err;
4597
4598	/* Turn off tap power management. */
4599	/* Set Extended packet length bit */
4600	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4601
4602	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4603	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4604	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4605	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4606	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4607
4608	udelay(40);
4609
4610	return err;
4611}
4612
4613static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4614{
4615	struct ethtool_eee eee;
4616
4617	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4618		return true;
4619
4620	tg3_eee_pull_config(tp, &eee);
4621
4622	if (tp->eee.eee_enabled) {
4623		if (tp->eee.advertised != eee.advertised ||
4624		    tp->eee.tx_lpi_timer != eee.tx_lpi_timer ||
4625		    tp->eee.tx_lpi_enabled != eee.tx_lpi_enabled)
4626			return false;
4627	} else {
4628		/* EEE is disabled but we're advertising */
4629		if (eee.advertised)
4630			return false;
4631	}
4632
4633	return true;
4634}
4635
4636static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4637{
4638	u32 advmsk, tgtadv, advertising;
4639
4640	advertising = tp->link_config.advertising;
4641	tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4642
4643	advmsk = ADVERTISE_ALL;
4644	if (tp->link_config.active_duplex == DUPLEX_FULL) {
4645		tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4646		advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4647	}
4648
4649	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4650		return false;
4651
4652	if ((*lcladv & advmsk) != tgtadv)
4653		return false;
4654
4655	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4656		u32 tg3_ctrl;
4657
4658		tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4659
4660		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4661			return false;
4662
4663		if (tgtadv &&
4664		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4665		     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4666			tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4667			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4668				     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4669		} else {
4670			tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4671		}
4672
4673		if (tg3_ctrl != tgtadv)
4674			return false;
4675	}
4676
4677	return true;
4678}
4679
4680static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4681{
4682	u32 lpeth = 0;
4683
4684	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4685		u32 val;
4686
4687		if (tg3_readphy(tp, MII_STAT1000, &val))
4688			return false;
4689
4690		lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4691	}
4692
4693	if (tg3_readphy(tp, MII_LPA, rmtadv))
4694		return false;
4695
4696	lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4697	tp->link_config.rmt_adv = lpeth;
4698
4699	return true;
4700}
4701
4702static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4703{
4704	if (curr_link_up != tp->link_up) {
4705		if (curr_link_up) {
4706			netif_carrier_on(tp->dev);
4707		} else {
4708			netif_carrier_off(tp->dev);
4709			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4710				tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4711		}
4712
4713		tg3_link_report(tp);
4714		return true;
4715	}
4716
4717	return false;
4718}
4719
4720static void tg3_clear_mac_status(struct tg3 *tp)
4721{
4722	tw32(MAC_EVENT, 0);
4723
4724	tw32_f(MAC_STATUS,
4725	       MAC_STATUS_SYNC_CHANGED |
4726	       MAC_STATUS_CFG_CHANGED |
4727	       MAC_STATUS_MI_COMPLETION |
4728	       MAC_STATUS_LNKSTATE_CHANGED);
4729	udelay(40);
4730}
4731
4732static void tg3_setup_eee(struct tg3 *tp)
4733{
4734	u32 val;
4735
4736	val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
4737	      TG3_CPMU_EEE_LNKIDL_UART_IDL;
4738	if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
4739		val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
4740
4741	tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
4742
4743	tw32_f(TG3_CPMU_EEE_CTRL,
4744	       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
4745
4746	val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
4747	      (tp->eee.tx_lpi_enabled ? TG3_CPMU_EEEMD_LPI_IN_TX : 0) |
4748	      TG3_CPMU_EEEMD_LPI_IN_RX |
4749	      TG3_CPMU_EEEMD_EEE_ENABLE;
4750
4751	if (tg3_asic_rev(tp) != ASIC_REV_5717)
4752		val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
4753
4754	if (tg3_flag(tp, ENABLE_APE))
4755		val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
4756
4757	tw32_f(TG3_CPMU_EEE_MODE, tp->eee.eee_enabled ? val : 0);
4758
4759	tw32_f(TG3_CPMU_EEE_DBTMR1,
4760	       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
4761	       (tp->eee.tx_lpi_timer & 0xffff));
4762
4763	tw32_f(TG3_CPMU_EEE_DBTMR2,
4764	       TG3_CPMU_DBTMR2_APE_TX_2047US |
4765	       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
4766}
4767
4768static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4769{
4770	bool current_link_up;
4771	u32 bmsr, val;
4772	u32 lcl_adv, rmt_adv;
4773	u16 current_speed;
4774	u8 current_duplex;
4775	int i, err;
4776
4777	tg3_clear_mac_status(tp);
4778
4779	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4780		tw32_f(MAC_MI_MODE,
4781		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4782		udelay(80);
4783	}
4784
4785	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4786
4787	/* Some third-party PHYs need to be reset on link going
4788	 * down.
4789	 */
4790	if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4791	     tg3_asic_rev(tp) == ASIC_REV_5704 ||
4792	     tg3_asic_rev(tp) == ASIC_REV_5705) &&
4793	    tp->link_up) {
4794		tg3_readphy(tp, MII_BMSR, &bmsr);
4795		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4796		    !(bmsr & BMSR_LSTATUS))
4797			force_reset = true;
4798	}
4799	if (force_reset)
4800		tg3_phy_reset(tp);
4801
4802	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4803		tg3_readphy(tp, MII_BMSR, &bmsr);
4804		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4805		    !tg3_flag(tp, INIT_COMPLETE))
4806			bmsr = 0;
4807
4808		if (!(bmsr & BMSR_LSTATUS)) {
4809			err = tg3_init_5401phy_dsp(tp);
4810			if (err)
4811				return err;
4812
4813			tg3_readphy(tp, MII_BMSR, &bmsr);
4814			for (i = 0; i < 1000; i++) {
4815				udelay(10);
4816				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4817				    (bmsr & BMSR_LSTATUS)) {
4818					udelay(40);
4819					break;
4820				}
4821			}
4822
4823			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4824			    TG3_PHY_REV_BCM5401_B0 &&
4825			    !(bmsr & BMSR_LSTATUS) &&
4826			    tp->link_config.active_speed == SPEED_1000) {
4827				err = tg3_phy_reset(tp);
4828				if (!err)
4829					err = tg3_init_5401phy_dsp(tp);
4830				if (err)
4831					return err;
4832			}
4833		}
4834	} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4835		   tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4836		/* 5701 {A0,B0} CRC bug workaround */
4837		tg3_writephy(tp, 0x15, 0x0a75);
4838		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4839		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4840		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4841	}
4842
4843	/* Clear pending interrupts... */
4844	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4845	tg3_readphy(tp, MII_TG3_ISTAT, &val);
4846
4847	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4848		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4849	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4850		tg3_writephy(tp, MII_TG3_IMASK, ~0);
4851
4852	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4853	    tg3_asic_rev(tp) == ASIC_REV_5701) {
4854		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4855			tg3_writephy(tp, MII_TG3_EXT_CTRL,
4856				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4857		else
4858			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4859	}
4860
4861	current_link_up = false;
4862	current_speed = SPEED_UNKNOWN;
4863	current_duplex = DUPLEX_UNKNOWN;
4864	tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4865	tp->link_config.rmt_adv = 0;
4866
4867	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4868		err = tg3_phy_auxctl_read(tp,
4869					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4870					  &val);
4871		if (!err && !(val & (1 << 10))) {
4872			tg3_phy_auxctl_write(tp,
4873					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4874					     val | (1 << 10));
4875			goto relink;
4876		}
4877	}
4878
4879	bmsr = 0;
4880	for (i = 0; i < 100; i++) {
4881		tg3_readphy(tp, MII_BMSR, &bmsr);
4882		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4883		    (bmsr & BMSR_LSTATUS))
4884			break;
4885		udelay(40);
4886	}
4887
4888	if (bmsr & BMSR_LSTATUS) {
4889		u32 aux_stat, bmcr;
4890
4891		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4892		for (i = 0; i < 2000; i++) {
4893			udelay(10);
4894			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4895			    aux_stat)
4896				break;
4897		}
4898
4899		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4900					     &current_speed,
4901					     &current_duplex);
4902
4903		bmcr = 0;
4904		for (i = 0; i < 200; i++) {
4905			tg3_readphy(tp, MII_BMCR, &bmcr);
4906			if (tg3_readphy(tp, MII_BMCR, &bmcr))
4907				continue;
4908			if (bmcr && bmcr != 0x7fff)
4909				break;
4910			udelay(10);
4911		}
4912
4913		lcl_adv = 0;
4914		rmt_adv = 0;
4915
4916		tp->link_config.active_speed = current_speed;
4917		tp->link_config.active_duplex = current_duplex;
4918
4919		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4920			bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4921
4922			if ((bmcr & BMCR_ANENABLE) &&
4923			    eee_config_ok &&
4924			    tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4925			    tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4926				current_link_up = true;
4927
4928			/* EEE settings changes take effect only after a phy
4929			 * reset.  If we have skipped a reset due to Link Flap
4930			 * Avoidance being enabled, do it now.
4931			 */
4932			if (!eee_config_ok &&
4933			    (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4934			    !force_reset) {
4935				tg3_setup_eee(tp);
4936				tg3_phy_reset(tp);
4937			}
4938		} else {
4939			if (!(bmcr & BMCR_ANENABLE) &&
4940			    tp->link_config.speed == current_speed &&
4941			    tp->link_config.duplex == current_duplex) {
4942				current_link_up = true;
4943			}
4944		}
4945
4946		if (current_link_up &&
4947		    tp->link_config.active_duplex == DUPLEX_FULL) {
4948			u32 reg, bit;
4949
4950			if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4951				reg = MII_TG3_FET_GEN_STAT;
4952				bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4953			} else {
4954				reg = MII_TG3_EXT_STAT;
4955				bit = MII_TG3_EXT_STAT_MDIX;
4956			}
4957
4958			if (!tg3_readphy(tp, reg, &val) && (val & bit))
4959				tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4960
4961			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4962		}
4963	}
4964
4965relink:
4966	if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4967		tg3_phy_copper_begin(tp);
4968
4969		if (tg3_flag(tp, ROBOSWITCH)) {
4970			current_link_up = true;
4971			/* FIXME: when BCM5325 switch is used use 100 MBit/s */
4972			current_speed = SPEED_1000;
4973			current_duplex = DUPLEX_FULL;
4974			tp->link_config.active_speed = current_speed;
4975			tp->link_config.active_duplex = current_duplex;
4976		}
4977
4978		tg3_readphy(tp, MII_BMSR, &bmsr);
4979		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4980		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4981			current_link_up = true;
4982	}
4983
4984	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4985	if (current_link_up) {
4986		if (tp->link_config.active_speed == SPEED_100 ||
4987		    tp->link_config.active_speed == SPEED_10)
4988			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4989		else
4990			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4991	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4992		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4993	else
4994		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4995
4996	/* In order for the 5750 core in BCM4785 chip to work properly
4997	 * in RGMII mode, the Led Control Register must be set up.
4998	 */
4999	if (tg3_flag(tp, RGMII_MODE)) {
5000		u32 led_ctrl = tr32(MAC_LED_CTRL);
5001		led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
5002
5003		if (tp->link_config.active_speed == SPEED_10)
5004			led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
5005		else if (tp->link_config.active_speed == SPEED_100)
5006			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5007				     LED_CTRL_100MBPS_ON);
5008		else if (tp->link_config.active_speed == SPEED_1000)
5009			led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
5010				     LED_CTRL_1000MBPS_ON);
5011
5012		tw32(MAC_LED_CTRL, led_ctrl);
5013		udelay(40);
5014	}
5015
5016	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5017	if (tp->link_config.active_duplex == DUPLEX_HALF)
5018		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5019
5020	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
5021		if (current_link_up &&
5022		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
5023			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
5024		else
5025			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
5026	}
5027
5028	/* ??? Without this setting Netgear GA302T PHY does not
5029	 * ??? send/receive packets...
5030	 */
5031	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
5032	    tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
5033		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
5034		tw32_f(MAC_MI_MODE, tp->mi_mode);
5035		udelay(80);
5036	}
5037
5038	tw32_f(MAC_MODE, tp->mac_mode);
5039	udelay(40);
5040
5041	tg3_phy_eee_adjust(tp, current_link_up);
5042
5043	if (tg3_flag(tp, USE_LINKCHG_REG)) {
5044		/* Polled via timer. */
5045		tw32_f(MAC_EVENT, 0);
5046	} else {
5047		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5048	}
5049	udelay(40);
5050
5051	if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
5052	    current_link_up &&
5053	    tp->link_config.active_speed == SPEED_1000 &&
5054	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
5055		udelay(120);
5056		tw32_f(MAC_STATUS,
5057		     (MAC_STATUS_SYNC_CHANGED |
5058		      MAC_STATUS_CFG_CHANGED));
5059		udelay(40);
5060		tg3_write_mem(tp,
5061			      NIC_SRAM_FIRMWARE_MBOX,
5062			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
5063	}
5064
5065	/* Prevent send BD corruption. */
5066	if (tg3_flag(tp, CLKREQ_BUG)) {
5067		if (tp->link_config.active_speed == SPEED_100 ||
5068		    tp->link_config.active_speed == SPEED_10)
5069			pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
5070						   PCI_EXP_LNKCTL_CLKREQ_EN);
5071		else
5072			pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
5073						 PCI_EXP_LNKCTL_CLKREQ_EN);
5074	}
5075
5076	tg3_test_and_report_link_chg(tp, current_link_up);
5077
5078	return 0;
5079}
5080
5081struct tg3_fiber_aneginfo {
5082	int state;
5083#define ANEG_STATE_UNKNOWN		0
5084#define ANEG_STATE_AN_ENABLE		1
5085#define ANEG_STATE_RESTART_INIT		2
5086#define ANEG_STATE_RESTART		3
5087#define ANEG_STATE_DISABLE_LINK_OK	4
5088#define ANEG_STATE_ABILITY_DETECT_INIT	5
5089#define ANEG_STATE_ABILITY_DETECT	6
5090#define ANEG_STATE_ACK_DETECT_INIT	7
5091#define ANEG_STATE_ACK_DETECT		8
5092#define ANEG_STATE_COMPLETE_ACK_INIT	9
5093#define ANEG_STATE_COMPLETE_ACK		10
5094#define ANEG_STATE_IDLE_DETECT_INIT	11
5095#define ANEG_STATE_IDLE_DETECT		12
5096#define ANEG_STATE_LINK_OK		13
5097#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
5098#define ANEG_STATE_NEXT_PAGE_WAIT	15
5099
5100	u32 flags;
5101#define MR_AN_ENABLE		0x00000001
5102#define MR_RESTART_AN		0x00000002
5103#define MR_AN_COMPLETE		0x00000004
5104#define MR_PAGE_RX		0x00000008
5105#define MR_NP_LOADED		0x00000010
5106#define MR_TOGGLE_TX		0x00000020
5107#define MR_LP_ADV_FULL_DUPLEX	0x00000040
5108#define MR_LP_ADV_HALF_DUPLEX	0x00000080
5109#define MR_LP_ADV_SYM_PAUSE	0x00000100
5110#define MR_LP_ADV_ASYM_PAUSE	0x00000200
5111#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
5112#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
5113#define MR_LP_ADV_NEXT_PAGE	0x00001000
5114#define MR_TOGGLE_RX		0x00002000
5115#define MR_NP_RX		0x00004000
5116
5117#define MR_LINK_OK		0x80000000
5118
5119	unsigned long link_time, cur_time;
5120
5121	u32 ability_match_cfg;
5122	int ability_match_count;
5123
5124	char ability_match, idle_match, ack_match;
5125
5126	u32 txconfig, rxconfig;
5127#define ANEG_CFG_NP		0x00000080
5128#define ANEG_CFG_ACK		0x00000040
5129#define ANEG_CFG_RF2		0x00000020
5130#define ANEG_CFG_RF1		0x00000010
5131#define ANEG_CFG_PS2		0x00000001
5132#define ANEG_CFG_PS1		0x00008000
5133#define ANEG_CFG_HD		0x00004000
5134#define ANEG_CFG_FD		0x00002000
5135#define ANEG_CFG_INVAL		0x00001f06
5136
5137};
5138#define ANEG_OK		0
5139#define ANEG_DONE	1
5140#define ANEG_TIMER_ENAB	2
5141#define ANEG_FAILED	-1
5142
5143#define ANEG_STATE_SETTLE_TIME	10000
5144
5145static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5146				   struct tg3_fiber_aneginfo *ap)
5147{
5148	u16 flowctrl;
5149	unsigned long delta;
5150	u32 rx_cfg_reg;
5151	int ret;
5152
5153	if (ap->state == ANEG_STATE_UNKNOWN) {
5154		ap->rxconfig = 0;
5155		ap->link_time = 0;
5156		ap->cur_time = 0;
5157		ap->ability_match_cfg = 0;
5158		ap->ability_match_count = 0;
5159		ap->ability_match = 0;
5160		ap->idle_match = 0;
5161		ap->ack_match = 0;
5162	}
5163	ap->cur_time++;
5164
5165	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5166		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5167
5168		if (rx_cfg_reg != ap->ability_match_cfg) {
5169			ap->ability_match_cfg = rx_cfg_reg;
5170			ap->ability_match = 0;
5171			ap->ability_match_count = 0;
5172		} else {
5173			if (++ap->ability_match_count > 1) {
5174				ap->ability_match = 1;
5175				ap->ability_match_cfg = rx_cfg_reg;
5176			}
5177		}
5178		if (rx_cfg_reg & ANEG_CFG_ACK)
5179			ap->ack_match = 1;
5180		else
5181			ap->ack_match = 0;
5182
5183		ap->idle_match = 0;
5184	} else {
5185		ap->idle_match = 1;
5186		ap->ability_match_cfg = 0;
5187		ap->ability_match_count = 0;
5188		ap->ability_match = 0;
5189		ap->ack_match = 0;
5190
5191		rx_cfg_reg = 0;
5192	}
5193
5194	ap->rxconfig = rx_cfg_reg;
5195	ret = ANEG_OK;
5196
5197	switch (ap->state) {
5198	case ANEG_STATE_UNKNOWN:
5199		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5200			ap->state = ANEG_STATE_AN_ENABLE;
5201
5202		/* fallthru */
5203	case ANEG_STATE_AN_ENABLE:
5204		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5205		if (ap->flags & MR_AN_ENABLE) {
5206			ap->link_time = 0;
5207			ap->cur_time = 0;
5208			ap->ability_match_cfg = 0;
5209			ap->ability_match_count = 0;
5210			ap->ability_match = 0;
5211			ap->idle_match = 0;
5212			ap->ack_match = 0;
5213
5214			ap->state = ANEG_STATE_RESTART_INIT;
5215		} else {
5216			ap->state = ANEG_STATE_DISABLE_LINK_OK;
5217		}
5218		break;
5219
5220	case ANEG_STATE_RESTART_INIT:
5221		ap->link_time = ap->cur_time;
5222		ap->flags &= ~(MR_NP_LOADED);
5223		ap->txconfig = 0;
5224		tw32(MAC_TX_AUTO_NEG, 0);
5225		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5226		tw32_f(MAC_MODE, tp->mac_mode);
5227		udelay(40);
5228
5229		ret = ANEG_TIMER_ENAB;
5230		ap->state = ANEG_STATE_RESTART;
5231
5232		/* fallthru */
5233	case ANEG_STATE_RESTART:
5234		delta = ap->cur_time - ap->link_time;
5235		if (delta > ANEG_STATE_SETTLE_TIME)
5236			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5237		else
5238			ret = ANEG_TIMER_ENAB;
5239		break;
5240
5241	case ANEG_STATE_DISABLE_LINK_OK:
5242		ret = ANEG_DONE;
5243		break;
5244
5245	case ANEG_STATE_ABILITY_DETECT_INIT:
5246		ap->flags &= ~(MR_TOGGLE_TX);
5247		ap->txconfig = ANEG_CFG_FD;
5248		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5249		if (flowctrl & ADVERTISE_1000XPAUSE)
5250			ap->txconfig |= ANEG_CFG_PS1;
5251		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5252			ap->txconfig |= ANEG_CFG_PS2;
5253		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5254		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5255		tw32_f(MAC_MODE, tp->mac_mode);
5256		udelay(40);
5257
5258		ap->state = ANEG_STATE_ABILITY_DETECT;
5259		break;
5260
5261	case ANEG_STATE_ABILITY_DETECT:
5262		if (ap->ability_match != 0 && ap->rxconfig != 0)
5263			ap->state = ANEG_STATE_ACK_DETECT_INIT;
5264		break;
5265
5266	case ANEG_STATE_ACK_DETECT_INIT:
5267		ap->txconfig |= ANEG_CFG_ACK;
5268		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5269		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5270		tw32_f(MAC_MODE, tp->mac_mode);
5271		udelay(40);
5272
5273		ap->state = ANEG_STATE_ACK_DETECT;
5274
5275		/* fallthru */
5276	case ANEG_STATE_ACK_DETECT:
5277		if (ap->ack_match != 0) {
5278			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5279			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5280				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5281			} else {
5282				ap->state = ANEG_STATE_AN_ENABLE;
5283			}
5284		} else if (ap->ability_match != 0 &&
5285			   ap->rxconfig == 0) {
5286			ap->state = ANEG_STATE_AN_ENABLE;
5287		}
5288		break;
5289
5290	case ANEG_STATE_COMPLETE_ACK_INIT:
5291		if (ap->rxconfig & ANEG_CFG_INVAL) {
5292			ret = ANEG_FAILED;
5293			break;
5294		}
5295		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5296			       MR_LP_ADV_HALF_DUPLEX |
5297			       MR_LP_ADV_SYM_PAUSE |
5298			       MR_LP_ADV_ASYM_PAUSE |
5299			       MR_LP_ADV_REMOTE_FAULT1 |
5300			       MR_LP_ADV_REMOTE_FAULT2 |
5301			       MR_LP_ADV_NEXT_PAGE |
5302			       MR_TOGGLE_RX |
5303			       MR_NP_RX);
5304		if (ap->rxconfig & ANEG_CFG_FD)
5305			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5306		if (ap->rxconfig & ANEG_CFG_HD)
5307			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5308		if (ap->rxconfig & ANEG_CFG_PS1)
5309			ap->flags |= MR_LP_ADV_SYM_PAUSE;
5310		if (ap->rxconfig & ANEG_CFG_PS2)
5311			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5312		if (ap->rxconfig & ANEG_CFG_RF1)
5313			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5314		if (ap->rxconfig & ANEG_CFG_RF2)
5315			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5316		if (ap->rxconfig & ANEG_CFG_NP)
5317			ap->flags |= MR_LP_ADV_NEXT_PAGE;
5318
5319		ap->link_time = ap->cur_time;
5320
5321		ap->flags ^= (MR_TOGGLE_TX);
5322		if (ap->rxconfig & 0x0008)
5323			ap->flags |= MR_TOGGLE_RX;
5324		if (ap->rxconfig & ANEG_CFG_NP)
5325			ap->flags |= MR_NP_RX;
5326		ap->flags |= MR_PAGE_RX;
5327
5328		ap->state = ANEG_STATE_COMPLETE_ACK;
5329		ret = ANEG_TIMER_ENAB;
5330		break;
5331
5332	case ANEG_STATE_COMPLETE_ACK:
5333		if (ap->ability_match != 0 &&
5334		    ap->rxconfig == 0) {
5335			ap->state = ANEG_STATE_AN_ENABLE;
5336			break;
5337		}
5338		delta = ap->cur_time - ap->link_time;
5339		if (delta > ANEG_STATE_SETTLE_TIME) {
5340			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5341				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5342			} else {
5343				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5344				    !(ap->flags & MR_NP_RX)) {
5345					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5346				} else {
5347					ret = ANEG_FAILED;
5348				}
5349			}
5350		}
5351		break;
5352
5353	case ANEG_STATE_IDLE_DETECT_INIT:
5354		ap->link_time = ap->cur_time;
5355		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5356		tw32_f(MAC_MODE, tp->mac_mode);
5357		udelay(40);
5358
5359		ap->state = ANEG_STATE_IDLE_DETECT;
5360		ret = ANEG_TIMER_ENAB;
5361		break;
5362
5363	case ANEG_STATE_IDLE_DETECT:
5364		if (ap->ability_match != 0 &&
5365		    ap->rxconfig == 0) {
5366			ap->state = ANEG_STATE_AN_ENABLE;
5367			break;
5368		}
5369		delta = ap->cur_time - ap->link_time;
5370		if (delta > ANEG_STATE_SETTLE_TIME) {
5371			/* XXX another gem from the Broadcom driver :( */
5372			ap->state = ANEG_STATE_LINK_OK;
5373		}
5374		break;
5375
5376	case ANEG_STATE_LINK_OK:
5377		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5378		ret = ANEG_DONE;
5379		break;
5380
5381	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5382		/* ??? unimplemented */
5383		break;
5384
5385	case ANEG_STATE_NEXT_PAGE_WAIT:
5386		/* ??? unimplemented */
5387		break;
5388
5389	default:
5390		ret = ANEG_FAILED;
5391		break;
5392	}
5393
5394	return ret;
5395}
5396
5397static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5398{
5399	int res = 0;
5400	struct tg3_fiber_aneginfo aninfo;
5401	int status = ANEG_FAILED;
5402	unsigned int tick;
5403	u32 tmp;
5404
5405	tw32_f(MAC_TX_AUTO_NEG, 0);
5406
5407	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5408	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5409	udelay(40);
5410
5411	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5412	udelay(40);
5413
5414	memset(&aninfo, 0, sizeof(aninfo));
5415	aninfo.flags |= MR_AN_ENABLE;
5416	aninfo.state = ANEG_STATE_UNKNOWN;
5417	aninfo.cur_time = 0;
5418	tick = 0;
5419	while (++tick < 195000) {
5420		status = tg3_fiber_aneg_smachine(tp, &aninfo);
5421		if (status == ANEG_DONE || status == ANEG_FAILED)
5422			break;
5423
5424		udelay(1);
5425	}
5426
5427	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5428	tw32_f(MAC_MODE, tp->mac_mode);
5429	udelay(40);
5430
5431	*txflags = aninfo.txconfig;
5432	*rxflags = aninfo.flags;
5433
5434	if (status == ANEG_DONE &&
5435	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5436			     MR_LP_ADV_FULL_DUPLEX)))
5437		res = 1;
5438
5439	return res;
5440}
5441
5442static void tg3_init_bcm8002(struct tg3 *tp)
5443{
5444	u32 mac_status = tr32(MAC_STATUS);
5445	int i;
5446
5447	/* Reset when initting first time or we have a link. */
5448	if (tg3_flag(tp, INIT_COMPLETE) &&
5449	    !(mac_status & MAC_STATUS_PCS_SYNCED))
5450		return;
5451
5452	/* Set PLL lock range. */
5453	tg3_writephy(tp, 0x16, 0x8007);
5454
5455	/* SW reset */
5456	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5457
5458	/* Wait for reset to complete. */
5459	/* XXX schedule_timeout() ... */
5460	for (i = 0; i < 500; i++)
5461		udelay(10);
5462
5463	/* Config mode; select PMA/Ch 1 regs. */
5464	tg3_writephy(tp, 0x10, 0x8411);
5465
5466	/* Enable auto-lock and comdet, select txclk for tx. */
5467	tg3_writephy(tp, 0x11, 0x0a10);
5468
5469	tg3_writephy(tp, 0x18, 0x00a0);
5470	tg3_writephy(tp, 0x16, 0x41ff);
5471
5472	/* Assert and deassert POR. */
5473	tg3_writephy(tp, 0x13, 0x0400);
5474	udelay(40);
5475	tg3_writephy(tp, 0x13, 0x0000);
5476
5477	tg3_writephy(tp, 0x11, 0x0a50);
5478	udelay(40);
5479	tg3_writephy(tp, 0x11, 0x0a10);
5480
5481	/* Wait for signal to stabilize */
5482	/* XXX schedule_timeout() ... */
5483	for (i = 0; i < 15000; i++)
5484		udelay(10);
5485
5486	/* Deselect the channel register so we can read the PHYID
5487	 * later.
5488	 */
5489	tg3_writephy(tp, 0x10, 0x8011);
5490}
5491
5492static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5493{
5494	u16 flowctrl;
5495	bool current_link_up;
5496	u32 sg_dig_ctrl, sg_dig_status;
5497	u32 serdes_cfg, expected_sg_dig_ctrl;
5498	int workaround, port_a;
5499
5500	serdes_cfg = 0;
5501	expected_sg_dig_ctrl = 0;
5502	workaround = 0;
5503	port_a = 1;
5504	current_link_up = false;
5505
5506	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5507	    tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5508		workaround = 1;
5509		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5510			port_a = 0;
5511
5512		/* preserve bits 0-11,13,14 for signal pre-emphasis */
5513		/* preserve bits 20-23 for voltage regulator */
5514		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5515	}
5516
5517	sg_dig_ctrl = tr32(SG_DIG_CTRL);
5518
5519	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5520		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5521			if (workaround) {
5522				u32 val = serdes_cfg;
5523
5524				if (port_a)
5525					val |= 0xc010000;
5526				else
5527					val |= 0x4010000;
5528				tw32_f(MAC_SERDES_CFG, val);
5529			}
5530
5531			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5532		}
5533		if (mac_status & MAC_STATUS_PCS_SYNCED) {
5534			tg3_setup_flow_control(tp, 0, 0);
5535			current_link_up = true;
5536		}
5537		goto out;
5538	}
5539
5540	/* Want auto-negotiation.  */
5541	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5542
5543	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5544	if (flowctrl & ADVERTISE_1000XPAUSE)
5545		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5546	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5547		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5548
5549	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5550		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5551		    tp->serdes_counter &&
5552		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
5553				    MAC_STATUS_RCVD_CFG)) ==
5554		     MAC_STATUS_PCS_SYNCED)) {
5555			tp->serdes_counter--;
5556			current_link_up = true;
5557			goto out;
5558		}
5559restart_autoneg:
5560		if (workaround)
5561			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5562		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5563		udelay(5);
5564		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5565
5566		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5567		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5568	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5569				 MAC_STATUS_SIGNAL_DET)) {
5570		sg_dig_status = tr32(SG_DIG_STATUS);
5571		mac_status = tr32(MAC_STATUS);
5572
5573		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5574		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
5575			u32 local_adv = 0, remote_adv = 0;
5576
5577			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5578				local_adv |= ADVERTISE_1000XPAUSE;
5579			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5580				local_adv |= ADVERTISE_1000XPSE_ASYM;
5581
5582			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5583				remote_adv |= LPA_1000XPAUSE;
5584			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5585				remote_adv |= LPA_1000XPAUSE_ASYM;
5586
5587			tp->link_config.rmt_adv =
5588					   mii_adv_to_ethtool_adv_x(remote_adv);
5589
5590			tg3_setup_flow_control(tp, local_adv, remote_adv);
5591			current_link_up = true;
5592			tp->serdes_counter = 0;
5593			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5594		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5595			if (tp->serdes_counter)
5596				tp->serdes_counter--;
5597			else {
5598				if (workaround) {
5599					u32 val = serdes_cfg;
5600
5601					if (port_a)
5602						val |= 0xc010000;
5603					else
5604						val |= 0x4010000;
5605
5606					tw32_f(MAC_SERDES_CFG, val);
5607				}
5608
5609				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5610				udelay(40);
5611
5612				/* Link parallel detection - link is up */
5613				/* only if we have PCS_SYNC and not */
5614				/* receiving config code words */
5615				mac_status = tr32(MAC_STATUS);
5616				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5617				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
5618					tg3_setup_flow_control(tp, 0, 0);
5619					current_link_up = true;
5620					tp->phy_flags |=
5621						TG3_PHYFLG_PARALLEL_DETECT;
5622					tp->serdes_counter =
5623						SERDES_PARALLEL_DET_TIMEOUT;
5624				} else
5625					goto restart_autoneg;
5626			}
5627		}
5628	} else {
5629		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5630		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5631	}
5632
5633out:
5634	return current_link_up;
5635}
5636
5637static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5638{
5639	bool current_link_up = false;
5640
5641	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5642		goto out;
5643
5644	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5645		u32 txflags, rxflags;
5646		int i;
5647
5648		if (fiber_autoneg(tp, &txflags, &rxflags)) {
5649			u32 local_adv = 0, remote_adv = 0;
5650
5651			if (txflags & ANEG_CFG_PS1)
5652				local_adv |= ADVERTISE_1000XPAUSE;
5653			if (txflags & ANEG_CFG_PS2)
5654				local_adv |= ADVERTISE_1000XPSE_ASYM;
5655
5656			if (rxflags & MR_LP_ADV_SYM_PAUSE)
5657				remote_adv |= LPA_1000XPAUSE;
5658			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5659				remote_adv |= LPA_1000XPAUSE_ASYM;
5660
5661			tp->link_config.rmt_adv =
5662					   mii_adv_to_ethtool_adv_x(remote_adv);
5663
5664			tg3_setup_flow_control(tp, local_adv, remote_adv);
5665
5666			current_link_up = true;
5667		}
5668		for (i = 0; i < 30; i++) {
5669			udelay(20);
5670			tw32_f(MAC_STATUS,
5671			       (MAC_STATUS_SYNC_CHANGED |
5672				MAC_STATUS_CFG_CHANGED));
5673			udelay(40);
5674			if ((tr32(MAC_STATUS) &
5675			     (MAC_STATUS_SYNC_CHANGED |
5676			      MAC_STATUS_CFG_CHANGED)) == 0)
5677				break;
5678		}
5679
5680		mac_status = tr32(MAC_STATUS);
5681		if (!current_link_up &&
5682		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
5683		    !(mac_status & MAC_STATUS_RCVD_CFG))
5684			current_link_up = true;
5685	} else {
5686		tg3_setup_flow_control(tp, 0, 0);
5687
5688		/* Forcing 1000FD link up. */
5689		current_link_up = true;
5690
5691		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5692		udelay(40);
5693
5694		tw32_f(MAC_MODE, tp->mac_mode);
5695		udelay(40);
5696	}
5697
5698out:
5699	return current_link_up;
5700}
5701
5702static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5703{
5704	u32 orig_pause_cfg;
5705	u16 orig_active_speed;
5706	u8 orig_active_duplex;
5707	u32 mac_status;
5708	bool current_link_up;
5709	int i;
5710
5711	orig_pause_cfg = tp->link_config.active_flowctrl;
5712	orig_active_speed = tp->link_config.active_speed;
5713	orig_active_duplex = tp->link_config.active_duplex;
5714
5715	if (!tg3_flag(tp, HW_AUTONEG) &&
5716	    tp->link_up &&
5717	    tg3_flag(tp, INIT_COMPLETE)) {
5718		mac_status = tr32(MAC_STATUS);
5719		mac_status &= (MAC_STATUS_PCS_SYNCED |
5720			       MAC_STATUS_SIGNAL_DET |
5721			       MAC_STATUS_CFG_CHANGED |
5722			       MAC_STATUS_RCVD_CFG);
5723		if (mac_status == (MAC_STATUS_PCS_SYNCED |
5724				   MAC_STATUS_SIGNAL_DET)) {
5725			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5726					    MAC_STATUS_CFG_CHANGED));
5727			return 0;
5728		}
5729	}
5730
5731	tw32_f(MAC_TX_AUTO_NEG, 0);
5732
5733	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5734	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5735	tw32_f(MAC_MODE, tp->mac_mode);
5736	udelay(40);
5737
5738	if (tp->phy_id == TG3_PHY_ID_BCM8002)
5739		tg3_init_bcm8002(tp);
5740
5741	/* Enable link change event even when serdes polling.  */
5742	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5743	udelay(40);
5744
5745	current_link_up = false;
5746	tp->link_config.rmt_adv = 0;
5747	mac_status = tr32(MAC_STATUS);
5748
5749	if (tg3_flag(tp, HW_AUTONEG))
5750		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5751	else
5752		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5753
5754	tp->napi[0].hw_status->status =
5755		(SD_STATUS_UPDATED |
5756		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5757
5758	for (i = 0; i < 100; i++) {
5759		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5760				    MAC_STATUS_CFG_CHANGED));
5761		udelay(5);
5762		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5763					 MAC_STATUS_CFG_CHANGED |
5764					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5765			break;
5766	}
5767
5768	mac_status = tr32(MAC_STATUS);
5769	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5770		current_link_up = false;
5771		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5772		    tp->serdes_counter == 0) {
5773			tw32_f(MAC_MODE, (tp->mac_mode |
5774					  MAC_MODE_SEND_CONFIGS));
5775			udelay(1);
5776			tw32_f(MAC_MODE, tp->mac_mode);
5777		}
5778	}
5779
5780	if (current_link_up) {
5781		tp->link_config.active_speed = SPEED_1000;
5782		tp->link_config.active_duplex = DUPLEX_FULL;
5783		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5784				    LED_CTRL_LNKLED_OVERRIDE |
5785				    LED_CTRL_1000MBPS_ON));
5786	} else {
5787		tp->link_config.active_speed = SPEED_UNKNOWN;
5788		tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5789		tw32(MAC_LED_CTRL, (tp->led_ctrl |
5790				    LED_CTRL_LNKLED_OVERRIDE |
5791				    LED_CTRL_TRAFFIC_OVERRIDE));
5792	}
5793
5794	if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5795		u32 now_pause_cfg = tp->link_config.active_flowctrl;
5796		if (orig_pause_cfg != now_pause_cfg ||
5797		    orig_active_speed != tp->link_config.active_speed ||
5798		    orig_active_duplex != tp->link_config.active_duplex)
5799			tg3_link_report(tp);
5800	}
5801
5802	return 0;
5803}
5804
5805static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5806{
5807	int err = 0;
5808	u32 bmsr, bmcr;
5809	u16 current_speed = SPEED_UNKNOWN;
5810	u8 current_duplex = DUPLEX_UNKNOWN;
5811	bool current_link_up = false;
5812	u32 local_adv, remote_adv, sgsr;
5813
5814	if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5815	     tg3_asic_rev(tp) == ASIC_REV_5720) &&
5816	     !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5817	     (sgsr & SERDES_TG3_SGMII_MODE)) {
5818
5819		if (force_reset)
5820			tg3_phy_reset(tp);
5821
5822		tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5823
5824		if (!(sgsr & SERDES_TG3_LINK_UP)) {
5825			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5826		} else {
5827			current_link_up = true;
5828			if (sgsr & SERDES_TG3_SPEED_1000) {
5829				current_speed = SPEED_1000;
5830				tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5831			} else if (sgsr & SERDES_TG3_SPEED_100) {
5832				current_speed = SPEED_100;
5833				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5834			} else {
5835				current_speed = SPEED_10;
5836				tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5837			}
5838
5839			if (sgsr & SERDES_TG3_FULL_DUPLEX)
5840				current_duplex = DUPLEX_FULL;
5841			else
5842				current_duplex = DUPLEX_HALF;
5843		}
5844
5845		tw32_f(MAC_MODE, tp->mac_mode);
5846		udelay(40);
5847
5848		tg3_clear_mac_status(tp);
5849
5850		goto fiber_setup_done;
5851	}
5852
5853	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5854	tw32_f(MAC_MODE, tp->mac_mode);
5855	udelay(40);
5856
5857	tg3_clear_mac_status(tp);
5858
5859	if (force_reset)
5860		tg3_phy_reset(tp);
5861
5862	tp->link_config.rmt_adv = 0;
5863
5864	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5865	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5866	if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5867		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5868			bmsr |= BMSR_LSTATUS;
5869		else
5870			bmsr &= ~BMSR_LSTATUS;
5871	}
5872
5873	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5874
5875	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5876	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5877		/* do nothing, just check for link up at the end */
5878	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5879		u32 adv, newadv;
5880
5881		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5882		newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5883				 ADVERTISE_1000XPAUSE |
5884				 ADVERTISE_1000XPSE_ASYM |
5885				 ADVERTISE_SLCT);
5886
5887		newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5888		newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5889
5890		if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5891			tg3_writephy(tp, MII_ADVERTISE, newadv);
5892			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5893			tg3_writephy(tp, MII_BMCR, bmcr);
5894
5895			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5896			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5897			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5898
5899			return err;
5900		}
5901	} else {
5902		u32 new_bmcr;
5903
5904		bmcr &= ~BMCR_SPEED1000;
5905		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5906
5907		if (tp->link_config.duplex == DUPLEX_FULL)
5908			new_bmcr |= BMCR_FULLDPLX;
5909
5910		if (new_bmcr != bmcr) {
5911			/* BMCR_SPEED1000 is a reserved bit that needs
5912			 * to be set on write.
5913			 */
5914			new_bmcr |= BMCR_SPEED1000;
5915
5916			/* Force a linkdown */
5917			if (tp->link_up) {
5918				u32 adv;
5919
5920				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5921				adv &= ~(ADVERTISE_1000XFULL |
5922					 ADVERTISE_1000XHALF |
5923					 ADVERTISE_SLCT);
5924				tg3_writephy(tp, MII_ADVERTISE, adv);
5925				tg3_writephy(tp, MII_BMCR, bmcr |
5926							   BMCR_ANRESTART |
5927							   BMCR_ANENABLE);
5928				udelay(10);
5929				tg3_carrier_off(tp);
5930			}
5931			tg3_writephy(tp, MII_BMCR, new_bmcr);
5932			bmcr = new_bmcr;
5933			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5934			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5935			if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5936				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5937					bmsr |= BMSR_LSTATUS;
5938				else
5939					bmsr &= ~BMSR_LSTATUS;
5940			}
5941			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5942		}
5943	}
5944
5945	if (bmsr & BMSR_LSTATUS) {
5946		current_speed = SPEED_1000;
5947		current_link_up = true;
5948		if (bmcr & BMCR_FULLDPLX)
5949			current_duplex = DUPLEX_FULL;
5950		else
5951			current_duplex = DUPLEX_HALF;
5952
5953		local_adv = 0;
5954		remote_adv = 0;
5955
5956		if (bmcr & BMCR_ANENABLE) {
5957			u32 common;
5958
5959			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5960			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5961			common = local_adv & remote_adv;
5962			if (common & (ADVERTISE_1000XHALF |
5963				      ADVERTISE_1000XFULL)) {
5964				if (common & ADVERTISE_1000XFULL)
5965					current_duplex = DUPLEX_FULL;
5966				else
5967					current_duplex = DUPLEX_HALF;
5968
5969				tp->link_config.rmt_adv =
5970					   mii_adv_to_ethtool_adv_x(remote_adv);
5971			} else if (!tg3_flag(tp, 5780_CLASS)) {
5972				/* Link is up via parallel detect */
5973			} else {
5974				current_link_up = false;
5975			}
5976		}
5977	}
5978
5979fiber_setup_done:
5980	if (current_link_up && current_duplex == DUPLEX_FULL)
5981		tg3_setup_flow_control(tp, local_adv, remote_adv);
5982
5983	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5984	if (tp->link_config.active_duplex == DUPLEX_HALF)
5985		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5986
5987	tw32_f(MAC_MODE, tp->mac_mode);
5988	udelay(40);
5989
5990	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5991
5992	tp->link_config.active_speed = current_speed;
5993	tp->link_config.active_duplex = current_duplex;
5994
5995	tg3_test_and_report_link_chg(tp, current_link_up);
5996	return err;
5997}
5998
5999static void tg3_serdes_parallel_detect(struct tg3 *tp)
6000{
6001	if (tp->serdes_counter) {
6002		/* Give autoneg time to complete. */
6003		tp->serdes_counter--;
6004		return;
6005	}
6006
6007	if (!tp->link_up &&
6008	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
6009		u32 bmcr;
6010
6011		tg3_readphy(tp, MII_BMCR, &bmcr);
6012		if (bmcr & BMCR_ANENABLE) {
6013			u32 phy1, phy2;
6014
6015			/* Select shadow register 0x1f */
6016			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
6017			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
6018
6019			/* Select expansion interrupt status register */
6020			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6021					 MII_TG3_DSP_EXP1_INT_STAT);
6022			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6023			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6024
6025			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
6026				/* We have signal detect and not receiving
6027				 * config code words, link is up by parallel
6028				 * detection.
6029				 */
6030
6031				bmcr &= ~BMCR_ANENABLE;
6032				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
6033				tg3_writephy(tp, MII_BMCR, bmcr);
6034				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
6035			}
6036		}
6037	} else if (tp->link_up &&
6038		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
6039		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
6040		u32 phy2;
6041
6042		/* Select expansion interrupt status register */
6043		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
6044				 MII_TG3_DSP_EXP1_INT_STAT);
6045		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
6046		if (phy2 & 0x20) {
6047			u32 bmcr;
6048
6049			/* Config code words received, turn on autoneg. */
6050			tg3_readphy(tp, MII_BMCR, &bmcr);
6051			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
6052
6053			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
6054
6055		}
6056	}
6057}
6058
6059static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
6060{
6061	u32 val;
6062	int err;
6063
6064	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
6065		err = tg3_setup_fiber_phy(tp, force_reset);
6066	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
6067		err = tg3_setup_fiber_mii_phy(tp, force_reset);
6068	else
6069		err = tg3_setup_copper_phy(tp, force_reset);
6070
6071	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
6072		u32 scale;
6073
6074		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
6075		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
6076			scale = 65;
6077		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
6078			scale = 6;
6079		else
6080			scale = 12;
6081
6082		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
6083		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
6084		tw32(GRC_MISC_CFG, val);
6085	}
6086
6087	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
6088	      (6 << TX_LENGTHS_IPG_SHIFT);
6089	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
6090	    tg3_asic_rev(tp) == ASIC_REV_5762)
6091		val |= tr32(MAC_TX_LENGTHS) &
6092		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
6093			TX_LENGTHS_CNT_DWN_VAL_MSK);
6094
6095	if (tp->link_config.active_speed == SPEED_1000 &&
6096	    tp->link_config.active_duplex == DUPLEX_HALF)
6097		tw32(MAC_TX_LENGTHS, val |
6098		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6099	else
6100		tw32(MAC_TX_LENGTHS, val |
6101		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6102
6103	if (!tg3_flag(tp, 5705_PLUS)) {
6104		if (tp->link_up) {
6105			tw32(HOSTCC_STAT_COAL_TICKS,
6106			     tp->coal.stats_block_coalesce_usecs);
6107		} else {
6108			tw32(HOSTCC_STAT_COAL_TICKS, 0);
6109		}
6110	}
6111
6112	if (tg3_flag(tp, ASPM_WORKAROUND)) {
6113		val = tr32(PCIE_PWR_MGMT_THRESH);
6114		if (!tp->link_up)
6115			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6116			      tp->pwrmgmt_thresh;
6117		else
6118			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6119		tw32(PCIE_PWR_MGMT_THRESH, val);
6120	}
6121
6122	return err;
6123}
6124
6125/* tp->lock must be held */
6126static u64 tg3_refclk_read(struct tg3 *tp)
6127{
6128	u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6129	return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6130}
6131
6132/* tp->lock must be held */
6133static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6134{
6135	u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6136
6137	tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
6138	tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6139	tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6140	tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
6141}
6142
6143static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6144static inline void tg3_full_unlock(struct tg3 *tp);
6145static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6146{
6147	struct tg3 *tp = netdev_priv(dev);
6148
6149	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6150				SOF_TIMESTAMPING_RX_SOFTWARE |
6151				SOF_TIMESTAMPING_SOFTWARE;
6152
6153	if (tg3_flag(tp, PTP_CAPABLE)) {
6154		info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6155					SOF_TIMESTAMPING_RX_HARDWARE |
6156					SOF_TIMESTAMPING_RAW_HARDWARE;
6157	}
6158
6159	if (tp->ptp_clock)
6160		info->phc_index = ptp_clock_index(tp->ptp_clock);
6161	else
6162		info->phc_index = -1;
6163
6164	info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6165
6166	info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6167			   (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6168			   (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6169			   (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6170	return 0;
6171}
6172
6173static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6174{
6175	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6176	bool neg_adj = false;
6177	u32 correction = 0;
6178
6179	if (ppb < 0) {
6180		neg_adj = true;
6181		ppb = -ppb;
6182	}
6183
6184	/* Frequency adjustment is performed using hardware with a 24 bit
6185	 * accumulator and a programmable correction value. On each clk, the
6186	 * correction value gets added to the accumulator and when it
6187	 * overflows, the time counter is incremented/decremented.
6188	 *
6189	 * So conversion from ppb to correction value is
6190	 *		ppb * (1 << 24) / 1000000000
6191	 */
6192	correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6193		     TG3_EAV_REF_CLK_CORRECT_MASK;
6194
6195	tg3_full_lock(tp, 0);
6196
6197	if (correction)
6198		tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6199		     TG3_EAV_REF_CLK_CORRECT_EN |
6200		     (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6201	else
6202		tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6203
6204	tg3_full_unlock(tp);
6205
6206	return 0;
6207}
6208
6209static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6210{
6211	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6212
6213	tg3_full_lock(tp, 0);
6214	tp->ptp_adjust += delta;
6215	tg3_full_unlock(tp);
6216
6217	return 0;
6218}
6219
6220static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
6221{
6222	u64 ns;
6223	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6224
6225	tg3_full_lock(tp, 0);
6226	ns = tg3_refclk_read(tp);
6227	ns += tp->ptp_adjust;
6228	tg3_full_unlock(tp);
6229
6230	*ts = ns_to_timespec64(ns);
6231
6232	return 0;
6233}
6234
6235static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6236			   const struct timespec64 *ts)
6237{
6238	u64 ns;
6239	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6240
6241	ns = timespec64_to_ns(ts);
6242
6243	tg3_full_lock(tp, 0);
6244	tg3_refclk_write(tp, ns);
6245	tp->ptp_adjust = 0;
6246	tg3_full_unlock(tp);
6247
6248	return 0;
6249}
6250
6251static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6252			  struct ptp_clock_request *rq, int on)
6253{
6254	struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6255	u32 clock_ctl;
6256	int rval = 0;
6257
6258	switch (rq->type) {
6259	case PTP_CLK_REQ_PEROUT:
6260		if (rq->perout.index != 0)
6261			return -EINVAL;
6262
6263		tg3_full_lock(tp, 0);
6264		clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
6265		clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
6266
6267		if (on) {
6268			u64 nsec;
6269
6270			nsec = rq->perout.start.sec * 1000000000ULL +
6271			       rq->perout.start.nsec;
6272
6273			if (rq->perout.period.sec || rq->perout.period.nsec) {
6274				netdev_warn(tp->dev,
6275					    "Device supports only a one-shot timesync output, period must be 0\n");
6276				rval = -EINVAL;
6277				goto err_out;
6278			}
6279
6280			if (nsec & (1ULL << 63)) {
6281				netdev_warn(tp->dev,
6282					    "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
6283				rval = -EINVAL;
6284				goto err_out;
6285			}
6286
6287			tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
6288			tw32(TG3_EAV_WATCHDOG0_MSB,
6289			     TG3_EAV_WATCHDOG0_EN |
6290			     ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
6291
6292			tw32(TG3_EAV_REF_CLCK_CTL,
6293			     clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
6294		} else {
6295			tw32(TG3_EAV_WATCHDOG0_MSB, 0);
6296			tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
6297		}
6298
6299err_out:
6300		tg3_full_unlock(tp);
6301		return rval;
6302
6303	default:
6304		break;
6305	}
6306
6307	return -EOPNOTSUPP;
6308}
6309
6310static const struct ptp_clock_info tg3_ptp_caps = {
6311	.owner		= THIS_MODULE,
6312	.name		= "tg3 clock",
6313	.max_adj	= 250000000,
6314	.n_alarm	= 0,
6315	.n_ext_ts	= 0,
6316	.n_per_out	= 1,
6317	.n_pins		= 0,
6318	.pps		= 0,
6319	.adjfreq	= tg3_ptp_adjfreq,
6320	.adjtime	= tg3_ptp_adjtime,
6321	.gettime64	= tg3_ptp_gettime,
6322	.settime64	= tg3_ptp_settime,
6323	.enable		= tg3_ptp_enable,
6324};
6325
6326static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6327				     struct skb_shared_hwtstamps *timestamp)
6328{
6329	memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6330	timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6331					   tp->ptp_adjust);
6332}
6333
6334/* tp->lock must be held */
6335static void tg3_ptp_init(struct tg3 *tp)
6336{
6337	if (!tg3_flag(tp, PTP_CAPABLE))
6338		return;
6339
6340	/* Initialize the hardware clock to the system time. */
6341	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6342	tp->ptp_adjust = 0;
6343	tp->ptp_info = tg3_ptp_caps;
6344}
6345
6346/* tp->lock must be held */
6347static void tg3_ptp_resume(struct tg3 *tp)
6348{
6349	if (!tg3_flag(tp, PTP_CAPABLE))
6350		return;
6351
6352	tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6353	tp->ptp_adjust = 0;
6354}
6355
6356static void tg3_ptp_fini(struct tg3 *tp)
6357{
6358	if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6359		return;
6360
6361	ptp_clock_unregister(tp->ptp_clock);
6362	tp->ptp_clock = NULL;
6363	tp->ptp_adjust = 0;
6364}
6365
6366static inline int tg3_irq_sync(struct tg3 *tp)
6367{
6368	return tp->irq_sync;
6369}
6370
6371static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6372{
6373	int i;
6374
6375	dst = (u32 *)((u8 *)dst + off);
6376	for (i = 0; i < len; i += sizeof(u32))
6377		*dst++ = tr32(off + i);
6378}
6379
6380static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6381{
6382	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6383	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6384	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6385	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6386	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6387	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6388	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6389	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6390	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6391	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6392	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6393	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6394	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6395	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6396	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6397	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6398	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6399	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6400	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6401
6402	if (tg3_flag(tp, SUPPORT_MSIX))
6403		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6404
6405	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6406	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6407	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6408	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6409	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6410	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6411	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6412	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6413
6414	if (!tg3_flag(tp, 5705_PLUS)) {
6415		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6416		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6417		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6418	}
6419
6420	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6421	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6422	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6423	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6424	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6425
6426	if (tg3_flag(tp, NVRAM))
6427		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6428}
6429
6430static void tg3_dump_state(struct tg3 *tp)
6431{
6432	int i;
6433	u32 *regs;
6434
6435	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6436	if (!regs)
6437		return;
6438
6439	if (tg3_flag(tp, PCI_EXPRESS)) {
6440		/* Read up to but not including private PCI registers */
6441		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6442			regs[i / sizeof(u32)] = tr32(i);
6443	} else
6444		tg3_dump_legacy_regs(tp, regs);
6445
6446	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6447		if (!regs[i + 0] && !regs[i + 1] &&
6448		    !regs[i + 2] && !regs[i + 3])
6449			continue;
6450
6451		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6452			   i * 4,
6453			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6454	}
6455
6456	kfree(regs);
6457
6458	for (i = 0; i < tp->irq_cnt; i++) {
6459		struct tg3_napi *tnapi = &tp->napi[i];
6460
6461		/* SW status block */
6462		netdev_err(tp->dev,
6463			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6464			   i,
6465			   tnapi->hw_status->status,
6466			   tnapi->hw_status->status_tag,
6467			   tnapi->hw_status->rx_jumbo_consumer,
6468			   tnapi->hw_status->rx_consumer,
6469			   tnapi->hw_status->rx_mini_consumer,
6470			   tnapi->hw_status->idx[0].rx_producer,
6471			   tnapi->hw_status->idx[0].tx_consumer);
6472
6473		netdev_err(tp->dev,
6474		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6475			   i,
6476			   tnapi->last_tag, tnapi->last_irq_tag,
6477			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6478			   tnapi->rx_rcb_ptr,
6479			   tnapi->prodring.rx_std_prod_idx,
6480			   tnapi->prodring.rx_std_cons_idx,
6481			   tnapi->prodring.rx_jmb_prod_idx,
6482			   tnapi->prodring.rx_jmb_cons_idx);
6483	}
6484}
6485
6486/* This is called whenever we suspect that the system chipset is re-
6487 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6488 * is bogus tx completions. We try to recover by setting the
6489 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6490 * in the workqueue.
6491 */
6492static void tg3_tx_recover(struct tg3 *tp)
6493{
6494	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6495	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
6496
6497	netdev_warn(tp->dev,
6498		    "The system may be re-ordering memory-mapped I/O "
6499		    "cycles to the network device, attempting to recover. "
6500		    "Please report the problem to the driver maintainer "
6501		    "and include system chipset information.\n");
6502
6503	tg3_flag_set(tp, TX_RECOVERY_PENDING);
6504}
6505
6506static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6507{
6508	/* Tell compiler to fetch tx indices from memory. */
6509	barrier();
6510	return tnapi->tx_pending -
6511	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6512}
6513
6514/* Tigon3 never reports partial packet sends.  So we do not
6515 * need special logic to handle SKBs that have not had all
6516 * of their frags sent yet, like SunGEM does.
6517 */
6518static void tg3_tx(struct tg3_napi *tnapi)
6519{
6520	struct tg3 *tp = tnapi->tp;
6521	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6522	u32 sw_idx = tnapi->tx_cons;
6523	struct netdev_queue *txq;
6524	int index = tnapi - tp->napi;
6525	unsigned int pkts_compl = 0, bytes_compl = 0;
6526
6527	if (tg3_flag(tp, ENABLE_TSS))
6528		index--;
6529
6530	txq = netdev_get_tx_queue(tp->dev, index);
6531
6532	while (sw_idx != hw_idx) {
6533		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6534		struct sk_buff *skb = ri->skb;
6535		int i, tx_bug = 0;
6536
6537		if (unlikely(skb == NULL)) {
6538			tg3_tx_recover(tp);
6539			return;
6540		}
6541
6542		if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6543			struct skb_shared_hwtstamps timestamp;
6544			u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6545			hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6546
6547			tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6548
6549			skb_tstamp_tx(skb, &timestamp);
6550		}
6551
6552		pci_unmap_single(tp->pdev,
6553				 dma_unmap_addr(ri, mapping),
6554				 skb_headlen(skb),
6555				 PCI_DMA_TODEVICE);
6556
6557		ri->skb = NULL;
6558
6559		while (ri->fragmented) {
6560			ri->fragmented = false;
6561			sw_idx = NEXT_TX(sw_idx);
6562			ri = &tnapi->tx_buffers[sw_idx];
6563		}
6564
6565		sw_idx = NEXT_TX(sw_idx);
6566
6567		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6568			ri = &tnapi->tx_buffers[sw_idx];
6569			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6570				tx_bug = 1;
6571
6572			pci_unmap_page(tp->pdev,
6573				       dma_unmap_addr(ri, mapping),
6574				       skb_frag_size(&skb_shinfo(skb)->frags[i]),
6575				       PCI_DMA_TODEVICE);
6576
6577			while (ri->fragmented) {
6578				ri->fragmented = false;
6579				sw_idx = NEXT_TX(sw_idx);
6580				ri = &tnapi->tx_buffers[sw_idx];
6581			}
6582
6583			sw_idx = NEXT_TX(sw_idx);
6584		}
6585
6586		pkts_compl++;
6587		bytes_compl += skb->len;
6588
6589		dev_kfree_skb_any(skb);
6590
6591		if (unlikely(tx_bug)) {
6592			tg3_tx_recover(tp);
6593			return;
6594		}
6595	}
6596
6597	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6598
6599	tnapi->tx_cons = sw_idx;
6600
6601	/* Need to make the tx_cons update visible to tg3_start_xmit()
6602	 * before checking for netif_queue_stopped().  Without the
6603	 * memory barrier, there is a small possibility that tg3_start_xmit()
6604	 * will miss it and cause the queue to be stopped forever.
6605	 */
6606	smp_mb();
6607
6608	if (unlikely(netif_tx_queue_stopped(txq) &&
6609		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6610		__netif_tx_lock(txq, smp_processor_id());
6611		if (netif_tx_queue_stopped(txq) &&
6612		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6613			netif_tx_wake_queue(txq);
6614		__netif_tx_unlock(txq);
6615	}
6616}
6617
6618static void tg3_frag_free(bool is_frag, void *data)
6619{
6620	if (is_frag)
6621		put_page(virt_to_head_page(data));
6622	else
6623		kfree(data);
6624}
6625
6626static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6627{
6628	unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6629		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6630
6631	if (!ri->data)
6632		return;
6633
6634	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6635			 map_sz, PCI_DMA_FROMDEVICE);
6636	tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6637	ri->data = NULL;
6638}
6639
6640
6641/* Returns size of skb allocated or < 0 on error.
6642 *
6643 * We only need to fill in the address because the other members
6644 * of the RX descriptor are invariant, see tg3_init_rings.
6645 *
6646 * Note the purposeful assymetry of cpu vs. chip accesses.  For
6647 * posting buffers we only dirty the first cache line of the RX
6648 * descriptor (containing the address).  Whereas for the RX status
6649 * buffers the cpu only reads the last cacheline of the RX descriptor
6650 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6651 */
6652static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6653			     u32 opaque_key, u32 dest_idx_unmasked,
6654			     unsigned int *frag_size)
6655{
6656	struct tg3_rx_buffer_desc *desc;
6657	struct ring_info *map;
6658	u8 *data;
6659	dma_addr_t mapping;
6660	int skb_size, data_size, dest_idx;
6661
6662	switch (opaque_key) {
6663	case RXD_OPAQUE_RING_STD:
6664		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6665		desc = &tpr->rx_std[dest_idx];
6666		map = &tpr->rx_std_buffers[dest_idx];
6667		data_size = tp->rx_pkt_map_sz;
6668		break;
6669
6670	case RXD_OPAQUE_RING_JUMBO:
6671		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6672		desc = &tpr->rx_jmb[dest_idx].std;
6673		map = &tpr->rx_jmb_buffers[dest_idx];
6674		data_size = TG3_RX_JMB_MAP_SZ;
6675		break;
6676
6677	default:
6678		return -EINVAL;
6679	}
6680
6681	/* Do not overwrite any of the map or rp information
6682	 * until we are sure we can commit to a new buffer.
6683	 *
6684	 * Callers depend upon this behavior and assume that
6685	 * we leave everything unchanged if we fail.
6686	 */
6687	skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6688		   SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6689	if (skb_size <= PAGE_SIZE) {
6690		data = netdev_alloc_frag(skb_size);
6691		*frag_size = skb_size;
6692	} else {
6693		data = kmalloc(skb_size, GFP_ATOMIC);
6694		*frag_size = 0;
6695	}
6696	if (!data)
6697		return -ENOMEM;
6698
6699	mapping = pci_map_single(tp->pdev,
6700				 data + TG3_RX_OFFSET(tp),
6701				 data_size,
6702				 PCI_DMA_FROMDEVICE);
6703	if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6704		tg3_frag_free(skb_size <= PAGE_SIZE, data);
6705		return -EIO;
6706	}
6707
6708	map->data = data;
6709	dma_unmap_addr_set(map, mapping, mapping);
6710
6711	desc->addr_hi = ((u64)mapping >> 32);
6712	desc->addr_lo = ((u64)mapping & 0xffffffff);
6713
6714	return data_size;
6715}
6716
6717/* We only need to move over in the address because the other
6718 * members of the RX descriptor are invariant.  See notes above
6719 * tg3_alloc_rx_data for full details.
6720 */
6721static void tg3_recycle_rx(struct tg3_napi *tnapi,
6722			   struct tg3_rx_prodring_set *dpr,
6723			   u32 opaque_key, int src_idx,
6724			   u32 dest_idx_unmasked)
6725{
6726	struct tg3 *tp = tnapi->tp;
6727	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6728	struct ring_info *src_map, *dest_map;
6729	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6730	int dest_idx;
6731
6732	switch (opaque_key) {
6733	case RXD_OPAQUE_RING_STD:
6734		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6735		dest_desc = &dpr->rx_std[dest_idx];
6736		dest_map = &dpr->rx_std_buffers[dest_idx];
6737		src_desc = &spr->rx_std[src_idx];
6738		src_map = &spr->rx_std_buffers[src_idx];
6739		break;
6740
6741	case RXD_OPAQUE_RING_JUMBO:
6742		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6743		dest_desc = &dpr->rx_jmb[dest_idx].std;
6744		dest_map = &dpr->rx_jmb_buffers[dest_idx];
6745		src_desc = &spr->rx_jmb[src_idx].std;
6746		src_map = &spr->rx_jmb_buffers[src_idx];
6747		break;
6748
6749	default:
6750		return;
6751	}
6752
6753	dest_map->data = src_map->data;
6754	dma_unmap_addr_set(dest_map, mapping,
6755			   dma_unmap_addr(src_map, mapping));
6756	dest_desc->addr_hi = src_desc->addr_hi;
6757	dest_desc->addr_lo = src_desc->addr_lo;
6758
6759	/* Ensure that the update to the skb happens after the physical
6760	 * addresses have been transferred to the new BD location.
6761	 */
6762	smp_wmb();
6763
6764	src_map->data = NULL;
6765}
6766
6767/* The RX ring scheme is composed of multiple rings which post fresh
6768 * buffers to the chip, and one special ring the chip uses to report
6769 * status back to the host.
6770 *
6771 * The special ring reports the status of received packets to the
6772 * host.  The chip does not write into the original descriptor the
6773 * RX buffer was obtained from.  The chip simply takes the original
6774 * descriptor as provided by the host, updates the status and length
6775 * field, then writes this into the next status ring entry.
6776 *
6777 * Each ring the host uses to post buffers to the chip is described
6778 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6779 * it is first placed into the on-chip ram.  When the packet's length
6780 * is known, it walks down the TG3_BDINFO entries to select the ring.
6781 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6782 * which is within the range of the new packet's length is chosen.
6783 *
6784 * The "separate ring for rx status" scheme may sound queer, but it makes
6785 * sense from a cache coherency perspective.  If only the host writes
6786 * to the buffer post rings, and only the chip writes to the rx status
6787 * rings, then cache lines never move beyond shared-modified state.
6788 * If both the host and chip were to write into the same ring, cache line
6789 * eviction could occur since both entities want it in an exclusive state.
6790 */
6791static int tg3_rx(struct tg3_napi *tnapi, int budget)
6792{
6793	struct tg3 *tp = tnapi->tp;
6794	u32 work_mask, rx_std_posted = 0;
6795	u32 std_prod_idx, jmb_prod_idx;
6796	u32 sw_idx = tnapi->rx_rcb_ptr;
6797	u16 hw_idx;
6798	int received;
6799	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6800
6801	hw_idx = *(tnapi->rx_rcb_prod_idx);
6802	/*
6803	 * We need to order the read of hw_idx and the read of
6804	 * the opaque cookie.
6805	 */
6806	rmb();
6807	work_mask = 0;
6808	received = 0;
6809	std_prod_idx = tpr->rx_std_prod_idx;
6810	jmb_prod_idx = tpr->rx_jmb_prod_idx;
6811	while (sw_idx != hw_idx && budget > 0) {
6812		struct ring_info *ri;
6813		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6814		unsigned int len;
6815		struct sk_buff *skb;
6816		dma_addr_t dma_addr;
6817		u32 opaque_key, desc_idx, *post_ptr;
6818		u8 *data;
6819		u64 tstamp = 0;
6820
6821		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6822		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6823		if (opaque_key == RXD_OPAQUE_RING_STD) {
6824			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6825			dma_addr = dma_unmap_addr(ri, mapping);
6826			data = ri->data;
6827			post_ptr = &std_prod_idx;
6828			rx_std_posted++;
6829		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6830			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6831			dma_addr = dma_unmap_addr(ri, mapping);
6832			data = ri->data;
6833			post_ptr = &jmb_prod_idx;
6834		} else
6835			goto next_pkt_nopost;
6836
6837		work_mask |= opaque_key;
6838
6839		if (desc->err_vlan & RXD_ERR_MASK) {
6840		drop_it:
6841			tg3_recycle_rx(tnapi, tpr, opaque_key,
6842				       desc_idx, *post_ptr);
6843		drop_it_no_recycle:
6844			/* Other statistics kept track of by card. */
6845			tp->rx_dropped++;
6846			goto next_pkt;
6847		}
6848
6849		prefetch(data + TG3_RX_OFFSET(tp));
6850		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6851		      ETH_FCS_LEN;
6852
6853		if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6854		     RXD_FLAG_PTPSTAT_PTPV1 ||
6855		    (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6856		     RXD_FLAG_PTPSTAT_PTPV2) {
6857			tstamp = tr32(TG3_RX_TSTAMP_LSB);
6858			tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6859		}
6860
6861		if (len > TG3_RX_COPY_THRESH(tp)) {
6862			int skb_size;
6863			unsigned int frag_size;
6864
6865			skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6866						    *post_ptr, &frag_size);
6867			if (skb_size < 0)
6868				goto drop_it;
6869
6870			pci_unmap_single(tp->pdev, dma_addr, skb_size,
6871					 PCI_DMA_FROMDEVICE);
6872
6873			/* Ensure that the update to the data happens
6874			 * after the usage of the old DMA mapping.
6875			 */
6876			smp_wmb();
6877
6878			ri->data = NULL;
6879
6880			skb = build_skb(data, frag_size);
6881			if (!skb) {
6882				tg3_frag_free(frag_size != 0, data);
6883				goto drop_it_no_recycle;
6884			}
6885			skb_reserve(skb, TG3_RX_OFFSET(tp));
6886		} else {
6887			tg3_recycle_rx(tnapi, tpr, opaque_key,
6888				       desc_idx, *post_ptr);
6889
6890			skb = netdev_alloc_skb(tp->dev,
6891					       len + TG3_RAW_IP_ALIGN);
6892			if (skb == NULL)
6893				goto drop_it_no_recycle;
6894
6895			skb_reserve(skb, TG3_RAW_IP_ALIGN);
6896			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6897			memcpy(skb->data,
6898			       data + TG3_RX_OFFSET(tp),
6899			       len);
6900			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6901		}
6902
6903		skb_put(skb, len);
6904		if (tstamp)
6905			tg3_hwclock_to_timestamp(tp, tstamp,
6906						 skb_hwtstamps(skb));
6907
6908		if ((tp->dev->features & NETIF_F_RXCSUM) &&
6909		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6910		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6911		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
6912			skb->ip_summed = CHECKSUM_UNNECESSARY;
6913		else
6914			skb_checksum_none_assert(skb);
6915
6916		skb->protocol = eth_type_trans(skb, tp->dev);
6917
6918		if (len > (tp->dev->mtu + ETH_HLEN) &&
6919		    skb->protocol != htons(ETH_P_8021Q) &&
6920		    skb->protocol != htons(ETH_P_8021AD)) {
6921			dev_kfree_skb_any(skb);
6922			goto drop_it_no_recycle;
6923		}
6924
6925		if (desc->type_flags & RXD_FLAG_VLAN &&
6926		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6927			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6928					       desc->err_vlan & RXD_VLAN_MASK);
6929
6930		napi_gro_receive(&tnapi->napi, skb);
6931
6932		received++;
6933		budget--;
6934
6935next_pkt:
6936		(*post_ptr)++;
6937
6938		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6939			tpr->rx_std_prod_idx = std_prod_idx &
6940					       tp->rx_std_ring_mask;
6941			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6942				     tpr->rx_std_prod_idx);
6943			work_mask &= ~RXD_OPAQUE_RING_STD;
6944			rx_std_posted = 0;
6945		}
6946next_pkt_nopost:
6947		sw_idx++;
6948		sw_idx &= tp->rx_ret_ring_mask;
6949
6950		/* Refresh hw_idx to see if there is new work */
6951		if (sw_idx == hw_idx) {
6952			hw_idx = *(tnapi->rx_rcb_prod_idx);
6953			rmb();
6954		}
6955	}
6956
6957	/* ACK the status ring. */
6958	tnapi->rx_rcb_ptr = sw_idx;
6959	tw32_rx_mbox(tnapi->consmbox, sw_idx);
6960
6961	/* Refill RX ring(s). */
6962	if (!tg3_flag(tp, ENABLE_RSS)) {
6963		/* Sync BD data before updating mailbox */
6964		wmb();
6965
6966		if (work_mask & RXD_OPAQUE_RING_STD) {
6967			tpr->rx_std_prod_idx = std_prod_idx &
6968					       tp->rx_std_ring_mask;
6969			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6970				     tpr->rx_std_prod_idx);
6971		}
6972		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6973			tpr->rx_jmb_prod_idx = jmb_prod_idx &
6974					       tp->rx_jmb_ring_mask;
6975			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6976				     tpr->rx_jmb_prod_idx);
6977		}
6978		mmiowb();
6979	} else if (work_mask) {
6980		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6981		 * updated before the producer indices can be updated.
6982		 */
6983		smp_wmb();
6984
6985		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6986		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6987
6988		if (tnapi != &tp->napi[1]) {
6989			tp->rx_refill = true;
6990			napi_schedule(&tp->napi[1].napi);
6991		}
6992	}
6993
6994	return received;
6995}
6996
6997static void tg3_poll_link(struct tg3 *tp)
6998{
6999	/* handle link change and other phy events */
7000	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
7001		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
7002
7003		if (sblk->status & SD_STATUS_LINK_CHG) {
7004			sblk->status = SD_STATUS_UPDATED |
7005				       (sblk->status & ~SD_STATUS_LINK_CHG);
7006			spin_lock(&tp->lock);
7007			if (tg3_flag(tp, USE_PHYLIB)) {
7008				tw32_f(MAC_STATUS,
7009				     (MAC_STATUS_SYNC_CHANGED |
7010				      MAC_STATUS_CFG_CHANGED |
7011				      MAC_STATUS_MI_COMPLETION |
7012				      MAC_STATUS_LNKSTATE_CHANGED));
7013				udelay(40);
7014			} else
7015				tg3_setup_phy(tp, false);
7016			spin_unlock(&tp->lock);
7017		}
7018	}
7019}
7020
7021static int tg3_rx_prodring_xfer(struct tg3 *tp,
7022				struct tg3_rx_prodring_set *dpr,
7023				struct tg3_rx_prodring_set *spr)
7024{
7025	u32 si, di, cpycnt, src_prod_idx;
7026	int i, err = 0;
7027
7028	while (1) {
7029		src_prod_idx = spr->rx_std_prod_idx;
7030
7031		/* Make sure updates to the rx_std_buffers[] entries and the
7032		 * standard producer index are seen in the correct order.
7033		 */
7034		smp_rmb();
7035
7036		if (spr->rx_std_cons_idx == src_prod_idx)
7037			break;
7038
7039		if (spr->rx_std_cons_idx < src_prod_idx)
7040			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
7041		else
7042			cpycnt = tp->rx_std_ring_mask + 1 -
7043				 spr->rx_std_cons_idx;
7044
7045		cpycnt = min(cpycnt,
7046			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
7047
7048		si = spr->rx_std_cons_idx;
7049		di = dpr->rx_std_prod_idx;
7050
7051		for (i = di; i < di + cpycnt; i++) {
7052			if (dpr->rx_std_buffers[i].data) {
7053				cpycnt = i - di;
7054				err = -ENOSPC;
7055				break;
7056			}
7057		}
7058
7059		if (!cpycnt)
7060			break;
7061
7062		/* Ensure that updates to the rx_std_buffers ring and the
7063		 * shadowed hardware producer ring from tg3_recycle_skb() are
7064		 * ordered correctly WRT the skb check above.
7065		 */
7066		smp_rmb();
7067
7068		memcpy(&dpr->rx_std_buffers[di],
7069		       &spr->rx_std_buffers[si],
7070		       cpycnt * sizeof(struct ring_info));
7071
7072		for (i = 0; i < cpycnt; i++, di++, si++) {
7073			struct tg3_rx_buffer_desc *sbd, *dbd;
7074			sbd = &spr->rx_std[si];
7075			dbd = &dpr->rx_std[di];
7076			dbd->addr_hi = sbd->addr_hi;
7077			dbd->addr_lo = sbd->addr_lo;
7078		}
7079
7080		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
7081				       tp->rx_std_ring_mask;
7082		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
7083				       tp->rx_std_ring_mask;
7084	}
7085
7086	while (1) {
7087		src_prod_idx = spr->rx_jmb_prod_idx;
7088
7089		/* Make sure updates to the rx_jmb_buffers[] entries and
7090		 * the jumbo producer index are seen in the correct order.
7091		 */
7092		smp_rmb();
7093
7094		if (spr->rx_jmb_cons_idx == src_prod_idx)
7095			break;
7096
7097		if (spr->rx_jmb_cons_idx < src_prod_idx)
7098			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
7099		else
7100			cpycnt = tp->rx_jmb_ring_mask + 1 -
7101				 spr->rx_jmb_cons_idx;
7102
7103		cpycnt = min(cpycnt,
7104			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
7105
7106		si = spr->rx_jmb_cons_idx;
7107		di = dpr->rx_jmb_prod_idx;
7108
7109		for (i = di; i < di + cpycnt; i++) {
7110			if (dpr->rx_jmb_buffers[i].data) {
7111				cpycnt = i - di;
7112				err = -ENOSPC;
7113				break;
7114			}
7115		}
7116
7117		if (!cpycnt)
7118			break;
7119
7120		/* Ensure that updates to the rx_jmb_buffers ring and the
7121		 * shadowed hardware producer ring from tg3_recycle_skb() are
7122		 * ordered correctly WRT the skb check above.
7123		 */
7124		smp_rmb();
7125
7126		memcpy(&dpr->rx_jmb_buffers[di],
7127		       &spr->rx_jmb_buffers[si],
7128		       cpycnt * sizeof(struct ring_info));
7129
7130		for (i = 0; i < cpycnt; i++, di++, si++) {
7131			struct tg3_rx_buffer_desc *sbd, *dbd;
7132			sbd = &spr->rx_jmb[si].std;
7133			dbd = &dpr->rx_jmb[di].std;
7134			dbd->addr_hi = sbd->addr_hi;
7135			dbd->addr_lo = sbd->addr_lo;
7136		}
7137
7138		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
7139				       tp->rx_jmb_ring_mask;
7140		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
7141				       tp->rx_jmb_ring_mask;
7142	}
7143
7144	return err;
7145}
7146
7147static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7148{
7149	struct tg3 *tp = tnapi->tp;
7150
7151	/* run TX completion thread */
7152	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7153		tg3_tx(tnapi);
7154		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7155			return work_done;
7156	}
7157
7158	if (!tnapi->rx_rcb_prod_idx)
7159		return work_done;
7160
7161	/* run RX thread, within the bounds set by NAPI.
7162	 * All RX "locking" is done by ensuring outside
7163	 * code synchronizes with tg3->napi.poll()
7164	 */
7165	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7166		work_done += tg3_rx(tnapi, budget - work_done);
7167
7168	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7169		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7170		int i, err = 0;
7171		u32 std_prod_idx = dpr->rx_std_prod_idx;
7172		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7173
7174		tp->rx_refill = false;
7175		for (i = 1; i <= tp->rxq_cnt; i++)
7176			err |= tg3_rx_prodring_xfer(tp, dpr,
7177						    &tp->napi[i].prodring);
7178
7179		wmb();
7180
7181		if (std_prod_idx != dpr->rx_std_prod_idx)
7182			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7183				     dpr->rx_std_prod_idx);
7184
7185		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7186			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7187				     dpr->rx_jmb_prod_idx);
7188
7189		mmiowb();
7190
7191		if (err)
7192			tw32_f(HOSTCC_MODE, tp->coal_now);
7193	}
7194
7195	return work_done;
7196}
7197
7198static inline void tg3_reset_task_schedule(struct tg3 *tp)
7199{
7200	if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7201		schedule_work(&tp->reset_task);
7202}
7203
7204static inline void tg3_reset_task_cancel(struct tg3 *tp)
7205{
7206	cancel_work_sync(&tp->reset_task);
7207	tg3_flag_clear(tp, RESET_TASK_PENDING);
7208	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7209}
7210
7211static int tg3_poll_msix(struct napi_struct *napi, int budget)
7212{
7213	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7214	struct tg3 *tp = tnapi->tp;
7215	int work_done = 0;
7216	struct tg3_hw_status *sblk = tnapi->hw_status;
7217
7218	while (1) {
7219		work_done = tg3_poll_work(tnapi, work_done, budget);
7220
7221		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7222			goto tx_recovery;
7223
7224		if (unlikely(work_done >= budget))
7225			break;
7226
7227		/* tp->last_tag is used in tg3_int_reenable() below
7228		 * to tell the hw how much work has been processed,
7229		 * so we must read it before checking for more work.
7230		 */
7231		tnapi->last_tag = sblk->status_tag;
7232		tnapi->last_irq_tag = tnapi->last_tag;
7233		rmb();
7234
7235		/* check for RX/TX work to do */
7236		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7237			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7238
7239			/* This test here is not race free, but will reduce
7240			 * the number of interrupts by looping again.
7241			 */
7242			if (tnapi == &tp->napi[1] && tp->rx_refill)
7243				continue;
7244
7245			napi_complete_done(napi, work_done);
7246			/* Reenable interrupts. */
7247			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7248
7249			/* This test here is synchronized by napi_schedule()
7250			 * and napi_complete() to close the race condition.
7251			 */
7252			if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7253				tw32(HOSTCC_MODE, tp->coalesce_mode |
7254						  HOSTCC_MODE_ENABLE |
7255						  tnapi->coal_now);
7256			}
7257			mmiowb();
7258			break;
7259		}
7260	}
7261
7262	return work_done;
7263
7264tx_recovery:
7265	/* work_done is guaranteed to be less than budget. */
7266	napi_complete(napi);
7267	tg3_reset_task_schedule(tp);
7268	return work_done;
7269}
7270
7271static void tg3_process_error(struct tg3 *tp)
7272{
7273	u32 val;
7274	bool real_error = false;
7275
7276	if (tg3_flag(tp, ERROR_PROCESSED))
7277		return;
7278
7279	/* Check Flow Attention register */
7280	val = tr32(HOSTCC_FLOW_ATTN);
7281	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7282		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7283		real_error = true;
7284	}
7285
7286	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7287		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7288		real_error = true;
7289	}
7290
7291	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7292		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7293		real_error = true;
7294	}
7295
7296	if (!real_error)
7297		return;
7298
7299	tg3_dump_state(tp);
7300
7301	tg3_flag_set(tp, ERROR_PROCESSED);
7302	tg3_reset_task_schedule(tp);
7303}
7304
7305static int tg3_poll(struct napi_struct *napi, int budget)
7306{
7307	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7308	struct tg3 *tp = tnapi->tp;
7309	int work_done = 0;
7310	struct tg3_hw_status *sblk = tnapi->hw_status;
7311
7312	while (1) {
7313		if (sblk->status & SD_STATUS_ERROR)
7314			tg3_process_error(tp);
7315
7316		tg3_poll_link(tp);
7317
7318		work_done = tg3_poll_work(tnapi, work_done, budget);
7319
7320		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7321			goto tx_recovery;
7322
7323		if (unlikely(work_done >= budget))
7324			break;
7325
7326		if (tg3_flag(tp, TAGGED_STATUS)) {
7327			/* tp->last_tag is used in tg3_int_reenable() below
7328			 * to tell the hw how much work has been processed,
7329			 * so we must read it before checking for more work.
7330			 */
7331			tnapi->last_tag = sblk->status_tag;
7332			tnapi->last_irq_tag = tnapi->last_tag;
7333			rmb();
7334		} else
7335			sblk->status &= ~SD_STATUS_UPDATED;
7336
7337		if (likely(!tg3_has_work(tnapi))) {
7338			napi_complete_done(napi, work_done);
7339			tg3_int_reenable(tnapi);
7340			break;
7341		}
7342	}
7343
7344	return work_done;
7345
7346tx_recovery:
7347	/* work_done is guaranteed to be less than budget. */
7348	napi_complete(napi);
7349	tg3_reset_task_schedule(tp);
7350	return work_done;
7351}
7352
7353static void tg3_napi_disable(struct tg3 *tp)
7354{
7355	int i;
7356
7357	for (i = tp->irq_cnt - 1; i >= 0; i--)
7358		napi_disable(&tp->napi[i].napi);
7359}
7360
7361static void tg3_napi_enable(struct tg3 *tp)
7362{
7363	int i;
7364
7365	for (i = 0; i < tp->irq_cnt; i++)
7366		napi_enable(&tp->napi[i].napi);
7367}
7368
7369static void tg3_napi_init(struct tg3 *tp)
7370{
7371	int i;
7372
7373	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7374	for (i = 1; i < tp->irq_cnt; i++)
7375		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7376}
7377
7378static void tg3_napi_fini(struct tg3 *tp)
7379{
7380	int i;
7381
7382	for (i = 0; i < tp->irq_cnt; i++)
7383		netif_napi_del(&tp->napi[i].napi);
7384}
7385
7386static inline void tg3_netif_stop(struct tg3 *tp)
7387{
7388	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
7389	tg3_napi_disable(tp);
7390	netif_carrier_off(tp->dev);
7391	netif_tx_disable(tp->dev);
7392}
7393
7394/* tp->lock must be held */
7395static inline void tg3_netif_start(struct tg3 *tp)
7396{
7397	tg3_ptp_resume(tp);
7398
7399	/* NOTE: unconditional netif_tx_wake_all_queues is only
7400	 * appropriate so long as all callers are assured to
7401	 * have free tx slots (such as after tg3_init_hw)
7402	 */
7403	netif_tx_wake_all_queues(tp->dev);
7404
7405	if (tp->link_up)
7406		netif_carrier_on(tp->dev);
7407
7408	tg3_napi_enable(tp);
7409	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7410	tg3_enable_ints(tp);
7411}
7412
7413static void tg3_irq_quiesce(struct tg3 *tp)
7414	__releases(tp->lock)
7415	__acquires(tp->lock)
7416{
7417	int i;
7418
7419	BUG_ON(tp->irq_sync);
7420
7421	tp->irq_sync = 1;
7422	smp_mb();
7423
7424	spin_unlock_bh(&tp->lock);
7425
7426	for (i = 0; i < tp->irq_cnt; i++)
7427		synchronize_irq(tp->napi[i].irq_vec);
7428
7429	spin_lock_bh(&tp->lock);
7430}
7431
7432/* Fully shutdown all tg3 driver activity elsewhere in the system.
7433 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7434 * with as well.  Most of the time, this is not necessary except when
7435 * shutting down the device.
7436 */
7437static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7438{
7439	spin_lock_bh(&tp->lock);
7440	if (irq_sync)
7441		tg3_irq_quiesce(tp);
7442}
7443
7444static inline void tg3_full_unlock(struct tg3 *tp)
7445{
7446	spin_unlock_bh(&tp->lock);
7447}
7448
7449/* One-shot MSI handler - Chip automatically disables interrupt
7450 * after sending MSI so driver doesn't have to do it.
7451 */
7452static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7453{
7454	struct tg3_napi *tnapi = dev_id;
7455	struct tg3 *tp = tnapi->tp;
7456
7457	prefetch(tnapi->hw_status);
7458	if (tnapi->rx_rcb)
7459		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7460
7461	if (likely(!tg3_irq_sync(tp)))
7462		napi_schedule(&tnapi->napi);
7463
7464	return IRQ_HANDLED;
7465}
7466
7467/* MSI ISR - No need to check for interrupt sharing and no need to
7468 * flush status block and interrupt mailbox. PCI ordering rules
7469 * guarantee that MSI will arrive after the status block.
7470 */
7471static irqreturn_t tg3_msi(int irq, void *dev_id)
7472{
7473	struct tg3_napi *tnapi = dev_id;
7474	struct tg3 *tp = tnapi->tp;
7475
7476	prefetch(tnapi->hw_status);
7477	if (tnapi->rx_rcb)
7478		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7479	/*
7480	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7481	 * chip-internal interrupt pending events.
7482	 * Writing non-zero to intr-mbox-0 additional tells the
7483	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7484	 * event coalescing.
7485	 */
7486	tw32_mailbox(tnapi->int_mbox, 0x00000001);
7487	if (likely(!tg3_irq_sync(tp)))
7488		napi_schedule(&tnapi->napi);
7489
7490	return IRQ_RETVAL(1);
7491}
7492
7493static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7494{
7495	struct tg3_napi *tnapi = dev_id;
7496	struct tg3 *tp = tnapi->tp;
7497	struct tg3_hw_status *sblk = tnapi->hw_status;
7498	unsigned int handled = 1;
7499
7500	/* In INTx mode, it is possible for the interrupt to arrive at
7501	 * the CPU before the status block posted prior to the interrupt.
7502	 * Reading the PCI State register will confirm whether the
7503	 * interrupt is ours and will flush the status block.
7504	 */
7505	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7506		if (tg3_flag(tp, CHIP_RESETTING) ||
7507		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7508			handled = 0;
7509			goto out;
7510		}
7511	}
7512
7513	/*
7514	 * Writing any value to intr-mbox-0 clears PCI INTA# and
7515	 * chip-internal interrupt pending events.
7516	 * Writing non-zero to intr-mbox-0 additional tells the
7517	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7518	 * event coalescing.
7519	 *
7520	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7521	 * spurious interrupts.  The flush impacts performance but
7522	 * excessive spurious interrupts can be worse in some cases.
7523	 */
7524	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7525	if (tg3_irq_sync(tp))
7526		goto out;
7527	sblk->status &= ~SD_STATUS_UPDATED;
7528	if (likely(tg3_has_work(tnapi))) {
7529		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7530		napi_schedule(&tnapi->napi);
7531	} else {
7532		/* No work, shared interrupt perhaps?  re-enable
7533		 * interrupts, and flush that PCI write
7534		 */
7535		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7536			       0x00000000);
7537	}
7538out:
7539	return IRQ_RETVAL(handled);
7540}
7541
7542static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7543{
7544	struct tg3_napi *tnapi = dev_id;
7545	struct tg3 *tp = tnapi->tp;
7546	struct tg3_hw_status *sblk = tnapi->hw_status;
7547	unsigned int handled = 1;
7548
7549	/* In INTx mode, it is possible for the interrupt to arrive at
7550	 * the CPU before the status block posted prior to the interrupt.
7551	 * Reading the PCI State register will confirm whether the
7552	 * interrupt is ours and will flush the status block.
7553	 */
7554	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7555		if (tg3_flag(tp, CHIP_RESETTING) ||
7556		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7557			handled = 0;
7558			goto out;
7559		}
7560	}
7561
7562	/*
7563	 * writing any value to intr-mbox-0 clears PCI INTA# and
7564	 * chip-internal interrupt pending events.
7565	 * writing non-zero to intr-mbox-0 additional tells the
7566	 * NIC to stop sending us irqs, engaging "in-intr-handler"
7567	 * event coalescing.
7568	 *
7569	 * Flush the mailbox to de-assert the IRQ immediately to prevent
7570	 * spurious interrupts.  The flush impacts performance but
7571	 * excessive spurious interrupts can be worse in some cases.
7572	 */
7573	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7574
7575	/*
7576	 * In a shared interrupt configuration, sometimes other devices'
7577	 * interrupts will scream.  We record the current status tag here
7578	 * so that the above check can report that the screaming interrupts
7579	 * are unhandled.  Eventually they will be silenced.
7580	 */
7581	tnapi->last_irq_tag = sblk->status_tag;
7582
7583	if (tg3_irq_sync(tp))
7584		goto out;
7585
7586	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7587
7588	napi_schedule(&tnapi->napi);
7589
7590out:
7591	return IRQ_RETVAL(handled);
7592}
7593
7594/* ISR for interrupt test */
7595static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7596{
7597	struct tg3_napi *tnapi = dev_id;
7598	struct tg3 *tp = tnapi->tp;
7599	struct tg3_hw_status *sblk = tnapi->hw_status;
7600
7601	if ((sblk->status & SD_STATUS_UPDATED) ||
7602	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7603		tg3_disable_ints(tp);
7604		return IRQ_RETVAL(1);
7605	}
7606	return IRQ_RETVAL(0);
7607}
7608
7609#ifdef CONFIG_NET_POLL_CONTROLLER
7610static void tg3_poll_controller(struct net_device *dev)
7611{
7612	int i;
7613	struct tg3 *tp = netdev_priv(dev);
7614
7615	if (tg3_irq_sync(tp))
7616		return;
7617
7618	for (i = 0; i < tp->irq_cnt; i++)
7619		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7620}
7621#endif
7622
7623static void tg3_tx_timeout(struct net_device *dev)
7624{
7625	struct tg3 *tp = netdev_priv(dev);
7626
7627	if (netif_msg_tx_err(tp)) {
7628		netdev_err(dev, "transmit timed out, resetting\n");
7629		tg3_dump_state(tp);
7630	}
7631
7632	tg3_reset_task_schedule(tp);
7633}
7634
7635/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7636static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7637{
7638	u32 base = (u32) mapping & 0xffffffff;
7639
7640	return base + len + 8 < base;
7641}
7642
7643/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7644 * of any 4GB boundaries: 4G, 8G, etc
7645 */
7646static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7647					   u32 len, u32 mss)
7648{
7649	if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7650		u32 base = (u32) mapping & 0xffffffff;
7651
7652		return ((base + len + (mss & 0x3fff)) < base);
7653	}
7654	return 0;
7655}
7656
7657/* Test for DMA addresses > 40-bit */
7658static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7659					  int len)
7660{
7661#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7662	if (tg3_flag(tp, 40BIT_DMA_BUG))
7663		return ((u64) mapping + len) > DMA_BIT_MASK(40);
7664	return 0;
7665#else
7666	return 0;
7667#endif
7668}
7669
7670static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7671				 dma_addr_t mapping, u32 len, u32 flags,
7672				 u32 mss, u32 vlan)
7673{
7674	txbd->addr_hi = ((u64) mapping >> 32);
7675	txbd->addr_lo = ((u64) mapping & 0xffffffff);
7676	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7677	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7678}
7679
7680static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7681			    dma_addr_t map, u32 len, u32 flags,
7682			    u32 mss, u32 vlan)
7683{
7684	struct tg3 *tp = tnapi->tp;
7685	bool hwbug = false;
7686
7687	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7688		hwbug = true;
7689
7690	if (tg3_4g_overflow_test(map, len))
7691		hwbug = true;
7692
7693	if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7694		hwbug = true;
7695
7696	if (tg3_40bit_overflow_test(tp, map, len))
7697		hwbug = true;
7698
7699	if (tp->dma_limit) {
7700		u32 prvidx = *entry;
7701		u32 tmp_flag = flags & ~TXD_FLAG_END;
7702		while (len > tp->dma_limit && *budget) {
7703			u32 frag_len = tp->dma_limit;
7704			len -= tp->dma_limit;
7705
7706			/* Avoid the 8byte DMA problem */
7707			if (len <= 8) {
7708				len += tp->dma_limit / 2;
7709				frag_len = tp->dma_limit / 2;
7710			}
7711
7712			tnapi->tx_buffers[*entry].fragmented = true;
7713
7714			tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7715				      frag_len, tmp_flag, mss, vlan);
7716			*budget -= 1;
7717			prvidx = *entry;
7718			*entry = NEXT_TX(*entry);
7719
7720			map += frag_len;
7721		}
7722
7723		if (len) {
7724			if (*budget) {
7725				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7726					      len, flags, mss, vlan);
7727				*budget -= 1;
7728				*entry = NEXT_TX(*entry);
7729			} else {
7730				hwbug = true;
7731				tnapi->tx_buffers[prvidx].fragmented = false;
7732			}
7733		}
7734	} else {
7735		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7736			      len, flags, mss, vlan);
7737		*entry = NEXT_TX(*entry);
7738	}
7739
7740	return hwbug;
7741}
7742
7743static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7744{
7745	int i;
7746	struct sk_buff *skb;
7747	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7748
7749	skb = txb->skb;
7750	txb->skb = NULL;
7751
7752	pci_unmap_single(tnapi->tp->pdev,
7753			 dma_unmap_addr(txb, mapping),
7754			 skb_headlen(skb),
7755			 PCI_DMA_TODEVICE);
7756
7757	while (txb->fragmented) {
7758		txb->fragmented = false;
7759		entry = NEXT_TX(entry);
7760		txb = &tnapi->tx_buffers[entry];
7761	}
7762
7763	for (i = 0; i <= last; i++) {
7764		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7765
7766		entry = NEXT_TX(entry);
7767		txb = &tnapi->tx_buffers[entry];
7768
7769		pci_unmap_page(tnapi->tp->pdev,
7770			       dma_unmap_addr(txb, mapping),
7771			       skb_frag_size(frag), PCI_DMA_TODEVICE);
7772
7773		while (txb->fragmented) {
7774			txb->fragmented = false;
7775			entry = NEXT_TX(entry);
7776			txb = &tnapi->tx_buffers[entry];
7777		}
7778	}
7779}
7780
7781/* Workaround 4GB and 40-bit hardware DMA bugs. */
7782static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7783				       struct sk_buff **pskb,
7784				       u32 *entry, u32 *budget,
7785				       u32 base_flags, u32 mss, u32 vlan)
7786{
7787	struct tg3 *tp = tnapi->tp;
7788	struct sk_buff *new_skb, *skb = *pskb;
7789	dma_addr_t new_addr = 0;
7790	int ret = 0;
7791
7792	if (tg3_asic_rev(tp) != ASIC_REV_5701)
7793		new_skb = skb_copy(skb, GFP_ATOMIC);
7794	else {
7795		int more_headroom = 4 - ((unsigned long)skb->data & 3);
7796
7797		new_skb = skb_copy_expand(skb,
7798					  skb_headroom(skb) + more_headroom,
7799					  skb_tailroom(skb), GFP_ATOMIC);
7800	}
7801
7802	if (!new_skb) {
7803		ret = -1;
7804	} else {
7805		/* New SKB is guaranteed to be linear. */
7806		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7807					  PCI_DMA_TODEVICE);
7808		/* Make sure the mapping succeeded */
7809		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7810			dev_kfree_skb_any(new_skb);
7811			ret = -1;
7812		} else {
7813			u32 save_entry = *entry;
7814
7815			base_flags |= TXD_FLAG_END;
7816
7817			tnapi->tx_buffers[*entry].skb = new_skb;
7818			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7819					   mapping, new_addr);
7820
7821			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7822					    new_skb->len, base_flags,
7823					    mss, vlan)) {
7824				tg3_tx_skb_unmap(tnapi, save_entry, -1);
7825				dev_kfree_skb_any(new_skb);
7826				ret = -1;
7827			}
7828		}
7829	}
7830
7831	dev_kfree_skb_any(skb);
7832	*pskb = new_skb;
7833	return ret;
7834}
7835
7836static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7837{
7838	/* Check if we will never have enough descriptors,
7839	 * as gso_segs can be more than current ring size
7840	 */
7841	return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7842}
7843
7844static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7845
7846/* Use GSO to workaround all TSO packets that meet HW bug conditions
7847 * indicated in tg3_tx_frag_set()
7848 */
7849static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
7850		       struct netdev_queue *txq, struct sk_buff *skb)
7851{
7852	struct sk_buff *segs, *nskb;
7853	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7854
7855	/* Estimate the number of fragments in the worst case */
7856	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
7857		netif_tx_stop_queue(txq);
7858
7859		/* netif_tx_stop_queue() must be done before checking
7860		 * checking tx index in tg3_tx_avail() below, because in
7861		 * tg3_tx(), we update tx index before checking for
7862		 * netif_tx_queue_stopped().
7863		 */
7864		smp_mb();
7865		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
7866			return NETDEV_TX_BUSY;
7867
7868		netif_tx_wake_queue(txq);
7869	}
7870
7871	segs = skb_gso_segment(skb, tp->dev->features &
7872				    ~(NETIF_F_TSO | NETIF_F_TSO6));
7873	if (IS_ERR(segs) || !segs)
7874		goto tg3_tso_bug_end;
7875
7876	do {
7877		nskb = segs;
7878		segs = segs->next;
7879		nskb->next = NULL;
7880		tg3_start_xmit(nskb, tp->dev);
7881	} while (segs);
7882
7883tg3_tso_bug_end:
7884	dev_kfree_skb_any(skb);
7885
7886	return NETDEV_TX_OK;
7887}
7888
7889/* hard_start_xmit for all devices */
7890static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7891{
7892	struct tg3 *tp = netdev_priv(dev);
7893	u32 len, entry, base_flags, mss, vlan = 0;
7894	u32 budget;
7895	int i = -1, would_hit_hwbug;
7896	dma_addr_t mapping;
7897	struct tg3_napi *tnapi;
7898	struct netdev_queue *txq;
7899	unsigned int last;
7900	struct iphdr *iph = NULL;
7901	struct tcphdr *tcph = NULL;
7902	__sum16 tcp_csum = 0, ip_csum = 0;
7903	__be16 ip_tot_len = 0;
7904
7905	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7906	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7907	if (tg3_flag(tp, ENABLE_TSS))
7908		tnapi++;
7909
7910	budget = tg3_tx_avail(tnapi);
7911
7912	/* We are running in BH disabled context with netif_tx_lock
7913	 * and TX reclaim runs via tp->napi.poll inside of a software
7914	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
7915	 * no IRQ context deadlocks to worry about either.  Rejoice!
7916	 */
7917	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7918		if (!netif_tx_queue_stopped(txq)) {
7919			netif_tx_stop_queue(txq);
7920
7921			/* This is a hard error, log it. */
7922			netdev_err(dev,
7923				   "BUG! Tx Ring full when queue awake!\n");
7924		}
7925		return NETDEV_TX_BUSY;
7926	}
7927
7928	entry = tnapi->tx_prod;
7929	base_flags = 0;
7930
7931	mss = skb_shinfo(skb)->gso_size;
7932	if (mss) {
7933		u32 tcp_opt_len, hdr_len;
7934
7935		if (skb_cow_head(skb, 0))
7936			goto drop;
7937
7938		iph = ip_hdr(skb);
7939		tcp_opt_len = tcp_optlen(skb);
7940
7941		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7942
7943		/* HW/FW can not correctly segment packets that have been
7944		 * vlan encapsulated.
7945		 */
7946		if (skb->protocol == htons(ETH_P_8021Q) ||
7947		    skb->protocol == htons(ETH_P_8021AD)) {
7948			if (tg3_tso_bug_gso_check(tnapi, skb))
7949				return tg3_tso_bug(tp, tnapi, txq, skb);
7950			goto drop;
7951		}
7952
7953		if (!skb_is_gso_v6(skb)) {
7954			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7955			    tg3_flag(tp, TSO_BUG)) {
7956				if (tg3_tso_bug_gso_check(tnapi, skb))
7957					return tg3_tso_bug(tp, tnapi, txq, skb);
7958				goto drop;
7959			}
7960			ip_csum = iph->check;
7961			ip_tot_len = iph->tot_len;
7962			iph->check = 0;
7963			iph->tot_len = htons(mss + hdr_len);
7964		}
7965
7966		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7967			       TXD_FLAG_CPU_POST_DMA);
7968
7969		tcph = tcp_hdr(skb);
7970		tcp_csum = tcph->check;
7971
7972		if (tg3_flag(tp, HW_TSO_1) ||
7973		    tg3_flag(tp, HW_TSO_2) ||
7974		    tg3_flag(tp, HW_TSO_3)) {
7975			tcph->check = 0;
7976			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7977		} else {
7978			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
7979							 0, IPPROTO_TCP, 0);
7980		}
7981
7982		if (tg3_flag(tp, HW_TSO_3)) {
7983			mss |= (hdr_len & 0xc) << 12;
7984			if (hdr_len & 0x10)
7985				base_flags |= 0x00000010;
7986			base_flags |= (hdr_len & 0x3e0) << 5;
7987		} else if (tg3_flag(tp, HW_TSO_2))
7988			mss |= hdr_len << 9;
7989		else if (tg3_flag(tp, HW_TSO_1) ||
7990			 tg3_asic_rev(tp) == ASIC_REV_5705) {
7991			if (tcp_opt_len || iph->ihl > 5) {
7992				int tsflags;
7993
7994				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7995				mss |= (tsflags << 11);
7996			}
7997		} else {
7998			if (tcp_opt_len || iph->ihl > 5) {
7999				int tsflags;
8000
8001				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
8002				base_flags |= tsflags << 12;
8003			}
8004		}
8005	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
8006		/* HW/FW can not correctly checksum packets that have been
8007		 * vlan encapsulated.
8008		 */
8009		if (skb->protocol == htons(ETH_P_8021Q) ||
8010		    skb->protocol == htons(ETH_P_8021AD)) {
8011			if (skb_checksum_help(skb))
8012				goto drop;
8013		} else  {
8014			base_flags |= TXD_FLAG_TCPUDP_CSUM;
8015		}
8016	}
8017
8018	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
8019	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
8020		base_flags |= TXD_FLAG_JMB_PKT;
8021
8022	if (skb_vlan_tag_present(skb)) {
8023		base_flags |= TXD_FLAG_VLAN;
8024		vlan = skb_vlan_tag_get(skb);
8025	}
8026
8027	if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
8028	    tg3_flag(tp, TX_TSTAMP_EN)) {
8029		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
8030		base_flags |= TXD_FLAG_HWTSTAMP;
8031	}
8032
8033	len = skb_headlen(skb);
8034
8035	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
8036	if (pci_dma_mapping_error(tp->pdev, mapping))
8037		goto drop;
8038
8039
8040	tnapi->tx_buffers[entry].skb = skb;
8041	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
8042
8043	would_hit_hwbug = 0;
8044
8045	if (tg3_flag(tp, 5701_DMA_BUG))
8046		would_hit_hwbug = 1;
8047
8048	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
8049			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
8050			    mss, vlan)) {
8051		would_hit_hwbug = 1;
8052	} else if (skb_shinfo(skb)->nr_frags > 0) {
8053		u32 tmp_mss = mss;
8054
8055		if (!tg3_flag(tp, HW_TSO_1) &&
8056		    !tg3_flag(tp, HW_TSO_2) &&
8057		    !tg3_flag(tp, HW_TSO_3))
8058			tmp_mss = 0;
8059
8060		/* Now loop through additional data
8061		 * fragments, and queue them.
8062		 */
8063		last = skb_shinfo(skb)->nr_frags - 1;
8064		for (i = 0; i <= last; i++) {
8065			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8066
8067			len = skb_frag_size(frag);
8068			mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
8069						   len, DMA_TO_DEVICE);
8070
8071			tnapi->tx_buffers[entry].skb = NULL;
8072			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
8073					   mapping);
8074			if (dma_mapping_error(&tp->pdev->dev, mapping))
8075				goto dma_error;
8076
8077			if (!budget ||
8078			    tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
8079					    len, base_flags |
8080					    ((i == last) ? TXD_FLAG_END : 0),
8081					    tmp_mss, vlan)) {
8082				would_hit_hwbug = 1;
8083				break;
8084			}
8085		}
8086	}
8087
8088	if (would_hit_hwbug) {
8089		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8090
8091		if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8092			/* If it's a TSO packet, do GSO instead of
8093			 * allocating and copying to a large linear SKB
8094			 */
8095			if (ip_tot_len) {
8096				iph->check = ip_csum;
8097				iph->tot_len = ip_tot_len;
8098			}
8099			tcph->check = tcp_csum;
8100			return tg3_tso_bug(tp, tnapi, txq, skb);
8101		}
8102
8103		/* If the workaround fails due to memory/mapping
8104		 * failure, silently drop this packet.
8105		 */
8106		entry = tnapi->tx_prod;
8107		budget = tg3_tx_avail(tnapi);
8108		if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
8109						base_flags, mss, vlan))
8110			goto drop_nofree;
8111	}
8112
8113	skb_tx_timestamp(skb);
8114	netdev_tx_sent_queue(txq, skb->len);
8115
8116	/* Sync BD data before updating mailbox */
8117	wmb();
8118
8119	tnapi->tx_prod = entry;
8120	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
8121		netif_tx_stop_queue(txq);
8122
8123		/* netif_tx_stop_queue() must be done before checking
8124		 * checking tx index in tg3_tx_avail() below, because in
8125		 * tg3_tx(), we update tx index before checking for
8126		 * netif_tx_queue_stopped().
8127		 */
8128		smp_mb();
8129		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
8130			netif_tx_wake_queue(txq);
8131	}
8132
8133	if (!skb->xmit_more || netif_xmit_stopped(txq)) {
8134		/* Packets are ready, update Tx producer idx on card. */
8135		tw32_tx_mbox(tnapi->prodmbox, entry);
8136		mmiowb();
8137	}
8138
8139	return NETDEV_TX_OK;
8140
8141dma_error:
8142	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
8143	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
8144drop:
8145	dev_kfree_skb_any(skb);
8146drop_nofree:
8147	tp->tx_dropped++;
8148	return NETDEV_TX_OK;
8149}
8150
8151static void tg3_mac_loopback(struct tg3 *tp, bool enable)
8152{
8153	if (enable) {
8154		tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
8155				  MAC_MODE_PORT_MODE_MASK);
8156
8157		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
8158
8159		if (!tg3_flag(tp, 5705_PLUS))
8160			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8161
8162		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
8163			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
8164		else
8165			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
8166	} else {
8167		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
8168
8169		if (tg3_flag(tp, 5705_PLUS) ||
8170		    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
8171		    tg3_asic_rev(tp) == ASIC_REV_5700)
8172			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
8173	}
8174
8175	tw32(MAC_MODE, tp->mac_mode);
8176	udelay(40);
8177}
8178
8179static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
8180{
8181	u32 val, bmcr, mac_mode, ptest = 0;
8182
8183	tg3_phy_toggle_apd(tp, false);
8184	tg3_phy_toggle_automdix(tp, false);
8185
8186	if (extlpbk && tg3_phy_set_extloopbk(tp))
8187		return -EIO;
8188
8189	bmcr = BMCR_FULLDPLX;
8190	switch (speed) {
8191	case SPEED_10:
8192		break;
8193	case SPEED_100:
8194		bmcr |= BMCR_SPEED100;
8195		break;
8196	case SPEED_1000:
8197	default:
8198		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8199			speed = SPEED_100;
8200			bmcr |= BMCR_SPEED100;
8201		} else {
8202			speed = SPEED_1000;
8203			bmcr |= BMCR_SPEED1000;
8204		}
8205	}
8206
8207	if (extlpbk) {
8208		if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8209			tg3_readphy(tp, MII_CTRL1000, &val);
8210			val |= CTL1000_AS_MASTER |
8211			       CTL1000_ENABLE_MASTER;
8212			tg3_writephy(tp, MII_CTRL1000, val);
8213		} else {
8214			ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8215				MII_TG3_FET_PTEST_TRIM_2;
8216			tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8217		}
8218	} else
8219		bmcr |= BMCR_LOOPBACK;
8220
8221	tg3_writephy(tp, MII_BMCR, bmcr);
8222
8223	/* The write needs to be flushed for the FETs */
8224	if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8225		tg3_readphy(tp, MII_BMCR, &bmcr);
8226
8227	udelay(40);
8228
8229	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8230	    tg3_asic_rev(tp) == ASIC_REV_5785) {
8231		tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8232			     MII_TG3_FET_PTEST_FRC_TX_LINK |
8233			     MII_TG3_FET_PTEST_FRC_TX_LOCK);
8234
8235		/* The write needs to be flushed for the AC131 */
8236		tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8237	}
8238
8239	/* Reset to prevent losing 1st rx packet intermittently */
8240	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8241	    tg3_flag(tp, 5780_CLASS)) {
8242		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8243		udelay(10);
8244		tw32_f(MAC_RX_MODE, tp->rx_mode);
8245	}
8246
8247	mac_mode = tp->mac_mode &
8248		   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8249	if (speed == SPEED_1000)
8250		mac_mode |= MAC_MODE_PORT_MODE_GMII;
8251	else
8252		mac_mode |= MAC_MODE_PORT_MODE_MII;
8253
8254	if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8255		u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8256
8257		if (masked_phy_id == TG3_PHY_ID_BCM5401)
8258			mac_mode &= ~MAC_MODE_LINK_POLARITY;
8259		else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8260			mac_mode |= MAC_MODE_LINK_POLARITY;
8261
8262		tg3_writephy(tp, MII_TG3_EXT_CTRL,
8263			     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8264	}
8265
8266	tw32(MAC_MODE, mac_mode);
8267	udelay(40);
8268
8269	return 0;
8270}
8271
8272static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8273{
8274	struct tg3 *tp = netdev_priv(dev);
8275
8276	if (features & NETIF_F_LOOPBACK) {
8277		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8278			return;
8279
8280		spin_lock_bh(&tp->lock);
8281		tg3_mac_loopback(tp, true);
8282		netif_carrier_on(tp->dev);
8283		spin_unlock_bh(&tp->lock);
8284		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8285	} else {
8286		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8287			return;
8288
8289		spin_lock_bh(&tp->lock);
8290		tg3_mac_loopback(tp, false);
8291		/* Force link status check */
8292		tg3_setup_phy(tp, true);
8293		spin_unlock_bh(&tp->lock);
8294		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8295	}
8296}
8297
8298static netdev_features_t tg3_fix_features(struct net_device *dev,
8299	netdev_features_t features)
8300{
8301	struct tg3 *tp = netdev_priv(dev);
8302
8303	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8304		features &= ~NETIF_F_ALL_TSO;
8305
8306	return features;
8307}
8308
8309static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8310{
8311	netdev_features_t changed = dev->features ^ features;
8312
8313	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8314		tg3_set_loopback(dev, features);
8315
8316	return 0;
8317}
8318
8319static void tg3_rx_prodring_free(struct tg3 *tp,
8320				 struct tg3_rx_prodring_set *tpr)
8321{
8322	int i;
8323
8324	if (tpr != &tp->napi[0].prodring) {
8325		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8326		     i = (i + 1) & tp->rx_std_ring_mask)
8327			tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8328					tp->rx_pkt_map_sz);
8329
8330		if (tg3_flag(tp, JUMBO_CAPABLE)) {
8331			for (i = tpr->rx_jmb_cons_idx;
8332			     i != tpr->rx_jmb_prod_idx;
8333			     i = (i + 1) & tp->rx_jmb_ring_mask) {
8334				tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8335						TG3_RX_JMB_MAP_SZ);
8336			}
8337		}
8338
8339		return;
8340	}
8341
8342	for (i = 0; i <= tp->rx_std_ring_mask; i++)
8343		tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8344				tp->rx_pkt_map_sz);
8345
8346	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8347		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8348			tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8349					TG3_RX_JMB_MAP_SZ);
8350	}
8351}
8352
8353/* Initialize rx rings for packet processing.
8354 *
8355 * The chip has been shut down and the driver detached from
8356 * the networking, so no interrupts or new tx packets will
8357 * end up in the driver.  tp->{tx,}lock are held and thus
8358 * we may not sleep.
8359 */
8360static int tg3_rx_prodring_alloc(struct tg3 *tp,
8361				 struct tg3_rx_prodring_set *tpr)
8362{
8363	u32 i, rx_pkt_dma_sz;
8364
8365	tpr->rx_std_cons_idx = 0;
8366	tpr->rx_std_prod_idx = 0;
8367	tpr->rx_jmb_cons_idx = 0;
8368	tpr->rx_jmb_prod_idx = 0;
8369
8370	if (tpr != &tp->napi[0].prodring) {
8371		memset(&tpr->rx_std_buffers[0], 0,
8372		       TG3_RX_STD_BUFF_RING_SIZE(tp));
8373		if (tpr->rx_jmb_buffers)
8374			memset(&tpr->rx_jmb_buffers[0], 0,
8375			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
8376		goto done;
8377	}
8378
8379	/* Zero out all descriptors. */
8380	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8381
8382	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8383	if (tg3_flag(tp, 5780_CLASS) &&
8384	    tp->dev->mtu > ETH_DATA_LEN)
8385		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8386	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8387
8388	/* Initialize invariants of the rings, we only set this
8389	 * stuff once.  This works because the card does not
8390	 * write into the rx buffer posting rings.
8391	 */
8392	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8393		struct tg3_rx_buffer_desc *rxd;
8394
8395		rxd = &tpr->rx_std[i];
8396		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8397		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8398		rxd->opaque = (RXD_OPAQUE_RING_STD |
8399			       (i << RXD_OPAQUE_INDEX_SHIFT));
8400	}
8401
8402	/* Now allocate fresh SKBs for each rx ring. */
8403	for (i = 0; i < tp->rx_pending; i++) {
8404		unsigned int frag_size;
8405
8406		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8407				      &frag_size) < 0) {
8408			netdev_warn(tp->dev,
8409				    "Using a smaller RX standard ring. Only "
8410				    "%d out of %d buffers were allocated "
8411				    "successfully\n", i, tp->rx_pending);
8412			if (i == 0)
8413				goto initfail;
8414			tp->rx_pending = i;
8415			break;
8416		}
8417	}
8418
8419	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8420		goto done;
8421
8422	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8423
8424	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8425		goto done;
8426
8427	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8428		struct tg3_rx_buffer_desc *rxd;
8429
8430		rxd = &tpr->rx_jmb[i].std;
8431		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8432		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8433				  RXD_FLAG_JUMBO;
8434		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8435		       (i << RXD_OPAQUE_INDEX_SHIFT));
8436	}
8437
8438	for (i = 0; i < tp->rx_jumbo_pending; i++) {
8439		unsigned int frag_size;
8440
8441		if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8442				      &frag_size) < 0) {
8443			netdev_warn(tp->dev,
8444				    "Using a smaller RX jumbo ring. Only %d "
8445				    "out of %d buffers were allocated "
8446				    "successfully\n", i, tp->rx_jumbo_pending);
8447			if (i == 0)
8448				goto initfail;
8449			tp->rx_jumbo_pending = i;
8450			break;
8451		}
8452	}
8453
8454done:
8455	return 0;
8456
8457initfail:
8458	tg3_rx_prodring_free(tp, tpr);
8459	return -ENOMEM;
8460}
8461
8462static void tg3_rx_prodring_fini(struct tg3 *tp,
8463				 struct tg3_rx_prodring_set *tpr)
8464{
8465	kfree(tpr->rx_std_buffers);
8466	tpr->rx_std_buffers = NULL;
8467	kfree(tpr->rx_jmb_buffers);
8468	tpr->rx_jmb_buffers = NULL;
8469	if (tpr->rx_std) {
8470		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8471				  tpr->rx_std, tpr->rx_std_mapping);
8472		tpr->rx_std = NULL;
8473	}
8474	if (tpr->rx_jmb) {
8475		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8476				  tpr->rx_jmb, tpr->rx_jmb_mapping);
8477		tpr->rx_jmb = NULL;
8478	}
8479}
8480
8481static int tg3_rx_prodring_init(struct tg3 *tp,
8482				struct tg3_rx_prodring_set *tpr)
8483{
8484	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8485				      GFP_KERNEL);
8486	if (!tpr->rx_std_buffers)
8487		return -ENOMEM;
8488
8489	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8490					 TG3_RX_STD_RING_BYTES(tp),
8491					 &tpr->rx_std_mapping,
8492					 GFP_KERNEL);
8493	if (!tpr->rx_std)
8494		goto err_out;
8495
8496	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8497		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8498					      GFP_KERNEL);
8499		if (!tpr->rx_jmb_buffers)
8500			goto err_out;
8501
8502		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8503						 TG3_RX_JMB_RING_BYTES(tp),
8504						 &tpr->rx_jmb_mapping,
8505						 GFP_KERNEL);
8506		if (!tpr->rx_jmb)
8507			goto err_out;
8508	}
8509
8510	return 0;
8511
8512err_out:
8513	tg3_rx_prodring_fini(tp, tpr);
8514	return -ENOMEM;
8515}
8516
8517/* Free up pending packets in all rx/tx rings.
8518 *
8519 * The chip has been shut down and the driver detached from
8520 * the networking, so no interrupts or new tx packets will
8521 * end up in the driver.  tp->{tx,}lock is not held and we are not
8522 * in an interrupt context and thus may sleep.
8523 */
8524static void tg3_free_rings(struct tg3 *tp)
8525{
8526	int i, j;
8527
8528	for (j = 0; j < tp->irq_cnt; j++) {
8529		struct tg3_napi *tnapi = &tp->napi[j];
8530
8531		tg3_rx_prodring_free(tp, &tnapi->prodring);
8532
8533		if (!tnapi->tx_buffers)
8534			continue;
8535
8536		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8537			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8538
8539			if (!skb)
8540				continue;
8541
8542			tg3_tx_skb_unmap(tnapi, i,
8543					 skb_shinfo(skb)->nr_frags - 1);
8544
8545			dev_kfree_skb_any(skb);
8546		}
8547		netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8548	}
8549}
8550
8551/* Initialize tx/rx rings for packet processing.
8552 *
8553 * The chip has been shut down and the driver detached from
8554 * the networking, so no interrupts or new tx packets will
8555 * end up in the driver.  tp->{tx,}lock are held and thus
8556 * we may not sleep.
8557 */
8558static int tg3_init_rings(struct tg3 *tp)
8559{
8560	int i;
8561
8562	/* Free up all the SKBs. */
8563	tg3_free_rings(tp);
8564
8565	for (i = 0; i < tp->irq_cnt; i++) {
8566		struct tg3_napi *tnapi = &tp->napi[i];
8567
8568		tnapi->last_tag = 0;
8569		tnapi->last_irq_tag = 0;
8570		tnapi->hw_status->status = 0;
8571		tnapi->hw_status->status_tag = 0;
8572		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8573
8574		tnapi->tx_prod = 0;
8575		tnapi->tx_cons = 0;
8576		if (tnapi->tx_ring)
8577			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8578
8579		tnapi->rx_rcb_ptr = 0;
8580		if (tnapi->rx_rcb)
8581			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8582
8583		if (tnapi->prodring.rx_std &&
8584		    tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8585			tg3_free_rings(tp);
8586			return -ENOMEM;
8587		}
8588	}
8589
8590	return 0;
8591}
8592
8593static void tg3_mem_tx_release(struct tg3 *tp)
8594{
8595	int i;
8596
8597	for (i = 0; i < tp->irq_max; i++) {
8598		struct tg3_napi *tnapi = &tp->napi[i];
8599
8600		if (tnapi->tx_ring) {
8601			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8602				tnapi->tx_ring, tnapi->tx_desc_mapping);
8603			tnapi->tx_ring = NULL;
8604		}
8605
8606		kfree(tnapi->tx_buffers);
8607		tnapi->tx_buffers = NULL;
8608	}
8609}
8610
8611static int tg3_mem_tx_acquire(struct tg3 *tp)
8612{
8613	int i;
8614	struct tg3_napi *tnapi = &tp->napi[0];
8615
8616	/* If multivector TSS is enabled, vector 0 does not handle
8617	 * tx interrupts.  Don't allocate any resources for it.
8618	 */
8619	if (tg3_flag(tp, ENABLE_TSS))
8620		tnapi++;
8621
8622	for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8623		tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8624					    TG3_TX_RING_SIZE, GFP_KERNEL);
8625		if (!tnapi->tx_buffers)
8626			goto err_out;
8627
8628		tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8629						    TG3_TX_RING_BYTES,
8630						    &tnapi->tx_desc_mapping,
8631						    GFP_KERNEL);
8632		if (!tnapi->tx_ring)
8633			goto err_out;
8634	}
8635
8636	return 0;
8637
8638err_out:
8639	tg3_mem_tx_release(tp);
8640	return -ENOMEM;
8641}
8642
8643static void tg3_mem_rx_release(struct tg3 *tp)
8644{
8645	int i;
8646
8647	for (i = 0; i < tp->irq_max; i++) {
8648		struct tg3_napi *tnapi = &tp->napi[i];
8649
8650		tg3_rx_prodring_fini(tp, &tnapi->prodring);
8651
8652		if (!tnapi->rx_rcb)
8653			continue;
8654
8655		dma_free_coherent(&tp->pdev->dev,
8656				  TG3_RX_RCB_RING_BYTES(tp),
8657				  tnapi->rx_rcb,
8658				  tnapi->rx_rcb_mapping);
8659		tnapi->rx_rcb = NULL;
8660	}
8661}
8662
8663static int tg3_mem_rx_acquire(struct tg3 *tp)
8664{
8665	unsigned int i, limit;
8666
8667	limit = tp->rxq_cnt;
8668
8669	/* If RSS is enabled, we need a (dummy) producer ring
8670	 * set on vector zero.  This is the true hw prodring.
8671	 */
8672	if (tg3_flag(tp, ENABLE_RSS))
8673		limit++;
8674
8675	for (i = 0; i < limit; i++) {
8676		struct tg3_napi *tnapi = &tp->napi[i];
8677
8678		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8679			goto err_out;
8680
8681		/* If multivector RSS is enabled, vector 0
8682		 * does not handle rx or tx interrupts.
8683		 * Don't allocate any resources for it.
8684		 */
8685		if (!i && tg3_flag(tp, ENABLE_RSS))
8686			continue;
8687
8688		tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
8689						    TG3_RX_RCB_RING_BYTES(tp),
8690						    &tnapi->rx_rcb_mapping,
8691						    GFP_KERNEL);
8692		if (!tnapi->rx_rcb)
8693			goto err_out;
8694	}
8695
8696	return 0;
8697
8698err_out:
8699	tg3_mem_rx_release(tp);
8700	return -ENOMEM;
8701}
8702
8703/*
8704 * Must not be invoked with interrupt sources disabled and
8705 * the hardware shutdown down.
8706 */
8707static void tg3_free_consistent(struct tg3 *tp)
8708{
8709	int i;
8710
8711	for (i = 0; i < tp->irq_cnt; i++) {
8712		struct tg3_napi *tnapi = &tp->napi[i];
8713
8714		if (tnapi->hw_status) {
8715			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8716					  tnapi->hw_status,
8717					  tnapi->status_mapping);
8718			tnapi->hw_status = NULL;
8719		}
8720	}
8721
8722	tg3_mem_rx_release(tp);
8723	tg3_mem_tx_release(tp);
8724
8725	if (tp->hw_stats) {
8726		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8727				  tp->hw_stats, tp->stats_mapping);
8728		tp->hw_stats = NULL;
8729	}
8730}
8731
8732/*
8733 * Must not be invoked with interrupt sources disabled and
8734 * the hardware shutdown down.  Can sleep.
8735 */
8736static int tg3_alloc_consistent(struct tg3 *tp)
8737{
8738	int i;
8739
8740	tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
8741					   sizeof(struct tg3_hw_stats),
8742					   &tp->stats_mapping, GFP_KERNEL);
8743	if (!tp->hw_stats)
8744		goto err_out;
8745
8746	for (i = 0; i < tp->irq_cnt; i++) {
8747		struct tg3_napi *tnapi = &tp->napi[i];
8748		struct tg3_hw_status *sblk;
8749
8750		tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
8751						       TG3_HW_STATUS_SIZE,
8752						       &tnapi->status_mapping,
8753						       GFP_KERNEL);
8754		if (!tnapi->hw_status)
8755			goto err_out;
8756
8757		sblk = tnapi->hw_status;
8758
8759		if (tg3_flag(tp, ENABLE_RSS)) {
8760			u16 *prodptr = NULL;
8761
8762			/*
8763			 * When RSS is enabled, the status block format changes
8764			 * slightly.  The "rx_jumbo_consumer", "reserved",
8765			 * and "rx_mini_consumer" members get mapped to the
8766			 * other three rx return ring producer indexes.
8767			 */
8768			switch (i) {
8769			case 1:
8770				prodptr = &sblk->idx[0].rx_producer;
8771				break;
8772			case 2:
8773				prodptr = &sblk->rx_jumbo_consumer;
8774				break;
8775			case 3:
8776				prodptr = &sblk->reserved;
8777				break;
8778			case 4:
8779				prodptr = &sblk->rx_mini_consumer;
8780				break;
8781			}
8782			tnapi->rx_rcb_prod_idx = prodptr;
8783		} else {
8784			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8785		}
8786	}
8787
8788	if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8789		goto err_out;
8790
8791	return 0;
8792
8793err_out:
8794	tg3_free_consistent(tp);
8795	return -ENOMEM;
8796}
8797
8798#define MAX_WAIT_CNT 1000
8799
8800/* To stop a block, clear the enable bit and poll till it
8801 * clears.  tp->lock is held.
8802 */
8803static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8804{
8805	unsigned int i;
8806	u32 val;
8807
8808	if (tg3_flag(tp, 5705_PLUS)) {
8809		switch (ofs) {
8810		case RCVLSC_MODE:
8811		case DMAC_MODE:
8812		case MBFREE_MODE:
8813		case BUFMGR_MODE:
8814		case MEMARB_MODE:
8815			/* We can't enable/disable these bits of the
8816			 * 5705/5750, just say success.
8817			 */
8818			return 0;
8819
8820		default:
8821			break;
8822		}
8823	}
8824
8825	val = tr32(ofs);
8826	val &= ~enable_bit;
8827	tw32_f(ofs, val);
8828
8829	for (i = 0; i < MAX_WAIT_CNT; i++) {
8830		if (pci_channel_offline(tp->pdev)) {
8831			dev_err(&tp->pdev->dev,
8832				"tg3_stop_block device offline, "
8833				"ofs=%lx enable_bit=%x\n",
8834				ofs, enable_bit);
8835			return -ENODEV;
8836		}
8837
8838		udelay(100);
8839		val = tr32(ofs);
8840		if ((val & enable_bit) == 0)
8841			break;
8842	}
8843
8844	if (i == MAX_WAIT_CNT && !silent) {
8845		dev_err(&tp->pdev->dev,
8846			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8847			ofs, enable_bit);
8848		return -ENODEV;
8849	}
8850
8851	return 0;
8852}
8853
8854/* tp->lock is held. */
8855static int tg3_abort_hw(struct tg3 *tp, bool silent)
8856{
8857	int i, err;
8858
8859	tg3_disable_ints(tp);
8860
8861	if (pci_channel_offline(tp->pdev)) {
8862		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8863		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8864		err = -ENODEV;
8865		goto err_no_dev;
8866	}
8867
8868	tp->rx_mode &= ~RX_MODE_ENABLE;
8869	tw32_f(MAC_RX_MODE, tp->rx_mode);
8870	udelay(10);
8871
8872	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8873	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8874	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8875	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8876	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8877	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8878
8879	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8880	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8881	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8882	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8883	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8884	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8885	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8886
8887	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8888	tw32_f(MAC_MODE, tp->mac_mode);
8889	udelay(40);
8890
8891	tp->tx_mode &= ~TX_MODE_ENABLE;
8892	tw32_f(MAC_TX_MODE, tp->tx_mode);
8893
8894	for (i = 0; i < MAX_WAIT_CNT; i++) {
8895		udelay(100);
8896		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8897			break;
8898	}
8899	if (i >= MAX_WAIT_CNT) {
8900		dev_err(&tp->pdev->dev,
8901			"%s timed out, TX_MODE_ENABLE will not clear "
8902			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8903		err |= -ENODEV;
8904	}
8905
8906	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8907	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8908	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8909
8910	tw32(FTQ_RESET, 0xffffffff);
8911	tw32(FTQ_RESET, 0x00000000);
8912
8913	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8914	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8915
8916err_no_dev:
8917	for (i = 0; i < tp->irq_cnt; i++) {
8918		struct tg3_napi *tnapi = &tp->napi[i];
8919		if (tnapi->hw_status)
8920			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8921	}
8922
8923	return err;
8924}
8925
8926/* Save PCI command register before chip reset */
8927static void tg3_save_pci_state(struct tg3 *tp)
8928{
8929	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8930}
8931
8932/* Restore PCI state after chip reset */
8933static void tg3_restore_pci_state(struct tg3 *tp)
8934{
8935	u32 val;
8936
8937	/* Re-enable indirect register accesses. */
8938	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8939			       tp->misc_host_ctrl);
8940
8941	/* Set MAX PCI retry to zero. */
8942	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8943	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8944	    tg3_flag(tp, PCIX_MODE))
8945		val |= PCISTATE_RETRY_SAME_DMA;
8946	/* Allow reads and writes to the APE register and memory space. */
8947	if (tg3_flag(tp, ENABLE_APE))
8948		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8949		       PCISTATE_ALLOW_APE_SHMEM_WR |
8950		       PCISTATE_ALLOW_APE_PSPACE_WR;
8951	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8952
8953	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8954
8955	if (!tg3_flag(tp, PCI_EXPRESS)) {
8956		pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8957				      tp->pci_cacheline_sz);
8958		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8959				      tp->pci_lat_timer);
8960	}
8961
8962	/* Make sure PCI-X relaxed ordering bit is clear. */
8963	if (tg3_flag(tp, PCIX_MODE)) {
8964		u16 pcix_cmd;
8965
8966		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8967				     &pcix_cmd);
8968		pcix_cmd &= ~PCI_X_CMD_ERO;
8969		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8970				      pcix_cmd);
8971	}
8972
8973	if (tg3_flag(tp, 5780_CLASS)) {
8974
8975		/* Chip reset on 5780 will reset MSI enable bit,
8976		 * so need to restore it.
8977		 */
8978		if (tg3_flag(tp, USING_MSI)) {
8979			u16 ctrl;
8980
8981			pci_read_config_word(tp->pdev,
8982					     tp->msi_cap + PCI_MSI_FLAGS,
8983					     &ctrl);
8984			pci_write_config_word(tp->pdev,
8985					      tp->msi_cap + PCI_MSI_FLAGS,
8986					      ctrl | PCI_MSI_FLAGS_ENABLE);
8987			val = tr32(MSGINT_MODE);
8988			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8989		}
8990	}
8991}
8992
8993static void tg3_override_clk(struct tg3 *tp)
8994{
8995	u32 val;
8996
8997	switch (tg3_asic_rev(tp)) {
8998	case ASIC_REV_5717:
8999		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9000		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9001		     TG3_CPMU_MAC_ORIDE_ENABLE);
9002		break;
9003
9004	case ASIC_REV_5719:
9005	case ASIC_REV_5720:
9006		tw32(TG3_CPMU_CLCK_ORIDE, CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9007		break;
9008
9009	default:
9010		return;
9011	}
9012}
9013
9014static void tg3_restore_clk(struct tg3 *tp)
9015{
9016	u32 val;
9017
9018	switch (tg3_asic_rev(tp)) {
9019	case ASIC_REV_5717:
9020		val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9021		tw32(TG3_CPMU_CLCK_ORIDE_ENABLE,
9022		     val & ~TG3_CPMU_MAC_ORIDE_ENABLE);
9023		break;
9024
9025	case ASIC_REV_5719:
9026	case ASIC_REV_5720:
9027		val = tr32(TG3_CPMU_CLCK_ORIDE);
9028		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9029		break;
9030
9031	default:
9032		return;
9033	}
9034}
9035
9036/* tp->lock is held. */
9037static int tg3_chip_reset(struct tg3 *tp)
9038	__releases(tp->lock)
9039	__acquires(tp->lock)
9040{
9041	u32 val;
9042	void (*write_op)(struct tg3 *, u32, u32);
9043	int i, err;
9044
9045	if (!pci_device_is_present(tp->pdev))
9046		return -ENODEV;
9047
9048	tg3_nvram_lock(tp);
9049
9050	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
9051
9052	/* No matching tg3_nvram_unlock() after this because
9053	 * chip reset below will undo the nvram lock.
9054	 */
9055	tp->nvram_lock_cnt = 0;
9056
9057	/* GRC_MISC_CFG core clock reset will clear the memory
9058	 * enable bit in PCI register 4 and the MSI enable bit
9059	 * on some chips, so we save relevant registers here.
9060	 */
9061	tg3_save_pci_state(tp);
9062
9063	if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
9064	    tg3_flag(tp, 5755_PLUS))
9065		tw32(GRC_FASTBOOT_PC, 0);
9066
9067	/*
9068	 * We must avoid the readl() that normally takes place.
9069	 * It locks machines, causes machine checks, and other
9070	 * fun things.  So, temporarily disable the 5701
9071	 * hardware workaround, while we do the reset.
9072	 */
9073	write_op = tp->write32;
9074	if (write_op == tg3_write_flush_reg32)
9075		tp->write32 = tg3_write32;
9076
9077	/* Prevent the irq handler from reading or writing PCI registers
9078	 * during chip reset when the memory enable bit in the PCI command
9079	 * register may be cleared.  The chip does not generate interrupt
9080	 * at this time, but the irq handler may still be called due to irq
9081	 * sharing or irqpoll.
9082	 */
9083	tg3_flag_set(tp, CHIP_RESETTING);
9084	for (i = 0; i < tp->irq_cnt; i++) {
9085		struct tg3_napi *tnapi = &tp->napi[i];
9086		if (tnapi->hw_status) {
9087			tnapi->hw_status->status = 0;
9088			tnapi->hw_status->status_tag = 0;
9089		}
9090		tnapi->last_tag = 0;
9091		tnapi->last_irq_tag = 0;
9092	}
9093	smp_mb();
9094
9095	tg3_full_unlock(tp);
9096
9097	for (i = 0; i < tp->irq_cnt; i++)
9098		synchronize_irq(tp->napi[i].irq_vec);
9099
9100	tg3_full_lock(tp, 0);
9101
9102	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9103		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9104		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9105	}
9106
9107	/* do the reset */
9108	val = GRC_MISC_CFG_CORECLK_RESET;
9109
9110	if (tg3_flag(tp, PCI_EXPRESS)) {
9111		/* Force PCIe 1.0a mode */
9112		if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
9113		    !tg3_flag(tp, 57765_PLUS) &&
9114		    tr32(TG3_PCIE_PHY_TSTCTL) ==
9115		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
9116			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
9117
9118		if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
9119			tw32(GRC_MISC_CFG, (1 << 29));
9120			val |= (1 << 29);
9121		}
9122	}
9123
9124	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
9125		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
9126		tw32(GRC_VCPU_EXT_CTRL,
9127		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
9128	}
9129
9130	/* Set the clock to the highest frequency to avoid timeouts. With link
9131	 * aware mode, the clock speed could be slow and bootcode does not
9132	 * complete within the expected time. Override the clock to allow the
9133	 * bootcode to finish sooner and then restore it.
9134	 */
9135	tg3_override_clk(tp);
9136
9137	/* Manage gphy power for all CPMU absent PCIe devices. */
9138	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
9139		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
9140
9141	tw32(GRC_MISC_CFG, val);
9142
9143	/* restore 5701 hardware bug workaround write method */
9144	tp->write32 = write_op;
9145
9146	/* Unfortunately, we have to delay before the PCI read back.
9147	 * Some 575X chips even will not respond to a PCI cfg access
9148	 * when the reset command is given to the chip.
9149	 *
9150	 * How do these hardware designers expect things to work
9151	 * properly if the PCI write is posted for a long period
9152	 * of time?  It is always necessary to have some method by
9153	 * which a register read back can occur to push the write
9154	 * out which does the reset.
9155	 *
9156	 * For most tg3 variants the trick below was working.
9157	 * Ho hum...
9158	 */
9159	udelay(120);
9160
9161	/* Flush PCI posted writes.  The normal MMIO registers
9162	 * are inaccessible at this time so this is the only
9163	 * way to make this reliably (actually, this is no longer
9164	 * the case, see above).  I tried to use indirect
9165	 * register read/write but this upset some 5701 variants.
9166	 */
9167	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
9168
9169	udelay(120);
9170
9171	if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
9172		u16 val16;
9173
9174		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
9175			int j;
9176			u32 cfg_val;
9177
9178			/* Wait for link training to complete.  */
9179			for (j = 0; j < 5000; j++)
9180				udelay(100);
9181
9182			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
9183			pci_write_config_dword(tp->pdev, 0xc4,
9184					       cfg_val | (1 << 15));
9185		}
9186
9187		/* Clear the "no snoop" and "relaxed ordering" bits. */
9188		val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
9189		/*
9190		 * Older PCIe devices only support the 128 byte
9191		 * MPS setting.  Enforce the restriction.
9192		 */
9193		if (!tg3_flag(tp, CPMU_PRESENT))
9194			val16 |= PCI_EXP_DEVCTL_PAYLOAD;
9195		pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
9196
9197		/* Clear error status */
9198		pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
9199				      PCI_EXP_DEVSTA_CED |
9200				      PCI_EXP_DEVSTA_NFED |
9201				      PCI_EXP_DEVSTA_FED |
9202				      PCI_EXP_DEVSTA_URD);
9203	}
9204
9205	tg3_restore_pci_state(tp);
9206
9207	tg3_flag_clear(tp, CHIP_RESETTING);
9208	tg3_flag_clear(tp, ERROR_PROCESSED);
9209
9210	val = 0;
9211	if (tg3_flag(tp, 5780_CLASS))
9212		val = tr32(MEMARB_MODE);
9213	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
9214
9215	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
9216		tg3_stop_fw(tp);
9217		tw32(0x5000, 0x400);
9218	}
9219
9220	if (tg3_flag(tp, IS_SSB_CORE)) {
9221		/*
9222		 * BCM4785: In order to avoid repercussions from using
9223		 * potentially defective internal ROM, stop the Rx RISC CPU,
9224		 * which is not required.
9225		 */
9226		tg3_stop_fw(tp);
9227		tg3_halt_cpu(tp, RX_CPU_BASE);
9228	}
9229
9230	err = tg3_poll_fw(tp);
9231	if (err)
9232		return err;
9233
9234	tw32(GRC_MODE, tp->grc_mode);
9235
9236	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
9237		val = tr32(0xc4);
9238
9239		tw32(0xc4, val | (1 << 15));
9240	}
9241
9242	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
9243	    tg3_asic_rev(tp) == ASIC_REV_5705) {
9244		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
9245		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9246			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9247		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9248	}
9249
9250	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9251		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9252		val = tp->mac_mode;
9253	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9254		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9255		val = tp->mac_mode;
9256	} else
9257		val = 0;
9258
9259	tw32_f(MAC_MODE, val);
9260	udelay(40);
9261
9262	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9263
9264	tg3_mdio_start(tp);
9265
9266	if (tg3_flag(tp, PCI_EXPRESS) &&
9267	    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9268	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
9269	    !tg3_flag(tp, 57765_PLUS)) {
9270		val = tr32(0x7c00);
9271
9272		tw32(0x7c00, val | (1 << 25));
9273	}
9274
9275	tg3_restore_clk(tp);
9276
9277	/* Reprobe ASF enable state.  */
9278	tg3_flag_clear(tp, ENABLE_ASF);
9279	tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9280			   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9281
9282	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9283	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9284	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9285		u32 nic_cfg;
9286
9287		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9288		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9289			tg3_flag_set(tp, ENABLE_ASF);
9290			tp->last_event_jiffies = jiffies;
9291			if (tg3_flag(tp, 5750_PLUS))
9292				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9293
9294			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9295			if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9296				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9297			if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9298				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9299		}
9300	}
9301
9302	return 0;
9303}
9304
9305static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9306static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9307static void __tg3_set_rx_mode(struct net_device *);
9308
9309/* tp->lock is held. */
9310static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9311{
9312	int err;
9313
9314	tg3_stop_fw(tp);
9315
9316	tg3_write_sig_pre_reset(tp, kind);
9317
9318	tg3_abort_hw(tp, silent);
9319	err = tg3_chip_reset(tp);
9320
9321	__tg3_set_mac_addr(tp, false);
9322
9323	tg3_write_sig_legacy(tp, kind);
9324	tg3_write_sig_post_reset(tp, kind);
9325
9326	if (tp->hw_stats) {
9327		/* Save the stats across chip resets... */
9328		tg3_get_nstats(tp, &tp->net_stats_prev);
9329		tg3_get_estats(tp, &tp->estats_prev);
9330
9331		/* And make sure the next sample is new data */
9332		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9333	}
9334
9335	return err;
9336}
9337
9338static int tg3_set_mac_addr(struct net_device *dev, void *p)
9339{
9340	struct tg3 *tp = netdev_priv(dev);
9341	struct sockaddr *addr = p;
9342	int err = 0;
9343	bool skip_mac_1 = false;
9344
9345	if (!is_valid_ether_addr(addr->sa_data))
9346		return -EADDRNOTAVAIL;
9347
9348	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9349
9350	if (!netif_running(dev))
9351		return 0;
9352
9353	if (tg3_flag(tp, ENABLE_ASF)) {
9354		u32 addr0_high, addr0_low, addr1_high, addr1_low;
9355
9356		addr0_high = tr32(MAC_ADDR_0_HIGH);
9357		addr0_low = tr32(MAC_ADDR_0_LOW);
9358		addr1_high = tr32(MAC_ADDR_1_HIGH);
9359		addr1_low = tr32(MAC_ADDR_1_LOW);
9360
9361		/* Skip MAC addr 1 if ASF is using it. */
9362		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9363		    !(addr1_high == 0 && addr1_low == 0))
9364			skip_mac_1 = true;
9365	}
9366	spin_lock_bh(&tp->lock);
9367	__tg3_set_mac_addr(tp, skip_mac_1);
9368	__tg3_set_rx_mode(dev);
9369	spin_unlock_bh(&tp->lock);
9370
9371	return err;
9372}
9373
9374/* tp->lock is held. */
9375static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9376			   dma_addr_t mapping, u32 maxlen_flags,
9377			   u32 nic_addr)
9378{
9379	tg3_write_mem(tp,
9380		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9381		      ((u64) mapping >> 32));
9382	tg3_write_mem(tp,
9383		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9384		      ((u64) mapping & 0xffffffff));
9385	tg3_write_mem(tp,
9386		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9387		       maxlen_flags);
9388
9389	if (!tg3_flag(tp, 5705_PLUS))
9390		tg3_write_mem(tp,
9391			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9392			      nic_addr);
9393}
9394
9395
9396static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9397{
9398	int i = 0;
9399
9400	if (!tg3_flag(tp, ENABLE_TSS)) {
9401		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9402		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9403		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9404	} else {
9405		tw32(HOSTCC_TXCOL_TICKS, 0);
9406		tw32(HOSTCC_TXMAX_FRAMES, 0);
9407		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9408
9409		for (; i < tp->txq_cnt; i++) {
9410			u32 reg;
9411
9412			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9413			tw32(reg, ec->tx_coalesce_usecs);
9414			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9415			tw32(reg, ec->tx_max_coalesced_frames);
9416			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9417			tw32(reg, ec->tx_max_coalesced_frames_irq);
9418		}
9419	}
9420
9421	for (; i < tp->irq_max - 1; i++) {
9422		tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9423		tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9424		tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9425	}
9426}
9427
9428static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9429{
9430	int i = 0;
9431	u32 limit = tp->rxq_cnt;
9432
9433	if (!tg3_flag(tp, ENABLE_RSS)) {
9434		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9435		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9436		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9437		limit--;
9438	} else {
9439		tw32(HOSTCC_RXCOL_TICKS, 0);
9440		tw32(HOSTCC_RXMAX_FRAMES, 0);
9441		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9442	}
9443
9444	for (; i < limit; i++) {
9445		u32 reg;
9446
9447		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9448		tw32(reg, ec->rx_coalesce_usecs);
9449		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9450		tw32(reg, ec->rx_max_coalesced_frames);
9451		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9452		tw32(reg, ec->rx_max_coalesced_frames_irq);
9453	}
9454
9455	for (; i < tp->irq_max - 1; i++) {
9456		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9457		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9458		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9459	}
9460}
9461
9462static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9463{
9464	tg3_coal_tx_init(tp, ec);
9465	tg3_coal_rx_init(tp, ec);
9466
9467	if (!tg3_flag(tp, 5705_PLUS)) {
9468		u32 val = ec->stats_block_coalesce_usecs;
9469
9470		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9471		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9472
9473		if (!tp->link_up)
9474			val = 0;
9475
9476		tw32(HOSTCC_STAT_COAL_TICKS, val);
9477	}
9478}
9479
9480/* tp->lock is held. */
9481static void tg3_tx_rcbs_disable(struct tg3 *tp)
9482{
9483	u32 txrcb, limit;
9484
9485	/* Disable all transmit rings but the first. */
9486	if (!tg3_flag(tp, 5705_PLUS))
9487		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9488	else if (tg3_flag(tp, 5717_PLUS))
9489		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9490	else if (tg3_flag(tp, 57765_CLASS) ||
9491		 tg3_asic_rev(tp) == ASIC_REV_5762)
9492		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9493	else
9494		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9495
9496	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9497	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9498		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9499			      BDINFO_FLAGS_DISABLED);
9500}
9501
9502/* tp->lock is held. */
9503static void tg3_tx_rcbs_init(struct tg3 *tp)
9504{
9505	int i = 0;
9506	u32 txrcb = NIC_SRAM_SEND_RCB;
9507
9508	if (tg3_flag(tp, ENABLE_TSS))
9509		i++;
9510
9511	for (; i < tp->irq_max; i++, txrcb += TG3_BDINFO_SIZE) {
9512		struct tg3_napi *tnapi = &tp->napi[i];
9513
9514		if (!tnapi->tx_ring)
9515			continue;
9516
9517		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9518			       (TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT),
9519			       NIC_SRAM_TX_BUFFER_DESC);
9520	}
9521}
9522
9523/* tp->lock is held. */
9524static void tg3_rx_ret_rcbs_disable(struct tg3 *tp)
9525{
9526	u32 rxrcb, limit;
9527
9528	/* Disable all receive return rings but the first. */
9529	if (tg3_flag(tp, 5717_PLUS))
9530		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9531	else if (!tg3_flag(tp, 5705_PLUS))
9532		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9533	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9534		 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9535		 tg3_flag(tp, 57765_CLASS))
9536		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9537	else
9538		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9539
9540	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9541	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9542		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9543			      BDINFO_FLAGS_DISABLED);
9544}
9545
9546/* tp->lock is held. */
9547static void tg3_rx_ret_rcbs_init(struct tg3 *tp)
9548{
9549	int i = 0;
9550	u32 rxrcb = NIC_SRAM_RCV_RET_RCB;
9551
9552	if (tg3_flag(tp, ENABLE_RSS))
9553		i++;
9554
9555	for (; i < tp->irq_max; i++, rxrcb += TG3_BDINFO_SIZE) {
9556		struct tg3_napi *tnapi = &tp->napi[i];
9557
9558		if (!tnapi->rx_rcb)
9559			continue;
9560
9561		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9562			       (tp->rx_ret_ring_mask + 1) <<
9563				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9564	}
9565}
9566
9567/* tp->lock is held. */
9568static void tg3_rings_reset(struct tg3 *tp)
9569{
9570	int i;
9571	u32 stblk;
9572	struct tg3_napi *tnapi = &tp->napi[0];
9573
9574	tg3_tx_rcbs_disable(tp);
9575
9576	tg3_rx_ret_rcbs_disable(tp);
9577
9578	/* Disable interrupts */
9579	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9580	tp->napi[0].chk_msi_cnt = 0;
9581	tp->napi[0].last_rx_cons = 0;
9582	tp->napi[0].last_tx_cons = 0;
9583
9584	/* Zero mailbox registers. */
9585	if (tg3_flag(tp, SUPPORT_MSIX)) {
9586		for (i = 1; i < tp->irq_max; i++) {
9587			tp->napi[i].tx_prod = 0;
9588			tp->napi[i].tx_cons = 0;
9589			if (tg3_flag(tp, ENABLE_TSS))
9590				tw32_mailbox(tp->napi[i].prodmbox, 0);
9591			tw32_rx_mbox(tp->napi[i].consmbox, 0);
9592			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9593			tp->napi[i].chk_msi_cnt = 0;
9594			tp->napi[i].last_rx_cons = 0;
9595			tp->napi[i].last_tx_cons = 0;
9596		}
9597		if (!tg3_flag(tp, ENABLE_TSS))
9598			tw32_mailbox(tp->napi[0].prodmbox, 0);
9599	} else {
9600		tp->napi[0].tx_prod = 0;
9601		tp->napi[0].tx_cons = 0;
9602		tw32_mailbox(tp->napi[0].prodmbox, 0);
9603		tw32_rx_mbox(tp->napi[0].consmbox, 0);
9604	}
9605
9606	/* Make sure the NIC-based send BD rings are disabled. */
9607	if (!tg3_flag(tp, 5705_PLUS)) {
9608		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9609		for (i = 0; i < 16; i++)
9610			tw32_tx_mbox(mbox + i * 8, 0);
9611	}
9612
9613	/* Clear status block in ram. */
9614	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9615
9616	/* Set status block DMA address */
9617	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9618	     ((u64) tnapi->status_mapping >> 32));
9619	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9620	     ((u64) tnapi->status_mapping & 0xffffffff));
9621
9622	stblk = HOSTCC_STATBLCK_RING1;
9623
9624	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9625		u64 mapping = (u64)tnapi->status_mapping;
9626		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9627		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9628		stblk += 8;
9629
9630		/* Clear status block in ram. */
9631		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9632	}
9633
9634	tg3_tx_rcbs_init(tp);
9635	tg3_rx_ret_rcbs_init(tp);
9636}
9637
9638static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9639{
9640	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9641
9642	if (!tg3_flag(tp, 5750_PLUS) ||
9643	    tg3_flag(tp, 5780_CLASS) ||
9644	    tg3_asic_rev(tp) == ASIC_REV_5750 ||
9645	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
9646	    tg3_flag(tp, 57765_PLUS))
9647		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9648	else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9649		 tg3_asic_rev(tp) == ASIC_REV_5787)
9650		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9651	else
9652		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9653
9654	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9655	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9656
9657	val = min(nic_rep_thresh, host_rep_thresh);
9658	tw32(RCVBDI_STD_THRESH, val);
9659
9660	if (tg3_flag(tp, 57765_PLUS))
9661		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9662
9663	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9664		return;
9665
9666	bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9667
9668	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9669
9670	val = min(bdcache_maxcnt / 2, host_rep_thresh);
9671	tw32(RCVBDI_JUMBO_THRESH, val);
9672
9673	if (tg3_flag(tp, 57765_PLUS))
9674		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9675}
9676
9677static inline u32 calc_crc(unsigned char *buf, int len)
9678{
9679	u32 reg;
9680	u32 tmp;
9681	int j, k;
9682
9683	reg = 0xffffffff;
9684
9685	for (j = 0; j < len; j++) {
9686		reg ^= buf[j];
9687
9688		for (k = 0; k < 8; k++) {
9689			tmp = reg & 0x01;
9690
9691			reg >>= 1;
9692
9693			if (tmp)
9694				reg ^= 0xedb88320;
9695		}
9696	}
9697
9698	return ~reg;
9699}
9700
9701static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9702{
9703	/* accept or reject all multicast frames */
9704	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9705	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9706	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9707	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9708}
9709
9710static void __tg3_set_rx_mode(struct net_device *dev)
9711{
9712	struct tg3 *tp = netdev_priv(dev);
9713	u32 rx_mode;
9714
9715	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9716				  RX_MODE_KEEP_VLAN_TAG);
9717
9718#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9719	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9720	 * flag clear.
9721	 */
9722	if (!tg3_flag(tp, ENABLE_ASF))
9723		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9724#endif
9725
9726	if (dev->flags & IFF_PROMISC) {
9727		/* Promiscuous mode. */
9728		rx_mode |= RX_MODE_PROMISC;
9729	} else if (dev->flags & IFF_ALLMULTI) {
9730		/* Accept all multicast. */
9731		tg3_set_multi(tp, 1);
9732	} else if (netdev_mc_empty(dev)) {
9733		/* Reject all multicast. */
9734		tg3_set_multi(tp, 0);
9735	} else {
9736		/* Accept one or more multicast(s). */
9737		struct netdev_hw_addr *ha;
9738		u32 mc_filter[4] = { 0, };
9739		u32 regidx;
9740		u32 bit;
9741		u32 crc;
9742
9743		netdev_for_each_mc_addr(ha, dev) {
9744			crc = calc_crc(ha->addr, ETH_ALEN);
9745			bit = ~crc & 0x7f;
9746			regidx = (bit & 0x60) >> 5;
9747			bit &= 0x1f;
9748			mc_filter[regidx] |= (1 << bit);
9749		}
9750
9751		tw32(MAC_HASH_REG_0, mc_filter[0]);
9752		tw32(MAC_HASH_REG_1, mc_filter[1]);
9753		tw32(MAC_HASH_REG_2, mc_filter[2]);
9754		tw32(MAC_HASH_REG_3, mc_filter[3]);
9755	}
9756
9757	if (netdev_uc_count(dev) > TG3_MAX_UCAST_ADDR(tp)) {
9758		rx_mode |= RX_MODE_PROMISC;
9759	} else if (!(dev->flags & IFF_PROMISC)) {
9760		/* Add all entries into to the mac addr filter list */
9761		int i = 0;
9762		struct netdev_hw_addr *ha;
9763
9764		netdev_for_each_uc_addr(ha, dev) {
9765			__tg3_set_one_mac_addr(tp, ha->addr,
9766					       i + TG3_UCAST_ADDR_IDX(tp));
9767			i++;
9768		}
9769	}
9770
9771	if (rx_mode != tp->rx_mode) {
9772		tp->rx_mode = rx_mode;
9773		tw32_f(MAC_RX_MODE, rx_mode);
9774		udelay(10);
9775	}
9776}
9777
9778static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9779{
9780	int i;
9781
9782	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9783		tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9784}
9785
9786static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9787{
9788	int i;
9789
9790	if (!tg3_flag(tp, SUPPORT_MSIX))
9791		return;
9792
9793	if (tp->rxq_cnt == 1) {
9794		memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9795		return;
9796	}
9797
9798	/* Validate table against current IRQ count */
9799	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9800		if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9801			break;
9802	}
9803
9804	if (i != TG3_RSS_INDIR_TBL_SIZE)
9805		tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9806}
9807
9808static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9809{
9810	int i = 0;
9811	u32 reg = MAC_RSS_INDIR_TBL_0;
9812
9813	while (i < TG3_RSS_INDIR_TBL_SIZE) {
9814		u32 val = tp->rss_ind_tbl[i];
9815		i++;
9816		for (; i % 8; i++) {
9817			val <<= 4;
9818			val |= tp->rss_ind_tbl[i];
9819		}
9820		tw32(reg, val);
9821		reg += 4;
9822	}
9823}
9824
9825static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9826{
9827	if (tg3_asic_rev(tp) == ASIC_REV_5719)
9828		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9829	else
9830		return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9831}
9832
9833/* tp->lock is held. */
9834static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9835{
9836	u32 val, rdmac_mode;
9837	int i, err, limit;
9838	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9839
9840	tg3_disable_ints(tp);
9841
9842	tg3_stop_fw(tp);
9843
9844	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9845
9846	if (tg3_flag(tp, INIT_COMPLETE))
9847		tg3_abort_hw(tp, 1);
9848
9849	if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9850	    !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9851		tg3_phy_pull_config(tp);
9852		tg3_eee_pull_config(tp, NULL);
9853		tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9854	}
9855
9856	/* Enable MAC control of LPI */
9857	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
9858		tg3_setup_eee(tp);
9859
9860	if (reset_phy)
9861		tg3_phy_reset(tp);
9862
9863	err = tg3_chip_reset(tp);
9864	if (err)
9865		return err;
9866
9867	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9868
9869	if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9870		val = tr32(TG3_CPMU_CTRL);
9871		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9872		tw32(TG3_CPMU_CTRL, val);
9873
9874		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9875		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9876		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9877		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9878
9879		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9880		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9881		val |= CPMU_LNK_AWARE_MACCLK_6_25;
9882		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9883
9884		val = tr32(TG3_CPMU_HST_ACC);
9885		val &= ~CPMU_HST_ACC_MACCLK_MASK;
9886		val |= CPMU_HST_ACC_MACCLK_6_25;
9887		tw32(TG3_CPMU_HST_ACC, val);
9888	}
9889
9890	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9891		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9892		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9893		       PCIE_PWR_MGMT_L1_THRESH_4MS;
9894		tw32(PCIE_PWR_MGMT_THRESH, val);
9895
9896		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9897		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9898
9899		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9900
9901		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9902		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9903	}
9904
9905	if (tg3_flag(tp, L1PLLPD_EN)) {
9906		u32 grc_mode = tr32(GRC_MODE);
9907
9908		/* Access the lower 1K of PL PCIE block registers. */
9909		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9910		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9911
9912		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9913		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9914		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9915
9916		tw32(GRC_MODE, grc_mode);
9917	}
9918
9919	if (tg3_flag(tp, 57765_CLASS)) {
9920		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9921			u32 grc_mode = tr32(GRC_MODE);
9922
9923			/* Access the lower 1K of PL PCIE block registers. */
9924			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9925			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9926
9927			val = tr32(TG3_PCIE_TLDLPL_PORT +
9928				   TG3_PCIE_PL_LO_PHYCTL5);
9929			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9930			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9931
9932			tw32(GRC_MODE, grc_mode);
9933		}
9934
9935		if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9936			u32 grc_mode;
9937
9938			/* Fix transmit hangs */
9939			val = tr32(TG3_CPMU_PADRNG_CTL);
9940			val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9941			tw32(TG3_CPMU_PADRNG_CTL, val);
9942
9943			grc_mode = tr32(GRC_MODE);
9944
9945			/* Access the lower 1K of DL PCIE block registers. */
9946			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9947			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9948
9949			val = tr32(TG3_PCIE_TLDLPL_PORT +
9950				   TG3_PCIE_DL_LO_FTSMAX);
9951			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9952			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9953			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9954
9955			tw32(GRC_MODE, grc_mode);
9956		}
9957
9958		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9959		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9960		val |= CPMU_LSPD_10MB_MACCLK_6_25;
9961		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9962	}
9963
9964	/* This works around an issue with Athlon chipsets on
9965	 * B3 tigon3 silicon.  This bit has no effect on any
9966	 * other revision.  But do not set this on PCI Express
9967	 * chips and don't even touch the clocks if the CPMU is present.
9968	 */
9969	if (!tg3_flag(tp, CPMU_PRESENT)) {
9970		if (!tg3_flag(tp, PCI_EXPRESS))
9971			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9972		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9973	}
9974
9975	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9976	    tg3_flag(tp, PCIX_MODE)) {
9977		val = tr32(TG3PCI_PCISTATE);
9978		val |= PCISTATE_RETRY_SAME_DMA;
9979		tw32(TG3PCI_PCISTATE, val);
9980	}
9981
9982	if (tg3_flag(tp, ENABLE_APE)) {
9983		/* Allow reads and writes to the
9984		 * APE register and memory space.
9985		 */
9986		val = tr32(TG3PCI_PCISTATE);
9987		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9988		       PCISTATE_ALLOW_APE_SHMEM_WR |
9989		       PCISTATE_ALLOW_APE_PSPACE_WR;
9990		tw32(TG3PCI_PCISTATE, val);
9991	}
9992
9993	if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9994		/* Enable some hw fixes.  */
9995		val = tr32(TG3PCI_MSI_DATA);
9996		val |= (1 << 26) | (1 << 28) | (1 << 29);
9997		tw32(TG3PCI_MSI_DATA, val);
9998	}
9999
10000	/* Descriptor ring init may make accesses to the
10001	 * NIC SRAM area to setup the TX descriptors, so we
10002	 * can only do this after the hardware has been
10003	 * successfully reset.
10004	 */
10005	err = tg3_init_rings(tp);
10006	if (err)
10007		return err;
10008
10009	if (tg3_flag(tp, 57765_PLUS)) {
10010		val = tr32(TG3PCI_DMA_RW_CTRL) &
10011		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
10012		if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
10013			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
10014		if (!tg3_flag(tp, 57765_CLASS) &&
10015		    tg3_asic_rev(tp) != ASIC_REV_5717 &&
10016		    tg3_asic_rev(tp) != ASIC_REV_5762)
10017			val |= DMA_RWCTRL_TAGGED_STAT_WA;
10018		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
10019	} else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
10020		   tg3_asic_rev(tp) != ASIC_REV_5761) {
10021		/* This value is determined during the probe time DMA
10022		 * engine test, tg3_test_dma.
10023		 */
10024		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
10025	}
10026
10027	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
10028			  GRC_MODE_4X_NIC_SEND_RINGS |
10029			  GRC_MODE_NO_TX_PHDR_CSUM |
10030			  GRC_MODE_NO_RX_PHDR_CSUM);
10031	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
10032
10033	/* Pseudo-header checksum is done by hardware logic and not
10034	 * the offload processers, so make the chip do the pseudo-
10035	 * header checksums on receive.  For transmit it is more
10036	 * convenient to do the pseudo-header checksum in software
10037	 * as Linux does that on transmit for us in all cases.
10038	 */
10039	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
10040
10041	val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
10042	if (tp->rxptpctl)
10043		tw32(TG3_RX_PTP_CTL,
10044		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
10045
10046	if (tg3_flag(tp, PTP_CAPABLE))
10047		val |= GRC_MODE_TIME_SYNC_ENABLE;
10048
10049	tw32(GRC_MODE, tp->grc_mode | val);
10050
10051	/* Setup the timer prescalar register.  Clock is always 66Mhz. */
10052	val = tr32(GRC_MISC_CFG);
10053	val &= ~0xff;
10054	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
10055	tw32(GRC_MISC_CFG, val);
10056
10057	/* Initialize MBUF/DESC pool. */
10058	if (tg3_flag(tp, 5750_PLUS)) {
10059		/* Do nothing.  */
10060	} else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
10061		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
10062		if (tg3_asic_rev(tp) == ASIC_REV_5704)
10063			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
10064		else
10065			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
10066		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
10067		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
10068	} else if (tg3_flag(tp, TSO_CAPABLE)) {
10069		int fw_len;
10070
10071		fw_len = tp->fw_len;
10072		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
10073		tw32(BUFMGR_MB_POOL_ADDR,
10074		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
10075		tw32(BUFMGR_MB_POOL_SIZE,
10076		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
10077	}
10078
10079	if (tp->dev->mtu <= ETH_DATA_LEN) {
10080		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10081		     tp->bufmgr_config.mbuf_read_dma_low_water);
10082		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10083		     tp->bufmgr_config.mbuf_mac_rx_low_water);
10084		tw32(BUFMGR_MB_HIGH_WATER,
10085		     tp->bufmgr_config.mbuf_high_water);
10086	} else {
10087		tw32(BUFMGR_MB_RDMA_LOW_WATER,
10088		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
10089		tw32(BUFMGR_MB_MACRX_LOW_WATER,
10090		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
10091		tw32(BUFMGR_MB_HIGH_WATER,
10092		     tp->bufmgr_config.mbuf_high_water_jumbo);
10093	}
10094	tw32(BUFMGR_DMA_LOW_WATER,
10095	     tp->bufmgr_config.dma_low_water);
10096	tw32(BUFMGR_DMA_HIGH_WATER,
10097	     tp->bufmgr_config.dma_high_water);
10098
10099	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
10100	if (tg3_asic_rev(tp) == ASIC_REV_5719)
10101		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
10102	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10103	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
10104	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10105	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
10106		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
10107	tw32(BUFMGR_MODE, val);
10108	for (i = 0; i < 2000; i++) {
10109		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
10110			break;
10111		udelay(10);
10112	}
10113	if (i >= 2000) {
10114		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
10115		return -ENODEV;
10116	}
10117
10118	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
10119		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
10120
10121	tg3_setup_rxbd_thresholds(tp);
10122
10123	/* Initialize TG3_BDINFO's at:
10124	 *  RCVDBDI_STD_BD:	standard eth size rx ring
10125	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
10126	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
10127	 *
10128	 * like so:
10129	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
10130	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
10131	 *                              ring attribute flags
10132	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
10133	 *
10134	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
10135	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
10136	 *
10137	 * The size of each ring is fixed in the firmware, but the location is
10138	 * configurable.
10139	 */
10140	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10141	     ((u64) tpr->rx_std_mapping >> 32));
10142	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10143	     ((u64) tpr->rx_std_mapping & 0xffffffff));
10144	if (!tg3_flag(tp, 5717_PLUS))
10145		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
10146		     NIC_SRAM_RX_BUFFER_DESC);
10147
10148	/* Disable the mini ring */
10149	if (!tg3_flag(tp, 5705_PLUS))
10150		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
10151		     BDINFO_FLAGS_DISABLED);
10152
10153	/* Program the jumbo buffer descriptor ring control
10154	 * blocks on those devices that have them.
10155	 */
10156	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10157	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
10158
10159		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
10160			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
10161			     ((u64) tpr->rx_jmb_mapping >> 32));
10162			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
10163			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
10164			val = TG3_RX_JMB_RING_SIZE(tp) <<
10165			      BDINFO_FLAGS_MAXLEN_SHIFT;
10166			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10167			     val | BDINFO_FLAGS_USE_EXT_RECV);
10168			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
10169			    tg3_flag(tp, 57765_CLASS) ||
10170			    tg3_asic_rev(tp) == ASIC_REV_5762)
10171				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
10172				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
10173		} else {
10174			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
10175			     BDINFO_FLAGS_DISABLED);
10176		}
10177
10178		if (tg3_flag(tp, 57765_PLUS)) {
10179			val = TG3_RX_STD_RING_SIZE(tp);
10180			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
10181			val |= (TG3_RX_STD_DMA_SZ << 2);
10182		} else
10183			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
10184	} else
10185		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
10186
10187	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
10188
10189	tpr->rx_std_prod_idx = tp->rx_pending;
10190	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
10191
10192	tpr->rx_jmb_prod_idx =
10193		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
10194	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
10195
10196	tg3_rings_reset(tp);
10197
10198	/* Initialize MAC address and backoff seed. */
10199	__tg3_set_mac_addr(tp, false);
10200
10201	/* MTU + ethernet header + FCS + optional VLAN tag */
10202	tw32(MAC_RX_MTU_SIZE,
10203	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
10204
10205	/* The slot time is changed by tg3_setup_phy if we
10206	 * run at gigabit with half duplex.
10207	 */
10208	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
10209	      (6 << TX_LENGTHS_IPG_SHIFT) |
10210	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
10211
10212	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10213	    tg3_asic_rev(tp) == ASIC_REV_5762)
10214		val |= tr32(MAC_TX_LENGTHS) &
10215		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
10216			TX_LENGTHS_CNT_DWN_VAL_MSK);
10217
10218	tw32(MAC_TX_LENGTHS, val);
10219
10220	/* Receive rules. */
10221	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
10222	tw32(RCVLPC_CONFIG, 0x0181);
10223
10224	/* Calculate RDMAC_MODE setting early, we need it to determine
10225	 * the RCVLPC_STATE_ENABLE mask.
10226	 */
10227	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
10228		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
10229		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
10230		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
10231		      RDMAC_MODE_LNGREAD_ENAB);
10232
10233	if (tg3_asic_rev(tp) == ASIC_REV_5717)
10234		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
10235
10236	if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
10237	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10238	    tg3_asic_rev(tp) == ASIC_REV_57780)
10239		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
10240			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
10241			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
10242
10243	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10244	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10245		if (tg3_flag(tp, TSO_CAPABLE) &&
10246		    tg3_asic_rev(tp) == ASIC_REV_5705) {
10247			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
10248		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10249			   !tg3_flag(tp, IS_5788)) {
10250			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10251		}
10252	}
10253
10254	if (tg3_flag(tp, PCI_EXPRESS))
10255		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10256
10257	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10258		tp->dma_limit = 0;
10259		if (tp->dev->mtu <= ETH_DATA_LEN) {
10260			rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10261			tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10262		}
10263	}
10264
10265	if (tg3_flag(tp, HW_TSO_1) ||
10266	    tg3_flag(tp, HW_TSO_2) ||
10267	    tg3_flag(tp, HW_TSO_3))
10268		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10269
10270	if (tg3_flag(tp, 57765_PLUS) ||
10271	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10272	    tg3_asic_rev(tp) == ASIC_REV_57780)
10273		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10274
10275	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10276	    tg3_asic_rev(tp) == ASIC_REV_5762)
10277		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10278
10279	if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10280	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
10281	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
10282	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
10283	    tg3_flag(tp, 57765_PLUS)) {
10284		u32 tgtreg;
10285
10286		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10287			tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10288		else
10289			tgtreg = TG3_RDMA_RSRVCTRL_REG;
10290
10291		val = tr32(tgtreg);
10292		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10293		    tg3_asic_rev(tp) == ASIC_REV_5762) {
10294			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10295				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10296				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10297			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10298			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10299			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10300		}
10301		tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10302	}
10303
10304	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10305	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
10306	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10307		u32 tgtreg;
10308
10309		if (tg3_asic_rev(tp) == ASIC_REV_5762)
10310			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10311		else
10312			tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10313
10314		val = tr32(tgtreg);
10315		tw32(tgtreg, val |
10316		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10317		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10318	}
10319
10320	/* Receive/send statistics. */
10321	if (tg3_flag(tp, 5750_PLUS)) {
10322		val = tr32(RCVLPC_STATS_ENABLE);
10323		val &= ~RCVLPC_STATSENAB_DACK_FIX;
10324		tw32(RCVLPC_STATS_ENABLE, val);
10325	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10326		   tg3_flag(tp, TSO_CAPABLE)) {
10327		val = tr32(RCVLPC_STATS_ENABLE);
10328		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10329		tw32(RCVLPC_STATS_ENABLE, val);
10330	} else {
10331		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10332	}
10333	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10334	tw32(SNDDATAI_STATSENAB, 0xffffff);
10335	tw32(SNDDATAI_STATSCTRL,
10336	     (SNDDATAI_SCTRL_ENABLE |
10337	      SNDDATAI_SCTRL_FASTUPD));
10338
10339	/* Setup host coalescing engine. */
10340	tw32(HOSTCC_MODE, 0);
10341	for (i = 0; i < 2000; i++) {
10342		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10343			break;
10344		udelay(10);
10345	}
10346
10347	__tg3_set_coalesce(tp, &tp->coal);
10348
10349	if (!tg3_flag(tp, 5705_PLUS)) {
10350		/* Status/statistics block address.  See tg3_timer,
10351		 * the tg3_periodic_fetch_stats call there, and
10352		 * tg3_get_stats to see how this works for 5705/5750 chips.
10353		 */
10354		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10355		     ((u64) tp->stats_mapping >> 32));
10356		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10357		     ((u64) tp->stats_mapping & 0xffffffff));
10358		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10359
10360		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10361
10362		/* Clear statistics and status block memory areas */
10363		for (i = NIC_SRAM_STATS_BLK;
10364		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10365		     i += sizeof(u32)) {
10366			tg3_write_mem(tp, i, 0);
10367			udelay(40);
10368		}
10369	}
10370
10371	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10372
10373	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10374	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10375	if (!tg3_flag(tp, 5705_PLUS))
10376		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10377
10378	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10379		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10380		/* reset to prevent losing 1st rx packet intermittently */
10381		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10382		udelay(10);
10383	}
10384
10385	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10386			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10387			MAC_MODE_FHDE_ENABLE;
10388	if (tg3_flag(tp, ENABLE_APE))
10389		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10390	if (!tg3_flag(tp, 5705_PLUS) &&
10391	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10392	    tg3_asic_rev(tp) != ASIC_REV_5700)
10393		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10394	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10395	udelay(40);
10396
10397	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10398	 * If TG3_FLAG_IS_NIC is zero, we should read the
10399	 * register to preserve the GPIO settings for LOMs. The GPIOs,
10400	 * whether used as inputs or outputs, are set by boot code after
10401	 * reset.
10402	 */
10403	if (!tg3_flag(tp, IS_NIC)) {
10404		u32 gpio_mask;
10405
10406		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10407			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10408			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10409
10410		if (tg3_asic_rev(tp) == ASIC_REV_5752)
10411			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10412				     GRC_LCLCTRL_GPIO_OUTPUT3;
10413
10414		if (tg3_asic_rev(tp) == ASIC_REV_5755)
10415			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10416
10417		tp->grc_local_ctrl &= ~gpio_mask;
10418		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10419
10420		/* GPIO1 must be driven high for eeprom write protect */
10421		if (tg3_flag(tp, EEPROM_WRITE_PROT))
10422			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10423					       GRC_LCLCTRL_GPIO_OUTPUT1);
10424	}
10425	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10426	udelay(100);
10427
10428	if (tg3_flag(tp, USING_MSIX)) {
10429		val = tr32(MSGINT_MODE);
10430		val |= MSGINT_MODE_ENABLE;
10431		if (tp->irq_cnt > 1)
10432			val |= MSGINT_MODE_MULTIVEC_EN;
10433		if (!tg3_flag(tp, 1SHOT_MSI))
10434			val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10435		tw32(MSGINT_MODE, val);
10436	}
10437
10438	if (!tg3_flag(tp, 5705_PLUS)) {
10439		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10440		udelay(40);
10441	}
10442
10443	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10444	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10445	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10446	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10447	       WDMAC_MODE_LNGREAD_ENAB);
10448
10449	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10450	    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10451		if (tg3_flag(tp, TSO_CAPABLE) &&
10452		    (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10453		     tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10454			/* nothing */
10455		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10456			   !tg3_flag(tp, IS_5788)) {
10457			val |= WDMAC_MODE_RX_ACCEL;
10458		}
10459	}
10460
10461	/* Enable host coalescing bug fix */
10462	if (tg3_flag(tp, 5755_PLUS))
10463		val |= WDMAC_MODE_STATUS_TAG_FIX;
10464
10465	if (tg3_asic_rev(tp) == ASIC_REV_5785)
10466		val |= WDMAC_MODE_BURST_ALL_DATA;
10467
10468	tw32_f(WDMAC_MODE, val);
10469	udelay(40);
10470
10471	if (tg3_flag(tp, PCIX_MODE)) {
10472		u16 pcix_cmd;
10473
10474		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10475				     &pcix_cmd);
10476		if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10477			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10478			pcix_cmd |= PCI_X_CMD_READ_2K;
10479		} else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10480			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10481			pcix_cmd |= PCI_X_CMD_READ_2K;
10482		}
10483		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10484				      pcix_cmd);
10485	}
10486
10487	tw32_f(RDMAC_MODE, rdmac_mode);
10488	udelay(40);
10489
10490	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10491	    tg3_asic_rev(tp) == ASIC_REV_5720) {
10492		for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10493			if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10494				break;
10495		}
10496		if (i < TG3_NUM_RDMA_CHANNELS) {
10497			val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10498			val |= tg3_lso_rd_dma_workaround_bit(tp);
10499			tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10500			tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10501		}
10502	}
10503
10504	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10505	if (!tg3_flag(tp, 5705_PLUS))
10506		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10507
10508	if (tg3_asic_rev(tp) == ASIC_REV_5761)
10509		tw32(SNDDATAC_MODE,
10510		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10511	else
10512		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10513
10514	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10515	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10516	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10517	if (tg3_flag(tp, LRG_PROD_RING_CAP))
10518		val |= RCVDBDI_MODE_LRG_RING_SZ;
10519	tw32(RCVDBDI_MODE, val);
10520	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10521	if (tg3_flag(tp, HW_TSO_1) ||
10522	    tg3_flag(tp, HW_TSO_2) ||
10523	    tg3_flag(tp, HW_TSO_3))
10524		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10525	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10526	if (tg3_flag(tp, ENABLE_TSS))
10527		val |= SNDBDI_MODE_MULTI_TXQ_EN;
10528	tw32(SNDBDI_MODE, val);
10529	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10530
10531	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10532		err = tg3_load_5701_a0_firmware_fix(tp);
10533		if (err)
10534			return err;
10535	}
10536
10537	if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10538		/* Ignore any errors for the firmware download. If download
10539		 * fails, the device will operate with EEE disabled
10540		 */
10541		tg3_load_57766_firmware(tp);
10542	}
10543
10544	if (tg3_flag(tp, TSO_CAPABLE)) {
10545		err = tg3_load_tso_firmware(tp);
10546		if (err)
10547			return err;
10548	}
10549
10550	tp->tx_mode = TX_MODE_ENABLE;
10551
10552	if (tg3_flag(tp, 5755_PLUS) ||
10553	    tg3_asic_rev(tp) == ASIC_REV_5906)
10554		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10555
10556	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10557	    tg3_asic_rev(tp) == ASIC_REV_5762) {
10558		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10559		tp->tx_mode &= ~val;
10560		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10561	}
10562
10563	tw32_f(MAC_TX_MODE, tp->tx_mode);
10564	udelay(100);
10565
10566	if (tg3_flag(tp, ENABLE_RSS)) {
10567		u32 rss_key[10];
10568
10569		tg3_rss_write_indir_tbl(tp);
10570
10571		netdev_rss_key_fill(rss_key, 10 * sizeof(u32));
10572
10573		for (i = 0; i < 10 ; i++)
10574			tw32(MAC_RSS_HASH_KEY_0 + i*4, rss_key[i]);
10575	}
10576
10577	tp->rx_mode = RX_MODE_ENABLE;
10578	if (tg3_flag(tp, 5755_PLUS))
10579		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10580
10581	if (tg3_asic_rev(tp) == ASIC_REV_5762)
10582		tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
10583
10584	if (tg3_flag(tp, ENABLE_RSS))
10585		tp->rx_mode |= RX_MODE_RSS_ENABLE |
10586			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
10587			       RX_MODE_RSS_IPV6_HASH_EN |
10588			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
10589			       RX_MODE_RSS_IPV4_HASH_EN |
10590			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
10591
10592	tw32_f(MAC_RX_MODE, tp->rx_mode);
10593	udelay(10);
10594
10595	tw32(MAC_LED_CTRL, tp->led_ctrl);
10596
10597	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10598	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10599		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10600		udelay(10);
10601	}
10602	tw32_f(MAC_RX_MODE, tp->rx_mode);
10603	udelay(10);
10604
10605	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10606		if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10607		    !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10608			/* Set drive transmission level to 1.2V  */
10609			/* only if the signal pre-emphasis bit is not set  */
10610			val = tr32(MAC_SERDES_CFG);
10611			val &= 0xfffff000;
10612			val |= 0x880;
10613			tw32(MAC_SERDES_CFG, val);
10614		}
10615		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10616			tw32(MAC_SERDES_CFG, 0x616000);
10617	}
10618
10619	/* Prevent chip from dropping frames when flow control
10620	 * is enabled.
10621	 */
10622	if (tg3_flag(tp, 57765_CLASS))
10623		val = 1;
10624	else
10625		val = 2;
10626	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10627
10628	if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10629	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10630		/* Use hardware link auto-negotiation */
10631		tg3_flag_set(tp, HW_AUTONEG);
10632	}
10633
10634	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10635	    tg3_asic_rev(tp) == ASIC_REV_5714) {
10636		u32 tmp;
10637
10638		tmp = tr32(SERDES_RX_CTRL);
10639		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10640		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10641		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10642		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10643	}
10644
10645	if (!tg3_flag(tp, USE_PHYLIB)) {
10646		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10647			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10648
10649		err = tg3_setup_phy(tp, false);
10650		if (err)
10651			return err;
10652
10653		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10654		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10655			u32 tmp;
10656
10657			/* Clear CRC stats. */
10658			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10659				tg3_writephy(tp, MII_TG3_TEST1,
10660					     tmp | MII_TG3_TEST1_CRC_EN);
10661				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10662			}
10663		}
10664	}
10665
10666	__tg3_set_rx_mode(tp->dev);
10667
10668	/* Initialize receive rules. */
10669	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10670	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10671	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10672	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10673
10674	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10675		limit = 8;
10676	else
10677		limit = 16;
10678	if (tg3_flag(tp, ENABLE_ASF))
10679		limit -= 4;
10680	switch (limit) {
10681	case 16:
10682		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10683	case 15:
10684		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10685	case 14:
10686		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10687	case 13:
10688		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10689	case 12:
10690		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10691	case 11:
10692		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10693	case 10:
10694		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10695	case 9:
10696		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10697	case 8:
10698		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10699	case 7:
10700		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10701	case 6:
10702		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10703	case 5:
10704		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10705	case 4:
10706		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10707	case 3:
10708		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10709	case 2:
10710	case 1:
10711
10712	default:
10713		break;
10714	}
10715
10716	if (tg3_flag(tp, ENABLE_APE))
10717		/* Write our heartbeat update interval to APE. */
10718		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10719				APE_HOST_HEARTBEAT_INT_DISABLE);
10720
10721	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10722
10723	return 0;
10724}
10725
10726/* Called at device open time to get the chip ready for
10727 * packet processing.  Invoked with tp->lock held.
10728 */
10729static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10730{
10731	/* Chip may have been just powered on. If so, the boot code may still
10732	 * be running initialization. Wait for it to finish to avoid races in
10733	 * accessing the hardware.
10734	 */
10735	tg3_enable_register_access(tp);
10736	tg3_poll_fw(tp);
10737
10738	tg3_switch_clocks(tp);
10739
10740	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10741
10742	return tg3_reset_hw(tp, reset_phy);
10743}
10744
10745static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10746{
10747	int i;
10748
10749	for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10750		u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10751
10752		tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10753		off += len;
10754
10755		if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10756		    !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10757			memset(ocir, 0, TG3_OCIR_LEN);
10758	}
10759}
10760
10761/* sysfs attributes for hwmon */
10762static ssize_t tg3_show_temp(struct device *dev,
10763			     struct device_attribute *devattr, char *buf)
10764{
10765	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10766	struct tg3 *tp = dev_get_drvdata(dev);
10767	u32 temperature;
10768
10769	spin_lock_bh(&tp->lock);
10770	tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10771				sizeof(temperature));
10772	spin_unlock_bh(&tp->lock);
10773	return sprintf(buf, "%u\n", temperature * 1000);
10774}
10775
10776
10777static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10778			  TG3_TEMP_SENSOR_OFFSET);
10779static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10780			  TG3_TEMP_CAUTION_OFFSET);
10781static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10782			  TG3_TEMP_MAX_OFFSET);
10783
10784static struct attribute *tg3_attrs[] = {
10785	&sensor_dev_attr_temp1_input.dev_attr.attr,
10786	&sensor_dev_attr_temp1_crit.dev_attr.attr,
10787	&sensor_dev_attr_temp1_max.dev_attr.attr,
10788	NULL
10789};
10790ATTRIBUTE_GROUPS(tg3);
10791
10792static void tg3_hwmon_close(struct tg3 *tp)
10793{
10794	if (tp->hwmon_dev) {
10795		hwmon_device_unregister(tp->hwmon_dev);
10796		tp->hwmon_dev = NULL;
10797	}
10798}
10799
10800static void tg3_hwmon_open(struct tg3 *tp)
10801{
10802	int i;
10803	u32 size = 0;
10804	struct pci_dev *pdev = tp->pdev;
10805	struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10806
10807	tg3_sd_scan_scratchpad(tp, ocirs);
10808
10809	for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10810		if (!ocirs[i].src_data_length)
10811			continue;
10812
10813		size += ocirs[i].src_hdr_length;
10814		size += ocirs[i].src_data_length;
10815	}
10816
10817	if (!size)
10818		return;
10819
10820	tp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, "tg3",
10821							  tp, tg3_groups);
10822	if (IS_ERR(tp->hwmon_dev)) {
10823		tp->hwmon_dev = NULL;
10824		dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10825	}
10826}
10827
10828
10829#define TG3_STAT_ADD32(PSTAT, REG) \
10830do {	u32 __val = tr32(REG); \
10831	(PSTAT)->low += __val; \
10832	if ((PSTAT)->low < __val) \
10833		(PSTAT)->high += 1; \
10834} while (0)
10835
10836static void tg3_periodic_fetch_stats(struct tg3 *tp)
10837{
10838	struct tg3_hw_stats *sp = tp->hw_stats;
10839
10840	if (!tp->link_up)
10841		return;
10842
10843	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10844	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10845	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10846	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10847	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10848	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10849	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10850	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10851	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10852	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10853	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10854	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10855	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10856	if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10857		     (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10858		      sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10859		u32 val;
10860
10861		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10862		val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10863		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10864		tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10865	}
10866
10867	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10868	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10869	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10870	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10871	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10872	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10873	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10874	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10875	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10876	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10877	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10878	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10879	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10880	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10881
10882	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10883	if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10884	    tg3_asic_rev(tp) != ASIC_REV_5762 &&
10885	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10886	    tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10887		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10888	} else {
10889		u32 val = tr32(HOSTCC_FLOW_ATTN);
10890		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10891		if (val) {
10892			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10893			sp->rx_discards.low += val;
10894			if (sp->rx_discards.low < val)
10895				sp->rx_discards.high += 1;
10896		}
10897		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10898	}
10899	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10900}
10901
10902static void tg3_chk_missed_msi(struct tg3 *tp)
10903{
10904	u32 i;
10905
10906	for (i = 0; i < tp->irq_cnt; i++) {
10907		struct tg3_napi *tnapi = &tp->napi[i];
10908
10909		if (tg3_has_work(tnapi)) {
10910			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10911			    tnapi->last_tx_cons == tnapi->tx_cons) {
10912				if (tnapi->chk_msi_cnt < 1) {
10913					tnapi->chk_msi_cnt++;
10914					return;
10915				}
10916				tg3_msi(0, tnapi);
10917			}
10918		}
10919		tnapi->chk_msi_cnt = 0;
10920		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10921		tnapi->last_tx_cons = tnapi->tx_cons;
10922	}
10923}
10924
10925static void tg3_timer(unsigned long __opaque)
10926{
10927	struct tg3 *tp = (struct tg3 *) __opaque;
10928
10929	spin_lock(&tp->lock);
10930
10931	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
10932		spin_unlock(&tp->lock);
10933		goto restart_timer;
10934	}
10935
10936	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10937	    tg3_flag(tp, 57765_CLASS))
10938		tg3_chk_missed_msi(tp);
10939
10940	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10941		/* BCM4785: Flush posted writes from GbE to host memory. */
10942		tr32(HOSTCC_MODE);
10943	}
10944
10945	if (!tg3_flag(tp, TAGGED_STATUS)) {
10946		/* All of this garbage is because when using non-tagged
10947		 * IRQ status the mailbox/status_block protocol the chip
10948		 * uses with the cpu is race prone.
10949		 */
10950		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10951			tw32(GRC_LOCAL_CTRL,
10952			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10953		} else {
10954			tw32(HOSTCC_MODE, tp->coalesce_mode |
10955			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10956		}
10957
10958		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10959			spin_unlock(&tp->lock);
10960			tg3_reset_task_schedule(tp);
10961			goto restart_timer;
10962		}
10963	}
10964
10965	/* This part only runs once per second. */
10966	if (!--tp->timer_counter) {
10967		if (tg3_flag(tp, 5705_PLUS))
10968			tg3_periodic_fetch_stats(tp);
10969
10970		if (tp->setlpicnt && !--tp->setlpicnt)
10971			tg3_phy_eee_enable(tp);
10972
10973		if (tg3_flag(tp, USE_LINKCHG_REG)) {
10974			u32 mac_stat;
10975			int phy_event;
10976
10977			mac_stat = tr32(MAC_STATUS);
10978
10979			phy_event = 0;
10980			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10981				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10982					phy_event = 1;
10983			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10984				phy_event = 1;
10985
10986			if (phy_event)
10987				tg3_setup_phy(tp, false);
10988		} else if (tg3_flag(tp, POLL_SERDES)) {
10989			u32 mac_stat = tr32(MAC_STATUS);
10990			int need_setup = 0;
10991
10992			if (tp->link_up &&
10993			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10994				need_setup = 1;
10995			}
10996			if (!tp->link_up &&
10997			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
10998					 MAC_STATUS_SIGNAL_DET))) {
10999				need_setup = 1;
11000			}
11001			if (need_setup) {
11002				if (!tp->serdes_counter) {
11003					tw32_f(MAC_MODE,
11004					     (tp->mac_mode &
11005					      ~MAC_MODE_PORT_MODE_MASK));
11006					udelay(40);
11007					tw32_f(MAC_MODE, tp->mac_mode);
11008					udelay(40);
11009				}
11010				tg3_setup_phy(tp, false);
11011			}
11012		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
11013			   tg3_flag(tp, 5780_CLASS)) {
11014			tg3_serdes_parallel_detect(tp);
11015		} else if (tg3_flag(tp, POLL_CPMU_LINK)) {
11016			u32 cpmu = tr32(TG3_CPMU_STATUS);
11017			bool link_up = !((cpmu & TG3_CPMU_STATUS_LINK_MASK) ==
11018					 TG3_CPMU_STATUS_LINK_MASK);
11019
11020			if (link_up != tp->link_up)
11021				tg3_setup_phy(tp, false);
11022		}
11023
11024		tp->timer_counter = tp->timer_multiplier;
11025	}
11026
11027	/* Heartbeat is only sent once every 2 seconds.
11028	 *
11029	 * The heartbeat is to tell the ASF firmware that the host
11030	 * driver is still alive.  In the event that the OS crashes,
11031	 * ASF needs to reset the hardware to free up the FIFO space
11032	 * that may be filled with rx packets destined for the host.
11033	 * If the FIFO is full, ASF will no longer function properly.
11034	 *
11035	 * Unintended resets have been reported on real time kernels
11036	 * where the timer doesn't run on time.  Netpoll will also have
11037	 * same problem.
11038	 *
11039	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
11040	 * to check the ring condition when the heartbeat is expiring
11041	 * before doing the reset.  This will prevent most unintended
11042	 * resets.
11043	 */
11044	if (!--tp->asf_counter) {
11045		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
11046			tg3_wait_for_event_ack(tp);
11047
11048			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
11049				      FWCMD_NICDRV_ALIVE3);
11050			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
11051			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
11052				      TG3_FW_UPDATE_TIMEOUT_SEC);
11053
11054			tg3_generate_fw_event(tp);
11055		}
11056		tp->asf_counter = tp->asf_multiplier;
11057	}
11058
11059	spin_unlock(&tp->lock);
11060
11061restart_timer:
11062	tp->timer.expires = jiffies + tp->timer_offset;
11063	add_timer(&tp->timer);
11064}
11065
11066static void tg3_timer_init(struct tg3 *tp)
11067{
11068	if (tg3_flag(tp, TAGGED_STATUS) &&
11069	    tg3_asic_rev(tp) != ASIC_REV_5717 &&
11070	    !tg3_flag(tp, 57765_CLASS))
11071		tp->timer_offset = HZ;
11072	else
11073		tp->timer_offset = HZ / 10;
11074
11075	BUG_ON(tp->timer_offset > HZ);
11076
11077	tp->timer_multiplier = (HZ / tp->timer_offset);
11078	tp->asf_multiplier = (HZ / tp->timer_offset) *
11079			     TG3_FW_UPDATE_FREQ_SEC;
11080
11081	init_timer(&tp->timer);
11082	tp->timer.data = (unsigned long) tp;
11083	tp->timer.function = tg3_timer;
11084}
11085
11086static void tg3_timer_start(struct tg3 *tp)
11087{
11088	tp->asf_counter   = tp->asf_multiplier;
11089	tp->timer_counter = tp->timer_multiplier;
11090
11091	tp->timer.expires = jiffies + tp->timer_offset;
11092	add_timer(&tp->timer);
11093}
11094
11095static void tg3_timer_stop(struct tg3 *tp)
11096{
11097	del_timer_sync(&tp->timer);
11098}
11099
11100/* Restart hardware after configuration changes, self-test, etc.
11101 * Invoked with tp->lock held.
11102 */
11103static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
11104	__releases(tp->lock)
11105	__acquires(tp->lock)
11106{
11107	int err;
11108
11109	err = tg3_init_hw(tp, reset_phy);
11110	if (err) {
11111		netdev_err(tp->dev,
11112			   "Failed to re-initialize device, aborting\n");
11113		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11114		tg3_full_unlock(tp);
11115		tg3_timer_stop(tp);
11116		tp->irq_sync = 0;
11117		tg3_napi_enable(tp);
11118		dev_close(tp->dev);
11119		tg3_full_lock(tp, 0);
11120	}
11121	return err;
11122}
11123
11124static void tg3_reset_task(struct work_struct *work)
11125{
11126	struct tg3 *tp = container_of(work, struct tg3, reset_task);
11127	int err;
11128
11129	rtnl_lock();
11130	tg3_full_lock(tp, 0);
11131
11132	if (!netif_running(tp->dev)) {
11133		tg3_flag_clear(tp, RESET_TASK_PENDING);
11134		tg3_full_unlock(tp);
11135		rtnl_unlock();
11136		return;
11137	}
11138
11139	tg3_full_unlock(tp);
11140
11141	tg3_phy_stop(tp);
11142
11143	tg3_netif_stop(tp);
11144
11145	tg3_full_lock(tp, 1);
11146
11147	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
11148		tp->write32_tx_mbox = tg3_write32_tx_mbox;
11149		tp->write32_rx_mbox = tg3_write_flush_reg32;
11150		tg3_flag_set(tp, MBOX_WRITE_REORDER);
11151		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
11152	}
11153
11154	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
11155	err = tg3_init_hw(tp, true);
11156	if (err)
11157		goto out;
11158
11159	tg3_netif_start(tp);
11160
11161out:
11162	tg3_full_unlock(tp);
11163
11164	if (!err)
11165		tg3_phy_start(tp);
11166
11167	tg3_flag_clear(tp, RESET_TASK_PENDING);
11168	rtnl_unlock();
11169}
11170
11171static int tg3_request_irq(struct tg3 *tp, int irq_num)
11172{
11173	irq_handler_t fn;
11174	unsigned long flags;
11175	char *name;
11176	struct tg3_napi *tnapi = &tp->napi[irq_num];
11177
11178	if (tp->irq_cnt == 1)
11179		name = tp->dev->name;
11180	else {
11181		name = &tnapi->irq_lbl[0];
11182		if (tnapi->tx_buffers && tnapi->rx_rcb)
11183			snprintf(name, IFNAMSIZ,
11184				 "%s-txrx-%d", tp->dev->name, irq_num);
11185		else if (tnapi->tx_buffers)
11186			snprintf(name, IFNAMSIZ,
11187				 "%s-tx-%d", tp->dev->name, irq_num);
11188		else if (tnapi->rx_rcb)
11189			snprintf(name, IFNAMSIZ,
11190				 "%s-rx-%d", tp->dev->name, irq_num);
11191		else
11192			snprintf(name, IFNAMSIZ,
11193				 "%s-%d", tp->dev->name, irq_num);
11194		name[IFNAMSIZ-1] = 0;
11195	}
11196
11197	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11198		fn = tg3_msi;
11199		if (tg3_flag(tp, 1SHOT_MSI))
11200			fn = tg3_msi_1shot;
11201		flags = 0;
11202	} else {
11203		fn = tg3_interrupt;
11204		if (tg3_flag(tp, TAGGED_STATUS))
11205			fn = tg3_interrupt_tagged;
11206		flags = IRQF_SHARED;
11207	}
11208
11209	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
11210}
11211
11212static int tg3_test_interrupt(struct tg3 *tp)
11213{
11214	struct tg3_napi *tnapi = &tp->napi[0];
11215	struct net_device *dev = tp->dev;
11216	int err, i, intr_ok = 0;
11217	u32 val;
11218
11219	if (!netif_running(dev))
11220		return -ENODEV;
11221
11222	tg3_disable_ints(tp);
11223
11224	free_irq(tnapi->irq_vec, tnapi);
11225
11226	/*
11227	 * Turn off MSI one shot mode.  Otherwise this test has no
11228	 * observable way to know whether the interrupt was delivered.
11229	 */
11230	if (tg3_flag(tp, 57765_PLUS)) {
11231		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
11232		tw32(MSGINT_MODE, val);
11233	}
11234
11235	err = request_irq(tnapi->irq_vec, tg3_test_isr,
11236			  IRQF_SHARED, dev->name, tnapi);
11237	if (err)
11238		return err;
11239
11240	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
11241	tg3_enable_ints(tp);
11242
11243	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11244	       tnapi->coal_now);
11245
11246	for (i = 0; i < 5; i++) {
11247		u32 int_mbox, misc_host_ctrl;
11248
11249		int_mbox = tr32_mailbox(tnapi->int_mbox);
11250		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
11251
11252		if ((int_mbox != 0) ||
11253		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
11254			intr_ok = 1;
11255			break;
11256		}
11257
11258		if (tg3_flag(tp, 57765_PLUS) &&
11259		    tnapi->hw_status->status_tag != tnapi->last_tag)
11260			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11261
11262		msleep(10);
11263	}
11264
11265	tg3_disable_ints(tp);
11266
11267	free_irq(tnapi->irq_vec, tnapi);
11268
11269	err = tg3_request_irq(tp, 0);
11270
11271	if (err)
11272		return err;
11273
11274	if (intr_ok) {
11275		/* Reenable MSI one shot mode. */
11276		if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11277			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11278			tw32(MSGINT_MODE, val);
11279		}
11280		return 0;
11281	}
11282
11283	return -EIO;
11284}
11285
11286/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11287 * successfully restored
11288 */
11289static int tg3_test_msi(struct tg3 *tp)
11290{
11291	int err;
11292	u16 pci_cmd;
11293
11294	if (!tg3_flag(tp, USING_MSI))
11295		return 0;
11296
11297	/* Turn off SERR reporting in case MSI terminates with Master
11298	 * Abort.
11299	 */
11300	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11301	pci_write_config_word(tp->pdev, PCI_COMMAND,
11302			      pci_cmd & ~PCI_COMMAND_SERR);
11303
11304	err = tg3_test_interrupt(tp);
11305
11306	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11307
11308	if (!err)
11309		return 0;
11310
11311	/* other failures */
11312	if (err != -EIO)
11313		return err;
11314
11315	/* MSI test failed, go back to INTx mode */
11316	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11317		    "to INTx mode. Please report this failure to the PCI "
11318		    "maintainer and include system chipset information\n");
11319
11320	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11321
11322	pci_disable_msi(tp->pdev);
11323
11324	tg3_flag_clear(tp, USING_MSI);
11325	tp->napi[0].irq_vec = tp->pdev->irq;
11326
11327	err = tg3_request_irq(tp, 0);
11328	if (err)
11329		return err;
11330
11331	/* Need to reset the chip because the MSI cycle may have terminated
11332	 * with Master Abort.
11333	 */
11334	tg3_full_lock(tp, 1);
11335
11336	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11337	err = tg3_init_hw(tp, true);
11338
11339	tg3_full_unlock(tp);
11340
11341	if (err)
11342		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11343
11344	return err;
11345}
11346
11347static int tg3_request_firmware(struct tg3 *tp)
11348{
11349	const struct tg3_firmware_hdr *fw_hdr;
11350
11351	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11352		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11353			   tp->fw_needed);
11354		return -ENOENT;
11355	}
11356
11357	fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11358
11359	/* Firmware blob starts with version numbers, followed by
11360	 * start address and _full_ length including BSS sections
11361	 * (which must be longer than the actual data, of course
11362	 */
11363
11364	tp->fw_len = be32_to_cpu(fw_hdr->len);	/* includes bss */
11365	if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11366		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11367			   tp->fw_len, tp->fw_needed);
11368		release_firmware(tp->fw);
11369		tp->fw = NULL;
11370		return -EINVAL;
11371	}
11372
11373	/* We no longer need firmware; we have it. */
11374	tp->fw_needed = NULL;
11375	return 0;
11376}
11377
11378static u32 tg3_irq_count(struct tg3 *tp)
11379{
11380	u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11381
11382	if (irq_cnt > 1) {
11383		/* We want as many rx rings enabled as there are cpus.
11384		 * In multiqueue MSI-X mode, the first MSI-X vector
11385		 * only deals with link interrupts, etc, so we add
11386		 * one to the number of vectors we are requesting.
11387		 */
11388		irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11389	}
11390
11391	return irq_cnt;
11392}
11393
11394static bool tg3_enable_msix(struct tg3 *tp)
11395{
11396	int i, rc;
11397	struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11398
11399	tp->txq_cnt = tp->txq_req;
11400	tp->rxq_cnt = tp->rxq_req;
11401	if (!tp->rxq_cnt)
11402		tp->rxq_cnt = netif_get_num_default_rss_queues();
11403	if (tp->rxq_cnt > tp->rxq_max)
11404		tp->rxq_cnt = tp->rxq_max;
11405
11406	/* Disable multiple TX rings by default.  Simple round-robin hardware
11407	 * scheduling of the TX rings can cause starvation of rings with
11408	 * small packets when other rings have TSO or jumbo packets.
11409	 */
11410	if (!tp->txq_req)
11411		tp->txq_cnt = 1;
11412
11413	tp->irq_cnt = tg3_irq_count(tp);
11414
11415	for (i = 0; i < tp->irq_max; i++) {
11416		msix_ent[i].entry  = i;
11417		msix_ent[i].vector = 0;
11418	}
11419
11420	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
11421	if (rc < 0) {
11422		return false;
11423	} else if (rc < tp->irq_cnt) {
11424		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11425			      tp->irq_cnt, rc);
11426		tp->irq_cnt = rc;
11427		tp->rxq_cnt = max(rc - 1, 1);
11428		if (tp->txq_cnt)
11429			tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11430	}
11431
11432	for (i = 0; i < tp->irq_max; i++)
11433		tp->napi[i].irq_vec = msix_ent[i].vector;
11434
11435	if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11436		pci_disable_msix(tp->pdev);
11437		return false;
11438	}
11439
11440	if (tp->irq_cnt == 1)
11441		return true;
11442
11443	tg3_flag_set(tp, ENABLE_RSS);
11444
11445	if (tp->txq_cnt > 1)
11446		tg3_flag_set(tp, ENABLE_TSS);
11447
11448	netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11449
11450	return true;
11451}
11452
11453static void tg3_ints_init(struct tg3 *tp)
11454{
11455	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11456	    !tg3_flag(tp, TAGGED_STATUS)) {
11457		/* All MSI supporting chips should support tagged
11458		 * status.  Assert that this is the case.
11459		 */
11460		netdev_warn(tp->dev,
11461			    "MSI without TAGGED_STATUS? Not using MSI\n");
11462		goto defcfg;
11463	}
11464
11465	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11466		tg3_flag_set(tp, USING_MSIX);
11467	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11468		tg3_flag_set(tp, USING_MSI);
11469
11470	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11471		u32 msi_mode = tr32(MSGINT_MODE);
11472		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11473			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11474		if (!tg3_flag(tp, 1SHOT_MSI))
11475			msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11476		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11477	}
11478defcfg:
11479	if (!tg3_flag(tp, USING_MSIX)) {
11480		tp->irq_cnt = 1;
11481		tp->napi[0].irq_vec = tp->pdev->irq;
11482	}
11483
11484	if (tp->irq_cnt == 1) {
11485		tp->txq_cnt = 1;
11486		tp->rxq_cnt = 1;
11487		netif_set_real_num_tx_queues(tp->dev, 1);
11488		netif_set_real_num_rx_queues(tp->dev, 1);
11489	}
11490}
11491
11492static void tg3_ints_fini(struct tg3 *tp)
11493{
11494	if (tg3_flag(tp, USING_MSIX))
11495		pci_disable_msix(tp->pdev);
11496	else if (tg3_flag(tp, USING_MSI))
11497		pci_disable_msi(tp->pdev);
11498	tg3_flag_clear(tp, USING_MSI);
11499	tg3_flag_clear(tp, USING_MSIX);
11500	tg3_flag_clear(tp, ENABLE_RSS);
11501	tg3_flag_clear(tp, ENABLE_TSS);
11502}
11503
11504static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11505		     bool init)
11506{
11507	struct net_device *dev = tp->dev;
11508	int i, err;
11509
11510	/*
11511	 * Setup interrupts first so we know how
11512	 * many NAPI resources to allocate
11513	 */
11514	tg3_ints_init(tp);
11515
11516	tg3_rss_check_indir_tbl(tp);
11517
11518	/* The placement of this call is tied
11519	 * to the setup and use of Host TX descriptors.
11520	 */
11521	err = tg3_alloc_consistent(tp);
11522	if (err)
11523		goto out_ints_fini;
11524
11525	tg3_napi_init(tp);
11526
11527	tg3_napi_enable(tp);
11528
11529	for (i = 0; i < tp->irq_cnt; i++) {
11530		struct tg3_napi *tnapi = &tp->napi[i];
11531		err = tg3_request_irq(tp, i);
11532		if (err) {
11533			for (i--; i >= 0; i--) {
11534				tnapi = &tp->napi[i];
11535				free_irq(tnapi->irq_vec, tnapi);
11536			}
11537			goto out_napi_fini;
11538		}
11539	}
11540
11541	tg3_full_lock(tp, 0);
11542
11543	if (init)
11544		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
11545
11546	err = tg3_init_hw(tp, reset_phy);
11547	if (err) {
11548		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11549		tg3_free_rings(tp);
11550	}
11551
11552	tg3_full_unlock(tp);
11553
11554	if (err)
11555		goto out_free_irq;
11556
11557	if (test_irq && tg3_flag(tp, USING_MSI)) {
11558		err = tg3_test_msi(tp);
11559
11560		if (err) {
11561			tg3_full_lock(tp, 0);
11562			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11563			tg3_free_rings(tp);
11564			tg3_full_unlock(tp);
11565
11566			goto out_napi_fini;
11567		}
11568
11569		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11570			u32 val = tr32(PCIE_TRANSACTION_CFG);
11571
11572			tw32(PCIE_TRANSACTION_CFG,
11573			     val | PCIE_TRANS_CFG_1SHOT_MSI);
11574		}
11575	}
11576
11577	tg3_phy_start(tp);
11578
11579	tg3_hwmon_open(tp);
11580
11581	tg3_full_lock(tp, 0);
11582
11583	tg3_timer_start(tp);
11584	tg3_flag_set(tp, INIT_COMPLETE);
11585	tg3_enable_ints(tp);
11586
11587	tg3_ptp_resume(tp);
11588
11589	tg3_full_unlock(tp);
11590
11591	netif_tx_start_all_queues(dev);
11592
11593	/*
11594	 * Reset loopback feature if it was turned on while the device was down
11595	 * make sure that it's installed properly now.
11596	 */
11597	if (dev->features & NETIF_F_LOOPBACK)
11598		tg3_set_loopback(dev, dev->features);
11599
11600	return 0;
11601
11602out_free_irq:
11603	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11604		struct tg3_napi *tnapi = &tp->napi[i];
11605		free_irq(tnapi->irq_vec, tnapi);
11606	}
11607
11608out_napi_fini:
11609	tg3_napi_disable(tp);
11610	tg3_napi_fini(tp);
11611	tg3_free_consistent(tp);
11612
11613out_ints_fini:
11614	tg3_ints_fini(tp);
11615
11616	return err;
11617}
11618
11619static void tg3_stop(struct tg3 *tp)
11620{
11621	int i;
11622
11623	tg3_reset_task_cancel(tp);
11624	tg3_netif_stop(tp);
11625
11626	tg3_timer_stop(tp);
11627
11628	tg3_hwmon_close(tp);
11629
11630	tg3_phy_stop(tp);
11631
11632	tg3_full_lock(tp, 1);
11633
11634	tg3_disable_ints(tp);
11635
11636	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11637	tg3_free_rings(tp);
11638	tg3_flag_clear(tp, INIT_COMPLETE);
11639
11640	tg3_full_unlock(tp);
11641
11642	for (i = tp->irq_cnt - 1; i >= 0; i--) {
11643		struct tg3_napi *tnapi = &tp->napi[i];
11644		free_irq(tnapi->irq_vec, tnapi);
11645	}
11646
11647	tg3_ints_fini(tp);
11648
11649	tg3_napi_fini(tp);
11650
11651	tg3_free_consistent(tp);
11652}
11653
11654static int tg3_open(struct net_device *dev)
11655{
11656	struct tg3 *tp = netdev_priv(dev);
11657	int err;
11658
11659	if (tp->pcierr_recovery) {
11660		netdev_err(dev, "Failed to open device. PCI error recovery "
11661			   "in progress\n");
11662		return -EAGAIN;
11663	}
11664
11665	if (tp->fw_needed) {
11666		err = tg3_request_firmware(tp);
11667		if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11668			if (err) {
11669				netdev_warn(tp->dev, "EEE capability disabled\n");
11670				tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11671			} else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11672				netdev_warn(tp->dev, "EEE capability restored\n");
11673				tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11674			}
11675		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11676			if (err)
11677				return err;
11678		} else if (err) {
11679			netdev_warn(tp->dev, "TSO capability disabled\n");
11680			tg3_flag_clear(tp, TSO_CAPABLE);
11681		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
11682			netdev_notice(tp->dev, "TSO capability restored\n");
11683			tg3_flag_set(tp, TSO_CAPABLE);
11684		}
11685	}
11686
11687	tg3_carrier_off(tp);
11688
11689	err = tg3_power_up(tp);
11690	if (err)
11691		return err;
11692
11693	tg3_full_lock(tp, 0);
11694
11695	tg3_disable_ints(tp);
11696	tg3_flag_clear(tp, INIT_COMPLETE);
11697
11698	tg3_full_unlock(tp);
11699
11700	err = tg3_start(tp,
11701			!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11702			true, true);
11703	if (err) {
11704		tg3_frob_aux_power(tp, false);
11705		pci_set_power_state(tp->pdev, PCI_D3hot);
11706	}
11707
11708	return err;
11709}
11710
11711static int tg3_close(struct net_device *dev)
11712{
11713	struct tg3 *tp = netdev_priv(dev);
11714
11715	if (tp->pcierr_recovery) {
11716		netdev_err(dev, "Failed to close device. PCI error recovery "
11717			   "in progress\n");
11718		return -EAGAIN;
11719	}
11720
11721	tg3_stop(tp);
11722
11723	/* Clear stats across close / open calls */
11724	memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11725	memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11726
11727	if (pci_device_is_present(tp->pdev)) {
11728		tg3_power_down_prepare(tp);
11729
11730		tg3_carrier_off(tp);
11731	}
11732	return 0;
11733}
11734
11735static inline u64 get_stat64(tg3_stat64_t *val)
11736{
11737       return ((u64)val->high << 32) | ((u64)val->low);
11738}
11739
11740static u64 tg3_calc_crc_errors(struct tg3 *tp)
11741{
11742	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11743
11744	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11745	    (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11746	     tg3_asic_rev(tp) == ASIC_REV_5701)) {
11747		u32 val;
11748
11749		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11750			tg3_writephy(tp, MII_TG3_TEST1,
11751				     val | MII_TG3_TEST1_CRC_EN);
11752			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11753		} else
11754			val = 0;
11755
11756		tp->phy_crc_errors += val;
11757
11758		return tp->phy_crc_errors;
11759	}
11760
11761	return get_stat64(&hw_stats->rx_fcs_errors);
11762}
11763
11764#define ESTAT_ADD(member) \
11765	estats->member =	old_estats->member + \
11766				get_stat64(&hw_stats->member)
11767
11768static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11769{
11770	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11771	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11772
11773	ESTAT_ADD(rx_octets);
11774	ESTAT_ADD(rx_fragments);
11775	ESTAT_ADD(rx_ucast_packets);
11776	ESTAT_ADD(rx_mcast_packets);
11777	ESTAT_ADD(rx_bcast_packets);
11778	ESTAT_ADD(rx_fcs_errors);
11779	ESTAT_ADD(rx_align_errors);
11780	ESTAT_ADD(rx_xon_pause_rcvd);
11781	ESTAT_ADD(rx_xoff_pause_rcvd);
11782	ESTAT_ADD(rx_mac_ctrl_rcvd);
11783	ESTAT_ADD(rx_xoff_entered);
11784	ESTAT_ADD(rx_frame_too_long_errors);
11785	ESTAT_ADD(rx_jabbers);
11786	ESTAT_ADD(rx_undersize_packets);
11787	ESTAT_ADD(rx_in_length_errors);
11788	ESTAT_ADD(rx_out_length_errors);
11789	ESTAT_ADD(rx_64_or_less_octet_packets);
11790	ESTAT_ADD(rx_65_to_127_octet_packets);
11791	ESTAT_ADD(rx_128_to_255_octet_packets);
11792	ESTAT_ADD(rx_256_to_511_octet_packets);
11793	ESTAT_ADD(rx_512_to_1023_octet_packets);
11794	ESTAT_ADD(rx_1024_to_1522_octet_packets);
11795	ESTAT_ADD(rx_1523_to_2047_octet_packets);
11796	ESTAT_ADD(rx_2048_to_4095_octet_packets);
11797	ESTAT_ADD(rx_4096_to_8191_octet_packets);
11798	ESTAT_ADD(rx_8192_to_9022_octet_packets);
11799
11800	ESTAT_ADD(tx_octets);
11801	ESTAT_ADD(tx_collisions);
11802	ESTAT_ADD(tx_xon_sent);
11803	ESTAT_ADD(tx_xoff_sent);
11804	ESTAT_ADD(tx_flow_control);
11805	ESTAT_ADD(tx_mac_errors);
11806	ESTAT_ADD(tx_single_collisions);
11807	ESTAT_ADD(tx_mult_collisions);
11808	ESTAT_ADD(tx_deferred);
11809	ESTAT_ADD(tx_excessive_collisions);
11810	ESTAT_ADD(tx_late_collisions);
11811	ESTAT_ADD(tx_collide_2times);
11812	ESTAT_ADD(tx_collide_3times);
11813	ESTAT_ADD(tx_collide_4times);
11814	ESTAT_ADD(tx_collide_5times);
11815	ESTAT_ADD(tx_collide_6times);
11816	ESTAT_ADD(tx_collide_7times);
11817	ESTAT_ADD(tx_collide_8times);
11818	ESTAT_ADD(tx_collide_9times);
11819	ESTAT_ADD(tx_collide_10times);
11820	ESTAT_ADD(tx_collide_11times);
11821	ESTAT_ADD(tx_collide_12times);
11822	ESTAT_ADD(tx_collide_13times);
11823	ESTAT_ADD(tx_collide_14times);
11824	ESTAT_ADD(tx_collide_15times);
11825	ESTAT_ADD(tx_ucast_packets);
11826	ESTAT_ADD(tx_mcast_packets);
11827	ESTAT_ADD(tx_bcast_packets);
11828	ESTAT_ADD(tx_carrier_sense_errors);
11829	ESTAT_ADD(tx_discards);
11830	ESTAT_ADD(tx_errors);
11831
11832	ESTAT_ADD(dma_writeq_full);
11833	ESTAT_ADD(dma_write_prioq_full);
11834	ESTAT_ADD(rxbds_empty);
11835	ESTAT_ADD(rx_discards);
11836	ESTAT_ADD(rx_errors);
11837	ESTAT_ADD(rx_threshold_hit);
11838
11839	ESTAT_ADD(dma_readq_full);
11840	ESTAT_ADD(dma_read_prioq_full);
11841	ESTAT_ADD(tx_comp_queue_full);
11842
11843	ESTAT_ADD(ring_set_send_prod_index);
11844	ESTAT_ADD(ring_status_update);
11845	ESTAT_ADD(nic_irqs);
11846	ESTAT_ADD(nic_avoided_irqs);
11847	ESTAT_ADD(nic_tx_threshold_hit);
11848
11849	ESTAT_ADD(mbuf_lwm_thresh_hit);
11850}
11851
11852static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11853{
11854	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11855	struct tg3_hw_stats *hw_stats = tp->hw_stats;
11856
11857	stats->rx_packets = old_stats->rx_packets +
11858		get_stat64(&hw_stats->rx_ucast_packets) +
11859		get_stat64(&hw_stats->rx_mcast_packets) +
11860		get_stat64(&hw_stats->rx_bcast_packets);
11861
11862	stats->tx_packets = old_stats->tx_packets +
11863		get_stat64(&hw_stats->tx_ucast_packets) +
11864		get_stat64(&hw_stats->tx_mcast_packets) +
11865		get_stat64(&hw_stats->tx_bcast_packets);
11866
11867	stats->rx_bytes = old_stats->rx_bytes +
11868		get_stat64(&hw_stats->rx_octets);
11869	stats->tx_bytes = old_stats->tx_bytes +
11870		get_stat64(&hw_stats->tx_octets);
11871
11872	stats->rx_errors = old_stats->rx_errors +
11873		get_stat64(&hw_stats->rx_errors);
11874	stats->tx_errors = old_stats->tx_errors +
11875		get_stat64(&hw_stats->tx_errors) +
11876		get_stat64(&hw_stats->tx_mac_errors) +
11877		get_stat64(&hw_stats->tx_carrier_sense_errors) +
11878		get_stat64(&hw_stats->tx_discards);
11879
11880	stats->multicast = old_stats->multicast +
11881		get_stat64(&hw_stats->rx_mcast_packets);
11882	stats->collisions = old_stats->collisions +
11883		get_stat64(&hw_stats->tx_collisions);
11884
11885	stats->rx_length_errors = old_stats->rx_length_errors +
11886		get_stat64(&hw_stats->rx_frame_too_long_errors) +
11887		get_stat64(&hw_stats->rx_undersize_packets);
11888
11889	stats->rx_frame_errors = old_stats->rx_frame_errors +
11890		get_stat64(&hw_stats->rx_align_errors);
11891	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11892		get_stat64(&hw_stats->tx_discards);
11893	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11894		get_stat64(&hw_stats->tx_carrier_sense_errors);
11895
11896	stats->rx_crc_errors = old_stats->rx_crc_errors +
11897		tg3_calc_crc_errors(tp);
11898
11899	stats->rx_missed_errors = old_stats->rx_missed_errors +
11900		get_stat64(&hw_stats->rx_discards);
11901
11902	stats->rx_dropped = tp->rx_dropped;
11903	stats->tx_dropped = tp->tx_dropped;
11904}
11905
11906static int tg3_get_regs_len(struct net_device *dev)
11907{
11908	return TG3_REG_BLK_SIZE;
11909}
11910
11911static void tg3_get_regs(struct net_device *dev,
11912		struct ethtool_regs *regs, void *_p)
11913{
11914	struct tg3 *tp = netdev_priv(dev);
11915
11916	regs->version = 0;
11917
11918	memset(_p, 0, TG3_REG_BLK_SIZE);
11919
11920	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11921		return;
11922
11923	tg3_full_lock(tp, 0);
11924
11925	tg3_dump_legacy_regs(tp, (u32 *)_p);
11926
11927	tg3_full_unlock(tp);
11928}
11929
11930static int tg3_get_eeprom_len(struct net_device *dev)
11931{
11932	struct tg3 *tp = netdev_priv(dev);
11933
11934	return tp->nvram_size;
11935}
11936
11937static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11938{
11939	struct tg3 *tp = netdev_priv(dev);
11940	int ret, cpmu_restore = 0;
11941	u8  *pd;
11942	u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
11943	__be32 val;
11944
11945	if (tg3_flag(tp, NO_NVRAM))
11946		return -EINVAL;
11947
11948	offset = eeprom->offset;
11949	len = eeprom->len;
11950	eeprom->len = 0;
11951
11952	eeprom->magic = TG3_EEPROM_MAGIC;
11953
11954	/* Override clock, link aware and link idle modes */
11955	if (tg3_flag(tp, CPMU_PRESENT)) {
11956		cpmu_val = tr32(TG3_CPMU_CTRL);
11957		if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
11958				CPMU_CTRL_LINK_IDLE_MODE)) {
11959			tw32(TG3_CPMU_CTRL, cpmu_val &
11960					    ~(CPMU_CTRL_LINK_AWARE_MODE |
11961					     CPMU_CTRL_LINK_IDLE_MODE));
11962			cpmu_restore = 1;
11963		}
11964	}
11965	tg3_override_clk(tp);
11966
11967	if (offset & 3) {
11968		/* adjustments to start on required 4 byte boundary */
11969		b_offset = offset & 3;
11970		b_count = 4 - b_offset;
11971		if (b_count > len) {
11972			/* i.e. offset=1 len=2 */
11973			b_count = len;
11974		}
11975		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11976		if (ret)
11977			goto eeprom_done;
11978		memcpy(data, ((char *)&val) + b_offset, b_count);
11979		len -= b_count;
11980		offset += b_count;
11981		eeprom->len += b_count;
11982	}
11983
11984	/* read bytes up to the last 4 byte boundary */
11985	pd = &data[eeprom->len];
11986	for (i = 0; i < (len - (len & 3)); i += 4) {
11987		ret = tg3_nvram_read_be32(tp, offset + i, &val);
11988		if (ret) {
11989			if (i)
11990				i -= 4;
11991			eeprom->len += i;
11992			goto eeprom_done;
11993		}
11994		memcpy(pd + i, &val, 4);
11995		if (need_resched()) {
11996			if (signal_pending(current)) {
11997				eeprom->len += i;
11998				ret = -EINTR;
11999				goto eeprom_done;
12000			}
12001			cond_resched();
12002		}
12003	}
12004	eeprom->len += i;
12005
12006	if (len & 3) {
12007		/* read last bytes not ending on 4 byte boundary */
12008		pd = &data[eeprom->len];
12009		b_count = len & 3;
12010		b_offset = offset + len - b_count;
12011		ret = tg3_nvram_read_be32(tp, b_offset, &val);
12012		if (ret)
12013			goto eeprom_done;
12014		memcpy(pd, &val, b_count);
12015		eeprom->len += b_count;
12016	}
12017	ret = 0;
12018
12019eeprom_done:
12020	/* Restore clock, link aware and link idle modes */
12021	tg3_restore_clk(tp);
12022	if (cpmu_restore)
12023		tw32(TG3_CPMU_CTRL, cpmu_val);
12024
12025	return ret;
12026}
12027
12028static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
12029{
12030	struct tg3 *tp = netdev_priv(dev);
12031	int ret;
12032	u32 offset, len, b_offset, odd_len;
12033	u8 *buf;
12034	__be32 start, end;
12035
12036	if (tg3_flag(tp, NO_NVRAM) ||
12037	    eeprom->magic != TG3_EEPROM_MAGIC)
12038		return -EINVAL;
12039
12040	offset = eeprom->offset;
12041	len = eeprom->len;
12042
12043	if ((b_offset = (offset & 3))) {
12044		/* adjustments to start on required 4 byte boundary */
12045		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
12046		if (ret)
12047			return ret;
12048		len += b_offset;
12049		offset &= ~3;
12050		if (len < 4)
12051			len = 4;
12052	}
12053
12054	odd_len = 0;
12055	if (len & 3) {
12056		/* adjustments to end on required 4 byte boundary */
12057		odd_len = 1;
12058		len = (len + 3) & ~3;
12059		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
12060		if (ret)
12061			return ret;
12062	}
12063
12064	buf = data;
12065	if (b_offset || odd_len) {
12066		buf = kmalloc(len, GFP_KERNEL);
12067		if (!buf)
12068			return -ENOMEM;
12069		if (b_offset)
12070			memcpy(buf, &start, 4);
12071		if (odd_len)
12072			memcpy(buf+len-4, &end, 4);
12073		memcpy(buf + b_offset, data, eeprom->len);
12074	}
12075
12076	ret = tg3_nvram_write_block(tp, offset, len, buf);
12077
12078	if (buf != data)
12079		kfree(buf);
12080
12081	return ret;
12082}
12083
12084static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12085{
12086	struct tg3 *tp = netdev_priv(dev);
12087
12088	if (tg3_flag(tp, USE_PHYLIB)) {
12089		struct phy_device *phydev;
12090		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12091			return -EAGAIN;
12092		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12093		return phy_ethtool_gset(phydev, cmd);
12094	}
12095
12096	cmd->supported = (SUPPORTED_Autoneg);
12097
12098	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12099		cmd->supported |= (SUPPORTED_1000baseT_Half |
12100				   SUPPORTED_1000baseT_Full);
12101
12102	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12103		cmd->supported |= (SUPPORTED_100baseT_Half |
12104				  SUPPORTED_100baseT_Full |
12105				  SUPPORTED_10baseT_Half |
12106				  SUPPORTED_10baseT_Full |
12107				  SUPPORTED_TP);
12108		cmd->port = PORT_TP;
12109	} else {
12110		cmd->supported |= SUPPORTED_FIBRE;
12111		cmd->port = PORT_FIBRE;
12112	}
12113
12114	cmd->advertising = tp->link_config.advertising;
12115	if (tg3_flag(tp, PAUSE_AUTONEG)) {
12116		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
12117			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12118				cmd->advertising |= ADVERTISED_Pause;
12119			} else {
12120				cmd->advertising |= ADVERTISED_Pause |
12121						    ADVERTISED_Asym_Pause;
12122			}
12123		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
12124			cmd->advertising |= ADVERTISED_Asym_Pause;
12125		}
12126	}
12127	if (netif_running(dev) && tp->link_up) {
12128		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
12129		cmd->duplex = tp->link_config.active_duplex;
12130		cmd->lp_advertising = tp->link_config.rmt_adv;
12131		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
12132			if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
12133				cmd->eth_tp_mdix = ETH_TP_MDI_X;
12134			else
12135				cmd->eth_tp_mdix = ETH_TP_MDI;
12136		}
12137	} else {
12138		ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
12139		cmd->duplex = DUPLEX_UNKNOWN;
12140		cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
12141	}
12142	cmd->phy_address = tp->phy_addr;
12143	cmd->transceiver = XCVR_INTERNAL;
12144	cmd->autoneg = tp->link_config.autoneg;
12145	cmd->maxtxpkt = 0;
12146	cmd->maxrxpkt = 0;
12147	return 0;
12148}
12149
12150static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
12151{
12152	struct tg3 *tp = netdev_priv(dev);
12153	u32 speed = ethtool_cmd_speed(cmd);
12154
12155	if (tg3_flag(tp, USE_PHYLIB)) {
12156		struct phy_device *phydev;
12157		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12158			return -EAGAIN;
12159		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12160		return phy_ethtool_sset(phydev, cmd);
12161	}
12162
12163	if (cmd->autoneg != AUTONEG_ENABLE &&
12164	    cmd->autoneg != AUTONEG_DISABLE)
12165		return -EINVAL;
12166
12167	if (cmd->autoneg == AUTONEG_DISABLE &&
12168	    cmd->duplex != DUPLEX_FULL &&
12169	    cmd->duplex != DUPLEX_HALF)
12170		return -EINVAL;
12171
12172	if (cmd->autoneg == AUTONEG_ENABLE) {
12173		u32 mask = ADVERTISED_Autoneg |
12174			   ADVERTISED_Pause |
12175			   ADVERTISED_Asym_Pause;
12176
12177		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12178			mask |= ADVERTISED_1000baseT_Half |
12179				ADVERTISED_1000baseT_Full;
12180
12181		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12182			mask |= ADVERTISED_100baseT_Half |
12183				ADVERTISED_100baseT_Full |
12184				ADVERTISED_10baseT_Half |
12185				ADVERTISED_10baseT_Full |
12186				ADVERTISED_TP;
12187		else
12188			mask |= ADVERTISED_FIBRE;
12189
12190		if (cmd->advertising & ~mask)
12191			return -EINVAL;
12192
12193		mask &= (ADVERTISED_1000baseT_Half |
12194			 ADVERTISED_1000baseT_Full |
12195			 ADVERTISED_100baseT_Half |
12196			 ADVERTISED_100baseT_Full |
12197			 ADVERTISED_10baseT_Half |
12198			 ADVERTISED_10baseT_Full);
12199
12200		cmd->advertising &= mask;
12201	} else {
12202		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
12203			if (speed != SPEED_1000)
12204				return -EINVAL;
12205
12206			if (cmd->duplex != DUPLEX_FULL)
12207				return -EINVAL;
12208		} else {
12209			if (speed != SPEED_100 &&
12210			    speed != SPEED_10)
12211				return -EINVAL;
12212		}
12213	}
12214
12215	tg3_full_lock(tp, 0);
12216
12217	tp->link_config.autoneg = cmd->autoneg;
12218	if (cmd->autoneg == AUTONEG_ENABLE) {
12219		tp->link_config.advertising = (cmd->advertising |
12220					      ADVERTISED_Autoneg);
12221		tp->link_config.speed = SPEED_UNKNOWN;
12222		tp->link_config.duplex = DUPLEX_UNKNOWN;
12223	} else {
12224		tp->link_config.advertising = 0;
12225		tp->link_config.speed = speed;
12226		tp->link_config.duplex = cmd->duplex;
12227	}
12228
12229	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12230
12231	tg3_warn_mgmt_link_flap(tp);
12232
12233	if (netif_running(dev))
12234		tg3_setup_phy(tp, true);
12235
12236	tg3_full_unlock(tp);
12237
12238	return 0;
12239}
12240
12241static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
12242{
12243	struct tg3 *tp = netdev_priv(dev);
12244
12245	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
12246	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
12247	strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
12248	strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
12249}
12250
12251static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12252{
12253	struct tg3 *tp = netdev_priv(dev);
12254
12255	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
12256		wol->supported = WAKE_MAGIC;
12257	else
12258		wol->supported = 0;
12259	wol->wolopts = 0;
12260	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
12261		wol->wolopts = WAKE_MAGIC;
12262	memset(&wol->sopass, 0, sizeof(wol->sopass));
12263}
12264
12265static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
12266{
12267	struct tg3 *tp = netdev_priv(dev);
12268	struct device *dp = &tp->pdev->dev;
12269
12270	if (wol->wolopts & ~WAKE_MAGIC)
12271		return -EINVAL;
12272	if ((wol->wolopts & WAKE_MAGIC) &&
12273	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
12274		return -EINVAL;
12275
12276	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
12277
12278	if (device_may_wakeup(dp))
12279		tg3_flag_set(tp, WOL_ENABLE);
12280	else
12281		tg3_flag_clear(tp, WOL_ENABLE);
12282
12283	return 0;
12284}
12285
12286static u32 tg3_get_msglevel(struct net_device *dev)
12287{
12288	struct tg3 *tp = netdev_priv(dev);
12289	return tp->msg_enable;
12290}
12291
12292static void tg3_set_msglevel(struct net_device *dev, u32 value)
12293{
12294	struct tg3 *tp = netdev_priv(dev);
12295	tp->msg_enable = value;
12296}
12297
12298static int tg3_nway_reset(struct net_device *dev)
12299{
12300	struct tg3 *tp = netdev_priv(dev);
12301	int r;
12302
12303	if (!netif_running(dev))
12304		return -EAGAIN;
12305
12306	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12307		return -EINVAL;
12308
12309	tg3_warn_mgmt_link_flap(tp);
12310
12311	if (tg3_flag(tp, USE_PHYLIB)) {
12312		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12313			return -EAGAIN;
12314		r = phy_start_aneg(tp->mdio_bus->phy_map[tp->phy_addr]);
12315	} else {
12316		u32 bmcr;
12317
12318		spin_lock_bh(&tp->lock);
12319		r = -EINVAL;
12320		tg3_readphy(tp, MII_BMCR, &bmcr);
12321		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12322		    ((bmcr & BMCR_ANENABLE) ||
12323		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12324			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12325						   BMCR_ANENABLE);
12326			r = 0;
12327		}
12328		spin_unlock_bh(&tp->lock);
12329	}
12330
12331	return r;
12332}
12333
12334static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12335{
12336	struct tg3 *tp = netdev_priv(dev);
12337
12338	ering->rx_max_pending = tp->rx_std_ring_mask;
12339	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12340		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12341	else
12342		ering->rx_jumbo_max_pending = 0;
12343
12344	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12345
12346	ering->rx_pending = tp->rx_pending;
12347	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12348		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12349	else
12350		ering->rx_jumbo_pending = 0;
12351
12352	ering->tx_pending = tp->napi[0].tx_pending;
12353}
12354
12355static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12356{
12357	struct tg3 *tp = netdev_priv(dev);
12358	int i, irq_sync = 0, err = 0;
12359
12360	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12361	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12362	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12363	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
12364	    (tg3_flag(tp, TSO_BUG) &&
12365	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12366		return -EINVAL;
12367
12368	if (netif_running(dev)) {
12369		tg3_phy_stop(tp);
12370		tg3_netif_stop(tp);
12371		irq_sync = 1;
12372	}
12373
12374	tg3_full_lock(tp, irq_sync);
12375
12376	tp->rx_pending = ering->rx_pending;
12377
12378	if (tg3_flag(tp, MAX_RXPEND_64) &&
12379	    tp->rx_pending > 63)
12380		tp->rx_pending = 63;
12381
12382	if (tg3_flag(tp, JUMBO_RING_ENABLE))
12383		tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12384
12385	for (i = 0; i < tp->irq_max; i++)
12386		tp->napi[i].tx_pending = ering->tx_pending;
12387
12388	if (netif_running(dev)) {
12389		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12390		err = tg3_restart_hw(tp, false);
12391		if (!err)
12392			tg3_netif_start(tp);
12393	}
12394
12395	tg3_full_unlock(tp);
12396
12397	if (irq_sync && !err)
12398		tg3_phy_start(tp);
12399
12400	return err;
12401}
12402
12403static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12404{
12405	struct tg3 *tp = netdev_priv(dev);
12406
12407	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12408
12409	if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12410		epause->rx_pause = 1;
12411	else
12412		epause->rx_pause = 0;
12413
12414	if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12415		epause->tx_pause = 1;
12416	else
12417		epause->tx_pause = 0;
12418}
12419
12420static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12421{
12422	struct tg3 *tp = netdev_priv(dev);
12423	int err = 0;
12424
12425	if (tp->link_config.autoneg == AUTONEG_ENABLE)
12426		tg3_warn_mgmt_link_flap(tp);
12427
12428	if (tg3_flag(tp, USE_PHYLIB)) {
12429		u32 newadv;
12430		struct phy_device *phydev;
12431
12432		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
12433
12434		if (!(phydev->supported & SUPPORTED_Pause) ||
12435		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12436		     (epause->rx_pause != epause->tx_pause)))
12437			return -EINVAL;
12438
12439		tp->link_config.flowctrl = 0;
12440		if (epause->rx_pause) {
12441			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12442
12443			if (epause->tx_pause) {
12444				tp->link_config.flowctrl |= FLOW_CTRL_TX;
12445				newadv = ADVERTISED_Pause;
12446			} else
12447				newadv = ADVERTISED_Pause |
12448					 ADVERTISED_Asym_Pause;
12449		} else if (epause->tx_pause) {
12450			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12451			newadv = ADVERTISED_Asym_Pause;
12452		} else
12453			newadv = 0;
12454
12455		if (epause->autoneg)
12456			tg3_flag_set(tp, PAUSE_AUTONEG);
12457		else
12458			tg3_flag_clear(tp, PAUSE_AUTONEG);
12459
12460		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12461			u32 oldadv = phydev->advertising &
12462				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12463			if (oldadv != newadv) {
12464				phydev->advertising &=
12465					~(ADVERTISED_Pause |
12466					  ADVERTISED_Asym_Pause);
12467				phydev->advertising |= newadv;
12468				if (phydev->autoneg) {
12469					/*
12470					 * Always renegotiate the link to
12471					 * inform our link partner of our
12472					 * flow control settings, even if the
12473					 * flow control is forced.  Let
12474					 * tg3_adjust_link() do the final
12475					 * flow control setup.
12476					 */
12477					return phy_start_aneg(phydev);
12478				}
12479			}
12480
12481			if (!epause->autoneg)
12482				tg3_setup_flow_control(tp, 0, 0);
12483		} else {
12484			tp->link_config.advertising &=
12485					~(ADVERTISED_Pause |
12486					  ADVERTISED_Asym_Pause);
12487			tp->link_config.advertising |= newadv;
12488		}
12489	} else {
12490		int irq_sync = 0;
12491
12492		if (netif_running(dev)) {
12493			tg3_netif_stop(tp);
12494			irq_sync = 1;
12495		}
12496
12497		tg3_full_lock(tp, irq_sync);
12498
12499		if (epause->autoneg)
12500			tg3_flag_set(tp, PAUSE_AUTONEG);
12501		else
12502			tg3_flag_clear(tp, PAUSE_AUTONEG);
12503		if (epause->rx_pause)
12504			tp->link_config.flowctrl |= FLOW_CTRL_RX;
12505		else
12506			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12507		if (epause->tx_pause)
12508			tp->link_config.flowctrl |= FLOW_CTRL_TX;
12509		else
12510			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12511
12512		if (netif_running(dev)) {
12513			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12514			err = tg3_restart_hw(tp, false);
12515			if (!err)
12516				tg3_netif_start(tp);
12517		}
12518
12519		tg3_full_unlock(tp);
12520	}
12521
12522	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12523
12524	return err;
12525}
12526
12527static int tg3_get_sset_count(struct net_device *dev, int sset)
12528{
12529	switch (sset) {
12530	case ETH_SS_TEST:
12531		return TG3_NUM_TEST;
12532	case ETH_SS_STATS:
12533		return TG3_NUM_STATS;
12534	default:
12535		return -EOPNOTSUPP;
12536	}
12537}
12538
12539static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12540			 u32 *rules __always_unused)
12541{
12542	struct tg3 *tp = netdev_priv(dev);
12543
12544	if (!tg3_flag(tp, SUPPORT_MSIX))
12545		return -EOPNOTSUPP;
12546
12547	switch (info->cmd) {
12548	case ETHTOOL_GRXRINGS:
12549		if (netif_running(tp->dev))
12550			info->data = tp->rxq_cnt;
12551		else {
12552			info->data = num_online_cpus();
12553			if (info->data > TG3_RSS_MAX_NUM_QS)
12554				info->data = TG3_RSS_MAX_NUM_QS;
12555		}
12556
12557		/* The first interrupt vector only
12558		 * handles link interrupts.
12559		 */
12560		info->data -= 1;
12561		return 0;
12562
12563	default:
12564		return -EOPNOTSUPP;
12565	}
12566}
12567
12568static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12569{
12570	u32 size = 0;
12571	struct tg3 *tp = netdev_priv(dev);
12572
12573	if (tg3_flag(tp, SUPPORT_MSIX))
12574		size = TG3_RSS_INDIR_TBL_SIZE;
12575
12576	return size;
12577}
12578
12579static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
12580{
12581	struct tg3 *tp = netdev_priv(dev);
12582	int i;
12583
12584	if (hfunc)
12585		*hfunc = ETH_RSS_HASH_TOP;
12586	if (!indir)
12587		return 0;
12588
12589	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12590		indir[i] = tp->rss_ind_tbl[i];
12591
12592	return 0;
12593}
12594
12595static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key,
12596			const u8 hfunc)
12597{
12598	struct tg3 *tp = netdev_priv(dev);
12599	size_t i;
12600
12601	/* We require at least one supported parameter to be changed and no
12602	 * change in any of the unsupported parameters
12603	 */
12604	if (key ||
12605	    (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
12606		return -EOPNOTSUPP;
12607
12608	if (!indir)
12609		return 0;
12610
12611	for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12612		tp->rss_ind_tbl[i] = indir[i];
12613
12614	if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12615		return 0;
12616
12617	/* It is legal to write the indirection
12618	 * table while the device is running.
12619	 */
12620	tg3_full_lock(tp, 0);
12621	tg3_rss_write_indir_tbl(tp);
12622	tg3_full_unlock(tp);
12623
12624	return 0;
12625}
12626
12627static void tg3_get_channels(struct net_device *dev,
12628			     struct ethtool_channels *channel)
12629{
12630	struct tg3 *tp = netdev_priv(dev);
12631	u32 deflt_qs = netif_get_num_default_rss_queues();
12632
12633	channel->max_rx = tp->rxq_max;
12634	channel->max_tx = tp->txq_max;
12635
12636	if (netif_running(dev)) {
12637		channel->rx_count = tp->rxq_cnt;
12638		channel->tx_count = tp->txq_cnt;
12639	} else {
12640		if (tp->rxq_req)
12641			channel->rx_count = tp->rxq_req;
12642		else
12643			channel->rx_count = min(deflt_qs, tp->rxq_max);
12644
12645		if (tp->txq_req)
12646			channel->tx_count = tp->txq_req;
12647		else
12648			channel->tx_count = min(deflt_qs, tp->txq_max);
12649	}
12650}
12651
12652static int tg3_set_channels(struct net_device *dev,
12653			    struct ethtool_channels *channel)
12654{
12655	struct tg3 *tp = netdev_priv(dev);
12656
12657	if (!tg3_flag(tp, SUPPORT_MSIX))
12658		return -EOPNOTSUPP;
12659
12660	if (channel->rx_count > tp->rxq_max ||
12661	    channel->tx_count > tp->txq_max)
12662		return -EINVAL;
12663
12664	tp->rxq_req = channel->rx_count;
12665	tp->txq_req = channel->tx_count;
12666
12667	if (!netif_running(dev))
12668		return 0;
12669
12670	tg3_stop(tp);
12671
12672	tg3_carrier_off(tp);
12673
12674	tg3_start(tp, true, false, false);
12675
12676	return 0;
12677}
12678
12679static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12680{
12681	switch (stringset) {
12682	case ETH_SS_STATS:
12683		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12684		break;
12685	case ETH_SS_TEST:
12686		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12687		break;
12688	default:
12689		WARN_ON(1);	/* we need a WARN() */
12690		break;
12691	}
12692}
12693
12694static int tg3_set_phys_id(struct net_device *dev,
12695			    enum ethtool_phys_id_state state)
12696{
12697	struct tg3 *tp = netdev_priv(dev);
12698
12699	if (!netif_running(tp->dev))
12700		return -EAGAIN;
12701
12702	switch (state) {
12703	case ETHTOOL_ID_ACTIVE:
12704		return 1;	/* cycle on/off once per second */
12705
12706	case ETHTOOL_ID_ON:
12707		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12708		     LED_CTRL_1000MBPS_ON |
12709		     LED_CTRL_100MBPS_ON |
12710		     LED_CTRL_10MBPS_ON |
12711		     LED_CTRL_TRAFFIC_OVERRIDE |
12712		     LED_CTRL_TRAFFIC_BLINK |
12713		     LED_CTRL_TRAFFIC_LED);
12714		break;
12715
12716	case ETHTOOL_ID_OFF:
12717		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12718		     LED_CTRL_TRAFFIC_OVERRIDE);
12719		break;
12720
12721	case ETHTOOL_ID_INACTIVE:
12722		tw32(MAC_LED_CTRL, tp->led_ctrl);
12723		break;
12724	}
12725
12726	return 0;
12727}
12728
12729static void tg3_get_ethtool_stats(struct net_device *dev,
12730				   struct ethtool_stats *estats, u64 *tmp_stats)
12731{
12732	struct tg3 *tp = netdev_priv(dev);
12733
12734	if (tp->hw_stats)
12735		tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12736	else
12737		memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12738}
12739
12740static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12741{
12742	int i;
12743	__be32 *buf;
12744	u32 offset = 0, len = 0;
12745	u32 magic, val;
12746
12747	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12748		return NULL;
12749
12750	if (magic == TG3_EEPROM_MAGIC) {
12751		for (offset = TG3_NVM_DIR_START;
12752		     offset < TG3_NVM_DIR_END;
12753		     offset += TG3_NVM_DIRENT_SIZE) {
12754			if (tg3_nvram_read(tp, offset, &val))
12755				return NULL;
12756
12757			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12758			    TG3_NVM_DIRTYPE_EXTVPD)
12759				break;
12760		}
12761
12762		if (offset != TG3_NVM_DIR_END) {
12763			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12764			if (tg3_nvram_read(tp, offset + 4, &offset))
12765				return NULL;
12766
12767			offset = tg3_nvram_logical_addr(tp, offset);
12768		}
12769	}
12770
12771	if (!offset || !len) {
12772		offset = TG3_NVM_VPD_OFF;
12773		len = TG3_NVM_VPD_LEN;
12774	}
12775
12776	buf = kmalloc(len, GFP_KERNEL);
12777	if (buf == NULL)
12778		return NULL;
12779
12780	if (magic == TG3_EEPROM_MAGIC) {
12781		for (i = 0; i < len; i += 4) {
12782			/* The data is in little-endian format in NVRAM.
12783			 * Use the big-endian read routines to preserve
12784			 * the byte order as it exists in NVRAM.
12785			 */
12786			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12787				goto error;
12788		}
12789	} else {
12790		u8 *ptr;
12791		ssize_t cnt;
12792		unsigned int pos = 0;
12793
12794		ptr = (u8 *)&buf[0];
12795		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12796			cnt = pci_read_vpd(tp->pdev, pos,
12797					   len - pos, ptr);
12798			if (cnt == -ETIMEDOUT || cnt == -EINTR)
12799				cnt = 0;
12800			else if (cnt < 0)
12801				goto error;
12802		}
12803		if (pos != len)
12804			goto error;
12805	}
12806
12807	*vpdlen = len;
12808
12809	return buf;
12810
12811error:
12812	kfree(buf);
12813	return NULL;
12814}
12815
12816#define NVRAM_TEST_SIZE 0x100
12817#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
12818#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
12819#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
12820#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
12821#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
12822#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
12823#define NVRAM_SELFBOOT_HW_SIZE 0x20
12824#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12825
12826static int tg3_test_nvram(struct tg3 *tp)
12827{
12828	u32 csum, magic, len;
12829	__be32 *buf;
12830	int i, j, k, err = 0, size;
12831
12832	if (tg3_flag(tp, NO_NVRAM))
12833		return 0;
12834
12835	if (tg3_nvram_read(tp, 0, &magic) != 0)
12836		return -EIO;
12837
12838	if (magic == TG3_EEPROM_MAGIC)
12839		size = NVRAM_TEST_SIZE;
12840	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12841		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12842		    TG3_EEPROM_SB_FORMAT_1) {
12843			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12844			case TG3_EEPROM_SB_REVISION_0:
12845				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12846				break;
12847			case TG3_EEPROM_SB_REVISION_2:
12848				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12849				break;
12850			case TG3_EEPROM_SB_REVISION_3:
12851				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12852				break;
12853			case TG3_EEPROM_SB_REVISION_4:
12854				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12855				break;
12856			case TG3_EEPROM_SB_REVISION_5:
12857				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12858				break;
12859			case TG3_EEPROM_SB_REVISION_6:
12860				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12861				break;
12862			default:
12863				return -EIO;
12864			}
12865		} else
12866			return 0;
12867	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12868		size = NVRAM_SELFBOOT_HW_SIZE;
12869	else
12870		return -EIO;
12871
12872	buf = kmalloc(size, GFP_KERNEL);
12873	if (buf == NULL)
12874		return -ENOMEM;
12875
12876	err = -EIO;
12877	for (i = 0, j = 0; i < size; i += 4, j++) {
12878		err = tg3_nvram_read_be32(tp, i, &buf[j]);
12879		if (err)
12880			break;
12881	}
12882	if (i < size)
12883		goto out;
12884
12885	/* Selfboot format */
12886	magic = be32_to_cpu(buf[0]);
12887	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12888	    TG3_EEPROM_MAGIC_FW) {
12889		u8 *buf8 = (u8 *) buf, csum8 = 0;
12890
12891		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12892		    TG3_EEPROM_SB_REVISION_2) {
12893			/* For rev 2, the csum doesn't include the MBA. */
12894			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12895				csum8 += buf8[i];
12896			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12897				csum8 += buf8[i];
12898		} else {
12899			for (i = 0; i < size; i++)
12900				csum8 += buf8[i];
12901		}
12902
12903		if (csum8 == 0) {
12904			err = 0;
12905			goto out;
12906		}
12907
12908		err = -EIO;
12909		goto out;
12910	}
12911
12912	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12913	    TG3_EEPROM_MAGIC_HW) {
12914		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12915		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12916		u8 *buf8 = (u8 *) buf;
12917
12918		/* Separate the parity bits and the data bytes.  */
12919		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12920			if ((i == 0) || (i == 8)) {
12921				int l;
12922				u8 msk;
12923
12924				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12925					parity[k++] = buf8[i] & msk;
12926				i++;
12927			} else if (i == 16) {
12928				int l;
12929				u8 msk;
12930
12931				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12932					parity[k++] = buf8[i] & msk;
12933				i++;
12934
12935				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12936					parity[k++] = buf8[i] & msk;
12937				i++;
12938			}
12939			data[j++] = buf8[i];
12940		}
12941
12942		err = -EIO;
12943		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12944			u8 hw8 = hweight8(data[i]);
12945
12946			if ((hw8 & 0x1) && parity[i])
12947				goto out;
12948			else if (!(hw8 & 0x1) && !parity[i])
12949				goto out;
12950		}
12951		err = 0;
12952		goto out;
12953	}
12954
12955	err = -EIO;
12956
12957	/* Bootstrap checksum at offset 0x10 */
12958	csum = calc_crc((unsigned char *) buf, 0x10);
12959	if (csum != le32_to_cpu(buf[0x10/4]))
12960		goto out;
12961
12962	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12963	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12964	if (csum != le32_to_cpu(buf[0xfc/4]))
12965		goto out;
12966
12967	kfree(buf);
12968
12969	buf = tg3_vpd_readblock(tp, &len);
12970	if (!buf)
12971		return -ENOMEM;
12972
12973	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12974	if (i > 0) {
12975		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12976		if (j < 0)
12977			goto out;
12978
12979		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12980			goto out;
12981
12982		i += PCI_VPD_LRDT_TAG_SIZE;
12983		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12984					      PCI_VPD_RO_KEYWORD_CHKSUM);
12985		if (j > 0) {
12986			u8 csum8 = 0;
12987
12988			j += PCI_VPD_INFO_FLD_HDR_SIZE;
12989
12990			for (i = 0; i <= j; i++)
12991				csum8 += ((u8 *)buf)[i];
12992
12993			if (csum8)
12994				goto out;
12995		}
12996	}
12997
12998	err = 0;
12999
13000out:
13001	kfree(buf);
13002	return err;
13003}
13004
13005#define TG3_SERDES_TIMEOUT_SEC	2
13006#define TG3_COPPER_TIMEOUT_SEC	6
13007
13008static int tg3_test_link(struct tg3 *tp)
13009{
13010	int i, max;
13011
13012	if (!netif_running(tp->dev))
13013		return -ENODEV;
13014
13015	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
13016		max = TG3_SERDES_TIMEOUT_SEC;
13017	else
13018		max = TG3_COPPER_TIMEOUT_SEC;
13019
13020	for (i = 0; i < max; i++) {
13021		if (tp->link_up)
13022			return 0;
13023
13024		if (msleep_interruptible(1000))
13025			break;
13026	}
13027
13028	return -EIO;
13029}
13030
13031/* Only test the commonly used registers */
13032static int tg3_test_registers(struct tg3 *tp)
13033{
13034	int i, is_5705, is_5750;
13035	u32 offset, read_mask, write_mask, val, save_val, read_val;
13036	static struct {
13037		u16 offset;
13038		u16 flags;
13039#define TG3_FL_5705	0x1
13040#define TG3_FL_NOT_5705	0x2
13041#define TG3_FL_NOT_5788	0x4
13042#define TG3_FL_NOT_5750	0x8
13043		u32 read_mask;
13044		u32 write_mask;
13045	} reg_tbl[] = {
13046		/* MAC Control Registers */
13047		{ MAC_MODE, TG3_FL_NOT_5705,
13048			0x00000000, 0x00ef6f8c },
13049		{ MAC_MODE, TG3_FL_5705,
13050			0x00000000, 0x01ef6b8c },
13051		{ MAC_STATUS, TG3_FL_NOT_5705,
13052			0x03800107, 0x00000000 },
13053		{ MAC_STATUS, TG3_FL_5705,
13054			0x03800100, 0x00000000 },
13055		{ MAC_ADDR_0_HIGH, 0x0000,
13056			0x00000000, 0x0000ffff },
13057		{ MAC_ADDR_0_LOW, 0x0000,
13058			0x00000000, 0xffffffff },
13059		{ MAC_RX_MTU_SIZE, 0x0000,
13060			0x00000000, 0x0000ffff },
13061		{ MAC_TX_MODE, 0x0000,
13062			0x00000000, 0x00000070 },
13063		{ MAC_TX_LENGTHS, 0x0000,
13064			0x00000000, 0x00003fff },
13065		{ MAC_RX_MODE, TG3_FL_NOT_5705,
13066			0x00000000, 0x000007fc },
13067		{ MAC_RX_MODE, TG3_FL_5705,
13068			0x00000000, 0x000007dc },
13069		{ MAC_HASH_REG_0, 0x0000,
13070			0x00000000, 0xffffffff },
13071		{ MAC_HASH_REG_1, 0x0000,
13072			0x00000000, 0xffffffff },
13073		{ MAC_HASH_REG_2, 0x0000,
13074			0x00000000, 0xffffffff },
13075		{ MAC_HASH_REG_3, 0x0000,
13076			0x00000000, 0xffffffff },
13077
13078		/* Receive Data and Receive BD Initiator Control Registers. */
13079		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
13080			0x00000000, 0xffffffff },
13081		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
13082			0x00000000, 0xffffffff },
13083		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
13084			0x00000000, 0x00000003 },
13085		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
13086			0x00000000, 0xffffffff },
13087		{ RCVDBDI_STD_BD+0, 0x0000,
13088			0x00000000, 0xffffffff },
13089		{ RCVDBDI_STD_BD+4, 0x0000,
13090			0x00000000, 0xffffffff },
13091		{ RCVDBDI_STD_BD+8, 0x0000,
13092			0x00000000, 0xffff0002 },
13093		{ RCVDBDI_STD_BD+0xc, 0x0000,
13094			0x00000000, 0xffffffff },
13095
13096		/* Receive BD Initiator Control Registers. */
13097		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
13098			0x00000000, 0xffffffff },
13099		{ RCVBDI_STD_THRESH, TG3_FL_5705,
13100			0x00000000, 0x000003ff },
13101		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
13102			0x00000000, 0xffffffff },
13103
13104		/* Host Coalescing Control Registers. */
13105		{ HOSTCC_MODE, TG3_FL_NOT_5705,
13106			0x00000000, 0x00000004 },
13107		{ HOSTCC_MODE, TG3_FL_5705,
13108			0x00000000, 0x000000f6 },
13109		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
13110			0x00000000, 0xffffffff },
13111		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
13112			0x00000000, 0x000003ff },
13113		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
13114			0x00000000, 0xffffffff },
13115		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
13116			0x00000000, 0x000003ff },
13117		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
13118			0x00000000, 0xffffffff },
13119		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13120			0x00000000, 0x000000ff },
13121		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
13122			0x00000000, 0xffffffff },
13123		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
13124			0x00000000, 0x000000ff },
13125		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
13126			0x00000000, 0xffffffff },
13127		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
13128			0x00000000, 0xffffffff },
13129		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13130			0x00000000, 0xffffffff },
13131		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13132			0x00000000, 0x000000ff },
13133		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
13134			0x00000000, 0xffffffff },
13135		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
13136			0x00000000, 0x000000ff },
13137		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
13138			0x00000000, 0xffffffff },
13139		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
13140			0x00000000, 0xffffffff },
13141		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
13142			0x00000000, 0xffffffff },
13143		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
13144			0x00000000, 0xffffffff },
13145		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
13146			0x00000000, 0xffffffff },
13147		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
13148			0xffffffff, 0x00000000 },
13149		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
13150			0xffffffff, 0x00000000 },
13151
13152		/* Buffer Manager Control Registers. */
13153		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
13154			0x00000000, 0x007fff80 },
13155		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
13156			0x00000000, 0x007fffff },
13157		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
13158			0x00000000, 0x0000003f },
13159		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
13160			0x00000000, 0x000001ff },
13161		{ BUFMGR_MB_HIGH_WATER, 0x0000,
13162			0x00000000, 0x000001ff },
13163		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
13164			0xffffffff, 0x00000000 },
13165		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
13166			0xffffffff, 0x00000000 },
13167
13168		/* Mailbox Registers */
13169		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
13170			0x00000000, 0x000001ff },
13171		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
13172			0x00000000, 0x000001ff },
13173		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
13174			0x00000000, 0x000007ff },
13175		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
13176			0x00000000, 0x000001ff },
13177
13178		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
13179	};
13180
13181	is_5705 = is_5750 = 0;
13182	if (tg3_flag(tp, 5705_PLUS)) {
13183		is_5705 = 1;
13184		if (tg3_flag(tp, 5750_PLUS))
13185			is_5750 = 1;
13186	}
13187
13188	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
13189		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
13190			continue;
13191
13192		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
13193			continue;
13194
13195		if (tg3_flag(tp, IS_5788) &&
13196		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
13197			continue;
13198
13199		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
13200			continue;
13201
13202		offset = (u32) reg_tbl[i].offset;
13203		read_mask = reg_tbl[i].read_mask;
13204		write_mask = reg_tbl[i].write_mask;
13205
13206		/* Save the original register content */
13207		save_val = tr32(offset);
13208
13209		/* Determine the read-only value. */
13210		read_val = save_val & read_mask;
13211
13212		/* Write zero to the register, then make sure the read-only bits
13213		 * are not changed and the read/write bits are all zeros.
13214		 */
13215		tw32(offset, 0);
13216
13217		val = tr32(offset);
13218
13219		/* Test the read-only and read/write bits. */
13220		if (((val & read_mask) != read_val) || (val & write_mask))
13221			goto out;
13222
13223		/* Write ones to all the bits defined by RdMask and WrMask, then
13224		 * make sure the read-only bits are not changed and the
13225		 * read/write bits are all ones.
13226		 */
13227		tw32(offset, read_mask | write_mask);
13228
13229		val = tr32(offset);
13230
13231		/* Test the read-only bits. */
13232		if ((val & read_mask) != read_val)
13233			goto out;
13234
13235		/* Test the read/write bits. */
13236		if ((val & write_mask) != write_mask)
13237			goto out;
13238
13239		tw32(offset, save_val);
13240	}
13241
13242	return 0;
13243
13244out:
13245	if (netif_msg_hw(tp))
13246		netdev_err(tp->dev,
13247			   "Register test failed at offset %x\n", offset);
13248	tw32(offset, save_val);
13249	return -EIO;
13250}
13251
13252static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
13253{
13254	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
13255	int i;
13256	u32 j;
13257
13258	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
13259		for (j = 0; j < len; j += 4) {
13260			u32 val;
13261
13262			tg3_write_mem(tp, offset + j, test_pattern[i]);
13263			tg3_read_mem(tp, offset + j, &val);
13264			if (val != test_pattern[i])
13265				return -EIO;
13266		}
13267	}
13268	return 0;
13269}
13270
13271static int tg3_test_memory(struct tg3 *tp)
13272{
13273	static struct mem_entry {
13274		u32 offset;
13275		u32 len;
13276	} mem_tbl_570x[] = {
13277		{ 0x00000000, 0x00b50},
13278		{ 0x00002000, 0x1c000},
13279		{ 0xffffffff, 0x00000}
13280	}, mem_tbl_5705[] = {
13281		{ 0x00000100, 0x0000c},
13282		{ 0x00000200, 0x00008},
13283		{ 0x00004000, 0x00800},
13284		{ 0x00006000, 0x01000},
13285		{ 0x00008000, 0x02000},
13286		{ 0x00010000, 0x0e000},
13287		{ 0xffffffff, 0x00000}
13288	}, mem_tbl_5755[] = {
13289		{ 0x00000200, 0x00008},
13290		{ 0x00004000, 0x00800},
13291		{ 0x00006000, 0x00800},
13292		{ 0x00008000, 0x02000},
13293		{ 0x00010000, 0x0c000},
13294		{ 0xffffffff, 0x00000}
13295	}, mem_tbl_5906[] = {
13296		{ 0x00000200, 0x00008},
13297		{ 0x00004000, 0x00400},
13298		{ 0x00006000, 0x00400},
13299		{ 0x00008000, 0x01000},
13300		{ 0x00010000, 0x01000},
13301		{ 0xffffffff, 0x00000}
13302	}, mem_tbl_5717[] = {
13303		{ 0x00000200, 0x00008},
13304		{ 0x00010000, 0x0a000},
13305		{ 0x00020000, 0x13c00},
13306		{ 0xffffffff, 0x00000}
13307	}, mem_tbl_57765[] = {
13308		{ 0x00000200, 0x00008},
13309		{ 0x00004000, 0x00800},
13310		{ 0x00006000, 0x09800},
13311		{ 0x00010000, 0x0a000},
13312		{ 0xffffffff, 0x00000}
13313	};
13314	struct mem_entry *mem_tbl;
13315	int err = 0;
13316	int i;
13317
13318	if (tg3_flag(tp, 5717_PLUS))
13319		mem_tbl = mem_tbl_5717;
13320	else if (tg3_flag(tp, 57765_CLASS) ||
13321		 tg3_asic_rev(tp) == ASIC_REV_5762)
13322		mem_tbl = mem_tbl_57765;
13323	else if (tg3_flag(tp, 5755_PLUS))
13324		mem_tbl = mem_tbl_5755;
13325	else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13326		mem_tbl = mem_tbl_5906;
13327	else if (tg3_flag(tp, 5705_PLUS))
13328		mem_tbl = mem_tbl_5705;
13329	else
13330		mem_tbl = mem_tbl_570x;
13331
13332	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13333		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13334		if (err)
13335			break;
13336	}
13337
13338	return err;
13339}
13340
13341#define TG3_TSO_MSS		500
13342
13343#define TG3_TSO_IP_HDR_LEN	20
13344#define TG3_TSO_TCP_HDR_LEN	20
13345#define TG3_TSO_TCP_OPT_LEN	12
13346
13347static const u8 tg3_tso_header[] = {
133480x08, 0x00,
133490x45, 0x00, 0x00, 0x00,
133500x00, 0x00, 0x40, 0x00,
133510x40, 0x06, 0x00, 0x00,
133520x0a, 0x00, 0x00, 0x01,
133530x0a, 0x00, 0x00, 0x02,
133540x0d, 0x00, 0xe0, 0x00,
133550x00, 0x00, 0x01, 0x00,
133560x00, 0x00, 0x02, 0x00,
133570x80, 0x10, 0x10, 0x00,
133580x14, 0x09, 0x00, 0x00,
133590x01, 0x01, 0x08, 0x0a,
133600x11, 0x11, 0x11, 0x11,
133610x11, 0x11, 0x11, 0x11,
13362};
13363
13364static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13365{
13366	u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13367	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13368	u32 budget;
13369	struct sk_buff *skb;
13370	u8 *tx_data, *rx_data;
13371	dma_addr_t map;
13372	int num_pkts, tx_len, rx_len, i, err;
13373	struct tg3_rx_buffer_desc *desc;
13374	struct tg3_napi *tnapi, *rnapi;
13375	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13376
13377	tnapi = &tp->napi[0];
13378	rnapi = &tp->napi[0];
13379	if (tp->irq_cnt > 1) {
13380		if (tg3_flag(tp, ENABLE_RSS))
13381			rnapi = &tp->napi[1];
13382		if (tg3_flag(tp, ENABLE_TSS))
13383			tnapi = &tp->napi[1];
13384	}
13385	coal_now = tnapi->coal_now | rnapi->coal_now;
13386
13387	err = -EIO;
13388
13389	tx_len = pktsz;
13390	skb = netdev_alloc_skb(tp->dev, tx_len);
13391	if (!skb)
13392		return -ENOMEM;
13393
13394	tx_data = skb_put(skb, tx_len);
13395	memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13396	memset(tx_data + ETH_ALEN, 0x0, 8);
13397
13398	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13399
13400	if (tso_loopback) {
13401		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13402
13403		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13404			      TG3_TSO_TCP_OPT_LEN;
13405
13406		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13407		       sizeof(tg3_tso_header));
13408		mss = TG3_TSO_MSS;
13409
13410		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13411		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13412
13413		/* Set the total length field in the IP header */
13414		iph->tot_len = htons((u16)(mss + hdr_len));
13415
13416		base_flags = (TXD_FLAG_CPU_PRE_DMA |
13417			      TXD_FLAG_CPU_POST_DMA);
13418
13419		if (tg3_flag(tp, HW_TSO_1) ||
13420		    tg3_flag(tp, HW_TSO_2) ||
13421		    tg3_flag(tp, HW_TSO_3)) {
13422			struct tcphdr *th;
13423			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13424			th = (struct tcphdr *)&tx_data[val];
13425			th->check = 0;
13426		} else
13427			base_flags |= TXD_FLAG_TCPUDP_CSUM;
13428
13429		if (tg3_flag(tp, HW_TSO_3)) {
13430			mss |= (hdr_len & 0xc) << 12;
13431			if (hdr_len & 0x10)
13432				base_flags |= 0x00000010;
13433			base_flags |= (hdr_len & 0x3e0) << 5;
13434		} else if (tg3_flag(tp, HW_TSO_2))
13435			mss |= hdr_len << 9;
13436		else if (tg3_flag(tp, HW_TSO_1) ||
13437			 tg3_asic_rev(tp) == ASIC_REV_5705) {
13438			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13439		} else {
13440			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13441		}
13442
13443		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13444	} else {
13445		num_pkts = 1;
13446		data_off = ETH_HLEN;
13447
13448		if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13449		    tx_len > VLAN_ETH_FRAME_LEN)
13450			base_flags |= TXD_FLAG_JMB_PKT;
13451	}
13452
13453	for (i = data_off; i < tx_len; i++)
13454		tx_data[i] = (u8) (i & 0xff);
13455
13456	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13457	if (pci_dma_mapping_error(tp->pdev, map)) {
13458		dev_kfree_skb(skb);
13459		return -EIO;
13460	}
13461
13462	val = tnapi->tx_prod;
13463	tnapi->tx_buffers[val].skb = skb;
13464	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13465
13466	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13467	       rnapi->coal_now);
13468
13469	udelay(10);
13470
13471	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13472
13473	budget = tg3_tx_avail(tnapi);
13474	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13475			    base_flags | TXD_FLAG_END, mss, 0)) {
13476		tnapi->tx_buffers[val].skb = NULL;
13477		dev_kfree_skb(skb);
13478		return -EIO;
13479	}
13480
13481	tnapi->tx_prod++;
13482
13483	/* Sync BD data before updating mailbox */
13484	wmb();
13485
13486	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13487	tr32_mailbox(tnapi->prodmbox);
13488
13489	udelay(10);
13490
13491	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13492	for (i = 0; i < 35; i++) {
13493		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13494		       coal_now);
13495
13496		udelay(10);
13497
13498		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13499		rx_idx = rnapi->hw_status->idx[0].rx_producer;
13500		if ((tx_idx == tnapi->tx_prod) &&
13501		    (rx_idx == (rx_start_idx + num_pkts)))
13502			break;
13503	}
13504
13505	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13506	dev_kfree_skb(skb);
13507
13508	if (tx_idx != tnapi->tx_prod)
13509		goto out;
13510
13511	if (rx_idx != rx_start_idx + num_pkts)
13512		goto out;
13513
13514	val = data_off;
13515	while (rx_idx != rx_start_idx) {
13516		desc = &rnapi->rx_rcb[rx_start_idx++];
13517		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13518		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13519
13520		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13521		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13522			goto out;
13523
13524		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13525			 - ETH_FCS_LEN;
13526
13527		if (!tso_loopback) {
13528			if (rx_len != tx_len)
13529				goto out;
13530
13531			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13532				if (opaque_key != RXD_OPAQUE_RING_STD)
13533					goto out;
13534			} else {
13535				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13536					goto out;
13537			}
13538		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13539			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13540			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
13541			goto out;
13542		}
13543
13544		if (opaque_key == RXD_OPAQUE_RING_STD) {
13545			rx_data = tpr->rx_std_buffers[desc_idx].data;
13546			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13547					     mapping);
13548		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13549			rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13550			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13551					     mapping);
13552		} else
13553			goto out;
13554
13555		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13556					    PCI_DMA_FROMDEVICE);
13557
13558		rx_data += TG3_RX_OFFSET(tp);
13559		for (i = data_off; i < rx_len; i++, val++) {
13560			if (*(rx_data + i) != (u8) (val & 0xff))
13561				goto out;
13562		}
13563	}
13564
13565	err = 0;
13566
13567	/* tg3_free_rings will unmap and free the rx_data */
13568out:
13569	return err;
13570}
13571
13572#define TG3_STD_LOOPBACK_FAILED		1
13573#define TG3_JMB_LOOPBACK_FAILED		2
13574#define TG3_TSO_LOOPBACK_FAILED		4
13575#define TG3_LOOPBACK_FAILED \
13576	(TG3_STD_LOOPBACK_FAILED | \
13577	 TG3_JMB_LOOPBACK_FAILED | \
13578	 TG3_TSO_LOOPBACK_FAILED)
13579
13580static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13581{
13582	int err = -EIO;
13583	u32 eee_cap;
13584	u32 jmb_pkt_sz = 9000;
13585
13586	if (tp->dma_limit)
13587		jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13588
13589	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13590	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13591
13592	if (!netif_running(tp->dev)) {
13593		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13594		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13595		if (do_extlpbk)
13596			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13597		goto done;
13598	}
13599
13600	err = tg3_reset_hw(tp, true);
13601	if (err) {
13602		data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13603		data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13604		if (do_extlpbk)
13605			data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13606		goto done;
13607	}
13608
13609	if (tg3_flag(tp, ENABLE_RSS)) {
13610		int i;
13611
13612		/* Reroute all rx packets to the 1st queue */
13613		for (i = MAC_RSS_INDIR_TBL_0;
13614		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13615			tw32(i, 0x0);
13616	}
13617
13618	/* HW errata - mac loopback fails in some cases on 5780.
13619	 * Normal traffic and PHY loopback are not affected by
13620	 * errata.  Also, the MAC loopback test is deprecated for
13621	 * all newer ASIC revisions.
13622	 */
13623	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13624	    !tg3_flag(tp, CPMU_PRESENT)) {
13625		tg3_mac_loopback(tp, true);
13626
13627		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13628			data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13629
13630		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13631		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13632			data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13633
13634		tg3_mac_loopback(tp, false);
13635	}
13636
13637	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13638	    !tg3_flag(tp, USE_PHYLIB)) {
13639		int i;
13640
13641		tg3_phy_lpbk_set(tp, 0, false);
13642
13643		/* Wait for link */
13644		for (i = 0; i < 100; i++) {
13645			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13646				break;
13647			mdelay(1);
13648		}
13649
13650		if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13651			data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13652		if (tg3_flag(tp, TSO_CAPABLE) &&
13653		    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13654			data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13655		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13656		    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13657			data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13658
13659		if (do_extlpbk) {
13660			tg3_phy_lpbk_set(tp, 0, true);
13661
13662			/* All link indications report up, but the hardware
13663			 * isn't really ready for about 20 msec.  Double it
13664			 * to be sure.
13665			 */
13666			mdelay(40);
13667
13668			if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13669				data[TG3_EXT_LOOPB_TEST] |=
13670							TG3_STD_LOOPBACK_FAILED;
13671			if (tg3_flag(tp, TSO_CAPABLE) &&
13672			    tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13673				data[TG3_EXT_LOOPB_TEST] |=
13674							TG3_TSO_LOOPBACK_FAILED;
13675			if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13676			    tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13677				data[TG3_EXT_LOOPB_TEST] |=
13678							TG3_JMB_LOOPBACK_FAILED;
13679		}
13680
13681		/* Re-enable gphy autopowerdown. */
13682		if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13683			tg3_phy_toggle_apd(tp, true);
13684	}
13685
13686	err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13687	       data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13688
13689done:
13690	tp->phy_flags |= eee_cap;
13691
13692	return err;
13693}
13694
13695static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13696			  u64 *data)
13697{
13698	struct tg3 *tp = netdev_priv(dev);
13699	bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13700
13701	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
13702		if (tg3_power_up(tp)) {
13703			etest->flags |= ETH_TEST_FL_FAILED;
13704			memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13705			return;
13706		}
13707		tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
13708	}
13709
13710	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13711
13712	if (tg3_test_nvram(tp) != 0) {
13713		etest->flags |= ETH_TEST_FL_FAILED;
13714		data[TG3_NVRAM_TEST] = 1;
13715	}
13716	if (!doextlpbk && tg3_test_link(tp)) {
13717		etest->flags |= ETH_TEST_FL_FAILED;
13718		data[TG3_LINK_TEST] = 1;
13719	}
13720	if (etest->flags & ETH_TEST_FL_OFFLINE) {
13721		int err, err2 = 0, irq_sync = 0;
13722
13723		if (netif_running(dev)) {
13724			tg3_phy_stop(tp);
13725			tg3_netif_stop(tp);
13726			irq_sync = 1;
13727		}
13728
13729		tg3_full_lock(tp, irq_sync);
13730		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13731		err = tg3_nvram_lock(tp);
13732		tg3_halt_cpu(tp, RX_CPU_BASE);
13733		if (!tg3_flag(tp, 5705_PLUS))
13734			tg3_halt_cpu(tp, TX_CPU_BASE);
13735		if (!err)
13736			tg3_nvram_unlock(tp);
13737
13738		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13739			tg3_phy_reset(tp);
13740
13741		if (tg3_test_registers(tp) != 0) {
13742			etest->flags |= ETH_TEST_FL_FAILED;
13743			data[TG3_REGISTER_TEST] = 1;
13744		}
13745
13746		if (tg3_test_memory(tp) != 0) {
13747			etest->flags |= ETH_TEST_FL_FAILED;
13748			data[TG3_MEMORY_TEST] = 1;
13749		}
13750
13751		if (doextlpbk)
13752			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13753
13754		if (tg3_test_loopback(tp, data, doextlpbk))
13755			etest->flags |= ETH_TEST_FL_FAILED;
13756
13757		tg3_full_unlock(tp);
13758
13759		if (tg3_test_interrupt(tp) != 0) {
13760			etest->flags |= ETH_TEST_FL_FAILED;
13761			data[TG3_INTERRUPT_TEST] = 1;
13762		}
13763
13764		tg3_full_lock(tp, 0);
13765
13766		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13767		if (netif_running(dev)) {
13768			tg3_flag_set(tp, INIT_COMPLETE);
13769			err2 = tg3_restart_hw(tp, true);
13770			if (!err2)
13771				tg3_netif_start(tp);
13772		}
13773
13774		tg3_full_unlock(tp);
13775
13776		if (irq_sync && !err2)
13777			tg3_phy_start(tp);
13778	}
13779	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13780		tg3_power_down_prepare(tp);
13781
13782}
13783
13784static int tg3_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
13785{
13786	struct tg3 *tp = netdev_priv(dev);
13787	struct hwtstamp_config stmpconf;
13788
13789	if (!tg3_flag(tp, PTP_CAPABLE))
13790		return -EOPNOTSUPP;
13791
13792	if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13793		return -EFAULT;
13794
13795	if (stmpconf.flags)
13796		return -EINVAL;
13797
13798	if (stmpconf.tx_type != HWTSTAMP_TX_ON &&
13799	    stmpconf.tx_type != HWTSTAMP_TX_OFF)
13800		return -ERANGE;
13801
13802	switch (stmpconf.rx_filter) {
13803	case HWTSTAMP_FILTER_NONE:
13804		tp->rxptpctl = 0;
13805		break;
13806	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13807		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13808			       TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13809		break;
13810	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13811		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13812			       TG3_RX_PTP_CTL_SYNC_EVNT;
13813		break;
13814	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13815		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13816			       TG3_RX_PTP_CTL_DELAY_REQ;
13817		break;
13818	case HWTSTAMP_FILTER_PTP_V2_EVENT:
13819		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13820			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13821		break;
13822	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13823		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13824			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13825		break;
13826	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13827		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13828			       TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13829		break;
13830	case HWTSTAMP_FILTER_PTP_V2_SYNC:
13831		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13832			       TG3_RX_PTP_CTL_SYNC_EVNT;
13833		break;
13834	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13835		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13836			       TG3_RX_PTP_CTL_SYNC_EVNT;
13837		break;
13838	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13839		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13840			       TG3_RX_PTP_CTL_SYNC_EVNT;
13841		break;
13842	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13843		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13844			       TG3_RX_PTP_CTL_DELAY_REQ;
13845		break;
13846	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13847		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13848			       TG3_RX_PTP_CTL_DELAY_REQ;
13849		break;
13850	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13851		tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13852			       TG3_RX_PTP_CTL_DELAY_REQ;
13853		break;
13854	default:
13855		return -ERANGE;
13856	}
13857
13858	if (netif_running(dev) && tp->rxptpctl)
13859		tw32(TG3_RX_PTP_CTL,
13860		     tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13861
13862	if (stmpconf.tx_type == HWTSTAMP_TX_ON)
13863		tg3_flag_set(tp, TX_TSTAMP_EN);
13864	else
13865		tg3_flag_clear(tp, TX_TSTAMP_EN);
13866
13867	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13868		-EFAULT : 0;
13869}
13870
13871static int tg3_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
13872{
13873	struct tg3 *tp = netdev_priv(dev);
13874	struct hwtstamp_config stmpconf;
13875
13876	if (!tg3_flag(tp, PTP_CAPABLE))
13877		return -EOPNOTSUPP;
13878
13879	stmpconf.flags = 0;
13880	stmpconf.tx_type = (tg3_flag(tp, TX_TSTAMP_EN) ?
13881			    HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF);
13882
13883	switch (tp->rxptpctl) {
13884	case 0:
13885		stmpconf.rx_filter = HWTSTAMP_FILTER_NONE;
13886		break;
13887	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_ALL_V1_EVENTS:
13888		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
13889		break;
13890	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13891		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
13892		break;
13893	case TG3_RX_PTP_CTL_RX_PTP_V1_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13894		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
13895		break;
13896	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13897		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
13898		break;
13899	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13900		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
13901		break;
13902	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_ALL_V2_EVENTS:
13903		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
13904		break;
13905	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13906		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
13907		break;
13908	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13909		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_SYNC;
13910		break;
13911	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_SYNC_EVNT:
13912		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
13913		break;
13914	case TG3_RX_PTP_CTL_RX_PTP_V2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13915		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
13916		break;
13917	case TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13918		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ;
13919		break;
13920	case TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN | TG3_RX_PTP_CTL_DELAY_REQ:
13921		stmpconf.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
13922		break;
13923	default:
13924		WARN_ON_ONCE(1);
13925		return -ERANGE;
13926	}
13927
13928	return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13929		-EFAULT : 0;
13930}
13931
13932static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13933{
13934	struct mii_ioctl_data *data = if_mii(ifr);
13935	struct tg3 *tp = netdev_priv(dev);
13936	int err;
13937
13938	if (tg3_flag(tp, USE_PHYLIB)) {
13939		struct phy_device *phydev;
13940		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13941			return -EAGAIN;
13942		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
13943		return phy_mii_ioctl(phydev, ifr, cmd);
13944	}
13945
13946	switch (cmd) {
13947	case SIOCGMIIPHY:
13948		data->phy_id = tp->phy_addr;
13949
13950		/* fallthru */
13951	case SIOCGMIIREG: {
13952		u32 mii_regval;
13953
13954		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13955			break;			/* We have no PHY */
13956
13957		if (!netif_running(dev))
13958			return -EAGAIN;
13959
13960		spin_lock_bh(&tp->lock);
13961		err = __tg3_readphy(tp, data->phy_id & 0x1f,
13962				    data->reg_num & 0x1f, &mii_regval);
13963		spin_unlock_bh(&tp->lock);
13964
13965		data->val_out = mii_regval;
13966
13967		return err;
13968	}
13969
13970	case SIOCSMIIREG:
13971		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13972			break;			/* We have no PHY */
13973
13974		if (!netif_running(dev))
13975			return -EAGAIN;
13976
13977		spin_lock_bh(&tp->lock);
13978		err = __tg3_writephy(tp, data->phy_id & 0x1f,
13979				     data->reg_num & 0x1f, data->val_in);
13980		spin_unlock_bh(&tp->lock);
13981
13982		return err;
13983
13984	case SIOCSHWTSTAMP:
13985		return tg3_hwtstamp_set(dev, ifr);
13986
13987	case SIOCGHWTSTAMP:
13988		return tg3_hwtstamp_get(dev, ifr);
13989
13990	default:
13991		/* do nothing */
13992		break;
13993	}
13994	return -EOPNOTSUPP;
13995}
13996
13997static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13998{
13999	struct tg3 *tp = netdev_priv(dev);
14000
14001	memcpy(ec, &tp->coal, sizeof(*ec));
14002	return 0;
14003}
14004
14005static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
14006{
14007	struct tg3 *tp = netdev_priv(dev);
14008	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
14009	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
14010
14011	if (!tg3_flag(tp, 5705_PLUS)) {
14012		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
14013		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
14014		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
14015		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
14016	}
14017
14018	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
14019	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
14020	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
14021	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
14022	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
14023	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
14024	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
14025	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
14026	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
14027	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
14028		return -EINVAL;
14029
14030	/* No rx interrupts will be generated if both are zero */
14031	if ((ec->rx_coalesce_usecs == 0) &&
14032	    (ec->rx_max_coalesced_frames == 0))
14033		return -EINVAL;
14034
14035	/* No tx interrupts will be generated if both are zero */
14036	if ((ec->tx_coalesce_usecs == 0) &&
14037	    (ec->tx_max_coalesced_frames == 0))
14038		return -EINVAL;
14039
14040	/* Only copy relevant parameters, ignore all others. */
14041	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
14042	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
14043	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
14044	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
14045	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
14046	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
14047	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
14048	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
14049	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
14050
14051	if (netif_running(dev)) {
14052		tg3_full_lock(tp, 0);
14053		__tg3_set_coalesce(tp, &tp->coal);
14054		tg3_full_unlock(tp);
14055	}
14056	return 0;
14057}
14058
14059static int tg3_set_eee(struct net_device *dev, struct ethtool_eee *edata)
14060{
14061	struct tg3 *tp = netdev_priv(dev);
14062
14063	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14064		netdev_warn(tp->dev, "Board does not support EEE!\n");
14065		return -EOPNOTSUPP;
14066	}
14067
14068	if (edata->advertised != tp->eee.advertised) {
14069		netdev_warn(tp->dev,
14070			    "Direct manipulation of EEE advertisement is not supported\n");
14071		return -EINVAL;
14072	}
14073
14074	if (edata->tx_lpi_timer > TG3_CPMU_DBTMR1_LNKIDLE_MAX) {
14075		netdev_warn(tp->dev,
14076			    "Maximal Tx Lpi timer supported is %#x(u)\n",
14077			    TG3_CPMU_DBTMR1_LNKIDLE_MAX);
14078		return -EINVAL;
14079	}
14080
14081	tp->eee = *edata;
14082
14083	tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
14084	tg3_warn_mgmt_link_flap(tp);
14085
14086	if (netif_running(tp->dev)) {
14087		tg3_full_lock(tp, 0);
14088		tg3_setup_eee(tp);
14089		tg3_phy_reset(tp);
14090		tg3_full_unlock(tp);
14091	}
14092
14093	return 0;
14094}
14095
14096static int tg3_get_eee(struct net_device *dev, struct ethtool_eee *edata)
14097{
14098	struct tg3 *tp = netdev_priv(dev);
14099
14100	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
14101		netdev_warn(tp->dev,
14102			    "Board does not support EEE!\n");
14103		return -EOPNOTSUPP;
14104	}
14105
14106	*edata = tp->eee;
14107	return 0;
14108}
14109
14110static const struct ethtool_ops tg3_ethtool_ops = {
14111	.get_settings		= tg3_get_settings,
14112	.set_settings		= tg3_set_settings,
14113	.get_drvinfo		= tg3_get_drvinfo,
14114	.get_regs_len		= tg3_get_regs_len,
14115	.get_regs		= tg3_get_regs,
14116	.get_wol		= tg3_get_wol,
14117	.set_wol		= tg3_set_wol,
14118	.get_msglevel		= tg3_get_msglevel,
14119	.set_msglevel		= tg3_set_msglevel,
14120	.nway_reset		= tg3_nway_reset,
14121	.get_link		= ethtool_op_get_link,
14122	.get_eeprom_len		= tg3_get_eeprom_len,
14123	.get_eeprom		= tg3_get_eeprom,
14124	.set_eeprom		= tg3_set_eeprom,
14125	.get_ringparam		= tg3_get_ringparam,
14126	.set_ringparam		= tg3_set_ringparam,
14127	.get_pauseparam		= tg3_get_pauseparam,
14128	.set_pauseparam		= tg3_set_pauseparam,
14129	.self_test		= tg3_self_test,
14130	.get_strings		= tg3_get_strings,
14131	.set_phys_id		= tg3_set_phys_id,
14132	.get_ethtool_stats	= tg3_get_ethtool_stats,
14133	.get_coalesce		= tg3_get_coalesce,
14134	.set_coalesce		= tg3_set_coalesce,
14135	.get_sset_count		= tg3_get_sset_count,
14136	.get_rxnfc		= tg3_get_rxnfc,
14137	.get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
14138	.get_rxfh		= tg3_get_rxfh,
14139	.set_rxfh		= tg3_set_rxfh,
14140	.get_channels		= tg3_get_channels,
14141	.set_channels		= tg3_set_channels,
14142	.get_ts_info		= tg3_get_ts_info,
14143	.get_eee		= tg3_get_eee,
14144	.set_eee		= tg3_set_eee,
14145};
14146
14147static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
14148						struct rtnl_link_stats64 *stats)
14149{
14150	struct tg3 *tp = netdev_priv(dev);
14151
14152	spin_lock_bh(&tp->lock);
14153	if (!tp->hw_stats) {
14154		*stats = tp->net_stats_prev;
14155		spin_unlock_bh(&tp->lock);
14156		return stats;
14157	}
14158
14159	tg3_get_nstats(tp, stats);
14160	spin_unlock_bh(&tp->lock);
14161
14162	return stats;
14163}
14164
14165static void tg3_set_rx_mode(struct net_device *dev)
14166{
14167	struct tg3 *tp = netdev_priv(dev);
14168
14169	if (!netif_running(dev))
14170		return;
14171
14172	tg3_full_lock(tp, 0);
14173	__tg3_set_rx_mode(dev);
14174	tg3_full_unlock(tp);
14175}
14176
14177static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
14178			       int new_mtu)
14179{
14180	dev->mtu = new_mtu;
14181
14182	if (new_mtu > ETH_DATA_LEN) {
14183		if (tg3_flag(tp, 5780_CLASS)) {
14184			netdev_update_features(dev);
14185			tg3_flag_clear(tp, TSO_CAPABLE);
14186		} else {
14187			tg3_flag_set(tp, JUMBO_RING_ENABLE);
14188		}
14189	} else {
14190		if (tg3_flag(tp, 5780_CLASS)) {
14191			tg3_flag_set(tp, TSO_CAPABLE);
14192			netdev_update_features(dev);
14193		}
14194		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
14195	}
14196}
14197
14198static int tg3_change_mtu(struct net_device *dev, int new_mtu)
14199{
14200	struct tg3 *tp = netdev_priv(dev);
14201	int err;
14202	bool reset_phy = false;
14203
14204	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
14205		return -EINVAL;
14206
14207	if (!netif_running(dev)) {
14208		/* We'll just catch it later when the
14209		 * device is up'd.
14210		 */
14211		tg3_set_mtu(dev, tp, new_mtu);
14212		return 0;
14213	}
14214
14215	tg3_phy_stop(tp);
14216
14217	tg3_netif_stop(tp);
14218
14219	tg3_set_mtu(dev, tp, new_mtu);
14220
14221	tg3_full_lock(tp, 1);
14222
14223	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
14224
14225	/* Reset PHY, otherwise the read DMA engine will be in a mode that
14226	 * breaks all requests to 256 bytes.
14227	 */
14228	if (tg3_asic_rev(tp) == ASIC_REV_57766)
14229		reset_phy = true;
14230
14231	err = tg3_restart_hw(tp, reset_phy);
14232
14233	if (!err)
14234		tg3_netif_start(tp);
14235
14236	tg3_full_unlock(tp);
14237
14238	if (!err)
14239		tg3_phy_start(tp);
14240
14241	return err;
14242}
14243
14244static const struct net_device_ops tg3_netdev_ops = {
14245	.ndo_open		= tg3_open,
14246	.ndo_stop		= tg3_close,
14247	.ndo_start_xmit		= tg3_start_xmit,
14248	.ndo_get_stats64	= tg3_get_stats64,
14249	.ndo_validate_addr	= eth_validate_addr,
14250	.ndo_set_rx_mode	= tg3_set_rx_mode,
14251	.ndo_set_mac_address	= tg3_set_mac_addr,
14252	.ndo_do_ioctl		= tg3_ioctl,
14253	.ndo_tx_timeout		= tg3_tx_timeout,
14254	.ndo_change_mtu		= tg3_change_mtu,
14255	.ndo_fix_features	= tg3_fix_features,
14256	.ndo_set_features	= tg3_set_features,
14257#ifdef CONFIG_NET_POLL_CONTROLLER
14258	.ndo_poll_controller	= tg3_poll_controller,
14259#endif
14260};
14261
14262static void tg3_get_eeprom_size(struct tg3 *tp)
14263{
14264	u32 cursize, val, magic;
14265
14266	tp->nvram_size = EEPROM_CHIP_SIZE;
14267
14268	if (tg3_nvram_read(tp, 0, &magic) != 0)
14269		return;
14270
14271	if ((magic != TG3_EEPROM_MAGIC) &&
14272	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
14273	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
14274		return;
14275
14276	/*
14277	 * Size the chip by reading offsets at increasing powers of two.
14278	 * When we encounter our validation signature, we know the addressing
14279	 * has wrapped around, and thus have our chip size.
14280	 */
14281	cursize = 0x10;
14282
14283	while (cursize < tp->nvram_size) {
14284		if (tg3_nvram_read(tp, cursize, &val) != 0)
14285			return;
14286
14287		if (val == magic)
14288			break;
14289
14290		cursize <<= 1;
14291	}
14292
14293	tp->nvram_size = cursize;
14294}
14295
14296static void tg3_get_nvram_size(struct tg3 *tp)
14297{
14298	u32 val;
14299
14300	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
14301		return;
14302
14303	/* Selfboot format */
14304	if (val != TG3_EEPROM_MAGIC) {
14305		tg3_get_eeprom_size(tp);
14306		return;
14307	}
14308
14309	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
14310		if (val != 0) {
14311			/* This is confusing.  We want to operate on the
14312			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
14313			 * call will read from NVRAM and byteswap the data
14314			 * according to the byteswapping settings for all
14315			 * other register accesses.  This ensures the data we
14316			 * want will always reside in the lower 16-bits.
14317			 * However, the data in NVRAM is in LE format, which
14318			 * means the data from the NVRAM read will always be
14319			 * opposite the endianness of the CPU.  The 16-bit
14320			 * byteswap then brings the data to CPU endianness.
14321			 */
14322			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
14323			return;
14324		}
14325	}
14326	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14327}
14328
14329static void tg3_get_nvram_info(struct tg3 *tp)
14330{
14331	u32 nvcfg1;
14332
14333	nvcfg1 = tr32(NVRAM_CFG1);
14334	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
14335		tg3_flag_set(tp, FLASH);
14336	} else {
14337		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14338		tw32(NVRAM_CFG1, nvcfg1);
14339	}
14340
14341	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
14342	    tg3_flag(tp, 5780_CLASS)) {
14343		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
14344		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
14345			tp->nvram_jedecnum = JEDEC_ATMEL;
14346			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14347			tg3_flag_set(tp, NVRAM_BUFFERED);
14348			break;
14349		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
14350			tp->nvram_jedecnum = JEDEC_ATMEL;
14351			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
14352			break;
14353		case FLASH_VENDOR_ATMEL_EEPROM:
14354			tp->nvram_jedecnum = JEDEC_ATMEL;
14355			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14356			tg3_flag_set(tp, NVRAM_BUFFERED);
14357			break;
14358		case FLASH_VENDOR_ST:
14359			tp->nvram_jedecnum = JEDEC_ST;
14360			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
14361			tg3_flag_set(tp, NVRAM_BUFFERED);
14362			break;
14363		case FLASH_VENDOR_SAIFUN:
14364			tp->nvram_jedecnum = JEDEC_SAIFUN;
14365			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
14366			break;
14367		case FLASH_VENDOR_SST_SMALL:
14368		case FLASH_VENDOR_SST_LARGE:
14369			tp->nvram_jedecnum = JEDEC_SST;
14370			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
14371			break;
14372		}
14373	} else {
14374		tp->nvram_jedecnum = JEDEC_ATMEL;
14375		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
14376		tg3_flag_set(tp, NVRAM_BUFFERED);
14377	}
14378}
14379
14380static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
14381{
14382	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
14383	case FLASH_5752PAGE_SIZE_256:
14384		tp->nvram_pagesize = 256;
14385		break;
14386	case FLASH_5752PAGE_SIZE_512:
14387		tp->nvram_pagesize = 512;
14388		break;
14389	case FLASH_5752PAGE_SIZE_1K:
14390		tp->nvram_pagesize = 1024;
14391		break;
14392	case FLASH_5752PAGE_SIZE_2K:
14393		tp->nvram_pagesize = 2048;
14394		break;
14395	case FLASH_5752PAGE_SIZE_4K:
14396		tp->nvram_pagesize = 4096;
14397		break;
14398	case FLASH_5752PAGE_SIZE_264:
14399		tp->nvram_pagesize = 264;
14400		break;
14401	case FLASH_5752PAGE_SIZE_528:
14402		tp->nvram_pagesize = 528;
14403		break;
14404	}
14405}
14406
14407static void tg3_get_5752_nvram_info(struct tg3 *tp)
14408{
14409	u32 nvcfg1;
14410
14411	nvcfg1 = tr32(NVRAM_CFG1);
14412
14413	/* NVRAM protection for TPM */
14414	if (nvcfg1 & (1 << 27))
14415		tg3_flag_set(tp, PROTECTED_NVRAM);
14416
14417	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14418	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
14419	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14420		tp->nvram_jedecnum = JEDEC_ATMEL;
14421		tg3_flag_set(tp, NVRAM_BUFFERED);
14422		break;
14423	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14424		tp->nvram_jedecnum = JEDEC_ATMEL;
14425		tg3_flag_set(tp, NVRAM_BUFFERED);
14426		tg3_flag_set(tp, FLASH);
14427		break;
14428	case FLASH_5752VENDOR_ST_M45PE10:
14429	case FLASH_5752VENDOR_ST_M45PE20:
14430	case FLASH_5752VENDOR_ST_M45PE40:
14431		tp->nvram_jedecnum = JEDEC_ST;
14432		tg3_flag_set(tp, NVRAM_BUFFERED);
14433		tg3_flag_set(tp, FLASH);
14434		break;
14435	}
14436
14437	if (tg3_flag(tp, FLASH)) {
14438		tg3_nvram_get_pagesize(tp, nvcfg1);
14439	} else {
14440		/* For eeprom, set pagesize to maximum eeprom size */
14441		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14442
14443		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14444		tw32(NVRAM_CFG1, nvcfg1);
14445	}
14446}
14447
14448static void tg3_get_5755_nvram_info(struct tg3 *tp)
14449{
14450	u32 nvcfg1, protect = 0;
14451
14452	nvcfg1 = tr32(NVRAM_CFG1);
14453
14454	/* NVRAM protection for TPM */
14455	if (nvcfg1 & (1 << 27)) {
14456		tg3_flag_set(tp, PROTECTED_NVRAM);
14457		protect = 1;
14458	}
14459
14460	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14461	switch (nvcfg1) {
14462	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14463	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14464	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14465	case FLASH_5755VENDOR_ATMEL_FLASH_5:
14466		tp->nvram_jedecnum = JEDEC_ATMEL;
14467		tg3_flag_set(tp, NVRAM_BUFFERED);
14468		tg3_flag_set(tp, FLASH);
14469		tp->nvram_pagesize = 264;
14470		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14471		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14472			tp->nvram_size = (protect ? 0x3e200 :
14473					  TG3_NVRAM_SIZE_512KB);
14474		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14475			tp->nvram_size = (protect ? 0x1f200 :
14476					  TG3_NVRAM_SIZE_256KB);
14477		else
14478			tp->nvram_size = (protect ? 0x1f200 :
14479					  TG3_NVRAM_SIZE_128KB);
14480		break;
14481	case FLASH_5752VENDOR_ST_M45PE10:
14482	case FLASH_5752VENDOR_ST_M45PE20:
14483	case FLASH_5752VENDOR_ST_M45PE40:
14484		tp->nvram_jedecnum = JEDEC_ST;
14485		tg3_flag_set(tp, NVRAM_BUFFERED);
14486		tg3_flag_set(tp, FLASH);
14487		tp->nvram_pagesize = 256;
14488		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14489			tp->nvram_size = (protect ?
14490					  TG3_NVRAM_SIZE_64KB :
14491					  TG3_NVRAM_SIZE_128KB);
14492		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14493			tp->nvram_size = (protect ?
14494					  TG3_NVRAM_SIZE_64KB :
14495					  TG3_NVRAM_SIZE_256KB);
14496		else
14497			tp->nvram_size = (protect ?
14498					  TG3_NVRAM_SIZE_128KB :
14499					  TG3_NVRAM_SIZE_512KB);
14500		break;
14501	}
14502}
14503
14504static void tg3_get_5787_nvram_info(struct tg3 *tp)
14505{
14506	u32 nvcfg1;
14507
14508	nvcfg1 = tr32(NVRAM_CFG1);
14509
14510	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14511	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14512	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14513	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14514	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14515		tp->nvram_jedecnum = JEDEC_ATMEL;
14516		tg3_flag_set(tp, NVRAM_BUFFERED);
14517		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14518
14519		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14520		tw32(NVRAM_CFG1, nvcfg1);
14521		break;
14522	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14523	case FLASH_5755VENDOR_ATMEL_FLASH_1:
14524	case FLASH_5755VENDOR_ATMEL_FLASH_2:
14525	case FLASH_5755VENDOR_ATMEL_FLASH_3:
14526		tp->nvram_jedecnum = JEDEC_ATMEL;
14527		tg3_flag_set(tp, NVRAM_BUFFERED);
14528		tg3_flag_set(tp, FLASH);
14529		tp->nvram_pagesize = 264;
14530		break;
14531	case FLASH_5752VENDOR_ST_M45PE10:
14532	case FLASH_5752VENDOR_ST_M45PE20:
14533	case FLASH_5752VENDOR_ST_M45PE40:
14534		tp->nvram_jedecnum = JEDEC_ST;
14535		tg3_flag_set(tp, NVRAM_BUFFERED);
14536		tg3_flag_set(tp, FLASH);
14537		tp->nvram_pagesize = 256;
14538		break;
14539	}
14540}
14541
14542static void tg3_get_5761_nvram_info(struct tg3 *tp)
14543{
14544	u32 nvcfg1, protect = 0;
14545
14546	nvcfg1 = tr32(NVRAM_CFG1);
14547
14548	/* NVRAM protection for TPM */
14549	if (nvcfg1 & (1 << 27)) {
14550		tg3_flag_set(tp, PROTECTED_NVRAM);
14551		protect = 1;
14552	}
14553
14554	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14555	switch (nvcfg1) {
14556	case FLASH_5761VENDOR_ATMEL_ADB021D:
14557	case FLASH_5761VENDOR_ATMEL_ADB041D:
14558	case FLASH_5761VENDOR_ATMEL_ADB081D:
14559	case FLASH_5761VENDOR_ATMEL_ADB161D:
14560	case FLASH_5761VENDOR_ATMEL_MDB021D:
14561	case FLASH_5761VENDOR_ATMEL_MDB041D:
14562	case FLASH_5761VENDOR_ATMEL_MDB081D:
14563	case FLASH_5761VENDOR_ATMEL_MDB161D:
14564		tp->nvram_jedecnum = JEDEC_ATMEL;
14565		tg3_flag_set(tp, NVRAM_BUFFERED);
14566		tg3_flag_set(tp, FLASH);
14567		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14568		tp->nvram_pagesize = 256;
14569		break;
14570	case FLASH_5761VENDOR_ST_A_M45PE20:
14571	case FLASH_5761VENDOR_ST_A_M45PE40:
14572	case FLASH_5761VENDOR_ST_A_M45PE80:
14573	case FLASH_5761VENDOR_ST_A_M45PE16:
14574	case FLASH_5761VENDOR_ST_M_M45PE20:
14575	case FLASH_5761VENDOR_ST_M_M45PE40:
14576	case FLASH_5761VENDOR_ST_M_M45PE80:
14577	case FLASH_5761VENDOR_ST_M_M45PE16:
14578		tp->nvram_jedecnum = JEDEC_ST;
14579		tg3_flag_set(tp, NVRAM_BUFFERED);
14580		tg3_flag_set(tp, FLASH);
14581		tp->nvram_pagesize = 256;
14582		break;
14583	}
14584
14585	if (protect) {
14586		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14587	} else {
14588		switch (nvcfg1) {
14589		case FLASH_5761VENDOR_ATMEL_ADB161D:
14590		case FLASH_5761VENDOR_ATMEL_MDB161D:
14591		case FLASH_5761VENDOR_ST_A_M45PE16:
14592		case FLASH_5761VENDOR_ST_M_M45PE16:
14593			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14594			break;
14595		case FLASH_5761VENDOR_ATMEL_ADB081D:
14596		case FLASH_5761VENDOR_ATMEL_MDB081D:
14597		case FLASH_5761VENDOR_ST_A_M45PE80:
14598		case FLASH_5761VENDOR_ST_M_M45PE80:
14599			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14600			break;
14601		case FLASH_5761VENDOR_ATMEL_ADB041D:
14602		case FLASH_5761VENDOR_ATMEL_MDB041D:
14603		case FLASH_5761VENDOR_ST_A_M45PE40:
14604		case FLASH_5761VENDOR_ST_M_M45PE40:
14605			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14606			break;
14607		case FLASH_5761VENDOR_ATMEL_ADB021D:
14608		case FLASH_5761VENDOR_ATMEL_MDB021D:
14609		case FLASH_5761VENDOR_ST_A_M45PE20:
14610		case FLASH_5761VENDOR_ST_M_M45PE20:
14611			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14612			break;
14613		}
14614	}
14615}
14616
14617static void tg3_get_5906_nvram_info(struct tg3 *tp)
14618{
14619	tp->nvram_jedecnum = JEDEC_ATMEL;
14620	tg3_flag_set(tp, NVRAM_BUFFERED);
14621	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14622}
14623
14624static void tg3_get_57780_nvram_info(struct tg3 *tp)
14625{
14626	u32 nvcfg1;
14627
14628	nvcfg1 = tr32(NVRAM_CFG1);
14629
14630	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14631	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14632	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14633		tp->nvram_jedecnum = JEDEC_ATMEL;
14634		tg3_flag_set(tp, NVRAM_BUFFERED);
14635		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14636
14637		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14638		tw32(NVRAM_CFG1, nvcfg1);
14639		return;
14640	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14641	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14642	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14643	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14644	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14645	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14646	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14647		tp->nvram_jedecnum = JEDEC_ATMEL;
14648		tg3_flag_set(tp, NVRAM_BUFFERED);
14649		tg3_flag_set(tp, FLASH);
14650
14651		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14652		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14653		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14654		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14655			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14656			break;
14657		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14658		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14659			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14660			break;
14661		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14662		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14663			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14664			break;
14665		}
14666		break;
14667	case FLASH_5752VENDOR_ST_M45PE10:
14668	case FLASH_5752VENDOR_ST_M45PE20:
14669	case FLASH_5752VENDOR_ST_M45PE40:
14670		tp->nvram_jedecnum = JEDEC_ST;
14671		tg3_flag_set(tp, NVRAM_BUFFERED);
14672		tg3_flag_set(tp, FLASH);
14673
14674		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14675		case FLASH_5752VENDOR_ST_M45PE10:
14676			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14677			break;
14678		case FLASH_5752VENDOR_ST_M45PE20:
14679			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14680			break;
14681		case FLASH_5752VENDOR_ST_M45PE40:
14682			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14683			break;
14684		}
14685		break;
14686	default:
14687		tg3_flag_set(tp, NO_NVRAM);
14688		return;
14689	}
14690
14691	tg3_nvram_get_pagesize(tp, nvcfg1);
14692	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14693		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14694}
14695
14696
14697static void tg3_get_5717_nvram_info(struct tg3 *tp)
14698{
14699	u32 nvcfg1;
14700
14701	nvcfg1 = tr32(NVRAM_CFG1);
14702
14703	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14704	case FLASH_5717VENDOR_ATMEL_EEPROM:
14705	case FLASH_5717VENDOR_MICRO_EEPROM:
14706		tp->nvram_jedecnum = JEDEC_ATMEL;
14707		tg3_flag_set(tp, NVRAM_BUFFERED);
14708		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14709
14710		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14711		tw32(NVRAM_CFG1, nvcfg1);
14712		return;
14713	case FLASH_5717VENDOR_ATMEL_MDB011D:
14714	case FLASH_5717VENDOR_ATMEL_ADB011B:
14715	case FLASH_5717VENDOR_ATMEL_ADB011D:
14716	case FLASH_5717VENDOR_ATMEL_MDB021D:
14717	case FLASH_5717VENDOR_ATMEL_ADB021B:
14718	case FLASH_5717VENDOR_ATMEL_ADB021D:
14719	case FLASH_5717VENDOR_ATMEL_45USPT:
14720		tp->nvram_jedecnum = JEDEC_ATMEL;
14721		tg3_flag_set(tp, NVRAM_BUFFERED);
14722		tg3_flag_set(tp, FLASH);
14723
14724		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14725		case FLASH_5717VENDOR_ATMEL_MDB021D:
14726			/* Detect size with tg3_nvram_get_size() */
14727			break;
14728		case FLASH_5717VENDOR_ATMEL_ADB021B:
14729		case FLASH_5717VENDOR_ATMEL_ADB021D:
14730			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14731			break;
14732		default:
14733			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14734			break;
14735		}
14736		break;
14737	case FLASH_5717VENDOR_ST_M_M25PE10:
14738	case FLASH_5717VENDOR_ST_A_M25PE10:
14739	case FLASH_5717VENDOR_ST_M_M45PE10:
14740	case FLASH_5717VENDOR_ST_A_M45PE10:
14741	case FLASH_5717VENDOR_ST_M_M25PE20:
14742	case FLASH_5717VENDOR_ST_A_M25PE20:
14743	case FLASH_5717VENDOR_ST_M_M45PE20:
14744	case FLASH_5717VENDOR_ST_A_M45PE20:
14745	case FLASH_5717VENDOR_ST_25USPT:
14746	case FLASH_5717VENDOR_ST_45USPT:
14747		tp->nvram_jedecnum = JEDEC_ST;
14748		tg3_flag_set(tp, NVRAM_BUFFERED);
14749		tg3_flag_set(tp, FLASH);
14750
14751		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14752		case FLASH_5717VENDOR_ST_M_M25PE20:
14753		case FLASH_5717VENDOR_ST_M_M45PE20:
14754			/* Detect size with tg3_nvram_get_size() */
14755			break;
14756		case FLASH_5717VENDOR_ST_A_M25PE20:
14757		case FLASH_5717VENDOR_ST_A_M45PE20:
14758			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14759			break;
14760		default:
14761			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14762			break;
14763		}
14764		break;
14765	default:
14766		tg3_flag_set(tp, NO_NVRAM);
14767		return;
14768	}
14769
14770	tg3_nvram_get_pagesize(tp, nvcfg1);
14771	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14772		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14773}
14774
14775static void tg3_get_5720_nvram_info(struct tg3 *tp)
14776{
14777	u32 nvcfg1, nvmpinstrp;
14778
14779	nvcfg1 = tr32(NVRAM_CFG1);
14780	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14781
14782	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14783		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14784			tg3_flag_set(tp, NO_NVRAM);
14785			return;
14786		}
14787
14788		switch (nvmpinstrp) {
14789		case FLASH_5762_EEPROM_HD:
14790			nvmpinstrp = FLASH_5720_EEPROM_HD;
14791			break;
14792		case FLASH_5762_EEPROM_LD:
14793			nvmpinstrp = FLASH_5720_EEPROM_LD;
14794			break;
14795		case FLASH_5720VENDOR_M_ST_M45PE20:
14796			/* This pinstrap supports multiple sizes, so force it
14797			 * to read the actual size from location 0xf0.
14798			 */
14799			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14800			break;
14801		}
14802	}
14803
14804	switch (nvmpinstrp) {
14805	case FLASH_5720_EEPROM_HD:
14806	case FLASH_5720_EEPROM_LD:
14807		tp->nvram_jedecnum = JEDEC_ATMEL;
14808		tg3_flag_set(tp, NVRAM_BUFFERED);
14809
14810		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14811		tw32(NVRAM_CFG1, nvcfg1);
14812		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14813			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14814		else
14815			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14816		return;
14817	case FLASH_5720VENDOR_M_ATMEL_DB011D:
14818	case FLASH_5720VENDOR_A_ATMEL_DB011B:
14819	case FLASH_5720VENDOR_A_ATMEL_DB011D:
14820	case FLASH_5720VENDOR_M_ATMEL_DB021D:
14821	case FLASH_5720VENDOR_A_ATMEL_DB021B:
14822	case FLASH_5720VENDOR_A_ATMEL_DB021D:
14823	case FLASH_5720VENDOR_M_ATMEL_DB041D:
14824	case FLASH_5720VENDOR_A_ATMEL_DB041B:
14825	case FLASH_5720VENDOR_A_ATMEL_DB041D:
14826	case FLASH_5720VENDOR_M_ATMEL_DB081D:
14827	case FLASH_5720VENDOR_A_ATMEL_DB081D:
14828	case FLASH_5720VENDOR_ATMEL_45USPT:
14829		tp->nvram_jedecnum = JEDEC_ATMEL;
14830		tg3_flag_set(tp, NVRAM_BUFFERED);
14831		tg3_flag_set(tp, FLASH);
14832
14833		switch (nvmpinstrp) {
14834		case FLASH_5720VENDOR_M_ATMEL_DB021D:
14835		case FLASH_5720VENDOR_A_ATMEL_DB021B:
14836		case FLASH_5720VENDOR_A_ATMEL_DB021D:
14837			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14838			break;
14839		case FLASH_5720VENDOR_M_ATMEL_DB041D:
14840		case FLASH_5720VENDOR_A_ATMEL_DB041B:
14841		case FLASH_5720VENDOR_A_ATMEL_DB041D:
14842			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14843			break;
14844		case FLASH_5720VENDOR_M_ATMEL_DB081D:
14845		case FLASH_5720VENDOR_A_ATMEL_DB081D:
14846			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14847			break;
14848		default:
14849			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14850				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14851			break;
14852		}
14853		break;
14854	case FLASH_5720VENDOR_M_ST_M25PE10:
14855	case FLASH_5720VENDOR_M_ST_M45PE10:
14856	case FLASH_5720VENDOR_A_ST_M25PE10:
14857	case FLASH_5720VENDOR_A_ST_M45PE10:
14858	case FLASH_5720VENDOR_M_ST_M25PE20:
14859	case FLASH_5720VENDOR_M_ST_M45PE20:
14860	case FLASH_5720VENDOR_A_ST_M25PE20:
14861	case FLASH_5720VENDOR_A_ST_M45PE20:
14862	case FLASH_5720VENDOR_M_ST_M25PE40:
14863	case FLASH_5720VENDOR_M_ST_M45PE40:
14864	case FLASH_5720VENDOR_A_ST_M25PE40:
14865	case FLASH_5720VENDOR_A_ST_M45PE40:
14866	case FLASH_5720VENDOR_M_ST_M25PE80:
14867	case FLASH_5720VENDOR_M_ST_M45PE80:
14868	case FLASH_5720VENDOR_A_ST_M25PE80:
14869	case FLASH_5720VENDOR_A_ST_M45PE80:
14870	case FLASH_5720VENDOR_ST_25USPT:
14871	case FLASH_5720VENDOR_ST_45USPT:
14872		tp->nvram_jedecnum = JEDEC_ST;
14873		tg3_flag_set(tp, NVRAM_BUFFERED);
14874		tg3_flag_set(tp, FLASH);
14875
14876		switch (nvmpinstrp) {
14877		case FLASH_5720VENDOR_M_ST_M25PE20:
14878		case FLASH_5720VENDOR_M_ST_M45PE20:
14879		case FLASH_5720VENDOR_A_ST_M25PE20:
14880		case FLASH_5720VENDOR_A_ST_M45PE20:
14881			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14882			break;
14883		case FLASH_5720VENDOR_M_ST_M25PE40:
14884		case FLASH_5720VENDOR_M_ST_M45PE40:
14885		case FLASH_5720VENDOR_A_ST_M25PE40:
14886		case FLASH_5720VENDOR_A_ST_M45PE40:
14887			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14888			break;
14889		case FLASH_5720VENDOR_M_ST_M25PE80:
14890		case FLASH_5720VENDOR_M_ST_M45PE80:
14891		case FLASH_5720VENDOR_A_ST_M25PE80:
14892		case FLASH_5720VENDOR_A_ST_M45PE80:
14893			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14894			break;
14895		default:
14896			if (tg3_asic_rev(tp) != ASIC_REV_5762)
14897				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14898			break;
14899		}
14900		break;
14901	default:
14902		tg3_flag_set(tp, NO_NVRAM);
14903		return;
14904	}
14905
14906	tg3_nvram_get_pagesize(tp, nvcfg1);
14907	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14908		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14909
14910	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14911		u32 val;
14912
14913		if (tg3_nvram_read(tp, 0, &val))
14914			return;
14915
14916		if (val != TG3_EEPROM_MAGIC &&
14917		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14918			tg3_flag_set(tp, NO_NVRAM);
14919	}
14920}
14921
14922/* Chips other than 5700/5701 use the NVRAM for fetching info. */
14923static void tg3_nvram_init(struct tg3 *tp)
14924{
14925	if (tg3_flag(tp, IS_SSB_CORE)) {
14926		/* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14927		tg3_flag_clear(tp, NVRAM);
14928		tg3_flag_clear(tp, NVRAM_BUFFERED);
14929		tg3_flag_set(tp, NO_NVRAM);
14930		return;
14931	}
14932
14933	tw32_f(GRC_EEPROM_ADDR,
14934	     (EEPROM_ADDR_FSM_RESET |
14935	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
14936	       EEPROM_ADDR_CLKPERD_SHIFT)));
14937
14938	msleep(1);
14939
14940	/* Enable seeprom accesses. */
14941	tw32_f(GRC_LOCAL_CTRL,
14942	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14943	udelay(100);
14944
14945	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14946	    tg3_asic_rev(tp) != ASIC_REV_5701) {
14947		tg3_flag_set(tp, NVRAM);
14948
14949		if (tg3_nvram_lock(tp)) {
14950			netdev_warn(tp->dev,
14951				    "Cannot get nvram lock, %s failed\n",
14952				    __func__);
14953			return;
14954		}
14955		tg3_enable_nvram_access(tp);
14956
14957		tp->nvram_size = 0;
14958
14959		if (tg3_asic_rev(tp) == ASIC_REV_5752)
14960			tg3_get_5752_nvram_info(tp);
14961		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14962			tg3_get_5755_nvram_info(tp);
14963		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14964			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14965			 tg3_asic_rev(tp) == ASIC_REV_5785)
14966			tg3_get_5787_nvram_info(tp);
14967		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14968			tg3_get_5761_nvram_info(tp);
14969		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14970			tg3_get_5906_nvram_info(tp);
14971		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14972			 tg3_flag(tp, 57765_CLASS))
14973			tg3_get_57780_nvram_info(tp);
14974		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14975			 tg3_asic_rev(tp) == ASIC_REV_5719)
14976			tg3_get_5717_nvram_info(tp);
14977		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14978			 tg3_asic_rev(tp) == ASIC_REV_5762)
14979			tg3_get_5720_nvram_info(tp);
14980		else
14981			tg3_get_nvram_info(tp);
14982
14983		if (tp->nvram_size == 0)
14984			tg3_get_nvram_size(tp);
14985
14986		tg3_disable_nvram_access(tp);
14987		tg3_nvram_unlock(tp);
14988
14989	} else {
14990		tg3_flag_clear(tp, NVRAM);
14991		tg3_flag_clear(tp, NVRAM_BUFFERED);
14992
14993		tg3_get_eeprom_size(tp);
14994	}
14995}
14996
14997struct subsys_tbl_ent {
14998	u16 subsys_vendor, subsys_devid;
14999	u32 phy_id;
15000};
15001
15002static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
15003	/* Broadcom boards. */
15004	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15005	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
15006	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15007	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
15008	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15009	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
15010	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15011	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
15012	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15013	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
15014	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15015	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
15016	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15017	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
15018	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15019	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
15020	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15021	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
15022	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15023	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
15024	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
15025	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
15026
15027	/* 3com boards. */
15028	{ TG3PCI_SUBVENDOR_ID_3COM,
15029	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
15030	{ TG3PCI_SUBVENDOR_ID_3COM,
15031	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
15032	{ TG3PCI_SUBVENDOR_ID_3COM,
15033	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
15034	{ TG3PCI_SUBVENDOR_ID_3COM,
15035	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
15036	{ TG3PCI_SUBVENDOR_ID_3COM,
15037	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
15038
15039	/* DELL boards. */
15040	{ TG3PCI_SUBVENDOR_ID_DELL,
15041	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
15042	{ TG3PCI_SUBVENDOR_ID_DELL,
15043	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
15044	{ TG3PCI_SUBVENDOR_ID_DELL,
15045	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
15046	{ TG3PCI_SUBVENDOR_ID_DELL,
15047	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
15048
15049	/* Compaq boards. */
15050	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15051	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
15052	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15053	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
15054	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15055	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
15056	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15057	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
15058	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
15059	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
15060
15061	/* IBM boards. */
15062	{ TG3PCI_SUBVENDOR_ID_IBM,
15063	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
15064};
15065
15066static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
15067{
15068	int i;
15069
15070	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
15071		if ((subsys_id_to_phy_id[i].subsys_vendor ==
15072		     tp->pdev->subsystem_vendor) &&
15073		    (subsys_id_to_phy_id[i].subsys_devid ==
15074		     tp->pdev->subsystem_device))
15075			return &subsys_id_to_phy_id[i];
15076	}
15077	return NULL;
15078}
15079
15080static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
15081{
15082	u32 val;
15083
15084	tp->phy_id = TG3_PHY_ID_INVALID;
15085	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15086
15087	/* Assume an onboard device and WOL capable by default.  */
15088	tg3_flag_set(tp, EEPROM_WRITE_PROT);
15089	tg3_flag_set(tp, WOL_CAP);
15090
15091	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15092		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
15093			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15094			tg3_flag_set(tp, IS_NIC);
15095		}
15096		val = tr32(VCPU_CFGSHDW);
15097		if (val & VCPU_CFGSHDW_ASPM_DBNC)
15098			tg3_flag_set(tp, ASPM_WORKAROUND);
15099		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
15100		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
15101			tg3_flag_set(tp, WOL_ENABLE);
15102			device_set_wakeup_enable(&tp->pdev->dev, true);
15103		}
15104		goto done;
15105	}
15106
15107	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
15108	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
15109		u32 nic_cfg, led_cfg;
15110		u32 cfg2 = 0, cfg4 = 0, cfg5 = 0;
15111		u32 nic_phy_id, ver, eeprom_phy_id;
15112		int eeprom_phy_serdes = 0;
15113
15114		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
15115		tp->nic_sram_data_cfg = nic_cfg;
15116
15117		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
15118		ver >>= NIC_SRAM_DATA_VER_SHIFT;
15119		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15120		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15121		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
15122		    (ver > 0) && (ver < 0x100))
15123			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
15124
15125		if (tg3_asic_rev(tp) == ASIC_REV_5785)
15126			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
15127
15128		if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15129		    tg3_asic_rev(tp) == ASIC_REV_5719 ||
15130		    tg3_asic_rev(tp) == ASIC_REV_5720)
15131			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_5, &cfg5);
15132
15133		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
15134		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
15135			eeprom_phy_serdes = 1;
15136
15137		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
15138		if (nic_phy_id != 0) {
15139			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
15140			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
15141
15142			eeprom_phy_id  = (id1 >> 16) << 10;
15143			eeprom_phy_id |= (id2 & 0xfc00) << 16;
15144			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
15145		} else
15146			eeprom_phy_id = 0;
15147
15148		tp->phy_id = eeprom_phy_id;
15149		if (eeprom_phy_serdes) {
15150			if (!tg3_flag(tp, 5705_PLUS))
15151				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15152			else
15153				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
15154		}
15155
15156		if (tg3_flag(tp, 5750_PLUS))
15157			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
15158				    SHASTA_EXT_LED_MODE_MASK);
15159		else
15160			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
15161
15162		switch (led_cfg) {
15163		default:
15164		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
15165			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15166			break;
15167
15168		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
15169			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15170			break;
15171
15172		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
15173			tp->led_ctrl = LED_CTRL_MODE_MAC;
15174
15175			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
15176			 * read on some older 5700/5701 bootcode.
15177			 */
15178			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
15179			    tg3_asic_rev(tp) == ASIC_REV_5701)
15180				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15181
15182			break;
15183
15184		case SHASTA_EXT_LED_SHARED:
15185			tp->led_ctrl = LED_CTRL_MODE_SHARED;
15186			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
15187			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
15188				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15189						 LED_CTRL_MODE_PHY_2);
15190
15191			if (tg3_flag(tp, 5717_PLUS) ||
15192			    tg3_asic_rev(tp) == ASIC_REV_5762)
15193				tp->led_ctrl |= LED_CTRL_BLINK_RATE_OVERRIDE |
15194						LED_CTRL_BLINK_RATE_MASK;
15195
15196			break;
15197
15198		case SHASTA_EXT_LED_MAC:
15199			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
15200			break;
15201
15202		case SHASTA_EXT_LED_COMBO:
15203			tp->led_ctrl = LED_CTRL_MODE_COMBO;
15204			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
15205				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
15206						 LED_CTRL_MODE_PHY_2);
15207			break;
15208
15209		}
15210
15211		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
15212		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
15213		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
15214			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
15215
15216		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
15217			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
15218
15219		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
15220			tg3_flag_set(tp, EEPROM_WRITE_PROT);
15221			if ((tp->pdev->subsystem_vendor ==
15222			     PCI_VENDOR_ID_ARIMA) &&
15223			    (tp->pdev->subsystem_device == 0x205a ||
15224			     tp->pdev->subsystem_device == 0x2063))
15225				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15226		} else {
15227			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
15228			tg3_flag_set(tp, IS_NIC);
15229		}
15230
15231		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
15232			tg3_flag_set(tp, ENABLE_ASF);
15233			if (tg3_flag(tp, 5750_PLUS))
15234				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
15235		}
15236
15237		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
15238		    tg3_flag(tp, 5750_PLUS))
15239			tg3_flag_set(tp, ENABLE_APE);
15240
15241		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
15242		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
15243			tg3_flag_clear(tp, WOL_CAP);
15244
15245		if (tg3_flag(tp, WOL_CAP) &&
15246		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
15247			tg3_flag_set(tp, WOL_ENABLE);
15248			device_set_wakeup_enable(&tp->pdev->dev, true);
15249		}
15250
15251		if (cfg2 & (1 << 17))
15252			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
15253
15254		/* serdes signal pre-emphasis in register 0x590 set by */
15255		/* bootcode if bit 18 is set */
15256		if (cfg2 & (1 << 18))
15257			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
15258
15259		if ((tg3_flag(tp, 57765_PLUS) ||
15260		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
15261		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
15262		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
15263			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
15264
15265		if (tg3_flag(tp, PCI_EXPRESS)) {
15266			u32 cfg3;
15267
15268			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
15269			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
15270			    !tg3_flag(tp, 57765_PLUS) &&
15271			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
15272				tg3_flag_set(tp, ASPM_WORKAROUND);
15273			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
15274				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
15275			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
15276				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
15277		}
15278
15279		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
15280			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
15281		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
15282			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
15283		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
15284			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
15285
15286		if (cfg5 & NIC_SRAM_DISABLE_1G_HALF_ADV)
15287			tp->phy_flags |= TG3_PHYFLG_DISABLE_1G_HD_ADV;
15288	}
15289done:
15290	if (tg3_flag(tp, WOL_CAP))
15291		device_set_wakeup_enable(&tp->pdev->dev,
15292					 tg3_flag(tp, WOL_ENABLE));
15293	else
15294		device_set_wakeup_capable(&tp->pdev->dev, false);
15295}
15296
15297static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
15298{
15299	int i, err;
15300	u32 val2, off = offset * 8;
15301
15302	err = tg3_nvram_lock(tp);
15303	if (err)
15304		return err;
15305
15306	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
15307	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
15308			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
15309	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
15310	udelay(10);
15311
15312	for (i = 0; i < 100; i++) {
15313		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
15314		if (val2 & APE_OTP_STATUS_CMD_DONE) {
15315			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
15316			break;
15317		}
15318		udelay(10);
15319	}
15320
15321	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
15322
15323	tg3_nvram_unlock(tp);
15324	if (val2 & APE_OTP_STATUS_CMD_DONE)
15325		return 0;
15326
15327	return -EBUSY;
15328}
15329
15330static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
15331{
15332	int i;
15333	u32 val;
15334
15335	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
15336	tw32(OTP_CTRL, cmd);
15337
15338	/* Wait for up to 1 ms for command to execute. */
15339	for (i = 0; i < 100; i++) {
15340		val = tr32(OTP_STATUS);
15341		if (val & OTP_STATUS_CMD_DONE)
15342			break;
15343		udelay(10);
15344	}
15345
15346	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
15347}
15348
15349/* Read the gphy configuration from the OTP region of the chip.  The gphy
15350 * configuration is a 32-bit value that straddles the alignment boundary.
15351 * We do two 32-bit reads and then shift and merge the results.
15352 */
15353static u32 tg3_read_otp_phycfg(struct tg3 *tp)
15354{
15355	u32 bhalf_otp, thalf_otp;
15356
15357	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
15358
15359	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
15360		return 0;
15361
15362	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
15363
15364	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15365		return 0;
15366
15367	thalf_otp = tr32(OTP_READ_DATA);
15368
15369	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
15370
15371	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
15372		return 0;
15373
15374	bhalf_otp = tr32(OTP_READ_DATA);
15375
15376	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
15377}
15378
15379static void tg3_phy_init_link_config(struct tg3 *tp)
15380{
15381	u32 adv = ADVERTISED_Autoneg;
15382
15383	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
15384		if (!(tp->phy_flags & TG3_PHYFLG_DISABLE_1G_HD_ADV))
15385			adv |= ADVERTISED_1000baseT_Half;
15386		adv |= ADVERTISED_1000baseT_Full;
15387	}
15388
15389	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
15390		adv |= ADVERTISED_100baseT_Half |
15391		       ADVERTISED_100baseT_Full |
15392		       ADVERTISED_10baseT_Half |
15393		       ADVERTISED_10baseT_Full |
15394		       ADVERTISED_TP;
15395	else
15396		adv |= ADVERTISED_FIBRE;
15397
15398	tp->link_config.advertising = adv;
15399	tp->link_config.speed = SPEED_UNKNOWN;
15400	tp->link_config.duplex = DUPLEX_UNKNOWN;
15401	tp->link_config.autoneg = AUTONEG_ENABLE;
15402	tp->link_config.active_speed = SPEED_UNKNOWN;
15403	tp->link_config.active_duplex = DUPLEX_UNKNOWN;
15404
15405	tp->old_link = -1;
15406}
15407
15408static int tg3_phy_probe(struct tg3 *tp)
15409{
15410	u32 hw_phy_id_1, hw_phy_id_2;
15411	u32 hw_phy_id, hw_phy_id_masked;
15412	int err;
15413
15414	/* flow control autonegotiation is default behavior */
15415	tg3_flag_set(tp, PAUSE_AUTONEG);
15416	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
15417
15418	if (tg3_flag(tp, ENABLE_APE)) {
15419		switch (tp->pci_fn) {
15420		case 0:
15421			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
15422			break;
15423		case 1:
15424			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
15425			break;
15426		case 2:
15427			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
15428			break;
15429		case 3:
15430			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
15431			break;
15432		}
15433	}
15434
15435	if (!tg3_flag(tp, ENABLE_ASF) &&
15436	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15437	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15438		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15439				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15440
15441	if (tg3_flag(tp, USE_PHYLIB))
15442		return tg3_phy_init(tp);
15443
15444	/* Reading the PHY ID register can conflict with ASF
15445	 * firmware access to the PHY hardware.
15446	 */
15447	err = 0;
15448	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15449		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15450	} else {
15451		/* Now read the physical PHY_ID from the chip and verify
15452		 * that it is sane.  If it doesn't look good, we fall back
15453		 * to either the hard-coded table based PHY_ID and failing
15454		 * that the value found in the eeprom area.
15455		 */
15456		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15457		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15458
15459		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
15460		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15461		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
15462
15463		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15464	}
15465
15466	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15467		tp->phy_id = hw_phy_id;
15468		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15469			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15470		else
15471			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15472	} else {
15473		if (tp->phy_id != TG3_PHY_ID_INVALID) {
15474			/* Do nothing, phy ID already set up in
15475			 * tg3_get_eeprom_hw_cfg().
15476			 */
15477		} else {
15478			struct subsys_tbl_ent *p;
15479
15480			/* No eeprom signature?  Try the hardcoded
15481			 * subsys device table.
15482			 */
15483			p = tg3_lookup_by_subsys(tp);
15484			if (p) {
15485				tp->phy_id = p->phy_id;
15486			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
15487				/* For now we saw the IDs 0xbc050cd0,
15488				 * 0xbc050f80 and 0xbc050c30 on devices
15489				 * connected to an BCM4785 and there are
15490				 * probably more. Just assume that the phy is
15491				 * supported when it is connected to a SSB core
15492				 * for now.
15493				 */
15494				return -ENODEV;
15495			}
15496
15497			if (!tp->phy_id ||
15498			    tp->phy_id == TG3_PHY_ID_BCM8002)
15499				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15500		}
15501	}
15502
15503	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15504	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15505	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
15506	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
15507	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
15508	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15509	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15510	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15511	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0))) {
15512		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15513
15514		tp->eee.supported = SUPPORTED_100baseT_Full |
15515				    SUPPORTED_1000baseT_Full;
15516		tp->eee.advertised = ADVERTISED_100baseT_Full |
15517				     ADVERTISED_1000baseT_Full;
15518		tp->eee.eee_enabled = 1;
15519		tp->eee.tx_lpi_enabled = 1;
15520		tp->eee.tx_lpi_timer = TG3_CPMU_DBTMR1_LNKIDLE_2047US;
15521	}
15522
15523	tg3_phy_init_link_config(tp);
15524
15525	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15526	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15527	    !tg3_flag(tp, ENABLE_APE) &&
15528	    !tg3_flag(tp, ENABLE_ASF)) {
15529		u32 bmsr, dummy;
15530
15531		tg3_readphy(tp, MII_BMSR, &bmsr);
15532		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15533		    (bmsr & BMSR_LSTATUS))
15534			goto skip_phy_reset;
15535
15536		err = tg3_phy_reset(tp);
15537		if (err)
15538			return err;
15539
15540		tg3_phy_set_wirespeed(tp);
15541
15542		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15543			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15544					    tp->link_config.flowctrl);
15545
15546			tg3_writephy(tp, MII_BMCR,
15547				     BMCR_ANENABLE | BMCR_ANRESTART);
15548		}
15549	}
15550
15551skip_phy_reset:
15552	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15553		err = tg3_init_5401phy_dsp(tp);
15554		if (err)
15555			return err;
15556
15557		err = tg3_init_5401phy_dsp(tp);
15558	}
15559
15560	return err;
15561}
15562
15563static void tg3_read_vpd(struct tg3 *tp)
15564{
15565	u8 *vpd_data;
15566	unsigned int block_end, rosize, len;
15567	u32 vpdlen;
15568	int j, i = 0;
15569
15570	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15571	if (!vpd_data)
15572		goto out_no_vpd;
15573
15574	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15575	if (i < 0)
15576		goto out_not_found;
15577
15578	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15579	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15580	i += PCI_VPD_LRDT_TAG_SIZE;
15581
15582	if (block_end > vpdlen)
15583		goto out_not_found;
15584
15585	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15586				      PCI_VPD_RO_KEYWORD_MFR_ID);
15587	if (j > 0) {
15588		len = pci_vpd_info_field_size(&vpd_data[j]);
15589
15590		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15591		if (j + len > block_end || len != 4 ||
15592		    memcmp(&vpd_data[j], "1028", 4))
15593			goto partno;
15594
15595		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15596					      PCI_VPD_RO_KEYWORD_VENDOR0);
15597		if (j < 0)
15598			goto partno;
15599
15600		len = pci_vpd_info_field_size(&vpd_data[j]);
15601
15602		j += PCI_VPD_INFO_FLD_HDR_SIZE;
15603		if (j + len > block_end)
15604			goto partno;
15605
15606		if (len >= sizeof(tp->fw_ver))
15607			len = sizeof(tp->fw_ver) - 1;
15608		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15609		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15610			 &vpd_data[j]);
15611	}
15612
15613partno:
15614	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15615				      PCI_VPD_RO_KEYWORD_PARTNO);
15616	if (i < 0)
15617		goto out_not_found;
15618
15619	len = pci_vpd_info_field_size(&vpd_data[i]);
15620
15621	i += PCI_VPD_INFO_FLD_HDR_SIZE;
15622	if (len > TG3_BPN_SIZE ||
15623	    (len + i) > vpdlen)
15624		goto out_not_found;
15625
15626	memcpy(tp->board_part_number, &vpd_data[i], len);
15627
15628out_not_found:
15629	kfree(vpd_data);
15630	if (tp->board_part_number[0])
15631		return;
15632
15633out_no_vpd:
15634	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15635		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15636		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15637			strcpy(tp->board_part_number, "BCM5717");
15638		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15639			strcpy(tp->board_part_number, "BCM5718");
15640		else
15641			goto nomatch;
15642	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15643		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15644			strcpy(tp->board_part_number, "BCM57780");
15645		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15646			strcpy(tp->board_part_number, "BCM57760");
15647		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15648			strcpy(tp->board_part_number, "BCM57790");
15649		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15650			strcpy(tp->board_part_number, "BCM57788");
15651		else
15652			goto nomatch;
15653	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15654		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15655			strcpy(tp->board_part_number, "BCM57761");
15656		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15657			strcpy(tp->board_part_number, "BCM57765");
15658		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15659			strcpy(tp->board_part_number, "BCM57781");
15660		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15661			strcpy(tp->board_part_number, "BCM57785");
15662		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15663			strcpy(tp->board_part_number, "BCM57791");
15664		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15665			strcpy(tp->board_part_number, "BCM57795");
15666		else
15667			goto nomatch;
15668	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15669		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15670			strcpy(tp->board_part_number, "BCM57762");
15671		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15672			strcpy(tp->board_part_number, "BCM57766");
15673		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15674			strcpy(tp->board_part_number, "BCM57782");
15675		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15676			strcpy(tp->board_part_number, "BCM57786");
15677		else
15678			goto nomatch;
15679	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15680		strcpy(tp->board_part_number, "BCM95906");
15681	} else {
15682nomatch:
15683		strcpy(tp->board_part_number, "none");
15684	}
15685}
15686
15687static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15688{
15689	u32 val;
15690
15691	if (tg3_nvram_read(tp, offset, &val) ||
15692	    (val & 0xfc000000) != 0x0c000000 ||
15693	    tg3_nvram_read(tp, offset + 4, &val) ||
15694	    val != 0)
15695		return 0;
15696
15697	return 1;
15698}
15699
15700static void tg3_read_bc_ver(struct tg3 *tp)
15701{
15702	u32 val, offset, start, ver_offset;
15703	int i, dst_off;
15704	bool newver = false;
15705
15706	if (tg3_nvram_read(tp, 0xc, &offset) ||
15707	    tg3_nvram_read(tp, 0x4, &start))
15708		return;
15709
15710	offset = tg3_nvram_logical_addr(tp, offset);
15711
15712	if (tg3_nvram_read(tp, offset, &val))
15713		return;
15714
15715	if ((val & 0xfc000000) == 0x0c000000) {
15716		if (tg3_nvram_read(tp, offset + 4, &val))
15717			return;
15718
15719		if (val == 0)
15720			newver = true;
15721	}
15722
15723	dst_off = strlen(tp->fw_ver);
15724
15725	if (newver) {
15726		if (TG3_VER_SIZE - dst_off < 16 ||
15727		    tg3_nvram_read(tp, offset + 8, &ver_offset))
15728			return;
15729
15730		offset = offset + ver_offset - start;
15731		for (i = 0; i < 16; i += 4) {
15732			__be32 v;
15733			if (tg3_nvram_read_be32(tp, offset + i, &v))
15734				return;
15735
15736			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15737		}
15738	} else {
15739		u32 major, minor;
15740
15741		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15742			return;
15743
15744		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15745			TG3_NVM_BCVER_MAJSFT;
15746		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15747		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15748			 "v%d.%02d", major, minor);
15749	}
15750}
15751
15752static void tg3_read_hwsb_ver(struct tg3 *tp)
15753{
15754	u32 val, major, minor;
15755
15756	/* Use native endian representation */
15757	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15758		return;
15759
15760	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15761		TG3_NVM_HWSB_CFG1_MAJSFT;
15762	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15763		TG3_NVM_HWSB_CFG1_MINSFT;
15764
15765	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15766}
15767
15768static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15769{
15770	u32 offset, major, minor, build;
15771
15772	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15773
15774	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15775		return;
15776
15777	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15778	case TG3_EEPROM_SB_REVISION_0:
15779		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15780		break;
15781	case TG3_EEPROM_SB_REVISION_2:
15782		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15783		break;
15784	case TG3_EEPROM_SB_REVISION_3:
15785		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15786		break;
15787	case TG3_EEPROM_SB_REVISION_4:
15788		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15789		break;
15790	case TG3_EEPROM_SB_REVISION_5:
15791		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15792		break;
15793	case TG3_EEPROM_SB_REVISION_6:
15794		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15795		break;
15796	default:
15797		return;
15798	}
15799
15800	if (tg3_nvram_read(tp, offset, &val))
15801		return;
15802
15803	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15804		TG3_EEPROM_SB_EDH_BLD_SHFT;
15805	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15806		TG3_EEPROM_SB_EDH_MAJ_SHFT;
15807	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15808
15809	if (minor > 99 || build > 26)
15810		return;
15811
15812	offset = strlen(tp->fw_ver);
15813	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15814		 " v%d.%02d", major, minor);
15815
15816	if (build > 0) {
15817		offset = strlen(tp->fw_ver);
15818		if (offset < TG3_VER_SIZE - 1)
15819			tp->fw_ver[offset] = 'a' + build - 1;
15820	}
15821}
15822
15823static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15824{
15825	u32 val, offset, start;
15826	int i, vlen;
15827
15828	for (offset = TG3_NVM_DIR_START;
15829	     offset < TG3_NVM_DIR_END;
15830	     offset += TG3_NVM_DIRENT_SIZE) {
15831		if (tg3_nvram_read(tp, offset, &val))
15832			return;
15833
15834		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15835			break;
15836	}
15837
15838	if (offset == TG3_NVM_DIR_END)
15839		return;
15840
15841	if (!tg3_flag(tp, 5705_PLUS))
15842		start = 0x08000000;
15843	else if (tg3_nvram_read(tp, offset - 4, &start))
15844		return;
15845
15846	if (tg3_nvram_read(tp, offset + 4, &offset) ||
15847	    !tg3_fw_img_is_valid(tp, offset) ||
15848	    tg3_nvram_read(tp, offset + 8, &val))
15849		return;
15850
15851	offset += val - start;
15852
15853	vlen = strlen(tp->fw_ver);
15854
15855	tp->fw_ver[vlen++] = ',';
15856	tp->fw_ver[vlen++] = ' ';
15857
15858	for (i = 0; i < 4; i++) {
15859		__be32 v;
15860		if (tg3_nvram_read_be32(tp, offset, &v))
15861			return;
15862
15863		offset += sizeof(v);
15864
15865		if (vlen > TG3_VER_SIZE - sizeof(v)) {
15866			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15867			break;
15868		}
15869
15870		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15871		vlen += sizeof(v);
15872	}
15873}
15874
15875static void tg3_probe_ncsi(struct tg3 *tp)
15876{
15877	u32 apedata;
15878
15879	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15880	if (apedata != APE_SEG_SIG_MAGIC)
15881		return;
15882
15883	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15884	if (!(apedata & APE_FW_STATUS_READY))
15885		return;
15886
15887	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15888		tg3_flag_set(tp, APE_HAS_NCSI);
15889}
15890
15891static void tg3_read_dash_ver(struct tg3 *tp)
15892{
15893	int vlen;
15894	u32 apedata;
15895	char *fwtype;
15896
15897	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15898
15899	if (tg3_flag(tp, APE_HAS_NCSI))
15900		fwtype = "NCSI";
15901	else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15902		fwtype = "SMASH";
15903	else
15904		fwtype = "DASH";
15905
15906	vlen = strlen(tp->fw_ver);
15907
15908	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15909		 fwtype,
15910		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15911		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15912		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15913		 (apedata & APE_FW_VERSION_BLDMSK));
15914}
15915
15916static void tg3_read_otp_ver(struct tg3 *tp)
15917{
15918	u32 val, val2;
15919
15920	if (tg3_asic_rev(tp) != ASIC_REV_5762)
15921		return;
15922
15923	if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15924	    !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15925	    TG3_OTP_MAGIC0_VALID(val)) {
15926		u64 val64 = (u64) val << 32 | val2;
15927		u32 ver = 0;
15928		int i, vlen;
15929
15930		for (i = 0; i < 7; i++) {
15931			if ((val64 & 0xff) == 0)
15932				break;
15933			ver = val64 & 0xff;
15934			val64 >>= 8;
15935		}
15936		vlen = strlen(tp->fw_ver);
15937		snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15938	}
15939}
15940
15941static void tg3_read_fw_ver(struct tg3 *tp)
15942{
15943	u32 val;
15944	bool vpd_vers = false;
15945
15946	if (tp->fw_ver[0] != 0)
15947		vpd_vers = true;
15948
15949	if (tg3_flag(tp, NO_NVRAM)) {
15950		strcat(tp->fw_ver, "sb");
15951		tg3_read_otp_ver(tp);
15952		return;
15953	}
15954
15955	if (tg3_nvram_read(tp, 0, &val))
15956		return;
15957
15958	if (val == TG3_EEPROM_MAGIC)
15959		tg3_read_bc_ver(tp);
15960	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15961		tg3_read_sb_ver(tp, val);
15962	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15963		tg3_read_hwsb_ver(tp);
15964
15965	if (tg3_flag(tp, ENABLE_ASF)) {
15966		if (tg3_flag(tp, ENABLE_APE)) {
15967			tg3_probe_ncsi(tp);
15968			if (!vpd_vers)
15969				tg3_read_dash_ver(tp);
15970		} else if (!vpd_vers) {
15971			tg3_read_mgmtfw_ver(tp);
15972		}
15973	}
15974
15975	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15976}
15977
15978static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15979{
15980	if (tg3_flag(tp, LRG_PROD_RING_CAP))
15981		return TG3_RX_RET_MAX_SIZE_5717;
15982	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15983		return TG3_RX_RET_MAX_SIZE_5700;
15984	else
15985		return TG3_RX_RET_MAX_SIZE_5705;
15986}
15987
15988static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15989	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15990	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15991	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15992	{ },
15993};
15994
15995static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15996{
15997	struct pci_dev *peer;
15998	unsigned int func, devnr = tp->pdev->devfn & ~7;
15999
16000	for (func = 0; func < 8; func++) {
16001		peer = pci_get_slot(tp->pdev->bus, devnr | func);
16002		if (peer && peer != tp->pdev)
16003			break;
16004		pci_dev_put(peer);
16005	}
16006	/* 5704 can be configured in single-port mode, set peer to
16007	 * tp->pdev in that case.
16008	 */
16009	if (!peer) {
16010		peer = tp->pdev;
16011		return peer;
16012	}
16013
16014	/*
16015	 * We don't need to keep the refcount elevated; there's no way
16016	 * to remove one half of this device without removing the other
16017	 */
16018	pci_dev_put(peer);
16019
16020	return peer;
16021}
16022
16023static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
16024{
16025	tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
16026	if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
16027		u32 reg;
16028
16029		/* All devices that use the alternate
16030		 * ASIC REV location have a CPMU.
16031		 */
16032		tg3_flag_set(tp, CPMU_PRESENT);
16033
16034		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
16035		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
16036		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
16037		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
16038		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
16039		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
16040		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
16041		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
16042		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
16043		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
16044		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787)
16045			reg = TG3PCI_GEN2_PRODID_ASICREV;
16046		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
16047			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
16048			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
16049			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
16050			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
16051			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
16052			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
16053			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
16054			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
16055			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
16056			reg = TG3PCI_GEN15_PRODID_ASICREV;
16057		else
16058			reg = TG3PCI_PRODID_ASICREV;
16059
16060		pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
16061	}
16062
16063	/* Wrong chip ID in 5752 A0. This code can be removed later
16064	 * as A0 is not in production.
16065	 */
16066	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
16067		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
16068
16069	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
16070		tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
16071
16072	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16073	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16074	    tg3_asic_rev(tp) == ASIC_REV_5720)
16075		tg3_flag_set(tp, 5717_PLUS);
16076
16077	if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
16078	    tg3_asic_rev(tp) == ASIC_REV_57766)
16079		tg3_flag_set(tp, 57765_CLASS);
16080
16081	if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
16082	     tg3_asic_rev(tp) == ASIC_REV_5762)
16083		tg3_flag_set(tp, 57765_PLUS);
16084
16085	/* Intentionally exclude ASIC_REV_5906 */
16086	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16087	    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16088	    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16089	    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16090	    tg3_asic_rev(tp) == ASIC_REV_5785 ||
16091	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16092	    tg3_flag(tp, 57765_PLUS))
16093		tg3_flag_set(tp, 5755_PLUS);
16094
16095	if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
16096	    tg3_asic_rev(tp) == ASIC_REV_5714)
16097		tg3_flag_set(tp, 5780_CLASS);
16098
16099	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16100	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16101	    tg3_asic_rev(tp) == ASIC_REV_5906 ||
16102	    tg3_flag(tp, 5755_PLUS) ||
16103	    tg3_flag(tp, 5780_CLASS))
16104		tg3_flag_set(tp, 5750_PLUS);
16105
16106	if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16107	    tg3_flag(tp, 5750_PLUS))
16108		tg3_flag_set(tp, 5705_PLUS);
16109}
16110
16111static bool tg3_10_100_only_device(struct tg3 *tp,
16112				   const struct pci_device_id *ent)
16113{
16114	u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
16115
16116	if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
16117	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
16118	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
16119		return true;
16120
16121	if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
16122		if (tg3_asic_rev(tp) == ASIC_REV_5705) {
16123			if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
16124				return true;
16125		} else {
16126			return true;
16127		}
16128	}
16129
16130	return false;
16131}
16132
16133static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
16134{
16135	u32 misc_ctrl_reg;
16136	u32 pci_state_reg, grc_misc_cfg;
16137	u32 val;
16138	u16 pci_cmd;
16139	int err;
16140
16141	/* Force memory write invalidate off.  If we leave it on,
16142	 * then on 5700_BX chips we have to enable a workaround.
16143	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
16144	 * to match the cacheline size.  The Broadcom driver have this
16145	 * workaround but turns MWI off all the times so never uses
16146	 * it.  This seems to suggest that the workaround is insufficient.
16147	 */
16148	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16149	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
16150	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16151
16152	/* Important! -- Make sure register accesses are byteswapped
16153	 * correctly.  Also, for those chips that require it, make
16154	 * sure that indirect register accesses are enabled before
16155	 * the first operation.
16156	 */
16157	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16158			      &misc_ctrl_reg);
16159	tp->misc_host_ctrl |= (misc_ctrl_reg &
16160			       MISC_HOST_CTRL_CHIPREV);
16161	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16162			       tp->misc_host_ctrl);
16163
16164	tg3_detect_asic_rev(tp, misc_ctrl_reg);
16165
16166	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
16167	 * we need to disable memory and use config. cycles
16168	 * only to access all registers. The 5702/03 chips
16169	 * can mistakenly decode the special cycles from the
16170	 * ICH chipsets as memory write cycles, causing corruption
16171	 * of register and memory space. Only certain ICH bridges
16172	 * will drive special cycles with non-zero data during the
16173	 * address phase which can fall within the 5703's address
16174	 * range. This is not an ICH bug as the PCI spec allows
16175	 * non-zero address during special cycles. However, only
16176	 * these ICH bridges are known to drive non-zero addresses
16177	 * during special cycles.
16178	 *
16179	 * Since special cycles do not cross PCI bridges, we only
16180	 * enable this workaround if the 5703 is on the secondary
16181	 * bus of these ICH bridges.
16182	 */
16183	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
16184	    (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
16185		static struct tg3_dev_id {
16186			u32	vendor;
16187			u32	device;
16188			u32	rev;
16189		} ich_chipsets[] = {
16190			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
16191			  PCI_ANY_ID },
16192			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
16193			  PCI_ANY_ID },
16194			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
16195			  0xa },
16196			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
16197			  PCI_ANY_ID },
16198			{ },
16199		};
16200		struct tg3_dev_id *pci_id = &ich_chipsets[0];
16201		struct pci_dev *bridge = NULL;
16202
16203		while (pci_id->vendor != 0) {
16204			bridge = pci_get_device(pci_id->vendor, pci_id->device,
16205						bridge);
16206			if (!bridge) {
16207				pci_id++;
16208				continue;
16209			}
16210			if (pci_id->rev != PCI_ANY_ID) {
16211				if (bridge->revision > pci_id->rev)
16212					continue;
16213			}
16214			if (bridge->subordinate &&
16215			    (bridge->subordinate->number ==
16216			     tp->pdev->bus->number)) {
16217				tg3_flag_set(tp, ICH_WORKAROUND);
16218				pci_dev_put(bridge);
16219				break;
16220			}
16221		}
16222	}
16223
16224	if (tg3_asic_rev(tp) == ASIC_REV_5701) {
16225		static struct tg3_dev_id {
16226			u32	vendor;
16227			u32	device;
16228		} bridge_chipsets[] = {
16229			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
16230			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
16231			{ },
16232		};
16233		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
16234		struct pci_dev *bridge = NULL;
16235
16236		while (pci_id->vendor != 0) {
16237			bridge = pci_get_device(pci_id->vendor,
16238						pci_id->device,
16239						bridge);
16240			if (!bridge) {
16241				pci_id++;
16242				continue;
16243			}
16244			if (bridge->subordinate &&
16245			    (bridge->subordinate->number <=
16246			     tp->pdev->bus->number) &&
16247			    (bridge->subordinate->busn_res.end >=
16248			     tp->pdev->bus->number)) {
16249				tg3_flag_set(tp, 5701_DMA_BUG);
16250				pci_dev_put(bridge);
16251				break;
16252			}
16253		}
16254	}
16255
16256	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
16257	 * DMA addresses > 40-bit. This bridge may have other additional
16258	 * 57xx devices behind it in some 4-port NIC designs for example.
16259	 * Any tg3 device found behind the bridge will also need the 40-bit
16260	 * DMA workaround.
16261	 */
16262	if (tg3_flag(tp, 5780_CLASS)) {
16263		tg3_flag_set(tp, 40BIT_DMA_BUG);
16264		tp->msi_cap = tp->pdev->msi_cap;
16265	} else {
16266		struct pci_dev *bridge = NULL;
16267
16268		do {
16269			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
16270						PCI_DEVICE_ID_SERVERWORKS_EPB,
16271						bridge);
16272			if (bridge && bridge->subordinate &&
16273			    (bridge->subordinate->number <=
16274			     tp->pdev->bus->number) &&
16275			    (bridge->subordinate->busn_res.end >=
16276			     tp->pdev->bus->number)) {
16277				tg3_flag_set(tp, 40BIT_DMA_BUG);
16278				pci_dev_put(bridge);
16279				break;
16280			}
16281		} while (bridge);
16282	}
16283
16284	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16285	    tg3_asic_rev(tp) == ASIC_REV_5714)
16286		tp->pdev_peer = tg3_find_peer(tp);
16287
16288	/* Determine TSO capabilities */
16289	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
16290		; /* Do nothing. HW bug. */
16291	else if (tg3_flag(tp, 57765_PLUS))
16292		tg3_flag_set(tp, HW_TSO_3);
16293	else if (tg3_flag(tp, 5755_PLUS) ||
16294		 tg3_asic_rev(tp) == ASIC_REV_5906)
16295		tg3_flag_set(tp, HW_TSO_2);
16296	else if (tg3_flag(tp, 5750_PLUS)) {
16297		tg3_flag_set(tp, HW_TSO_1);
16298		tg3_flag_set(tp, TSO_BUG);
16299		if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
16300		    tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
16301			tg3_flag_clear(tp, TSO_BUG);
16302	} else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16303		   tg3_asic_rev(tp) != ASIC_REV_5701 &&
16304		   tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
16305		tg3_flag_set(tp, FW_TSO);
16306		tg3_flag_set(tp, TSO_BUG);
16307		if (tg3_asic_rev(tp) == ASIC_REV_5705)
16308			tp->fw_needed = FIRMWARE_TG3TSO5;
16309		else
16310			tp->fw_needed = FIRMWARE_TG3TSO;
16311	}
16312
16313	/* Selectively allow TSO based on operating conditions */
16314	if (tg3_flag(tp, HW_TSO_1) ||
16315	    tg3_flag(tp, HW_TSO_2) ||
16316	    tg3_flag(tp, HW_TSO_3) ||
16317	    tg3_flag(tp, FW_TSO)) {
16318		/* For firmware TSO, assume ASF is disabled.
16319		 * We'll disable TSO later if we discover ASF
16320		 * is enabled in tg3_get_eeprom_hw_cfg().
16321		 */
16322		tg3_flag_set(tp, TSO_CAPABLE);
16323	} else {
16324		tg3_flag_clear(tp, TSO_CAPABLE);
16325		tg3_flag_clear(tp, TSO_BUG);
16326		tp->fw_needed = NULL;
16327	}
16328
16329	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
16330		tp->fw_needed = FIRMWARE_TG3;
16331
16332	if (tg3_asic_rev(tp) == ASIC_REV_57766)
16333		tp->fw_needed = FIRMWARE_TG357766;
16334
16335	tp->irq_max = 1;
16336
16337	if (tg3_flag(tp, 5750_PLUS)) {
16338		tg3_flag_set(tp, SUPPORT_MSI);
16339		if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
16340		    tg3_chip_rev(tp) == CHIPREV_5750_BX ||
16341		    (tg3_asic_rev(tp) == ASIC_REV_5714 &&
16342		     tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
16343		     tp->pdev_peer == tp->pdev))
16344			tg3_flag_clear(tp, SUPPORT_MSI);
16345
16346		if (tg3_flag(tp, 5755_PLUS) ||
16347		    tg3_asic_rev(tp) == ASIC_REV_5906) {
16348			tg3_flag_set(tp, 1SHOT_MSI);
16349		}
16350
16351		if (tg3_flag(tp, 57765_PLUS)) {
16352			tg3_flag_set(tp, SUPPORT_MSIX);
16353			tp->irq_max = TG3_IRQ_MAX_VECS;
16354		}
16355	}
16356
16357	tp->txq_max = 1;
16358	tp->rxq_max = 1;
16359	if (tp->irq_max > 1) {
16360		tp->rxq_max = TG3_RSS_MAX_NUM_QS;
16361		tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
16362
16363		if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
16364		    tg3_asic_rev(tp) == ASIC_REV_5720)
16365			tp->txq_max = tp->irq_max - 1;
16366	}
16367
16368	if (tg3_flag(tp, 5755_PLUS) ||
16369	    tg3_asic_rev(tp) == ASIC_REV_5906)
16370		tg3_flag_set(tp, SHORT_DMA_BUG);
16371
16372	if (tg3_asic_rev(tp) == ASIC_REV_5719)
16373		tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
16374
16375	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16376	    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16377	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
16378	    tg3_asic_rev(tp) == ASIC_REV_5762)
16379		tg3_flag_set(tp, LRG_PROD_RING_CAP);
16380
16381	if (tg3_flag(tp, 57765_PLUS) &&
16382	    tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
16383		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
16384
16385	if (!tg3_flag(tp, 5705_PLUS) ||
16386	    tg3_flag(tp, 5780_CLASS) ||
16387	    tg3_flag(tp, USE_JUMBO_BDFLAG))
16388		tg3_flag_set(tp, JUMBO_CAPABLE);
16389
16390	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16391			      &pci_state_reg);
16392
16393	if (pci_is_pcie(tp->pdev)) {
16394		u16 lnkctl;
16395
16396		tg3_flag_set(tp, PCI_EXPRESS);
16397
16398		pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
16399		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
16400			if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16401				tg3_flag_clear(tp, HW_TSO_2);
16402				tg3_flag_clear(tp, TSO_CAPABLE);
16403			}
16404			if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
16405			    tg3_asic_rev(tp) == ASIC_REV_5761 ||
16406			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
16407			    tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
16408				tg3_flag_set(tp, CLKREQ_BUG);
16409		} else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
16410			tg3_flag_set(tp, L1PLLPD_EN);
16411		}
16412	} else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
16413		/* BCM5785 devices are effectively PCIe devices, and should
16414		 * follow PCIe codepaths, but do not have a PCIe capabilities
16415		 * section.
16416		 */
16417		tg3_flag_set(tp, PCI_EXPRESS);
16418	} else if (!tg3_flag(tp, 5705_PLUS) ||
16419		   tg3_flag(tp, 5780_CLASS)) {
16420		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
16421		if (!tp->pcix_cap) {
16422			dev_err(&tp->pdev->dev,
16423				"Cannot find PCI-X capability, aborting\n");
16424			return -EIO;
16425		}
16426
16427		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
16428			tg3_flag_set(tp, PCIX_MODE);
16429	}
16430
16431	/* If we have an AMD 762 or VIA K8T800 chipset, write
16432	 * reordering to the mailbox registers done by the host
16433	 * controller can cause major troubles.  We read back from
16434	 * every mailbox register write to force the writes to be
16435	 * posted to the chip in order.
16436	 */
16437	if (pci_dev_present(tg3_write_reorder_chipsets) &&
16438	    !tg3_flag(tp, PCI_EXPRESS))
16439		tg3_flag_set(tp, MBOX_WRITE_REORDER);
16440
16441	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
16442			     &tp->pci_cacheline_sz);
16443	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16444			     &tp->pci_lat_timer);
16445	if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
16446	    tp->pci_lat_timer < 64) {
16447		tp->pci_lat_timer = 64;
16448		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16449				      tp->pci_lat_timer);
16450	}
16451
16452	/* Important! -- It is critical that the PCI-X hw workaround
16453	 * situation is decided before the first MMIO register access.
16454	 */
16455	if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16456		/* 5700 BX chips need to have their TX producer index
16457		 * mailboxes written twice to workaround a bug.
16458		 */
16459		tg3_flag_set(tp, TXD_MBOX_HWBUG);
16460
16461		/* If we are in PCI-X mode, enable register write workaround.
16462		 *
16463		 * The workaround is to use indirect register accesses
16464		 * for all chip writes not to mailbox registers.
16465		 */
16466		if (tg3_flag(tp, PCIX_MODE)) {
16467			u32 pm_reg;
16468
16469			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16470
16471			/* The chip can have it's power management PCI config
16472			 * space registers clobbered due to this bug.
16473			 * So explicitly force the chip into D0 here.
16474			 */
16475			pci_read_config_dword(tp->pdev,
16476					      tp->pdev->pm_cap + PCI_PM_CTRL,
16477					      &pm_reg);
16478			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16479			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16480			pci_write_config_dword(tp->pdev,
16481					       tp->pdev->pm_cap + PCI_PM_CTRL,
16482					       pm_reg);
16483
16484			/* Also, force SERR#/PERR# in PCI command. */
16485			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16486			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16487			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16488		}
16489	}
16490
16491	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16492		tg3_flag_set(tp, PCI_HIGH_SPEED);
16493	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16494		tg3_flag_set(tp, PCI_32BIT);
16495
16496	/* Chip-specific fixup from Broadcom driver */
16497	if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16498	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16499		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16500		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16501	}
16502
16503	/* Default fast path register access methods */
16504	tp->read32 = tg3_read32;
16505	tp->write32 = tg3_write32;
16506	tp->read32_mbox = tg3_read32;
16507	tp->write32_mbox = tg3_write32;
16508	tp->write32_tx_mbox = tg3_write32;
16509	tp->write32_rx_mbox = tg3_write32;
16510
16511	/* Various workaround register access methods */
16512	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16513		tp->write32 = tg3_write_indirect_reg32;
16514	else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16515		 (tg3_flag(tp, PCI_EXPRESS) &&
16516		  tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16517		/*
16518		 * Back to back register writes can cause problems on these
16519		 * chips, the workaround is to read back all reg writes
16520		 * except those to mailbox regs.
16521		 *
16522		 * See tg3_write_indirect_reg32().
16523		 */
16524		tp->write32 = tg3_write_flush_reg32;
16525	}
16526
16527	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16528		tp->write32_tx_mbox = tg3_write32_tx_mbox;
16529		if (tg3_flag(tp, MBOX_WRITE_REORDER))
16530			tp->write32_rx_mbox = tg3_write_flush_reg32;
16531	}
16532
16533	if (tg3_flag(tp, ICH_WORKAROUND)) {
16534		tp->read32 = tg3_read_indirect_reg32;
16535		tp->write32 = tg3_write_indirect_reg32;
16536		tp->read32_mbox = tg3_read_indirect_mbox;
16537		tp->write32_mbox = tg3_write_indirect_mbox;
16538		tp->write32_tx_mbox = tg3_write_indirect_mbox;
16539		tp->write32_rx_mbox = tg3_write_indirect_mbox;
16540
16541		iounmap(tp->regs);
16542		tp->regs = NULL;
16543
16544		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16545		pci_cmd &= ~PCI_COMMAND_MEMORY;
16546		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16547	}
16548	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16549		tp->read32_mbox = tg3_read32_mbox_5906;
16550		tp->write32_mbox = tg3_write32_mbox_5906;
16551		tp->write32_tx_mbox = tg3_write32_mbox_5906;
16552		tp->write32_rx_mbox = tg3_write32_mbox_5906;
16553	}
16554
16555	if (tp->write32 == tg3_write_indirect_reg32 ||
16556	    (tg3_flag(tp, PCIX_MODE) &&
16557	     (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16558	      tg3_asic_rev(tp) == ASIC_REV_5701)))
16559		tg3_flag_set(tp, SRAM_USE_CONFIG);
16560
16561	/* The memory arbiter has to be enabled in order for SRAM accesses
16562	 * to succeed.  Normally on powerup the tg3 chip firmware will make
16563	 * sure it is enabled, but other entities such as system netboot
16564	 * code might disable it.
16565	 */
16566	val = tr32(MEMARB_MODE);
16567	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16568
16569	tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16570	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16571	    tg3_flag(tp, 5780_CLASS)) {
16572		if (tg3_flag(tp, PCIX_MODE)) {
16573			pci_read_config_dword(tp->pdev,
16574					      tp->pcix_cap + PCI_X_STATUS,
16575					      &val);
16576			tp->pci_fn = val & 0x7;
16577		}
16578	} else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16579		   tg3_asic_rev(tp) == ASIC_REV_5719 ||
16580		   tg3_asic_rev(tp) == ASIC_REV_5720) {
16581		tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16582		if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16583			val = tr32(TG3_CPMU_STATUS);
16584
16585		if (tg3_asic_rev(tp) == ASIC_REV_5717)
16586			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16587		else
16588			tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16589				     TG3_CPMU_STATUS_FSHFT_5719;
16590	}
16591
16592	if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16593		tp->write32_tx_mbox = tg3_write_flush_reg32;
16594		tp->write32_rx_mbox = tg3_write_flush_reg32;
16595	}
16596
16597	/* Get eeprom hw config before calling tg3_set_power_state().
16598	 * In particular, the TG3_FLAG_IS_NIC flag must be
16599	 * determined before calling tg3_set_power_state() so that
16600	 * we know whether or not to switch out of Vaux power.
16601	 * When the flag is set, it means that GPIO1 is used for eeprom
16602	 * write protect and also implies that it is a LOM where GPIOs
16603	 * are not used to switch power.
16604	 */
16605	tg3_get_eeprom_hw_cfg(tp);
16606
16607	if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16608		tg3_flag_clear(tp, TSO_CAPABLE);
16609		tg3_flag_clear(tp, TSO_BUG);
16610		tp->fw_needed = NULL;
16611	}
16612
16613	if (tg3_flag(tp, ENABLE_APE)) {
16614		/* Allow reads and writes to the
16615		 * APE register and memory space.
16616		 */
16617		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16618				 PCISTATE_ALLOW_APE_SHMEM_WR |
16619				 PCISTATE_ALLOW_APE_PSPACE_WR;
16620		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16621				       pci_state_reg);
16622
16623		tg3_ape_lock_init(tp);
16624	}
16625
16626	/* Set up tp->grc_local_ctrl before calling
16627	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16628	 * will bring 5700's external PHY out of reset.
16629	 * It is also used as eeprom write protect on LOMs.
16630	 */
16631	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16632	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16633	    tg3_flag(tp, EEPROM_WRITE_PROT))
16634		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16635				       GRC_LCLCTRL_GPIO_OUTPUT1);
16636	/* Unused GPIO3 must be driven as output on 5752 because there
16637	 * are no pull-up resistors on unused GPIO pins.
16638	 */
16639	else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16640		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16641
16642	if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16643	    tg3_asic_rev(tp) == ASIC_REV_57780 ||
16644	    tg3_flag(tp, 57765_CLASS))
16645		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16646
16647	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16648	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16649		/* Turn off the debug UART. */
16650		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16651		if (tg3_flag(tp, IS_NIC))
16652			/* Keep VMain power. */
16653			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16654					      GRC_LCLCTRL_GPIO_OUTPUT0;
16655	}
16656
16657	if (tg3_asic_rev(tp) == ASIC_REV_5762)
16658		tp->grc_local_ctrl |=
16659			tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16660
16661	/* Switch out of Vaux if it is a NIC */
16662	tg3_pwrsrc_switch_to_vmain(tp);
16663
16664	/* Derive initial jumbo mode from MTU assigned in
16665	 * ether_setup() via the alloc_etherdev() call
16666	 */
16667	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16668		tg3_flag_set(tp, JUMBO_RING_ENABLE);
16669
16670	/* Determine WakeOnLan speed to use. */
16671	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16672	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16673	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16674	    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16675		tg3_flag_clear(tp, WOL_SPEED_100MB);
16676	} else {
16677		tg3_flag_set(tp, WOL_SPEED_100MB);
16678	}
16679
16680	if (tg3_asic_rev(tp) == ASIC_REV_5906)
16681		tp->phy_flags |= TG3_PHYFLG_IS_FET;
16682
16683	/* A few boards don't want Ethernet@WireSpeed phy feature */
16684	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16685	    (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16686	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16687	     (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16688	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16689	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16690		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16691
16692	if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16693	    tg3_chip_rev(tp) == CHIPREV_5704_AX)
16694		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16695	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16696		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16697
16698	if (tg3_flag(tp, 5705_PLUS) &&
16699	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16700	    tg3_asic_rev(tp) != ASIC_REV_5785 &&
16701	    tg3_asic_rev(tp) != ASIC_REV_57780 &&
16702	    !tg3_flag(tp, 57765_PLUS)) {
16703		if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16704		    tg3_asic_rev(tp) == ASIC_REV_5787 ||
16705		    tg3_asic_rev(tp) == ASIC_REV_5784 ||
16706		    tg3_asic_rev(tp) == ASIC_REV_5761) {
16707			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16708			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16709				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16710			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16711				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16712		} else
16713			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16714	}
16715
16716	if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16717	    tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16718		tp->phy_otp = tg3_read_otp_phycfg(tp);
16719		if (tp->phy_otp == 0)
16720			tp->phy_otp = TG3_OTP_DEFAULT;
16721	}
16722
16723	if (tg3_flag(tp, CPMU_PRESENT))
16724		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16725	else
16726		tp->mi_mode = MAC_MI_MODE_BASE;
16727
16728	tp->coalesce_mode = 0;
16729	if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16730	    tg3_chip_rev(tp) != CHIPREV_5700_BX)
16731		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16732
16733	/* Set these bits to enable statistics workaround. */
16734	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16735	    tg3_asic_rev(tp) == ASIC_REV_5762 ||
16736	    tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16737	    tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16738		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16739		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16740	}
16741
16742	if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16743	    tg3_asic_rev(tp) == ASIC_REV_57780)
16744		tg3_flag_set(tp, USE_PHYLIB);
16745
16746	err = tg3_mdio_init(tp);
16747	if (err)
16748		return err;
16749
16750	/* Initialize data/descriptor byte/word swapping. */
16751	val = tr32(GRC_MODE);
16752	if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16753	    tg3_asic_rev(tp) == ASIC_REV_5762)
16754		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16755			GRC_MODE_WORD_SWAP_B2HRX_DATA |
16756			GRC_MODE_B2HRX_ENABLE |
16757			GRC_MODE_HTX2B_ENABLE |
16758			GRC_MODE_HOST_STACKUP);
16759	else
16760		val &= GRC_MODE_HOST_STACKUP;
16761
16762	tw32(GRC_MODE, val | tp->grc_mode);
16763
16764	tg3_switch_clocks(tp);
16765
16766	/* Clear this out for sanity. */
16767	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16768
16769	/* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16770	tw32(TG3PCI_REG_BASE_ADDR, 0);
16771
16772	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16773			      &pci_state_reg);
16774	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16775	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16776		if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16777		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16778		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16779		    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16780			void __iomem *sram_base;
16781
16782			/* Write some dummy words into the SRAM status block
16783			 * area, see if it reads back correctly.  If the return
16784			 * value is bad, force enable the PCIX workaround.
16785			 */
16786			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16787
16788			writel(0x00000000, sram_base);
16789			writel(0x00000000, sram_base + 4);
16790			writel(0xffffffff, sram_base + 4);
16791			if (readl(sram_base) != 0x00000000)
16792				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16793		}
16794	}
16795
16796	udelay(50);
16797	tg3_nvram_init(tp);
16798
16799	/* If the device has an NVRAM, no need to load patch firmware */
16800	if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16801	    !tg3_flag(tp, NO_NVRAM))
16802		tp->fw_needed = NULL;
16803
16804	grc_misc_cfg = tr32(GRC_MISC_CFG);
16805	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16806
16807	if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16808	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16809	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16810		tg3_flag_set(tp, IS_5788);
16811
16812	if (!tg3_flag(tp, IS_5788) &&
16813	    tg3_asic_rev(tp) != ASIC_REV_5700)
16814		tg3_flag_set(tp, TAGGED_STATUS);
16815	if (tg3_flag(tp, TAGGED_STATUS)) {
16816		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16817				      HOSTCC_MODE_CLRTICK_TXBD);
16818
16819		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16820		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16821				       tp->misc_host_ctrl);
16822	}
16823
16824	/* Preserve the APE MAC_MODE bits */
16825	if (tg3_flag(tp, ENABLE_APE))
16826		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16827	else
16828		tp->mac_mode = 0;
16829
16830	if (tg3_10_100_only_device(tp, ent))
16831		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16832
16833	err = tg3_phy_probe(tp);
16834	if (err) {
16835		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16836		/* ... but do not return immediately ... */
16837		tg3_mdio_fini(tp);
16838	}
16839
16840	tg3_read_vpd(tp);
16841	tg3_read_fw_ver(tp);
16842
16843	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16844		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16845	} else {
16846		if (tg3_asic_rev(tp) == ASIC_REV_5700)
16847			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16848		else
16849			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16850	}
16851
16852	/* 5700 {AX,BX} chips have a broken status block link
16853	 * change bit implementation, so we must use the
16854	 * status register in those cases.
16855	 */
16856	if (tg3_asic_rev(tp) == ASIC_REV_5700)
16857		tg3_flag_set(tp, USE_LINKCHG_REG);
16858	else
16859		tg3_flag_clear(tp, USE_LINKCHG_REG);
16860
16861	/* The led_ctrl is set during tg3_phy_probe, here we might
16862	 * have to force the link status polling mechanism based
16863	 * upon subsystem IDs.
16864	 */
16865	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16866	    tg3_asic_rev(tp) == ASIC_REV_5701 &&
16867	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16868		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16869		tg3_flag_set(tp, USE_LINKCHG_REG);
16870	}
16871
16872	/* For all SERDES we poll the MAC status register. */
16873	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16874		tg3_flag_set(tp, POLL_SERDES);
16875	else
16876		tg3_flag_clear(tp, POLL_SERDES);
16877
16878	if (tg3_flag(tp, ENABLE_APE) && tg3_flag(tp, ENABLE_ASF))
16879		tg3_flag_set(tp, POLL_CPMU_LINK);
16880
16881	tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16882	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16883	if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16884	    tg3_flag(tp, PCIX_MODE)) {
16885		tp->rx_offset = NET_SKB_PAD;
16886#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16887		tp->rx_copy_thresh = ~(u16)0;
16888#endif
16889	}
16890
16891	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16892	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16893	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16894
16895	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16896
16897	/* Increment the rx prod index on the rx std ring by at most
16898	 * 8 for these chips to workaround hw errata.
16899	 */
16900	if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16901	    tg3_asic_rev(tp) == ASIC_REV_5752 ||
16902	    tg3_asic_rev(tp) == ASIC_REV_5755)
16903		tp->rx_std_max_post = 8;
16904
16905	if (tg3_flag(tp, ASPM_WORKAROUND))
16906		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16907				     PCIE_PWR_MGMT_L1_THRESH_MSK;
16908
16909	return err;
16910}
16911
16912#ifdef CONFIG_SPARC
16913static int tg3_get_macaddr_sparc(struct tg3 *tp)
16914{
16915	struct net_device *dev = tp->dev;
16916	struct pci_dev *pdev = tp->pdev;
16917	struct device_node *dp = pci_device_to_OF_node(pdev);
16918	const unsigned char *addr;
16919	int len;
16920
16921	addr = of_get_property(dp, "local-mac-address", &len);
16922	if (addr && len == ETH_ALEN) {
16923		memcpy(dev->dev_addr, addr, ETH_ALEN);
16924		return 0;
16925	}
16926	return -ENODEV;
16927}
16928
16929static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16930{
16931	struct net_device *dev = tp->dev;
16932
16933	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
16934	return 0;
16935}
16936#endif
16937
16938static int tg3_get_device_address(struct tg3 *tp)
16939{
16940	struct net_device *dev = tp->dev;
16941	u32 hi, lo, mac_offset;
16942	int addr_ok = 0;
16943	int err;
16944
16945#ifdef CONFIG_SPARC
16946	if (!tg3_get_macaddr_sparc(tp))
16947		return 0;
16948#endif
16949
16950	if (tg3_flag(tp, IS_SSB_CORE)) {
16951		err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16952		if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16953			return 0;
16954	}
16955
16956	mac_offset = 0x7c;
16957	if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16958	    tg3_flag(tp, 5780_CLASS)) {
16959		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16960			mac_offset = 0xcc;
16961		if (tg3_nvram_lock(tp))
16962			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16963		else
16964			tg3_nvram_unlock(tp);
16965	} else if (tg3_flag(tp, 5717_PLUS)) {
16966		if (tp->pci_fn & 1)
16967			mac_offset = 0xcc;
16968		if (tp->pci_fn > 1)
16969			mac_offset += 0x18c;
16970	} else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16971		mac_offset = 0x10;
16972
16973	/* First try to get it from MAC address mailbox. */
16974	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16975	if ((hi >> 16) == 0x484b) {
16976		dev->dev_addr[0] = (hi >>  8) & 0xff;
16977		dev->dev_addr[1] = (hi >>  0) & 0xff;
16978
16979		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16980		dev->dev_addr[2] = (lo >> 24) & 0xff;
16981		dev->dev_addr[3] = (lo >> 16) & 0xff;
16982		dev->dev_addr[4] = (lo >>  8) & 0xff;
16983		dev->dev_addr[5] = (lo >>  0) & 0xff;
16984
16985		/* Some old bootcode may report a 0 MAC address in SRAM */
16986		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16987	}
16988	if (!addr_ok) {
16989		/* Next, try NVRAM. */
16990		if (!tg3_flag(tp, NO_NVRAM) &&
16991		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16992		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16993			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16994			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16995		}
16996		/* Finally just fetch it out of the MAC control regs. */
16997		else {
16998			hi = tr32(MAC_ADDR_0_HIGH);
16999			lo = tr32(MAC_ADDR_0_LOW);
17000
17001			dev->dev_addr[5] = lo & 0xff;
17002			dev->dev_addr[4] = (lo >> 8) & 0xff;
17003			dev->dev_addr[3] = (lo >> 16) & 0xff;
17004			dev->dev_addr[2] = (lo >> 24) & 0xff;
17005			dev->dev_addr[1] = hi & 0xff;
17006			dev->dev_addr[0] = (hi >> 8) & 0xff;
17007		}
17008	}
17009
17010	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
17011#ifdef CONFIG_SPARC
17012		if (!tg3_get_default_macaddr_sparc(tp))
17013			return 0;
17014#endif
17015		return -EINVAL;
17016	}
17017	return 0;
17018}
17019
17020#define BOUNDARY_SINGLE_CACHELINE	1
17021#define BOUNDARY_MULTI_CACHELINE	2
17022
17023static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
17024{
17025	int cacheline_size;
17026	u8 byte;
17027	int goal;
17028
17029	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
17030	if (byte == 0)
17031		cacheline_size = 1024;
17032	else
17033		cacheline_size = (int) byte * 4;
17034
17035	/* On 5703 and later chips, the boundary bits have no
17036	 * effect.
17037	 */
17038	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17039	    tg3_asic_rev(tp) != ASIC_REV_5701 &&
17040	    !tg3_flag(tp, PCI_EXPRESS))
17041		goto out;
17042
17043#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
17044	goal = BOUNDARY_MULTI_CACHELINE;
17045#else
17046#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
17047	goal = BOUNDARY_SINGLE_CACHELINE;
17048#else
17049	goal = 0;
17050#endif
17051#endif
17052
17053	if (tg3_flag(tp, 57765_PLUS)) {
17054		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
17055		goto out;
17056	}
17057
17058	if (!goal)
17059		goto out;
17060
17061	/* PCI controllers on most RISC systems tend to disconnect
17062	 * when a device tries to burst across a cache-line boundary.
17063	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
17064	 *
17065	 * Unfortunately, for PCI-E there are only limited
17066	 * write-side controls for this, and thus for reads
17067	 * we will still get the disconnects.  We'll also waste
17068	 * these PCI cycles for both read and write for chips
17069	 * other than 5700 and 5701 which do not implement the
17070	 * boundary bits.
17071	 */
17072	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
17073		switch (cacheline_size) {
17074		case 16:
17075		case 32:
17076		case 64:
17077		case 128:
17078			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17079				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
17080					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
17081			} else {
17082				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17083					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17084			}
17085			break;
17086
17087		case 256:
17088			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
17089				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
17090			break;
17091
17092		default:
17093			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
17094				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
17095			break;
17096		}
17097	} else if (tg3_flag(tp, PCI_EXPRESS)) {
17098		switch (cacheline_size) {
17099		case 16:
17100		case 32:
17101		case 64:
17102			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17103				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17104				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
17105				break;
17106			}
17107			/* fallthrough */
17108		case 128:
17109		default:
17110			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
17111			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
17112			break;
17113		}
17114	} else {
17115		switch (cacheline_size) {
17116		case 16:
17117			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17118				val |= (DMA_RWCTRL_READ_BNDRY_16 |
17119					DMA_RWCTRL_WRITE_BNDRY_16);
17120				break;
17121			}
17122			/* fallthrough */
17123		case 32:
17124			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17125				val |= (DMA_RWCTRL_READ_BNDRY_32 |
17126					DMA_RWCTRL_WRITE_BNDRY_32);
17127				break;
17128			}
17129			/* fallthrough */
17130		case 64:
17131			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17132				val |= (DMA_RWCTRL_READ_BNDRY_64 |
17133					DMA_RWCTRL_WRITE_BNDRY_64);
17134				break;
17135			}
17136			/* fallthrough */
17137		case 128:
17138			if (goal == BOUNDARY_SINGLE_CACHELINE) {
17139				val |= (DMA_RWCTRL_READ_BNDRY_128 |
17140					DMA_RWCTRL_WRITE_BNDRY_128);
17141				break;
17142			}
17143			/* fallthrough */
17144		case 256:
17145			val |= (DMA_RWCTRL_READ_BNDRY_256 |
17146				DMA_RWCTRL_WRITE_BNDRY_256);
17147			break;
17148		case 512:
17149			val |= (DMA_RWCTRL_READ_BNDRY_512 |
17150				DMA_RWCTRL_WRITE_BNDRY_512);
17151			break;
17152		case 1024:
17153		default:
17154			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
17155				DMA_RWCTRL_WRITE_BNDRY_1024);
17156			break;
17157		}
17158	}
17159
17160out:
17161	return val;
17162}
17163
17164static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
17165			   int size, bool to_device)
17166{
17167	struct tg3_internal_buffer_desc test_desc;
17168	u32 sram_dma_descs;
17169	int i, ret;
17170
17171	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
17172
17173	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
17174	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
17175	tw32(RDMAC_STATUS, 0);
17176	tw32(WDMAC_STATUS, 0);
17177
17178	tw32(BUFMGR_MODE, 0);
17179	tw32(FTQ_RESET, 0);
17180
17181	test_desc.addr_hi = ((u64) buf_dma) >> 32;
17182	test_desc.addr_lo = buf_dma & 0xffffffff;
17183	test_desc.nic_mbuf = 0x00002100;
17184	test_desc.len = size;
17185
17186	/*
17187	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
17188	 * the *second* time the tg3 driver was getting loaded after an
17189	 * initial scan.
17190	 *
17191	 * Broadcom tells me:
17192	 *   ...the DMA engine is connected to the GRC block and a DMA
17193	 *   reset may affect the GRC block in some unpredictable way...
17194	 *   The behavior of resets to individual blocks has not been tested.
17195	 *
17196	 * Broadcom noted the GRC reset will also reset all sub-components.
17197	 */
17198	if (to_device) {
17199		test_desc.cqid_sqid = (13 << 8) | 2;
17200
17201		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
17202		udelay(40);
17203	} else {
17204		test_desc.cqid_sqid = (16 << 8) | 7;
17205
17206		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
17207		udelay(40);
17208	}
17209	test_desc.flags = 0x00000005;
17210
17211	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
17212		u32 val;
17213
17214		val = *(((u32 *)&test_desc) + i);
17215		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
17216				       sram_dma_descs + (i * sizeof(u32)));
17217		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
17218	}
17219	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
17220
17221	if (to_device)
17222		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
17223	else
17224		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
17225
17226	ret = -ENODEV;
17227	for (i = 0; i < 40; i++) {
17228		u32 val;
17229
17230		if (to_device)
17231			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
17232		else
17233			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
17234		if ((val & 0xffff) == sram_dma_descs) {
17235			ret = 0;
17236			break;
17237		}
17238
17239		udelay(100);
17240	}
17241
17242	return ret;
17243}
17244
17245#define TEST_BUFFER_SIZE	0x2000
17246
17247static const struct pci_device_id tg3_dma_wait_state_chipsets[] = {
17248	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
17249	{ },
17250};
17251
17252static int tg3_test_dma(struct tg3 *tp)
17253{
17254	dma_addr_t buf_dma;
17255	u32 *buf, saved_dma_rwctrl;
17256	int ret = 0;
17257
17258	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
17259				 &buf_dma, GFP_KERNEL);
17260	if (!buf) {
17261		ret = -ENOMEM;
17262		goto out_nofree;
17263	}
17264
17265	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
17266			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
17267
17268	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
17269
17270	if (tg3_flag(tp, 57765_PLUS))
17271		goto out;
17272
17273	if (tg3_flag(tp, PCI_EXPRESS)) {
17274		/* DMA read watermark not used on PCIE */
17275		tp->dma_rwctrl |= 0x00180000;
17276	} else if (!tg3_flag(tp, PCIX_MODE)) {
17277		if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
17278		    tg3_asic_rev(tp) == ASIC_REV_5750)
17279			tp->dma_rwctrl |= 0x003f0000;
17280		else
17281			tp->dma_rwctrl |= 0x003f000f;
17282	} else {
17283		if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17284		    tg3_asic_rev(tp) == ASIC_REV_5704) {
17285			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
17286			u32 read_water = 0x7;
17287
17288			/* If the 5704 is behind the EPB bridge, we can
17289			 * do the less restrictive ONE_DMA workaround for
17290			 * better performance.
17291			 */
17292			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
17293			    tg3_asic_rev(tp) == ASIC_REV_5704)
17294				tp->dma_rwctrl |= 0x8000;
17295			else if (ccval == 0x6 || ccval == 0x7)
17296				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17297
17298			if (tg3_asic_rev(tp) == ASIC_REV_5703)
17299				read_water = 4;
17300			/* Set bit 23 to enable PCIX hw bug fix */
17301			tp->dma_rwctrl |=
17302				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
17303				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
17304				(1 << 23);
17305		} else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
17306			/* 5780 always in PCIX mode */
17307			tp->dma_rwctrl |= 0x00144000;
17308		} else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
17309			/* 5714 always in PCIX mode */
17310			tp->dma_rwctrl |= 0x00148000;
17311		} else {
17312			tp->dma_rwctrl |= 0x001b000f;
17313		}
17314	}
17315	if (tg3_flag(tp, ONE_DMA_AT_ONCE))
17316		tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
17317
17318	if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
17319	    tg3_asic_rev(tp) == ASIC_REV_5704)
17320		tp->dma_rwctrl &= 0xfffffff0;
17321
17322	if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
17323	    tg3_asic_rev(tp) == ASIC_REV_5701) {
17324		/* Remove this if it causes problems for some boards. */
17325		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
17326
17327		/* On 5700/5701 chips, we need to set this bit.
17328		 * Otherwise the chip will issue cacheline transactions
17329		 * to streamable DMA memory with not all the byte
17330		 * enables turned on.  This is an error on several
17331		 * RISC PCI controllers, in particular sparc64.
17332		 *
17333		 * On 5703/5704 chips, this bit has been reassigned
17334		 * a different meaning.  In particular, it is used
17335		 * on those chips to enable a PCI-X workaround.
17336		 */
17337		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
17338	}
17339
17340	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17341
17342
17343	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
17344	    tg3_asic_rev(tp) != ASIC_REV_5701)
17345		goto out;
17346
17347	/* It is best to perform DMA test with maximum write burst size
17348	 * to expose the 5700/5701 write DMA bug.
17349	 */
17350	saved_dma_rwctrl = tp->dma_rwctrl;
17351	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17352	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17353
17354	while (1) {
17355		u32 *p = buf, i;
17356
17357		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
17358			p[i] = i;
17359
17360		/* Send the buffer to the chip. */
17361		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
17362		if (ret) {
17363			dev_err(&tp->pdev->dev,
17364				"%s: Buffer write failed. err = %d\n",
17365				__func__, ret);
17366			break;
17367		}
17368
17369		/* Now read it back. */
17370		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
17371		if (ret) {
17372			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
17373				"err = %d\n", __func__, ret);
17374			break;
17375		}
17376
17377		/* Verify it. */
17378		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
17379			if (p[i] == i)
17380				continue;
17381
17382			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17383			    DMA_RWCTRL_WRITE_BNDRY_16) {
17384				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17385				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17386				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17387				break;
17388			} else {
17389				dev_err(&tp->pdev->dev,
17390					"%s: Buffer corrupted on read back! "
17391					"(%d != %d)\n", __func__, p[i], i);
17392				ret = -ENODEV;
17393				goto out;
17394			}
17395		}
17396
17397		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
17398			/* Success. */
17399			ret = 0;
17400			break;
17401		}
17402	}
17403	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
17404	    DMA_RWCTRL_WRITE_BNDRY_16) {
17405		/* DMA test passed without adjusting DMA boundary,
17406		 * now look for chipsets that are known to expose the
17407		 * DMA bug without failing the test.
17408		 */
17409		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
17410			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
17411			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
17412		} else {
17413			/* Safe to use the calculated DMA boundary. */
17414			tp->dma_rwctrl = saved_dma_rwctrl;
17415		}
17416
17417		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
17418	}
17419
17420out:
17421	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
17422out_nofree:
17423	return ret;
17424}
17425
17426static void tg3_init_bufmgr_config(struct tg3 *tp)
17427{
17428	if (tg3_flag(tp, 57765_PLUS)) {
17429		tp->bufmgr_config.mbuf_read_dma_low_water =
17430			DEFAULT_MB_RDMA_LOW_WATER_5705;
17431		tp->bufmgr_config.mbuf_mac_rx_low_water =
17432			DEFAULT_MB_MACRX_LOW_WATER_57765;
17433		tp->bufmgr_config.mbuf_high_water =
17434			DEFAULT_MB_HIGH_WATER_57765;
17435
17436		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17437			DEFAULT_MB_RDMA_LOW_WATER_5705;
17438		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17439			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17440		tp->bufmgr_config.mbuf_high_water_jumbo =
17441			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17442	} else if (tg3_flag(tp, 5705_PLUS)) {
17443		tp->bufmgr_config.mbuf_read_dma_low_water =
17444			DEFAULT_MB_RDMA_LOW_WATER_5705;
17445		tp->bufmgr_config.mbuf_mac_rx_low_water =
17446			DEFAULT_MB_MACRX_LOW_WATER_5705;
17447		tp->bufmgr_config.mbuf_high_water =
17448			DEFAULT_MB_HIGH_WATER_5705;
17449		if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17450			tp->bufmgr_config.mbuf_mac_rx_low_water =
17451				DEFAULT_MB_MACRX_LOW_WATER_5906;
17452			tp->bufmgr_config.mbuf_high_water =
17453				DEFAULT_MB_HIGH_WATER_5906;
17454		}
17455
17456		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17457			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17458		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17459			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17460		tp->bufmgr_config.mbuf_high_water_jumbo =
17461			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17462	} else {
17463		tp->bufmgr_config.mbuf_read_dma_low_water =
17464			DEFAULT_MB_RDMA_LOW_WATER;
17465		tp->bufmgr_config.mbuf_mac_rx_low_water =
17466			DEFAULT_MB_MACRX_LOW_WATER;
17467		tp->bufmgr_config.mbuf_high_water =
17468			DEFAULT_MB_HIGH_WATER;
17469
17470		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17471			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17472		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17473			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17474		tp->bufmgr_config.mbuf_high_water_jumbo =
17475			DEFAULT_MB_HIGH_WATER_JUMBO;
17476	}
17477
17478	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17479	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17480}
17481
17482static char *tg3_phy_string(struct tg3 *tp)
17483{
17484	switch (tp->phy_id & TG3_PHY_ID_MASK) {
17485	case TG3_PHY_ID_BCM5400:	return "5400";
17486	case TG3_PHY_ID_BCM5401:	return "5401";
17487	case TG3_PHY_ID_BCM5411:	return "5411";
17488	case TG3_PHY_ID_BCM5701:	return "5701";
17489	case TG3_PHY_ID_BCM5703:	return "5703";
17490	case TG3_PHY_ID_BCM5704:	return "5704";
17491	case TG3_PHY_ID_BCM5705:	return "5705";
17492	case TG3_PHY_ID_BCM5750:	return "5750";
17493	case TG3_PHY_ID_BCM5752:	return "5752";
17494	case TG3_PHY_ID_BCM5714:	return "5714";
17495	case TG3_PHY_ID_BCM5780:	return "5780";
17496	case TG3_PHY_ID_BCM5755:	return "5755";
17497	case TG3_PHY_ID_BCM5787:	return "5787";
17498	case TG3_PHY_ID_BCM5784:	return "5784";
17499	case TG3_PHY_ID_BCM5756:	return "5722/5756";
17500	case TG3_PHY_ID_BCM5906:	return "5906";
17501	case TG3_PHY_ID_BCM5761:	return "5761";
17502	case TG3_PHY_ID_BCM5718C:	return "5718C";
17503	case TG3_PHY_ID_BCM5718S:	return "5718S";
17504	case TG3_PHY_ID_BCM57765:	return "57765";
17505	case TG3_PHY_ID_BCM5719C:	return "5719C";
17506	case TG3_PHY_ID_BCM5720C:	return "5720C";
17507	case TG3_PHY_ID_BCM5762:	return "5762C";
17508	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
17509	case 0:			return "serdes";
17510	default:		return "unknown";
17511	}
17512}
17513
17514static char *tg3_bus_string(struct tg3 *tp, char *str)
17515{
17516	if (tg3_flag(tp, PCI_EXPRESS)) {
17517		strcpy(str, "PCI Express");
17518		return str;
17519	} else if (tg3_flag(tp, PCIX_MODE)) {
17520		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17521
17522		strcpy(str, "PCIX:");
17523
17524		if ((clock_ctrl == 7) ||
17525		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17526		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17527			strcat(str, "133MHz");
17528		else if (clock_ctrl == 0)
17529			strcat(str, "33MHz");
17530		else if (clock_ctrl == 2)
17531			strcat(str, "50MHz");
17532		else if (clock_ctrl == 4)
17533			strcat(str, "66MHz");
17534		else if (clock_ctrl == 6)
17535			strcat(str, "100MHz");
17536	} else {
17537		strcpy(str, "PCI:");
17538		if (tg3_flag(tp, PCI_HIGH_SPEED))
17539			strcat(str, "66MHz");
17540		else
17541			strcat(str, "33MHz");
17542	}
17543	if (tg3_flag(tp, PCI_32BIT))
17544		strcat(str, ":32-bit");
17545	else
17546		strcat(str, ":64-bit");
17547	return str;
17548}
17549
17550static void tg3_init_coal(struct tg3 *tp)
17551{
17552	struct ethtool_coalesce *ec = &tp->coal;
17553
17554	memset(ec, 0, sizeof(*ec));
17555	ec->cmd = ETHTOOL_GCOALESCE;
17556	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17557	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17558	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17559	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17560	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17561	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17562	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17563	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17564	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17565
17566	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17567				 HOSTCC_MODE_CLRTICK_TXBD)) {
17568		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17569		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17570		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17571		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17572	}
17573
17574	if (tg3_flag(tp, 5705_PLUS)) {
17575		ec->rx_coalesce_usecs_irq = 0;
17576		ec->tx_coalesce_usecs_irq = 0;
17577		ec->stats_block_coalesce_usecs = 0;
17578	}
17579}
17580
17581static int tg3_init_one(struct pci_dev *pdev,
17582				  const struct pci_device_id *ent)
17583{
17584	struct net_device *dev;
17585	struct tg3 *tp;
17586	int i, err;
17587	u32 sndmbx, rcvmbx, intmbx;
17588	char str[40];
17589	u64 dma_mask, persist_dma_mask;
17590	netdev_features_t features = 0;
17591
17592	printk_once(KERN_INFO "%s\n", version);
17593
17594	err = pci_enable_device(pdev);
17595	if (err) {
17596		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17597		return err;
17598	}
17599
17600	err = pci_request_regions(pdev, DRV_MODULE_NAME);
17601	if (err) {
17602		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17603		goto err_out_disable_pdev;
17604	}
17605
17606	pci_set_master(pdev);
17607
17608	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17609	if (!dev) {
17610		err = -ENOMEM;
17611		goto err_out_free_res;
17612	}
17613
17614	SET_NETDEV_DEV(dev, &pdev->dev);
17615
17616	tp = netdev_priv(dev);
17617	tp->pdev = pdev;
17618	tp->dev = dev;
17619	tp->rx_mode = TG3_DEF_RX_MODE;
17620	tp->tx_mode = TG3_DEF_TX_MODE;
17621	tp->irq_sync = 1;
17622	tp->pcierr_recovery = false;
17623
17624	if (tg3_debug > 0)
17625		tp->msg_enable = tg3_debug;
17626	else
17627		tp->msg_enable = TG3_DEF_MSG_ENABLE;
17628
17629	if (pdev_is_ssb_gige_core(pdev)) {
17630		tg3_flag_set(tp, IS_SSB_CORE);
17631		if (ssb_gige_must_flush_posted_writes(pdev))
17632			tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17633		if (ssb_gige_one_dma_at_once(pdev))
17634			tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17635		if (ssb_gige_have_roboswitch(pdev)) {
17636			tg3_flag_set(tp, USE_PHYLIB);
17637			tg3_flag_set(tp, ROBOSWITCH);
17638		}
17639		if (ssb_gige_is_rgmii(pdev))
17640			tg3_flag_set(tp, RGMII_MODE);
17641	}
17642
17643	/* The word/byte swap controls here control register access byte
17644	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
17645	 * setting below.
17646	 */
17647	tp->misc_host_ctrl =
17648		MISC_HOST_CTRL_MASK_PCI_INT |
17649		MISC_HOST_CTRL_WORD_SWAP |
17650		MISC_HOST_CTRL_INDIR_ACCESS |
17651		MISC_HOST_CTRL_PCISTATE_RW;
17652
17653	/* The NONFRM (non-frame) byte/word swap controls take effect
17654	 * on descriptor entries, anything which isn't packet data.
17655	 *
17656	 * The StrongARM chips on the board (one for tx, one for rx)
17657	 * are running in big-endian mode.
17658	 */
17659	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17660			GRC_MODE_WSWAP_NONFRM_DATA);
17661#ifdef __BIG_ENDIAN
17662	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17663#endif
17664	spin_lock_init(&tp->lock);
17665	spin_lock_init(&tp->indirect_lock);
17666	INIT_WORK(&tp->reset_task, tg3_reset_task);
17667
17668	tp->regs = pci_ioremap_bar(pdev, BAR_0);
17669	if (!tp->regs) {
17670		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17671		err = -ENOMEM;
17672		goto err_out_free_dev;
17673	}
17674
17675	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17676	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17677	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17678	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17679	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17680	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17681	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17682	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17683	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17684	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57767 ||
17685	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57764 ||
17686	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17687	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17688	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727 ||
17689	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57787) {
17690		tg3_flag_set(tp, ENABLE_APE);
17691		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17692		if (!tp->aperegs) {
17693			dev_err(&pdev->dev,
17694				"Cannot map APE registers, aborting\n");
17695			err = -ENOMEM;
17696			goto err_out_iounmap;
17697		}
17698	}
17699
17700	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17701	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17702
17703	dev->ethtool_ops = &tg3_ethtool_ops;
17704	dev->watchdog_timeo = TG3_TX_TIMEOUT;
17705	dev->netdev_ops = &tg3_netdev_ops;
17706	dev->irq = pdev->irq;
17707
17708	err = tg3_get_invariants(tp, ent);
17709	if (err) {
17710		dev_err(&pdev->dev,
17711			"Problem fetching invariants of chip, aborting\n");
17712		goto err_out_apeunmap;
17713	}
17714
17715	/* The EPB bridge inside 5714, 5715, and 5780 and any
17716	 * device behind the EPB cannot support DMA addresses > 40-bit.
17717	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17718	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17719	 * do DMA address check in tg3_start_xmit().
17720	 */
17721	if (tg3_flag(tp, IS_5788))
17722		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17723	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17724		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17725#ifdef CONFIG_HIGHMEM
17726		dma_mask = DMA_BIT_MASK(64);
17727#endif
17728	} else
17729		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17730
17731	/* Configure DMA attributes. */
17732	if (dma_mask > DMA_BIT_MASK(32)) {
17733		err = pci_set_dma_mask(pdev, dma_mask);
17734		if (!err) {
17735			features |= NETIF_F_HIGHDMA;
17736			err = pci_set_consistent_dma_mask(pdev,
17737							  persist_dma_mask);
17738			if (err < 0) {
17739				dev_err(&pdev->dev, "Unable to obtain 64 bit "
17740					"DMA for consistent allocations\n");
17741				goto err_out_apeunmap;
17742			}
17743		}
17744	}
17745	if (err || dma_mask == DMA_BIT_MASK(32)) {
17746		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17747		if (err) {
17748			dev_err(&pdev->dev,
17749				"No usable DMA configuration, aborting\n");
17750			goto err_out_apeunmap;
17751		}
17752	}
17753
17754	tg3_init_bufmgr_config(tp);
17755
17756	/* 5700 B0 chips do not support checksumming correctly due
17757	 * to hardware bugs.
17758	 */
17759	if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17760		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17761
17762		if (tg3_flag(tp, 5755_PLUS))
17763			features |= NETIF_F_IPV6_CSUM;
17764	}
17765
17766	/* TSO is on by default on chips that support hardware TSO.
17767	 * Firmware TSO on older chips gives lower performance, so it
17768	 * is off by default, but can be enabled using ethtool.
17769	 */
17770	if ((tg3_flag(tp, HW_TSO_1) ||
17771	     tg3_flag(tp, HW_TSO_2) ||
17772	     tg3_flag(tp, HW_TSO_3)) &&
17773	    (features & NETIF_F_IP_CSUM))
17774		features |= NETIF_F_TSO;
17775	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17776		if (features & NETIF_F_IPV6_CSUM)
17777			features |= NETIF_F_TSO6;
17778		if (tg3_flag(tp, HW_TSO_3) ||
17779		    tg3_asic_rev(tp) == ASIC_REV_5761 ||
17780		    (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17781		     tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17782		    tg3_asic_rev(tp) == ASIC_REV_5785 ||
17783		    tg3_asic_rev(tp) == ASIC_REV_57780)
17784			features |= NETIF_F_TSO_ECN;
17785	}
17786
17787	dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17788			 NETIF_F_HW_VLAN_CTAG_RX;
17789	dev->vlan_features |= features;
17790
17791	/*
17792	 * Add loopback capability only for a subset of devices that support
17793	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17794	 * loopback for the remaining devices.
17795	 */
17796	if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17797	    !tg3_flag(tp, CPMU_PRESENT))
17798		/* Add the loopback capability */
17799		features |= NETIF_F_LOOPBACK;
17800
17801	dev->hw_features |= features;
17802	dev->priv_flags |= IFF_UNICAST_FLT;
17803
17804	if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17805	    !tg3_flag(tp, TSO_CAPABLE) &&
17806	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17807		tg3_flag_set(tp, MAX_RXPEND_64);
17808		tp->rx_pending = 63;
17809	}
17810
17811	err = tg3_get_device_address(tp);
17812	if (err) {
17813		dev_err(&pdev->dev,
17814			"Could not obtain valid ethernet address, aborting\n");
17815		goto err_out_apeunmap;
17816	}
17817
17818	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17819	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17820	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17821	for (i = 0; i < tp->irq_max; i++) {
17822		struct tg3_napi *tnapi = &tp->napi[i];
17823
17824		tnapi->tp = tp;
17825		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17826
17827		tnapi->int_mbox = intmbx;
17828		if (i <= 4)
17829			intmbx += 0x8;
17830		else
17831			intmbx += 0x4;
17832
17833		tnapi->consmbox = rcvmbx;
17834		tnapi->prodmbox = sndmbx;
17835
17836		if (i)
17837			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17838		else
17839			tnapi->coal_now = HOSTCC_MODE_NOW;
17840
17841		if (!tg3_flag(tp, SUPPORT_MSIX))
17842			break;
17843
17844		/*
17845		 * If we support MSIX, we'll be using RSS.  If we're using
17846		 * RSS, the first vector only handles link interrupts and the
17847		 * remaining vectors handle rx and tx interrupts.  Reuse the
17848		 * mailbox values for the next iteration.  The values we setup
17849		 * above are still useful for the single vectored mode.
17850		 */
17851		if (!i)
17852			continue;
17853
17854		rcvmbx += 0x8;
17855
17856		if (sndmbx & 0x4)
17857			sndmbx -= 0x4;
17858		else
17859			sndmbx += 0xc;
17860	}
17861
17862	/*
17863	 * Reset chip in case UNDI or EFI driver did not shutdown
17864	 * DMA self test will enable WDMAC and we'll see (spurious)
17865	 * pending DMA on the PCI bus at that point.
17866	 */
17867	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17868	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17869		tg3_full_lock(tp, 0);
17870		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17871		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17872		tg3_full_unlock(tp);
17873	}
17874
17875	err = tg3_test_dma(tp);
17876	if (err) {
17877		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17878		goto err_out_apeunmap;
17879	}
17880
17881	tg3_init_coal(tp);
17882
17883	pci_set_drvdata(pdev, dev);
17884
17885	if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17886	    tg3_asic_rev(tp) == ASIC_REV_5720 ||
17887	    tg3_asic_rev(tp) == ASIC_REV_5762)
17888		tg3_flag_set(tp, PTP_CAPABLE);
17889
17890	tg3_timer_init(tp);
17891
17892	tg3_carrier_off(tp);
17893
17894	err = register_netdev(dev);
17895	if (err) {
17896		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17897		goto err_out_apeunmap;
17898	}
17899
17900	if (tg3_flag(tp, PTP_CAPABLE)) {
17901		tg3_ptp_init(tp);
17902		tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
17903						   &tp->pdev->dev);
17904		if (IS_ERR(tp->ptp_clock))
17905			tp->ptp_clock = NULL;
17906	}
17907
17908	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17909		    tp->board_part_number,
17910		    tg3_chip_rev_id(tp),
17911		    tg3_bus_string(tp, str),
17912		    dev->dev_addr);
17913
17914	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17915		struct phy_device *phydev;
17916		phydev = tp->mdio_bus->phy_map[tp->phy_addr];
17917		netdev_info(dev,
17918			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17919			    phydev->drv->name, dev_name(&phydev->dev));
17920	} else {
17921		char *ethtype;
17922
17923		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17924			ethtype = "10/100Base-TX";
17925		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17926			ethtype = "1000Base-SX";
17927		else
17928			ethtype = "10/100/1000Base-T";
17929
17930		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17931			    "(WireSpeed[%d], EEE[%d])\n",
17932			    tg3_phy_string(tp), ethtype,
17933			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17934			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17935	}
17936
17937	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17938		    (dev->features & NETIF_F_RXCSUM) != 0,
17939		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
17940		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17941		    tg3_flag(tp, ENABLE_ASF) != 0,
17942		    tg3_flag(tp, TSO_CAPABLE) != 0);
17943	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17944		    tp->dma_rwctrl,
17945		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17946		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17947
17948	pci_save_state(pdev);
17949
17950	return 0;
17951
17952err_out_apeunmap:
17953	if (tp->aperegs) {
17954		iounmap(tp->aperegs);
17955		tp->aperegs = NULL;
17956	}
17957
17958err_out_iounmap:
17959	if (tp->regs) {
17960		iounmap(tp->regs);
17961		tp->regs = NULL;
17962	}
17963
17964err_out_free_dev:
17965	free_netdev(dev);
17966
17967err_out_free_res:
17968	pci_release_regions(pdev);
17969
17970err_out_disable_pdev:
17971	if (pci_is_enabled(pdev))
17972		pci_disable_device(pdev);
17973	return err;
17974}
17975
17976static void tg3_remove_one(struct pci_dev *pdev)
17977{
17978	struct net_device *dev = pci_get_drvdata(pdev);
17979
17980	if (dev) {
17981		struct tg3 *tp = netdev_priv(dev);
17982
17983		tg3_ptp_fini(tp);
17984
17985		release_firmware(tp->fw);
17986
17987		tg3_reset_task_cancel(tp);
17988
17989		if (tg3_flag(tp, USE_PHYLIB)) {
17990			tg3_phy_fini(tp);
17991			tg3_mdio_fini(tp);
17992		}
17993
17994		unregister_netdev(dev);
17995		if (tp->aperegs) {
17996			iounmap(tp->aperegs);
17997			tp->aperegs = NULL;
17998		}
17999		if (tp->regs) {
18000			iounmap(tp->regs);
18001			tp->regs = NULL;
18002		}
18003		free_netdev(dev);
18004		pci_release_regions(pdev);
18005		pci_disable_device(pdev);
18006	}
18007}
18008
18009#ifdef CONFIG_PM_SLEEP
18010static int tg3_suspend(struct device *device)
18011{
18012	struct pci_dev *pdev = to_pci_dev(device);
18013	struct net_device *dev = pci_get_drvdata(pdev);
18014	struct tg3 *tp = netdev_priv(dev);
18015	int err = 0;
18016
18017	rtnl_lock();
18018
18019	if (!netif_running(dev))
18020		goto unlock;
18021
18022	tg3_reset_task_cancel(tp);
18023	tg3_phy_stop(tp);
18024	tg3_netif_stop(tp);
18025
18026	tg3_timer_stop(tp);
18027
18028	tg3_full_lock(tp, 1);
18029	tg3_disable_ints(tp);
18030	tg3_full_unlock(tp);
18031
18032	netif_device_detach(dev);
18033
18034	tg3_full_lock(tp, 0);
18035	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
18036	tg3_flag_clear(tp, INIT_COMPLETE);
18037	tg3_full_unlock(tp);
18038
18039	err = tg3_power_down_prepare(tp);
18040	if (err) {
18041		int err2;
18042
18043		tg3_full_lock(tp, 0);
18044
18045		tg3_flag_set(tp, INIT_COMPLETE);
18046		err2 = tg3_restart_hw(tp, true);
18047		if (err2)
18048			goto out;
18049
18050		tg3_timer_start(tp);
18051
18052		netif_device_attach(dev);
18053		tg3_netif_start(tp);
18054
18055out:
18056		tg3_full_unlock(tp);
18057
18058		if (!err2)
18059			tg3_phy_start(tp);
18060	}
18061
18062unlock:
18063	rtnl_unlock();
18064	return err;
18065}
18066
18067static int tg3_resume(struct device *device)
18068{
18069	struct pci_dev *pdev = to_pci_dev(device);
18070	struct net_device *dev = pci_get_drvdata(pdev);
18071	struct tg3 *tp = netdev_priv(dev);
18072	int err = 0;
18073
18074	rtnl_lock();
18075
18076	if (!netif_running(dev))
18077		goto unlock;
18078
18079	netif_device_attach(dev);
18080
18081	tg3_full_lock(tp, 0);
18082
18083	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18084
18085	tg3_flag_set(tp, INIT_COMPLETE);
18086	err = tg3_restart_hw(tp,
18087			     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
18088	if (err)
18089		goto out;
18090
18091	tg3_timer_start(tp);
18092
18093	tg3_netif_start(tp);
18094
18095out:
18096	tg3_full_unlock(tp);
18097
18098	if (!err)
18099		tg3_phy_start(tp);
18100
18101unlock:
18102	rtnl_unlock();
18103	return err;
18104}
18105#endif /* CONFIG_PM_SLEEP */
18106
18107static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
18108
18109static void tg3_shutdown(struct pci_dev *pdev)
18110{
18111	struct net_device *dev = pci_get_drvdata(pdev);
18112	struct tg3 *tp = netdev_priv(dev);
18113
18114	rtnl_lock();
18115	netif_device_detach(dev);
18116
18117	if (netif_running(dev))
18118		dev_close(dev);
18119
18120	if (system_state == SYSTEM_POWER_OFF)
18121		tg3_power_down(tp);
18122
18123	rtnl_unlock();
18124}
18125
18126/**
18127 * tg3_io_error_detected - called when PCI error is detected
18128 * @pdev: Pointer to PCI device
18129 * @state: The current pci connection state
18130 *
18131 * This function is called after a PCI bus error affecting
18132 * this device has been detected.
18133 */
18134static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
18135					      pci_channel_state_t state)
18136{
18137	struct net_device *netdev = pci_get_drvdata(pdev);
18138	struct tg3 *tp = netdev_priv(netdev);
18139	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
18140
18141	netdev_info(netdev, "PCI I/O error detected\n");
18142
18143	rtnl_lock();
18144
18145	/* We needn't recover from permanent error */
18146	if (state == pci_channel_io_frozen)
18147		tp->pcierr_recovery = true;
18148
18149	/* We probably don't have netdev yet */
18150	if (!netdev || !netif_running(netdev))
18151		goto done;
18152
18153	tg3_phy_stop(tp);
18154
18155	tg3_netif_stop(tp);
18156
18157	tg3_timer_stop(tp);
18158
18159	/* Want to make sure that the reset task doesn't run */
18160	tg3_reset_task_cancel(tp);
18161
18162	netif_device_detach(netdev);
18163
18164	/* Clean up software state, even if MMIO is blocked */
18165	tg3_full_lock(tp, 0);
18166	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
18167	tg3_full_unlock(tp);
18168
18169done:
18170	if (state == pci_channel_io_perm_failure) {
18171		if (netdev) {
18172			tg3_napi_enable(tp);
18173			dev_close(netdev);
18174		}
18175		err = PCI_ERS_RESULT_DISCONNECT;
18176	} else {
18177		pci_disable_device(pdev);
18178	}
18179
18180	rtnl_unlock();
18181
18182	return err;
18183}
18184
18185/**
18186 * tg3_io_slot_reset - called after the pci bus has been reset.
18187 * @pdev: Pointer to PCI device
18188 *
18189 * Restart the card from scratch, as if from a cold-boot.
18190 * At this point, the card has exprienced a hard reset,
18191 * followed by fixups by BIOS, and has its config space
18192 * set up identically to what it was at cold boot.
18193 */
18194static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
18195{
18196	struct net_device *netdev = pci_get_drvdata(pdev);
18197	struct tg3 *tp = netdev_priv(netdev);
18198	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
18199	int err;
18200
18201	rtnl_lock();
18202
18203	if (pci_enable_device(pdev)) {
18204		dev_err(&pdev->dev,
18205			"Cannot re-enable PCI device after reset.\n");
18206		goto done;
18207	}
18208
18209	pci_set_master(pdev);
18210	pci_restore_state(pdev);
18211	pci_save_state(pdev);
18212
18213	if (!netdev || !netif_running(netdev)) {
18214		rc = PCI_ERS_RESULT_RECOVERED;
18215		goto done;
18216	}
18217
18218	err = tg3_power_up(tp);
18219	if (err)
18220		goto done;
18221
18222	rc = PCI_ERS_RESULT_RECOVERED;
18223
18224done:
18225	if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
18226		tg3_napi_enable(tp);
18227		dev_close(netdev);
18228	}
18229	rtnl_unlock();
18230
18231	return rc;
18232}
18233
18234/**
18235 * tg3_io_resume - called when traffic can start flowing again.
18236 * @pdev: Pointer to PCI device
18237 *
18238 * This callback is called when the error recovery driver tells
18239 * us that its OK to resume normal operation.
18240 */
18241static void tg3_io_resume(struct pci_dev *pdev)
18242{
18243	struct net_device *netdev = pci_get_drvdata(pdev);
18244	struct tg3 *tp = netdev_priv(netdev);
18245	int err;
18246
18247	rtnl_lock();
18248
18249	if (!netif_running(netdev))
18250		goto done;
18251
18252	tg3_full_lock(tp, 0);
18253	tg3_ape_driver_state_change(tp, RESET_KIND_INIT);
18254	tg3_flag_set(tp, INIT_COMPLETE);
18255	err = tg3_restart_hw(tp, true);
18256	if (err) {
18257		tg3_full_unlock(tp);
18258		netdev_err(netdev, "Cannot restart hardware after reset.\n");
18259		goto done;
18260	}
18261
18262	netif_device_attach(netdev);
18263
18264	tg3_timer_start(tp);
18265
18266	tg3_netif_start(tp);
18267
18268	tg3_full_unlock(tp);
18269
18270	tg3_phy_start(tp);
18271
18272done:
18273	tp->pcierr_recovery = false;
18274	rtnl_unlock();
18275}
18276
18277static const struct pci_error_handlers tg3_err_handler = {
18278	.error_detected	= tg3_io_error_detected,
18279	.slot_reset	= tg3_io_slot_reset,
18280	.resume		= tg3_io_resume
18281};
18282
18283static struct pci_driver tg3_driver = {
18284	.name		= DRV_MODULE_NAME,
18285	.id_table	= tg3_pci_tbl,
18286	.probe		= tg3_init_one,
18287	.remove		= tg3_remove_one,
18288	.err_handler	= &tg3_err_handler,
18289	.driver.pm	= &tg3_pm_ops,
18290	.shutdown	= tg3_shutdown,
18291};
18292
18293module_pci_driver(tg3_driver);
18294