/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_inline.h"

#include <asm-generic/io-64-nonatomic-lo-hi.h>

#define TIMEOUT_100_MS	100
#define MASK(n)		DMA_BIT_MASK(n)
#define MN_WIN(addr)	(((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
#define OCM_WIN(addr)	(((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
#define MS_WIN(addr)	(addr & 0x0ffc0000)
#define QLA82XX_PCI_MN_2M	(0)
#define QLA82XX_PCI_MS_2M	(0x80000)
#define QLA82XX_PCI_OCM0_2M	(0xc0000)
#define VALID_OCM_ADDR(addr)	(((addr) & 0x3f800) != 0x3f800)
#define GET_MEM_OFFS_2M(addr)	(addr & MASK(18))
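
/*
 * Note: MN_WIN() and OCM_WIN() fold the relevant bits of a 128M-space agent
 * address into the value programmed into the corresponding window register,
 * while MS_WIN() masks out the window-aligned part of an MS address.
 * GET_MEM_OFFS_2M() keeps the low 18 bits, i.e. the offset within the
 * currently mapped window.
 */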

/* CRB window related */
#define CRB_BLK(off)	((off >> 20) & 0x3f)
#define CRB_SUBBLK(off)	((off >> 16) & 0xf)
#define CRB_WINDOW_2M	(0x130060)
#define CRB_HI(off)	((qla4_82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \
			((off) & 0xf0000))
#define QLA82XX_PCI_CAMQM_2M_END	(0x04800800UL)
#define QLA82XX_PCI_CAMQM_2M_BASE	(0x000ff800UL)
#define CRB_INDIRECT_2M			(0x1e0000UL)
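
/*
 * The legacy 128MB CRB space is reached through a 2MB BAR: CRB_BLK() and
 * CRB_SUBBLK() split a 128M-space offset into block/sub-block, CRB_HI()
 * builds the value written to the movable window register at CRB_WINDOW_2M,
 * and the access itself then goes through the CRB_INDIRECT_2M region of the
 * BAR (see qla4_82xx_pci_set_crbwindow_2M() below).
 */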

static inline void __iomem *
qla4_8xxx_pci_base_offsetfset(struct scsi_qla_host *ha, unsigned long off)
{
	if ((off < ha->first_page_group_end) &&
	    (off >= ha->first_page_group_start))
		return (void __iomem *)(ha->nx_pcibase + off);

	return NULL;
}
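
/*
 * The helper above returns a virtual address for 'off' only when it falls
 * inside the region already mapped at ha->nx_pcibase; callers such as
 * qla4_82xx_pci_mem_read_direct() fall back to a temporary ioremap() when
 * NULL is returned.
 */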

#define MAX_CRB_XFORM 60
static unsigned long crb_addr_xform[MAX_CRB_XFORM];
static int qla4_8xxx_crb_table_initialized;
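
/*
 * crb_addr_xform[] maps a CRB block number to its hub/agent address
 * (shifted into the top bits); qla4_82xx_decode_crb_addr() walks this table
 * to translate ROM-supplied internal CRB addresses back to PCI offsets.
 */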

#define qla4_8xxx_crb_addr_transform(name) \
	(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
	 QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
static void
qla4_82xx_crb_addr_transform_setup(void)
{
	qla4_8xxx_crb_addr_transform(XDMA);
	qla4_8xxx_crb_addr_transform(TIMR);
	qla4_8xxx_crb_addr_transform(SRE);
	qla4_8xxx_crb_addr_transform(SQN3);
	qla4_8xxx_crb_addr_transform(SQN2);
	qla4_8xxx_crb_addr_transform(SQN1);
	qla4_8xxx_crb_addr_transform(SQN0);
	qla4_8xxx_crb_addr_transform(SQS3);
	qla4_8xxx_crb_addr_transform(SQS2);
	qla4_8xxx_crb_addr_transform(SQS1);
	qla4_8xxx_crb_addr_transform(SQS0);
	qla4_8xxx_crb_addr_transform(RPMX7);
	qla4_8xxx_crb_addr_transform(RPMX6);
	qla4_8xxx_crb_addr_transform(RPMX5);
	qla4_8xxx_crb_addr_transform(RPMX4);
	qla4_8xxx_crb_addr_transform(RPMX3);
	qla4_8xxx_crb_addr_transform(RPMX2);
	qla4_8xxx_crb_addr_transform(RPMX1);
	qla4_8xxx_crb_addr_transform(RPMX0);
	qla4_8xxx_crb_addr_transform(ROMUSB);
	qla4_8xxx_crb_addr_transform(SN);
	qla4_8xxx_crb_addr_transform(QMN);
	qla4_8xxx_crb_addr_transform(QMS);
	qla4_8xxx_crb_addr_transform(PGNI);
	qla4_8xxx_crb_addr_transform(PGND);
	qla4_8xxx_crb_addr_transform(PGN3);
	qla4_8xxx_crb_addr_transform(PGN2);
	qla4_8xxx_crb_addr_transform(PGN1);
	qla4_8xxx_crb_addr_transform(PGN0);
	qla4_8xxx_crb_addr_transform(PGSI);
	qla4_8xxx_crb_addr_transform(PGSD);
	qla4_8xxx_crb_addr_transform(PGS3);
	qla4_8xxx_crb_addr_transform(PGS2);
	qla4_8xxx_crb_addr_transform(PGS1);
	qla4_8xxx_crb_addr_transform(PGS0);
	qla4_8xxx_crb_addr_transform(PS);
	qla4_8xxx_crb_addr_transform(PH);
	qla4_8xxx_crb_addr_transform(NIU);
	qla4_8xxx_crb_addr_transform(I2Q);
	qla4_8xxx_crb_addr_transform(EG);
	qla4_8xxx_crb_addr_transform(MN);
	qla4_8xxx_crb_addr_transform(MS);
	qla4_8xxx_crb_addr_transform(CAS2);
	qla4_8xxx_crb_addr_transform(CAS1);
	qla4_8xxx_crb_addr_transform(CAS0);
	qla4_8xxx_crb_addr_transform(CAM);
	qla4_8xxx_crb_addr_transform(C2C1);
	qla4_8xxx_crb_addr_transform(C2C0);
	qla4_8xxx_crb_addr_transform(SMB);
	qla4_8xxx_crb_addr_transform(OCM0);
	qla4_8xxx_crb_addr_transform(I2C0);

	qla4_8xxx_crb_table_initialized = 1;
}

static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
	{{{0, 0,         0,         0} } },		/* 0: PCI */
	{{{1, 0x0100000, 0x0102000, 0x120000},	/* 1: PCIE */
		{1, 0x0110000, 0x0120000, 0x130000},
		{1, 0x0120000, 0x0122000, 0x124000},
		{1, 0x0130000, 0x0132000, 0x126000},
		{1, 0x0140000, 0x0142000, 0x128000},
		{1, 0x0150000, 0x0152000, 0x12a000},
		{1, 0x0160000, 0x0170000, 0x110000},
		{1, 0x0170000, 0x0172000, 0x12e000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x01e0000, 0x01e0800, 0x122000},
		{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
	{{{0, 0,         0,         0} } },	    /* 3: */
	{{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
	{{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE   */
	{{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU   */
	{{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM    */
	{{{1, 0x0800000, 0x0802000, 0x170000},  /* 8: SQM0  */
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x08f0000, 0x08f2000, 0x172000} } },
	{{{1, 0x0900000, 0x0902000, 0x174000},	/* 9: SQM1 */
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x09f0000, 0x09f2000, 0x176000} } },
	{{{0, 0x0a00000, 0x0a02000, 0x178000},	/* 10: SQM2 */
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x0af0000, 0x0af2000, 0x17a000} } },
	{{{0, 0x0b00000, 0x0b02000, 0x17c000},	/* 11: SQM3 */
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
	{{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
	{{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
	{{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
	{{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
	{{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
	{{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
	{{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
	{{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
	{{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
	{{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
	{{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
	{{{0, 0,         0,         0} } },	/* 23: */
	{{{0, 0,         0,         0} } },	/* 24: */
	{{{0, 0,         0,         0} } },	/* 25: */
	{{{0, 0,         0,         0} } },	/* 26: */
	{{{0, 0,         0,         0} } },	/* 27: */
	{{{0, 0,         0,         0} } },	/* 28: */
	{{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
	{{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
	{{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
	{{{0} } },				/* 32: PCI */
	{{{1, 0x2100000, 0x2102000, 0x120000},	/* 33: PCIE */
		{1, 0x2110000, 0x2120000, 0x130000},
		{1, 0x2120000, 0x2122000, 0x124000},
		{1, 0x2130000, 0x2132000, 0x126000},
		{1, 0x2140000, 0x2142000, 0x128000},
		{1, 0x2150000, 0x2152000, 0x12a000},
		{1, 0x2160000, 0x2170000, 0x110000},
		{1, 0x2170000, 0x2172000, 0x12e000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000},
		{0, 0x0000000, 0x0000000, 0x000000} } },
	{{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
	{{{0} } },				/* 35: */
	{{{0} } },				/* 36: */
	{{{0} } },				/* 37: */
	{{{0} } },				/* 38: */
	{{{0} } },				/* 39: */
	{{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
	{{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
	{{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
	{{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
	{{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
	{{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
	{{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
	{{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
	{{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
	{{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
	{{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
	{{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
	{{{0} } },				/* 52: */
	{{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
	{{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
	{{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
	{{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
	{{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
	{{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
	{{{0} } },				/* 59: I2C0 */
	{{{0} } },				/* 60: I2C1 */
	{{{1, 0x3d00000, 0x3d04000, 0x1dc000} } },/* 61: LPC */
	{{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
	{{{1, 0x3f00000, 0x3f01000, 0x168000} } }	/* 63: P2NR0 */
};
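
/*
 * Each entry above describes one 1MB block of the 128MB CRB space as up to
 * sixteen 64KB sub-blocks: {valid, start_128M, end_128M, start_2M}. A valid
 * sub-block is directly reachable in the 2MB BAR at start_2M; anything else
 * must go through the movable CRB window.
 */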

/*
 * top 12 bits of crb internal address (hub, agent)
 */
static unsigned qla4_82xx_crb_hub_agt[64] = {
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
	QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
	QLA82XX_HW_CRB_HUB_AGT_ADR_MS,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SRE,
	QLA82XX_HW_CRB_HUB_AGT_ADR_NIU,
	QLA82XX_HW_CRB_HUB_AGT_ADR_QMN,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4,
	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGND,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SN,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_EG,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
	QLA82XX_HW_CRB_HUB_AGT_ADR_CAM,
	0,
	0,
	0,
	0,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7,
	QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q,
	QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8,
	QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9,
	QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_SMB,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1,
	0,
	QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC,
	0,
};

/* Device states */
static char *qdev_state[] = {
	"Unknown",
	"Cold",
	"Initializing",
	"Ready",
	"Need Reset",
	"Need Quiescent",
	"Failed",
	"Quiescent",
};

/*
 * In: 'off' is offset from CRB space in 128M pci map
 * Out: 'off' is 2M pci map addr
 * side effect: lock crb window
 */
static void
qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha, ulong *off)
{
	u32 win_read;

	ha->crb_win = CRB_HI(*off);
	writel(ha->crb_win,
		(void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));

	/* Read back value to make sure write has gone through before trying
	 * to use it.
	 */
	win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
	if (win_read != ha->crb_win) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "%s: Written crbwin (0x%x) != Read crbwin (0x%x),"
		    " off=0x%lx\n", __func__, ha->crb_win, win_read, *off));
	}
	*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
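
/*
 * qla4_82xx_pci_get_crb_addr_2M() returns 0 when the offset is directly
 * mapped, 1 when the CRB window must be shifted first, and -1 for an
 * invalid offset; the register accessors below rely on that contract.
 */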

void
qla4_82xx_wr_32(struct scsi_qla_host *ha, ulong off, u32 data)
{
	unsigned long flags = 0;
	int rv;

	rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		write_lock_irqsave(&ha->hw_lock, flags);
		qla4_82xx_crb_win_lock(ha);
		qla4_82xx_pci_set_crbwindow_2M(ha, &off);
	}

	writel(data, (void __iomem *)off);

	if (rv == 1) {
		qla4_82xx_crb_win_unlock(ha);
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
}

uint32_t qla4_82xx_rd_32(struct scsi_qla_host *ha, ulong off)
{
	unsigned long flags = 0;
	int rv;
	u32 data;

	rv = qla4_82xx_pci_get_crb_addr_2M(ha, &off);

	BUG_ON(rv == -1);

	if (rv == 1) {
		write_lock_irqsave(&ha->hw_lock, flags);
		qla4_82xx_crb_win_lock(ha);
		qla4_82xx_pci_set_crbwindow_2M(ha, &off);
	}
	data = readl((void __iomem *)off);

	if (rv == 1) {
		qla4_82xx_crb_win_unlock(ha);
		write_unlock_irqrestore(&ha->hw_lock, flags);
	}
	return data;
}

/* Minidump related functions */
int qla4_82xx_md_rd_32(struct scsi_qla_host *ha, uint32_t off, uint32_t *data)
{
	uint32_t win_read, off_value;
	int rval = QLA_SUCCESS;

	off_value = off & 0xFFFF0000;
	writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));

	/*
	 * Read back value to make sure write has gone through before trying
	 * to use it.
	 */
	win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
	if (win_read != off_value) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
				  __func__, off_value, win_read, off));
		rval = QLA_ERROR;
	} else {
		off_value = off & 0x0000FFFF;
		*data = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
					       ha->nx_pcibase));
	}
	return rval;
}

int qla4_82xx_md_wr_32(struct scsi_qla_host *ha, uint32_t off, uint32_t data)
{
	uint32_t win_read, off_value;
	int rval = QLA_SUCCESS;

	off_value = off & 0xFFFF0000;
	writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));

	/*
	 * Read back value to make sure write has gone through before trying
	 * to use it.
	 */
	win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
	if (win_read != off_value) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
				  __func__, off_value, win_read, off));
		rval = QLA_ERROR;
	} else {
		off_value = off & 0x0000FFFF;
		writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
					      ha->nx_pcibase));
	}
	return rval;
}

#define CRB_WIN_LOCK_TIMEOUT 100000000

int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha)
{
	int i;
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore7 from PCI HW block */
		done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK));
		if (done == 1)
			break;
		if (timeout >= CRB_WIN_LOCK_TIMEOUT)
			return -1;

		timeout++;

		/* Yield CPU */
		if (!in_interrupt())
			schedule();
		else {
			for (i = 0; i < 20; i++)
				cpu_relax();	/* This is a nop instr on i386 */
		}
	}
	qla4_82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->func_num);
	return 0;
}

void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha)
{
	qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK));
}
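
/*
 * The PCIe semaphore registers implement these locks: reading SEMn_LOCK
 * returns 1 once ownership is granted, and reading SEMn_UNLOCK releases it.
 * The IDC and ROM locks below follow the same pattern.
 */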

#define IDC_LOCK_TIMEOUT 100000000

/**
 * qla4_82xx_idc_lock - hw_lock
 * @ha: pointer to adapter structure
 *
 * General purpose lock used to synchronize access to
 * CRB_DEV_STATE, CRB_DEV_REF_COUNT, etc.
 **/
int qla4_82xx_idc_lock(struct scsi_qla_host *ha)
{
	int i;
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore5 from PCI HW block */
		done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK));
		if (done == 1)
			break;
		if (timeout >= IDC_LOCK_TIMEOUT)
			return -1;

		timeout++;

		/* Yield CPU */
		if (!in_interrupt())
			schedule();
		else {
			for (i = 0; i < 20; i++)
				cpu_relax();	/* This is a nop instr on i386 */
		}
	}
	return 0;
}

void qla4_82xx_idc_unlock(struct scsi_qla_host *ha)
{
	qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK));
}

int
qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *ha, ulong *off)
{
	struct crb_128M_2M_sub_block_map *m;

	if (*off >= QLA82XX_CRB_MAX)
		return -1;

	if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) {
		*off = (*off - QLA82XX_PCI_CAMQM) +
		    QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase;
		return 0;
	}

	if (*off < QLA82XX_PCI_CRBSPACE)
		return -1;

	*off -= QLA82XX_PCI_CRBSPACE;

	/*
	 * Try direct map
	 */
	m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)];

	if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) {
		*off = *off + m->start_2M - m->start_128M + ha->nx_pcibase;
		return 0;
	}

	/*
	 * Not in direct map, use crb window
	 */
	return 1;
}

/*
 * Check memory access boundary.
 * Used by the test agent; only DDR access is supported for now.
 */
static unsigned long
qla4_82xx_pci_mem_bound_check(struct scsi_qla_host *ha,
		unsigned long long addr, int size)
{
	if (!QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
	    QLA8XXX_ADDR_DDR_NET_MAX) ||
	    !QLA8XXX_ADDR_IN_RANGE(addr + size - 1,
	    QLA8XXX_ADDR_DDR_NET, QLA8XXX_ADDR_DDR_NET_MAX) ||
	    ((size != 1) && (size != 2) && (size != 4) && (size != 8))) {
		return 0;
	}
	return 1;
}

static int qla4_82xx_pci_set_window_warning_count;

static unsigned long
qla4_82xx_pci_set_window(struct scsi_qla_host *ha, unsigned long long addr)
{
	int window;
	u32 win_read;

	if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
	    QLA8XXX_ADDR_DDR_NET_MAX)) {
		/* DDR network side */
		window = MN_WIN(addr);
		ha->ddr_mn_window = window;
		qla4_82xx_wr_32(ha, ha->mn_win_crb |
		    QLA82XX_PCI_CRBSPACE, window);
		win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
		    QLA82XX_PCI_CRBSPACE);
		if ((win_read << 17) != window) {
			ql4_printk(KERN_WARNING, ha,
			"%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n",
			__func__, window, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET;
	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
				QLA8XXX_ADDR_OCM0_MAX)) {
		unsigned int temp1;
		/* if bits 19:18 and 17:11 are set */
		if ((addr & 0x00ff800) == 0xff800) {
			printk("%s: QM access not handled.\n", __func__);
			addr = -1UL;
		}

		window = OCM_WIN(addr);
		ha->ddr_mn_window = window;
		qla4_82xx_wr_32(ha, ha->mn_win_crb |
		    QLA82XX_PCI_CRBSPACE, window);
		win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb |
		    QLA82XX_PCI_CRBSPACE);
		temp1 = ((window & 0x1FF) << 7) |
		    ((window & 0x0FFFE0000) >> 17);
		if (win_read != temp1) {
			printk("%s: Written OCMwin (0x%x) != Read"
			    " OCMwin (0x%x)\n", __func__, temp1, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M;

	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
				QLA82XX_P3_ADDR_QDR_NET_MAX)) {
		/* QDR network side */
		window = MS_WIN(addr);
		ha->qdr_sn_window = window;
		qla4_82xx_wr_32(ha, ha->ms_win_crb |
		    QLA82XX_PCI_CRBSPACE, window);
		win_read = qla4_82xx_rd_32(ha,
		     ha->ms_win_crb | QLA82XX_PCI_CRBSPACE);
		if (win_read != window) {
			printk("%s: Written MSwin (0x%x) != Read "
			    "MSwin (0x%x)\n", __func__, window, win_read);
		}
		addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET;

	} else {
		/*
		 * peg gdb frequently accesses memory that doesn't exist,
		 * this limits the chit chat so debugging isn't slowed down.
		 */
		if ((qla4_82xx_pci_set_window_warning_count++ < 8) ||
		    (qla4_82xx_pci_set_window_warning_count%64 == 0)) {
			printk("%s: Warning:%s Unknown address range!\n",
			    __func__, DRIVER_NAME);
		}
		addr = -1UL;
	}
	return addr;
}

/* check if address is in the same window as the previous access */
static int qla4_82xx_pci_is_same_window(struct scsi_qla_host *ha,
		unsigned long long addr)
{
	int window;
	unsigned long long qdr_max;

	qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX;

	if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
	    QLA8XXX_ADDR_DDR_NET_MAX)) {
		/* DDR network side */
		BUG();	/* MN access can not come here */
	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM0,
	     QLA8XXX_ADDR_OCM0_MAX)) {
		return 1;
	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_OCM1,
	     QLA8XXX_ADDR_OCM1_MAX)) {
		return 1;
	} else if (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
	    qdr_max)) {
		/* QDR network side */
		window = ((addr - QLA8XXX_ADDR_QDR_NET) >> 22) & 0x3f;
		if (ha->qdr_sn_window == window)
			return 1;
	}

	return 0;
}
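
/*
 * Direct memory accesses must not straddle a hardware window: the helpers
 * below bail out when the last byte of the access (off + size - 1) would
 * fall into a different window than the one just programmed.
 */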

static int qla4_82xx_pci_mem_read_direct(struct scsi_qla_host *ha,
		u64 off, void *data, int size)
{
	unsigned long flags;
	void __iomem *addr;
	int ret = 0;
	u64 start;
	void __iomem *mem_ptr = NULL;
	unsigned long mem_base;
	unsigned long mem_page;

	write_lock_irqsave(&ha->hw_lock, flags);

	/*
	 * If attempting to access unknown address or straddle hw windows,
	 * do not access.
	 */
	start = qla4_82xx_pci_set_window(ha, off);
	if ((start == -1UL) ||
	    (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		printk(KERN_ERR "%s out of bound pci memory access. "
				"offset is 0x%llx\n", DRIVER_NAME, off);
		return -1;
	}

	addr = qla4_8xxx_pci_base_offsetfset(ha, start);
	if (!addr) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		mem_base = pci_resource_start(ha->pdev, 0);
		mem_page = start & PAGE_MASK;
		/*
		 * Map two pages whenever user tries to access addresses in
		 * two consecutive pages.
		 */
		if (mem_page != ((start + size - 1) & PAGE_MASK))
			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
		else
			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);

		if (mem_ptr == NULL) {
			*(u8 *)data = 0;
			return -1;
		}
		addr = mem_ptr;
		addr += start & (PAGE_SIZE - 1);
		write_lock_irqsave(&ha->hw_lock, flags);
	}

	switch (size) {
	case 1:
		*(u8  *)data = readb(addr);
		break;
	case 2:
		*(u16 *)data = readw(addr);
		break;
	case 4:
		*(u32 *)data = readl(addr);
		break;
	case 8:
		*(u64 *)data = readq(addr);
		break;
	default:
		ret = -1;
		break;
	}
	write_unlock_irqrestore(&ha->hw_lock, flags);

	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}

static int
qla4_82xx_pci_mem_write_direct(struct scsi_qla_host *ha, u64 off,
		void *data, int size)
{
	unsigned long flags;
	void __iomem *addr;
	int ret = 0;
	u64 start;
	void __iomem *mem_ptr = NULL;
	unsigned long mem_base;
	unsigned long mem_page;

	write_lock_irqsave(&ha->hw_lock, flags);

	/*
	 * If attempting to access unknown address or straddle hw windows,
	 * do not access.
	 */
	start = qla4_82xx_pci_set_window(ha, off);
	if ((start == -1UL) ||
	    (qla4_82xx_pci_is_same_window(ha, off + size - 1) == 0)) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		printk(KERN_ERR "%s out of bound pci memory access. "
				"offset is 0x%llx\n", DRIVER_NAME, off);
		return -1;
	}

	addr = qla4_8xxx_pci_base_offsetfset(ha, start);
	if (!addr) {
		write_unlock_irqrestore(&ha->hw_lock, flags);
		mem_base = pci_resource_start(ha->pdev, 0);
		mem_page = start & PAGE_MASK;
		/*
		 * Map two pages whenever user tries to access addresses in
		 * two consecutive pages.
		 */
		if (mem_page != ((start + size - 1) & PAGE_MASK))
			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
		else
			mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
		if (mem_ptr == NULL)
			return -1;

		addr = mem_ptr;
		addr += start & (PAGE_SIZE - 1);
		write_lock_irqsave(&ha->hw_lock, flags);
	}

	switch (size) {
	case 1:
		writeb(*(u8 *)data, addr);
		break;
	case 2:
		writew(*(u16 *)data, addr);
		break;
	case 4:
		writel(*(u32 *)data, addr);
		break;
	case 8:
		writeq(*(u64 *)data, addr);
		break;
	default:
		ret = -1;
		break;
	}
	write_unlock_irqrestore(&ha->hw_lock, flags);
	if (mem_ptr)
		iounmap(mem_ptr);
	return ret;
}

#define MTU_FUDGE_FACTOR 100

static unsigned long
qla4_82xx_decode_crb_addr(unsigned long addr)
{
	int i;
	unsigned long base_addr, offset, pci_base;

	if (!qla4_8xxx_crb_table_initialized)
		qla4_82xx_crb_addr_transform_setup();

	pci_base = ADDR_ERROR;
	base_addr = addr & 0xfff00000;
	offset = addr & 0x000fffff;

	for (i = 0; i < MAX_CRB_XFORM; i++) {
		if (crb_addr_xform[i] == base_addr) {
			pci_base = i << 20;
			break;
		}
	}
	if (pci_base == ADDR_ERROR)
		return pci_base;
	else
		return pci_base + offset;
}
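
/*
 * ROM CRB-init entries carry "internal" addresses (hub/agent in the top 12
 * bits); the reverse lookup through crb_addr_xform[] above recovers the
 * PCI CRB offset, or ADDR_ERROR when no block matches.
 */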

static long rom_max_timeout = 100;
static long qla4_82xx_rom_lock_timeout = 100;

static int
qla4_82xx_rom_lock(struct scsi_qla_host *ha)
{
	int i;
	int done = 0, timeout = 0;

	while (!done) {
		/* acquire semaphore2 from PCI HW block */
		done = qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
		if (done == 1)
			break;
		if (timeout >= qla4_82xx_rom_lock_timeout)
			return -1;

		timeout++;

		/* Yield CPU */
		if (!in_interrupt())
			schedule();
		else {
			for (i = 0; i < 20; i++)
				cpu_relax();	/* This is a nop instr on i386 */
		}
	}
	qla4_82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
	return 0;
}

static void
qla4_82xx_rom_unlock(struct scsi_qla_host *ha)
{
	qla4_82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
}

static int
qla4_82xx_wait_rom_done(struct scsi_qla_host *ha)
{
	long timeout = 0;
	long done = 0;

	while (done == 0) {
		done = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
		done &= 2;
		timeout++;
		if (timeout >= rom_max_timeout) {
			printk("%s: Timeout reached waiting for rom done\n",
					DRIVER_NAME);
			return -1;
		}
	}
	return 0;
}

static int
qla4_82xx_do_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
{
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0xb);
	if (qla4_82xx_wait_rom_done(ha)) {
		printk("%s: Error waiting for rom done\n", DRIVER_NAME);
		return -1;
	}
	/* reset abyte_cnt and dummy_byte_cnt */
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
	udelay(10);
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);

	*valp = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
	return 0;
}
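
/*
 * Fast-read sequence: program the ROM address and byte counts, issue
 * instruction opcode 0xb (fast read), wait for bit 1 of
 * QLA82XX_ROMUSB_GLB_STATUS to signal completion, then pick the data up
 * from ROM_RDATA. The caller must hold the ROM lock (semaphore 2), as
 * qla4_82xx_rom_fast_read() below does.
 */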

static int
qla4_82xx_rom_fast_read(struct scsi_qla_host *ha, int addr, int *valp)
{
	int ret, loops = 0;

	while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
		udelay(100);
		loops++;
	}
	if (loops >= 50000) {
		ql4_printk(KERN_WARNING, ha, "%s: qla4_82xx_rom_lock failed\n",
			   DRIVER_NAME);
		return -1;
	}
	ret = qla4_82xx_do_rom_fast_read(ha, addr, valp);
	qla4_82xx_rom_unlock(ha);
	return ret;
}

/**
 * qla4_82xx_pinit_from_rom - Do the CRB initialize sequence to put the
 *	ISP into an operational state
 * @ha: pointer to adapter structure
 * @verbose: unused
 **/
static int
qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
{
	int addr, val;
	int i;
	struct crb_addr_pair *buf;
	unsigned long off;
	unsigned offset, n;

	struct crb_addr_pair {
		long addr;
		long data;
	};

	/* Halt all the individual PEGs and other blocks of the ISP */
	qla4_82xx_rom_lock(ha);

	/* disable all I2Q */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0);

	/* disable all niu interrupts */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff);
	/* disable xge rx/tx */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00);
	/* disable xg1 rx/tx */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00);
	/* disable sideband mac */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00);
	/* disable ap0 mac */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00);
	/* disable ap1 mac */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00);

	/* halt sre */
	val = qla4_82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1)));

	/* halt epg */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1);

	/* halt timers */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0);

	/* halt pegs */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1);
	msleep(5);

	/* big hammer */
	if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
		/* don't reset CAM block on reset */
		qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
	else
		qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);

	qla4_82xx_rom_unlock(ha);

	/* Read the signature value from the flash.
	 * Offset 0: Contains the signature (0xcafecafe)
	 * Offset 4: Offset and number of addr/value pairs
	 * that are present in the CRB initialize sequence
	 */
	if (qla4_82xx_rom_fast_read(ha, 0, (int *)&n) != 0 ||
	    n != 0xcafecafeUL ||
	    qla4_82xx_rom_fast_read(ha, 4, (int *)&n) != 0) {
		ql4_printk(KERN_WARNING, ha,
			"[ERROR] Reading crb_init area: n: %08x\n", n);
		return -1;
	}

	/* Offset in flash = lower 16 bits
	 * Number of entries = upper 16 bits
	 */
	offset = n & 0xffffU;
	n = (n >> 16) & 0xffffU;

	/* number of addr/value pairs should not exceed 1024 entries */
	if (n >= 1024) {
		ql4_printk(KERN_WARNING, ha,
		    "%s: %s:n=0x%x [ERROR] Card flash not initialized.\n",
		    DRIVER_NAME, __func__, n);
		return -1;
	}

	ql4_printk(KERN_INFO, ha,
		"%s: %d CRB init values found in ROM.\n", DRIVER_NAME, n);

	buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL);
	if (buf == NULL) {
		ql4_printk(KERN_WARNING, ha,
		    "%s: [ERROR] Unable to malloc memory.\n", DRIVER_NAME);
		return -1;
	}

	for (i = 0; i < n; i++) {
		if (qla4_82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 ||
		    qla4_82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) !=
		    0) {
			kfree(buf);
			return -1;
		}

		buf[i].addr = addr;
		buf[i].data = val;
	}

	for (i = 0; i < n; i++) {
		/* Translate internal CRB initialization
		 * address to PCI bus address
		 */
		off = qla4_82xx_decode_crb_addr((unsigned long)buf[i].addr) +
		    QLA82XX_PCI_CRBSPACE;
		/* Not all CRB addr/value pairs are written;
		 * some of them are skipped
		 */

		/* skip if LS bit is set */
		if (off & 0x1) {
			DEBUG2(ql4_printk(KERN_WARNING, ha,
			    "Skip CRB init replay for offset = 0x%lx\n", off));
			continue;
		}

		/* skipping cold reboot MAGIC */
		if (off == QLA82XX_CAM_RAM(0x1fc))
			continue;

		/* do not reset PCI */
		if (off == (ROMUSB_GLB + 0xbc))
			continue;

		/* skip core clock, so that firmware can increase the clock */
		if (off == (ROMUSB_GLB + 0xc8))
			continue;

		/* skip the function enable register */
		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION))
			continue;

		if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2))
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_SMB)
			continue;

		if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET)
			continue;

		if (off == ADDR_ERROR) {
			ql4_printk(KERN_WARNING, ha,
			    "%s: [ERROR] Unknown addr: 0x%08lx\n",
			    DRIVER_NAME, buf[i].addr);
			continue;
		}

		qla4_82xx_wr_32(ha, off, buf[i].data);

		/* ISP requires a much bigger delay to settle down,
		 * else crb_window returns 0xffffffff
		 */
		if (off == QLA82XX_ROMUSB_GLB_SW_RESET)
			msleep(1000);

		/* ISP requires a millisecond delay between
		 * successive CRB register updates
		 */
		msleep(1);
	}

	kfree(buf);

	/* Resetting the data and instruction cache */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8);

	/* Clear all protocol processing engines */
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0);

	return 0;
}

/**
 * qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory
 * @ha: Pointer to adapter structure
 * @addr: MS/off-chip memory address to write to
 * @data: Data to be written
 * @count: word_count to be written
 *
 * Return: On success return QLA_SUCCESS
 *         On error return QLA_ERROR
 **/
int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
				uint32_t *data, uint32_t count)
{
	int i, j;
	uint32_t agt_ctrl;
	unsigned long flags;
	int ret_val = QLA_SUCCESS;

	/* Only 128-bit aligned access */
	if (addr & 0xF) {
		ret_val = QLA_ERROR;
		goto exit_ms_mem_write;
	}

	write_lock_irqsave(&ha->hw_lock, flags);

	/* Write address */
	ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
	if (ret_val == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
			   __func__);
		goto exit_ms_mem_write_unlock;
	}

	for (i = 0; i < count; i++, addr += 16) {
		if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
					     QLA8XXX_ADDR_QDR_NET_MAX)) ||
		      (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
					     QLA8XXX_ADDR_DDR_NET_MAX)))) {
			ret_val = QLA_ERROR;
			goto exit_ms_mem_write_unlock;
		}

		ret_val = ha->isp_ops->wr_reg_indirect(ha,
						       MD_MIU_TEST_AGT_ADDR_LO,
						       addr);
		/* Write data */
		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
						MD_MIU_TEST_AGT_WRDATA_LO,
						*data++);
		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
						MD_MIU_TEST_AGT_WRDATA_HI,
						*data++);
		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
						MD_MIU_TEST_AGT_WRDATA_ULO,
						*data++);
		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
						MD_MIU_TEST_AGT_WRDATA_UHI,
						*data++);
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
				   __func__);
			goto exit_ms_mem_write_unlock;
		}

		/* Check write status */
		ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
						       MIU_TA_CTL_WRITE_ENABLE);
		ret_val |= ha->isp_ops->wr_reg_indirect(ha,
							MD_MIU_TEST_AGT_CTRL,
							MIU_TA_CTL_WRITE_START);
		if (ret_val == QLA_ERROR) {
			ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
				   __func__);
			goto exit_ms_mem_write_unlock;
		}

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			ret_val = ha->isp_ops->rd_reg_indirect(ha,
							MD_MIU_TEST_AGT_CTRL,
							&agt_ctrl);
			if (ret_val == QLA_ERROR) {
				ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
					   __func__);
				goto exit_ms_mem_write_unlock;
			}
			if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		/* Status check failed */
		if (j >= MAX_CTL_CHECK) {
			printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
					   __func__);
			ret_val = QLA_ERROR;
			goto exit_ms_mem_write_unlock;
		}
	}

exit_ms_mem_write_unlock:
	write_unlock_irqrestore(&ha->hw_lock, flags);

exit_ms_mem_write:
	return ret_val;
}
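
/*
 * Note: each loop iteration above consumes four 32-bit words from 'data',
 * i.e. 'count' is in 128-bit units, matching the 16-byte address stride
 * and the alignment check at entry.
 */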

static int
qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
{
	int i, rval = 0;
	long size = 0;
	long flashaddr, memaddr;
	u64 data;
	u32 high, low;

	flashaddr = memaddr = ha->hw.flt_region_bootload;
	size = (image_start - flashaddr) / 8;

	DEBUG2(printk("scsi%ld: %s: bootldr=0x%lx, fw_image=0x%x\n",
	    ha->host_no, __func__, flashaddr, image_start));

	for (i = 0; i < size; i++) {
		if ((qla4_82xx_rom_fast_read(ha, flashaddr, (int *)&low)) ||
		    (qla4_82xx_rom_fast_read(ha, flashaddr + 4,
		    (int *)&high))) {
			rval = -1;
			goto exit_load_from_flash;
		}
		data = ((u64)high << 32) | low;
		rval = qla4_82xx_pci_mem_write_2M(ha, memaddr, &data, 8);
		if (rval)
			goto exit_load_from_flash;

		flashaddr += 8;
		memaddr   += 8;

		if (i % 0x1000 == 0)
			msleep(1);
	}

	udelay(100);

	read_lock(&ha->hw_lock);
	qla4_82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020);
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e);
	read_unlock(&ha->hw_lock);

exit_load_from_flash:
	return rval;
}

static int qla4_82xx_load_fw(struct scsi_qla_host *ha, uint32_t image_start)
{
	u32 rst;

	qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
	if (qla4_82xx_pinit_from_rom(ha, 0) != QLA_SUCCESS) {
		printk(KERN_WARNING "%s: Error during CRB Initialization\n",
		    __func__);
		return QLA_ERROR;
	}

	udelay(500);

	/* at this point, QM is in reset. This could be a problem if there are
	 * incoming d* transition queue messages. QM/PCIE could wedge.
	 * To get around this, QM is brought out of reset.
	 */
	rst = qla4_82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
	/* unreset qm */
	rst &= ~(1 << 28);
	qla4_82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);

	if (qla4_82xx_load_from_flash(ha, image_start)) {
		printk("%s: Error trying to load fw from flash!\n", __func__);
		return QLA_ERROR;
	}

	return QLA_SUCCESS;
}

int
qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *ha,
		u64 off, void *data, int size)
{
	int i, j = 0, k, start, end, loop, sz[2], off0[2];
	int shift_amount;
	uint32_t temp;
	uint64_t off8, val, mem_crb, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */
	if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
			return qla4_82xx_pci_mem_read_direct(ha,
					off, data, size);
	}

	off8 = off & 0xfffffff0;
	off0[0] = off & 0xf;
	sz[0] = (size < (16 - off0[0])) ? size : (16 - off0[0]);
	shift_amount = 4;

	loop = ((off0[0] + size - 1) >> shift_amount) + 1;
	off0[1] = 0;
	sz[1] = size - sz[0];

	for (i = 0; i < loop; i++) {
		temp = off8 + (i << shift_amount);
		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp);
		temp = MIU_TA_CTL_ENABLE;
		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_START_ENABLE;
		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp);

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			printk_ratelimited(KERN_ERR
					   "%s: failed to read through agent\n",
					   __func__);
			break;
		}

		start = off0[i] >> 2;
		end   = (off0[i] + sz[i] - 1) >> 2;
		for (k = start; k <= end; k++) {
			temp = qla4_82xx_rd_32(ha,
				mem_crb + MIU_TEST_AGT_RDDATA(k));
			word[i] |= ((uint64_t)temp << (32 * (k & 1)));
		}
	}

	if (j >= MAX_CTL_CHECK)
		return -1;

	if ((off0[0] & 7) == 0) {
		val = word[0];
	} else {
		val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
		((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
	}

	switch (size) {
	case 1:
		*(uint8_t  *)data = val;
		break;
	case 2:
		*(uint16_t *)data = val;
		break;
	case 4:
		*(uint32_t *)data = val;
		break;
	case 8:
		*(uint64_t *)data = val;
		break;
	}
	return 0;
}
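
/*
 * Reads go through the MIU test agent in aligned 16-byte beats; the loop
 * above gathers the covered words and the shift/mask arithmetic then
 * extracts the (possibly unaligned) 1/2/4/8-byte value requested.
 */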

int
qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha,
		u64 off, void *data, int size)
{
	int i, j, ret = 0, loop, sz[2], off0;
	int scale, shift_amount, startword;
	uint32_t temp;
	uint64_t off8, mem_crb, tmpw, word[2] = {0, 0};

	/*
	 * If not MN, go check for MS or invalid.
	 */
	if (off >= QLA8XXX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX)
		mem_crb = QLA82XX_CRB_QDR_NET;
	else {
		mem_crb = QLA82XX_CRB_DDR_NET;
		if (qla4_82xx_pci_mem_bound_check(ha, off, size) == 0)
			return qla4_82xx_pci_mem_write_direct(ha,
					off, data, size);
	}

	off0 = off & 0x7;
	sz[0] = (size < (8 - off0)) ? size : (8 - off0);
	sz[1] = size - sz[0];

	off8 = off & 0xfffffff0;
	loop = (((off & 0xf) + size - 1) >> 4) + 1;
	shift_amount = 4;
	scale = 2;
	startword = (off & 0xf)/8;

	for (i = 0; i < loop; i++) {
		if (qla4_82xx_pci_mem_read_2M(ha, off8 +
		    (i << shift_amount), &word[i * scale], 8))
			return -1;
	}

	switch (size) {
	case 1:
		tmpw = *((uint8_t *)data);
		break;
	case 2:
		tmpw = *((uint16_t *)data);
		break;
	case 4:
		tmpw = *((uint32_t *)data);
		break;
	case 8:
	default:
		tmpw = *((uint64_t *)data);
		break;
	}

	if (sz[0] == 8)
		word[startword] = tmpw;
	else {
		word[startword] &=
		    ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
		word[startword] |= tmpw << (off0 * 8);
	}

	if (sz[1] != 0) {
		word[startword+1] &= ~(~0ULL << (sz[1] * 8));
		word[startword+1] |= tmpw >> (sz[0] * 8);
	}

	for (i = 0; i < loop; i++) {
		temp = off8 + (i << shift_amount);
		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp);
		temp = 0;
		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp);
		temp = word[i * scale] & 0xffffffff;
		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp);
		temp = (word[i * scale] >> 32) & 0xffffffff;
		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp);
		temp = word[i*scale + 1] & 0xffffffff;
		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO,
		    temp);
		temp = (word[i*scale + 1] >> 32) & 0xffffffff;
		qla4_82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI,
		    temp);

		temp = MIU_TA_CTL_WRITE_ENABLE;
		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);
		temp = MIU_TA_CTL_WRITE_START;
		qla4_82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_CTRL, temp);

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			temp = qla4_82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL);
			if ((temp & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			if (printk_ratelimit())
				ql4_printk(KERN_ERR, ha,
					   "%s: failed to write through agent\n",
					   __func__);
			ret = -1;
			break;
		}
	}

	return ret;
}
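
/*
 * Writes smaller than a full 16-byte beat are handled as
 * read-modify-write: the affected beats are first fetched with
 * qla4_82xx_pci_mem_read_2M(), patched in 'word[]', then pushed back
 * through the test agent.
 */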

static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
{
	u32 val = 0;
	int retries = 60;

	if (!pegtune_val) {
		do {
			val = qla4_82xx_rd_32(ha, CRB_CMDPEG_STATE);
			if ((val == PHAN_INITIALIZE_COMPLETE) ||
			    (val == PHAN_INITIALIZE_ACK))
				return 0;
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(500);
		} while (--retries);

		if (!retries) {
			pegtune_val = qla4_82xx_rd_32(ha,
				QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
			printk(KERN_WARNING "%s: init failed, "
				"pegtune_val = %x\n", __func__, pegtune_val);
			return -1;
		}
	}
	return 0;
}

static int qla4_82xx_rcvpeg_ready(struct scsi_qla_host *ha)
{
	uint32_t state = 0;
	int loops = 0;

	/* Window 1 call */
	read_lock(&ha->hw_lock);
	state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
	read_unlock(&ha->hw_lock);

	while ((state != PHAN_PEG_RCV_INITIALIZED) && (loops < 30000)) {
		udelay(100);
		/* Window 1 call */
		read_lock(&ha->hw_lock);
		state = qla4_82xx_rd_32(ha, CRB_RCVPEG_STATE);
		read_unlock(&ha->hw_lock);

		loops++;
	}

	if (loops >= 30000) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "Receive Peg initialization not complete: 0x%x.\n", state));
		return QLA_ERROR;
	}

	return QLA_SUCCESS;
}

void
qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
{
	uint32_t drv_active;

	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);

	/*
	 * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
	 * shift 1 by func_num to set a bit for the function.
	 * For ISP8022, drv_active has 4 bits per function.
	 */
	if (is_qla8032(ha) || is_qla8042(ha))
		drv_active |= (1 << ha->func_num);
	else
		drv_active |= (1 << (ha->func_num * 4));

	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
		   __func__, ha->host_no, drv_active);
	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
}

void
qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
{
	uint32_t drv_active;

	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);

	/*
	 * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
	 * shift 1 by func_num to clear the bit for the function.
	 * For ISP8022, drv_active has 4 bits per function.
	 */
	if (is_qla8032(ha) || is_qla8042(ha))
		drv_active &= ~(1 << (ha->func_num));
	else
		drv_active &= ~(1 << (ha->func_num * 4));

	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
		   __func__, ha->host_no, drv_active);
	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_ACTIVE, drv_active);
}

inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha)
{
	uint32_t drv_state, drv_active;
	int rval;

	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
	drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);

	/*
	 * For ISP8324 and ISP8042, drv_state register has 1 bit per function,
	 * shift 1 by func_num to check the bit for the function.
	 * For ISP8022, drv_state has 4 bits per function.
	 */
	if (is_qla8032(ha) || is_qla8042(ha))
		rval = drv_state & (1 << ha->func_num);
	else
		rval = drv_state & (1 << (ha->func_num * 4));

	if ((test_bit(AF_EEH_BUSY, &ha->flags)) && drv_active)
		rval = 1;

	return rval;
}

void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
{
	uint32_t drv_state;

	drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);

	/*
	 * For ISP8324 and ISP8042, drv_state register has 1 bit per function,
	 * shift 1 by func_num to set the bit for the function.
	 * For ISP8022, drv_state has 4 bits per function.
	 */
	if (is_qla8032(ha) || is_qla8042(ha))
		drv_state |= (1 << ha->func_num);
	else
		drv_state |= (1 << (ha->func_num * 4));

	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
		   __func__, ha->host_no, drv_state);
	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
}

void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
{
	uint32_t drv_state;

	drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);

	/*
	 * For ISP8324 and ISP8042, drv_state register has 1 bit per function,
	 * shift 1 by func_num to clear the bit for the function.
	 * For ISP8022, drv_state has 4 bits per function.
	 */
	if (is_qla8032(ha) || is_qla8042(ha))
		drv_state &= ~(1 << ha->func_num);
	else
		drv_state &= ~(1 << (ha->func_num * 4));

	ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
		   __func__, ha->host_no, drv_state);
	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, drv_state);
}

static inline void
qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha)
{
	uint32_t qsnt_state;

	qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);

	/*
	 * For ISP8324 and ISP8042, drv_state register has 1 bit per function,
	 * shift 1 by func_num to set the quiescent bit for the function.
	 * For ISP8022, drv_state has 4 bits per function; the quiescent
	 * state is the second bit of that field, hence the "2 <<" below.
	 */
	if (is_qla8032(ha) || is_qla8042(ha))
		qsnt_state |= (1 << ha->func_num);
	else
		qsnt_state |= (2 << (ha->func_num * 4));

	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, qsnt_state);
}

static int
qla4_82xx_start_firmware(struct scsi_qla_host *ha, uint32_t image_start)
{
	uint16_t lnk;

	/* scrub dma mask expansion register */
	qla4_82xx_wr_32(ha, CRB_DMA_SHIFT, 0x55555555);

	/* Overwrite stale initialization register values */
	qla4_82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
	qla4_82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);
	qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
	qla4_82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);

	if (qla4_82xx_load_fw(ha, image_start) != QLA_SUCCESS) {
		printk("%s: Error trying to start fw!\n", __func__);
		return QLA_ERROR;
	}

	/* Handshake with the card before we register the devices. */
	if (qla4_82xx_cmdpeg_ready(ha, 0) != QLA_SUCCESS) {
		printk("%s: Error during card handshake!\n", __func__);
		return QLA_ERROR;
	}

	/* Negotiated Link width */
	pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
	ha->link_width = (lnk >> 4) & 0x3f;

	/* Synchronize with Receive peg */
	return qla4_82xx_rcvpeg_ready(ha);
}

int qla4_82xx_try_start_fw(struct scsi_qla_host *ha)
{
	int rval = QLA_ERROR;

	/*
	 * FW Load priority:
	 * 1) Operational firmware residing in flash.
	 * 2) Fail
	 */

	ql4_printk(KERN_INFO, ha,
	    "FW: Retrieving flash offsets from FLT/FDT ...\n");
	rval = qla4_8xxx_get_flash_info(ha);
	if (rval != QLA_SUCCESS)
		return rval;

	ql4_printk(KERN_INFO, ha,
	    "FW: Attempting to load firmware from flash...\n");
	rval = qla4_82xx_start_firmware(ha, ha->hw.flt_region_fw);

	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash"
		    " FAILED...\n");
		return rval;
	}

	return rval;
}

void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha)
{
	if (qla4_82xx_rom_lock(ha)) {
		/* Someone else is holding the lock. */
		dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
	}

	/*
	 * Either we got the lock, or someone
	 * else died while holding it.
	 * In either case, unlock.
	 */
	qla4_82xx_rom_unlock(ha);
}

static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha,
					     uint32_t addr1, uint32_t mask)
{
	unsigned long timeout;
	uint32_t rval = QLA_SUCCESS;
	uint32_t temp;

	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
	do {
		ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
		if ((temp & mask) != 0)
			break;

		if (time_after_eq(jiffies, timeout)) {
			ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n");
			return QLA_ERROR;
		}
	} while (1);

	return rval;
}

uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
				uint32_t addr3, uint32_t mask, uint32_t addr,
				uint32_t *data_ptr)
{
	int rval = QLA_SUCCESS;
	uint32_t temp;
	uint32_t data;

	rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
	if (rval)
		goto exit_ipmdio_rd_reg;

	temp = (0x40000000 | addr);
	ha->isp_ops->wr_reg_indirect(ha, addr1, temp);

	rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
	if (rval)
		goto exit_ipmdio_rd_reg;

	ha->isp_ops->rd_reg_indirect(ha, addr3, &data);
	*data_ptr = data;

exit_ipmdio_rd_reg:
	return rval;
}
1871
1872
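/* Wait for the MDIO bus busy bit (bit 0) to clear, for up to 100 ms. */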
static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha,
						    uint32_t addr1,
						    uint32_t addr2,
						    uint32_t addr3,
						    uint32_t mask)
{
	unsigned long timeout;
	uint32_t temp;
	uint32_t rval = QLA_SUCCESS;

	timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
	do {
		ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp);
		if ((temp & 0x1) != 1)
			break;
		if (time_after_eq(jiffies, timeout)) {
			ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n");
			return QLA_ERROR;
		}
	} while (1);

	return rval;
}

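/*
 * Write a value to an internal PHY/MDIO register: wait for ready, stage the
 * data in addr3, issue the write command on addr1, then wait for completion.
 */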
static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha,
				  uint32_t addr1, uint32_t addr3,
				  uint32_t mask, uint32_t addr,
				  uint32_t value)
{
	int rval = QLA_SUCCESS;

	rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
	if (rval)
		goto exit_ipmdio_wr_reg;

	ha->isp_ops->wr_reg_indirect(ha, addr3, value);
	ha->isp_ops->wr_reg_indirect(ha, addr1, addr);

	rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);

exit_ipmdio_wr_reg:
	return rval;
}

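/*
 * Minidump RDCRB entry: read op_count CRB registers, starting at addr and
 * stepping by addr_stride, recording an address/value pair for each.
 */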
static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
	struct qla8xxx_minidump_entry_crb *crb_hdr;
	uint32_t *data_ptr = *d_ptr;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
	crb_hdr = (struct qla8xxx_minidump_entry_crb *)entry_hdr;
	r_addr = crb_hdr->addr;
	r_stride = crb_hdr->crb_strd.addr_stride;
	loop_cnt = crb_hdr->op_count;

	for (i = 0; i < loop_cnt; i++) {
		ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
		*data_ptr++ = cpu_to_le32(r_addr);
		*data_ptr++ = cpu_to_le32(r_value);
		r_addr += r_stride;
	}
	*d_ptr = data_ptr;
}

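/*
 * Check whether the pex-dma engine selected by the minidump template is
 * available, i.e. BIT_31 is set in its command-status-and-control register.
 */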
static int qla4_83xx_check_dma_engine_state(struct scsi_qla_host *ha)
{
	int rval = QLA_SUCCESS;
	uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
	uint64_t dma_base_addr = 0;
	struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;

	tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
							ha->fw_dump_tmplt_hdr;
	dma_eng_num =
		tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
	dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
				(dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);

	/* Read the pex-dma's command-status-and-control register. */
	rval = ha->isp_ops->rd_reg_indirect(ha,
			(dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
			&cmd_sts_and_cntrl);

	if (rval)
		return QLA_ERROR;

	/* Check if requested pex-dma engine is available. */
	if (cmd_sts_and_cntrl & BIT_31)
		return QLA_SUCCESS;
	else
		return QLA_ERROR;
}

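/*
 * Program the pex-dma engine with the descriptor address and the start
 * command, then poll for the busy bit (BIT_1) to clear within
 * QLA83XX_PEX_DMA_MAX_WAIT iterations.
 */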
static int qla4_83xx_start_pex_dma(struct scsi_qla_host *ha,
			   struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr)
{
	int rval = QLA_SUCCESS, wait = 0;
	uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
	uint64_t dma_base_addr = 0;
	struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;

	tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
							ha->fw_dump_tmplt_hdr;
	dma_eng_num =
		tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
	dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
				(dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);

	rval = ha->isp_ops->wr_reg_indirect(ha,
				dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_LOW,
				m_hdr->desc_card_addr);
	if (rval)
		goto error_exit;

	rval = ha->isp_ops->wr_reg_indirect(ha,
			      dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_HIGH, 0);
	if (rval)
		goto error_exit;

	rval = ha->isp_ops->wr_reg_indirect(ha,
			      dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL,
			      m_hdr->start_dma_cmd);
	if (rval)
		goto error_exit;

	/* Wait for dma operation to complete. */
	for (wait = 0; wait < QLA83XX_PEX_DMA_MAX_WAIT; wait++) {
		rval = ha->isp_ops->rd_reg_indirect(ha,
			    (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
			    &cmd_sts_and_cntrl);
		if (rval)
			goto error_exit;

		if ((cmd_sts_and_cntrl & BIT_1) == 0)
			break;
		else
			udelay(10);
	}

	/* Wait a max of 100 ms, otherwise fallback to rdmem entry read */
	if (wait >= QLA83XX_PEX_DMA_MAX_WAIT) {
		rval = QLA_ERROR;
		goto error_exit;
	}

error_exit:
	return rval;
}

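/*
 * Minidump RDMEM via pex-dma: DMA adapter memory into a coherent host
 * buffer in QLA83XX_PEX_DMA_READ_SIZE chunks and copy each chunk into the
 * dump.  On failure returns QLA_ERROR so the caller can fall back to the
 * register-based rdmem path.
 */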
static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	int rval = QLA_SUCCESS;
	struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
	uint32_t size, read_size;
	uint8_t *data_ptr = (uint8_t *)*d_ptr;
	void *rdmem_buffer = NULL;
	dma_addr_t rdmem_dma;
	struct qla4_83xx_pex_dma_descriptor dma_desc;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));

	rval = qla4_83xx_check_dma_engine_state(ha);
	if (rval != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: DMA engine not available. Fallback to rdmem-read.\n",
				  __func__));
		return QLA_ERROR;
	}

	m_hdr = (struct qla4_83xx_minidump_entry_rdmem_pex_dma *)entry_hdr;
	rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
					  QLA83XX_PEX_DMA_READ_SIZE,
					  &rdmem_dma, GFP_KERNEL);
	if (!rdmem_buffer) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: Unable to allocate rdmem dma buffer\n",
				  __func__));
		return QLA_ERROR;
	}

	/* Prepare pex-dma descriptor to be written to MS memory. */
	/* dma-desc-cmd layout:
	 *              0-3: dma-desc-cmd 0-3
	 *              4-7: PCI function number
	 *              8-15: dma-desc-cmd 8-15
	 */
	dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
	dma_desc.cmd.dma_desc_cmd |= ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
	dma_desc.dma_bus_addr = rdmem_dma;

	size = 0;
	read_size = 0;
	/*
	 * Perform rdmem operation using pex-dma.
	 * Prepare dma in chunks of QLA83XX_PEX_DMA_READ_SIZE.
	 */
	while (read_size < m_hdr->read_data_size) {
		if (m_hdr->read_data_size - read_size >=
		    QLA83XX_PEX_DMA_READ_SIZE)
			size = QLA83XX_PEX_DMA_READ_SIZE;
		else {
			size = (m_hdr->read_data_size - read_size);

			if (rdmem_buffer)
				dma_free_coherent(&ha->pdev->dev,
						  QLA83XX_PEX_DMA_READ_SIZE,
						  rdmem_buffer, rdmem_dma);

			rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, size,
							  &rdmem_dma,
							  GFP_KERNEL);
			if (!rdmem_buffer) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "%s: Unable to allocate rdmem dma buffer\n",
						  __func__));
				return QLA_ERROR;
			}
			dma_desc.dma_bus_addr = rdmem_dma;
		}

		dma_desc.src_addr = m_hdr->read_addr + read_size;
		dma_desc.cmd.read_data_size = size;

		/* Prepare: Write pex-dma descriptor to MS memory. */
		rval = qla4_8xxx_ms_mem_write_128b(ha,
			      (uint64_t)m_hdr->desc_card_addr,
			      (uint32_t *)&dma_desc,
			      (sizeof(struct qla4_83xx_pex_dma_descriptor)/16));
		if (rval != QLA_SUCCESS) {
			ql4_printk(KERN_INFO, ha,
				   "%s: Error writing rdmem-dma-init to MS !!!\n",
				   __func__);
			goto error_exit;
		}

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s: Dma-desc: Instruct for rdmem dma (size 0x%x).\n",
				  __func__, size));
		/* Execute: Start pex-dma operation. */
		rval = qla4_83xx_start_pex_dma(ha, m_hdr);
		if (rval != QLA_SUCCESS) {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi(%ld): start-pex-dma failed rval=0x%x\n",
					  ha->host_no, rval));
			goto error_exit;
		}

		memcpy(data_ptr, rdmem_buffer, size);
		data_ptr += size;
		read_size += size;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));

	*d_ptr = (uint32_t *)data_ptr;

error_exit:
	if (rdmem_buffer)
		dma_free_coherent(&ha->pdev->dev, size, rdmem_buffer,
				  rdmem_dma);

	return rval;
}

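/*
 * Minidump L2 cache entry: for each tag value, select the cache line,
 * optionally kick and poll the control register, then read out the line
 * data words.
 */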
static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
				 struct qla8xxx_minidump_entry_hdr *entry_hdr,
				 uint32_t **d_ptr)
{
	uint32_t addr, r_addr, c_addr, t_r_addr;
	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
	unsigned long p_wait, w_time, p_mask;
	uint32_t c_value_w, c_value_r;
	struct qla8xxx_minidump_entry_cache *cache_hdr;
	int rval = QLA_ERROR;
	uint32_t *data_ptr = *d_ptr;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
	cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;

	loop_count = cache_hdr->op_count;
	r_addr = cache_hdr->read_addr;
	c_addr = cache_hdr->control_addr;
	c_value_w = cache_hdr->cache_ctrl.write_value;

	t_r_addr = cache_hdr->tag_reg_addr;
	t_value = cache_hdr->addr_ctrl.init_tag_value;
	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
	p_wait = cache_hdr->cache_ctrl.poll_wait;
	p_mask = cache_hdr->cache_ctrl.poll_mask;

	for (i = 0; i < loop_count; i++) {
		ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);

		if (c_value_w)
			ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);

		if (p_mask) {
			w_time = jiffies + p_wait;
			do {
				ha->isp_ops->rd_reg_indirect(ha, c_addr,
							     &c_value_r);
				if ((c_value_r & p_mask) == 0) {
					break;
				} else if (time_after_eq(jiffies, w_time)) {
					/* capturing dump failed */
					return rval;
				}
			} while (1);
		}

		addr = r_addr;
		for (k = 0; k < r_cnt; k++) {
			ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
			*data_ptr++ = cpu_to_le32(r_value);
			addr += cache_hdr->read_ctrl.read_addr_stride;
		}

		t_value += cache_hdr->addr_ctrl.tag_value_stride;
	}
	*d_ptr = data_ptr;
	return QLA_SUCCESS;
}

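/*
 * Minidump CNTRL entry: execute a small opcode program (write, read/write,
 * AND/OR, poll, read/write/modify saved state) against CRB registers.
 */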
static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr)
{
	struct qla8xxx_minidump_entry_crb *crb_entry;
	uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
	uint32_t crb_addr;
	unsigned long wtime;
	struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
	int i;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
	tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
						ha->fw_dump_tmplt_hdr;
	crb_entry = (struct qla8xxx_minidump_entry_crb *)entry_hdr;

	crb_addr = crb_entry->addr;
	for (i = 0; i < crb_entry->op_count; i++) {
		opcode = crb_entry->crb_ctrl.opcode;
		if (opcode & QLA8XXX_DBG_OPCODE_WR) {
			ha->isp_ops->wr_reg_indirect(ha, crb_addr,
						     crb_entry->value_1);
			opcode &= ~QLA8XXX_DBG_OPCODE_WR;
		}
		if (opcode & QLA8XXX_DBG_OPCODE_RW) {
			ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
			ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
			opcode &= ~QLA8XXX_DBG_OPCODE_RW;
		}
		if (opcode & QLA8XXX_DBG_OPCODE_AND) {
			ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
			read_value &= crb_entry->value_2;
			opcode &= ~QLA8XXX_DBG_OPCODE_AND;
			if (opcode & QLA8XXX_DBG_OPCODE_OR) {
				read_value |= crb_entry->value_3;
				opcode &= ~QLA8XXX_DBG_OPCODE_OR;
			}
			ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
		}
		if (opcode & QLA8XXX_DBG_OPCODE_OR) {
			ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);
			read_value |= crb_entry->value_3;
			ha->isp_ops->wr_reg_indirect(ha, crb_addr, read_value);
			opcode &= ~QLA8XXX_DBG_OPCODE_OR;
		}
		if (opcode & QLA8XXX_DBG_OPCODE_POLL) {
			poll_time = crb_entry->crb_strd.poll_timeout;
			wtime = jiffies + poll_time;
			ha->isp_ops->rd_reg_indirect(ha, crb_addr, &read_value);

			do {
				if ((read_value & crb_entry->value_2) ==
				    crb_entry->value_1) {
					break;
				} else if (time_after_eq(jiffies, wtime)) {
					/* capturing dump failed */
					rval = QLA_ERROR;
					break;
				} else {
					ha->isp_ops->rd_reg_indirect(ha,
							crb_addr, &read_value);
				}
			} while (1);
			opcode &= ~QLA8XXX_DBG_OPCODE_POLL;
		}

		if (opcode & QLA8XXX_DBG_OPCODE_RDSTATE) {
			if (crb_entry->crb_strd.state_index_a) {
				index = crb_entry->crb_strd.state_index_a;
				addr = tmplt_hdr->saved_state_array[index];
			} else {
				addr = crb_addr;
			}

			ha->isp_ops->rd_reg_indirect(ha, addr, &read_value);
			index = crb_entry->crb_ctrl.state_index_v;
			tmplt_hdr->saved_state_array[index] = read_value;
			opcode &= ~QLA8XXX_DBG_OPCODE_RDSTATE;
		}

		if (opcode & QLA8XXX_DBG_OPCODE_WRSTATE) {
			if (crb_entry->crb_strd.state_index_a) {
				index = crb_entry->crb_strd.state_index_a;
				addr = tmplt_hdr->saved_state_array[index];
			} else {
				addr = crb_addr;
			}

			if (crb_entry->crb_ctrl.state_index_v) {
				index = crb_entry->crb_ctrl.state_index_v;
				read_value =
					tmplt_hdr->saved_state_array[index];
			} else {
				read_value = crb_entry->value_1;
			}

			ha->isp_ops->wr_reg_indirect(ha, addr, read_value);
			opcode &= ~QLA8XXX_DBG_OPCODE_WRSTATE;
		}

		if (opcode & QLA8XXX_DBG_OPCODE_MDSTATE) {
			index = crb_entry->crb_ctrl.state_index_v;
			read_value = tmplt_hdr->saved_state_array[index];
			read_value <<= crb_entry->crb_ctrl.shl;
			read_value >>= crb_entry->crb_ctrl.shr;
			if (crb_entry->value_2)
				read_value &= crb_entry->value_2;
			read_value |= crb_entry->value_3;
			read_value += crb_entry->value_1;
			tmplt_hdr->saved_state_array[index] = read_value;
			opcode &= ~QLA8XXX_DBG_OPCODE_MDSTATE;
		}
		crb_addr += crb_entry->crb_strd.addr_stride;
	}
	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
	return rval;
}

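/* Minidump RDOCM entry: read on-chip memory directly through the PCI BAR. */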
static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	uint32_t r_addr, r_stride, loop_cnt, i, r_value;
	struct qla8xxx_minidump_entry_rdocm *ocm_hdr;
	uint32_t *data_ptr = *d_ptr;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
	ocm_hdr = (struct qla8xxx_minidump_entry_rdocm *)entry_hdr;
	r_addr = ocm_hdr->read_addr;
	r_stride = ocm_hdr->read_addr_stride;
	loop_cnt = ocm_hdr->op_count;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
			  __func__, r_addr, r_stride, loop_cnt));

	for (i = 0; i < loop_cnt; i++) {
		r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
		*data_ptr++ = cpu_to_le32(r_value);
		r_addr += r_stride;
	}
	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
		__func__, (unsigned long)(loop_cnt * sizeof(uint32_t))));
	*d_ptr = data_ptr;
}

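/*
 * Minidump RDMUX entry: write successive select values to the mux register
 * and record each select/read-back pair.
 */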
static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
	struct qla8xxx_minidump_entry_mux *mux_hdr;
	uint32_t *data_ptr = *d_ptr;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
	mux_hdr = (struct qla8xxx_minidump_entry_mux *)entry_hdr;
	r_addr = mux_hdr->read_addr;
	s_addr = mux_hdr->select_addr;
	s_stride = mux_hdr->select_value_stride;
	s_value = mux_hdr->select_value;
	loop_cnt = mux_hdr->op_count;

	for (i = 0; i < loop_cnt; i++) {
		ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
		ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
		*data_ptr++ = cpu_to_le32(s_value);
		*data_ptr++ = cpu_to_le32(r_value);
		s_value += s_stride;
	}
	*d_ptr = data_ptr;
}

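/* Minidump L1 cache entry: like the L2 handler, but without control polling. */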
static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	uint32_t addr, r_addr, c_addr, t_r_addr;
	uint32_t i, k, loop_count, t_value, r_cnt, r_value;
	uint32_t c_value_w;
	struct qla8xxx_minidump_entry_cache *cache_hdr;
	uint32_t *data_ptr = *d_ptr;

	cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr;
	loop_count = cache_hdr->op_count;
	r_addr = cache_hdr->read_addr;
	c_addr = cache_hdr->control_addr;
	c_value_w = cache_hdr->cache_ctrl.write_value;

	t_r_addr = cache_hdr->tag_reg_addr;
	t_value = cache_hdr->addr_ctrl.init_tag_value;
	r_cnt = cache_hdr->read_ctrl.read_addr_cnt;

	for (i = 0; i < loop_count; i++) {
		ha->isp_ops->wr_reg_indirect(ha, t_r_addr, t_value);
		ha->isp_ops->wr_reg_indirect(ha, c_addr, c_value_w);
		addr = r_addr;
		for (k = 0; k < r_cnt; k++) {
			ha->isp_ops->rd_reg_indirect(ha, addr, &r_value);
			*data_ptr++ = cpu_to_le32(r_value);
			addr += cache_hdr->read_ctrl.read_addr_stride;
		}
		t_value += cache_hdr->addr_ctrl.tag_value_stride;
	}
	*d_ptr = data_ptr;
}

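/*
 * Minidump QUEUE entry: for each queue id, select the queue and read out
 * its registers at the given stride.
 */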
static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	uint32_t s_addr, r_addr;
	uint32_t r_stride, r_value, r_cnt, qid = 0;
	uint32_t i, k, loop_cnt;
	struct qla8xxx_minidump_entry_queue *q_hdr;
	uint32_t *data_ptr = *d_ptr;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
	q_hdr = (struct qla8xxx_minidump_entry_queue *)entry_hdr;
	s_addr = q_hdr->select_addr;
	r_cnt = q_hdr->rd_strd.read_addr_cnt;
	r_stride = q_hdr->rd_strd.read_addr_stride;
	loop_cnt = q_hdr->op_count;

	for (i = 0; i < loop_cnt; i++) {
		ha->isp_ops->wr_reg_indirect(ha, s_addr, qid);
		r_addr = q_hdr->read_addr;
		for (k = 0; k < r_cnt; k++) {
			ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
			*data_ptr++ = cpu_to_le32(r_value);
			r_addr += r_stride;
		}
		qid += q_hdr->q_strd.queue_id_stride;
	}
	*d_ptr = data_ptr;
}

#define MD_DIRECT_ROM_WINDOW		0x42110030
#define MD_DIRECT_ROM_READ_BASE		0x42150000

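/*
 * Minidump RDROM entry (ISP8022): read flash words through the direct ROM
 * read window, re-mapping the 64K window as the address advances.
 */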
static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	uint32_t r_addr, r_value;
	uint32_t i, loop_cnt;
	struct qla8xxx_minidump_entry_rdrom *rom_hdr;
	uint32_t *data_ptr = *d_ptr;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
	rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
	r_addr = rom_hdr->read_addr;
	loop_cnt = rom_hdr->read_data_size / sizeof(uint32_t);

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n",
			   __func__, r_addr, loop_cnt));

	for (i = 0; i < loop_cnt; i++) {
		ha->isp_ops->wr_reg_indirect(ha, MD_DIRECT_ROM_WINDOW,
					     (r_addr & 0xFFFF0000));
		ha->isp_ops->rd_reg_indirect(ha,
				MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF),
				&r_value);
		*data_ptr++ = cpu_to_le32(r_value);
		r_addr += sizeof(uint32_t);
	}
	*d_ptr = data_ptr;
}

#define MD_MIU_TEST_AGT_CTRL		0x41000090
#define MD_MIU_TEST_AGT_ADDR_LO		0x41000094
#define MD_MIU_TEST_AGT_ADDR_HI		0x41000098

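/*
 * Minidump RDMEM fallback: read adapter memory 16 bytes at a time through
 * the MIU test agent registers.  Address and size must be 16-byte aligned.
 */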
static int __qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	uint32_t r_addr, r_value, r_data;
	uint32_t i, j, loop_cnt;
	struct qla8xxx_minidump_entry_rdmem *m_hdr;
	unsigned long flags;
	uint32_t *data_ptr = *d_ptr;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
	m_hdr = (struct qla8xxx_minidump_entry_rdmem *)entry_hdr;
	r_addr = m_hdr->read_addr;
	loop_cnt = m_hdr->read_data_size / 16;

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
			  __func__, r_addr, m_hdr->read_data_size));

	if (r_addr & 0xf) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "[%s]: Read addr 0x%x not 16 bytes aligned\n",
				  __func__, r_addr));
		return QLA_ERROR;
	}

	if (m_hdr->read_data_size % 16) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
				  __func__, m_hdr->read_data_size));
		return QLA_ERROR;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
			  __func__, r_addr, m_hdr->read_data_size, loop_cnt));

	write_lock_irqsave(&ha->hw_lock, flags);
	for (i = 0; i < loop_cnt; i++) {
		ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
					     r_addr);
		r_value = 0;
		ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI,
					     r_value);
		r_value = MIU_TA_CTL_ENABLE;
		ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);
		r_value = MIU_TA_CTL_START_ENABLE;
		ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL, r_value);

		for (j = 0; j < MAX_CTL_CHECK; j++) {
			ha->isp_ops->rd_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
						     &r_value);
			if ((r_value & MIU_TA_CTL_BUSY) == 0)
				break;
		}

		if (j >= MAX_CTL_CHECK) {
			printk_ratelimited(KERN_ERR
					   "%s: failed to read through agent\n",
					   __func__);
			write_unlock_irqrestore(&ha->hw_lock, flags);
			/* Propagate the failure so the entry is skipped. */
			return QLA_ERROR;
		}

		for (j = 0; j < 4; j++) {
			ha->isp_ops->rd_reg_indirect(ha,
						     MD_MIU_TEST_AGT_RDDATA[j],
						     &r_data);
			*data_ptr++ = cpu_to_le32(r_data);
		}

		r_addr += 16;
	}
	write_unlock_irqrestore(&ha->hw_lock, flags);

	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
			  __func__, (loop_cnt * 16)));

	*d_ptr = data_ptr;
	return QLA_SUCCESS;
}

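/* Minidump RDMEM entry: try the pex-dma fast path, else the MIU test agent. */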
static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	uint32_t *data_ptr = *d_ptr;
	int rval = QLA_SUCCESS;

	rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr);
	if (rval != QLA_SUCCESS)
		rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
							  &data_ptr);
	*d_ptr = data_ptr;
	return rval;
}

static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				int index)
{
	entry_hdr->d_ctrl.driver_flags |= QLA8XXX_DBG_SKIPPED_FLAG;
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
			  ha->host_no, index, entry_hdr->entry_type,
			  entry_hdr->d_ctrl.entry_capture_mask));
	/* If the driver encounters an entry type that it cannot process,
	 * it should just skip the entry and adjust the total buffer size by
	 * subtracting the skipped bytes from it.
	 */
	ha->fw_dump_skip_size += entry_hdr->entry_capture_size;
}

/* ISP83xx functions to process new minidump entries... */
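/*
 * Minidump POLLRD entry: write a select value, poll the select address
 * until the poll mask is satisfied, then record the select/data pair.
 */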
static uint32_t qla83xx_minidump_process_pollrd(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
	uint16_t s_stride, i;
	uint32_t *data_ptr = *d_ptr;
	uint32_t rval = QLA_SUCCESS;
	struct qla83xx_minidump_entry_pollrd *pollrd_hdr;

	pollrd_hdr = (struct qla83xx_minidump_entry_pollrd *)entry_hdr;
	s_addr = le32_to_cpu(pollrd_hdr->select_addr);
	r_addr = le32_to_cpu(pollrd_hdr->read_addr);
	s_value = le32_to_cpu(pollrd_hdr->select_value);
	s_stride = le32_to_cpu(pollrd_hdr->select_value_stride);

	poll_mask = le32_to_cpu(pollrd_hdr->poll_mask);

	for (i = 0; i < le32_to_cpu(pollrd_hdr->op_count); i++) {
		ha->isp_ops->wr_reg_indirect(ha, s_addr, s_value);
		poll_wait = le32_to_cpu(pollrd_hdr->poll_wait);
		while (1) {
			ha->isp_ops->rd_reg_indirect(ha, s_addr, &r_value);

			if ((r_value & poll_mask) != 0) {
				break;
			} else {
				msleep(1);
				if (--poll_wait == 0) {
					ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
						   __func__);
					rval = QLA_ERROR;
					goto exit_process_pollrd;
				}
			}
		}
		ha->isp_ops->rd_reg_indirect(ha, r_addr, &r_value);
		*data_ptr++ = cpu_to_le32(s_value);
		*data_ptr++ = cpu_to_le32(r_value);
		s_value += s_stride;
	}

	*d_ptr = data_ptr;

exit_process_pollrd:
	return rval;
}

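/*
 * Minidump RDDFE entry (ISP8044): for each of 'count' locations, issue a
 * tagged command on addr_1, poll for the ready mask, modify and write back
 * the word at addr_2, then capture the written value and the read-back data.
 */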
static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	int loop_cnt;
	uint32_t addr1, addr2, value, data, temp, wrval;
	uint8_t stride, stride2;
	uint16_t count;
	uint32_t poll, mask, data_size, modify_mask;
	uint32_t wait_count = 0;
	uint32_t *data_ptr = *d_ptr;
	struct qla8044_minidump_entry_rddfe *rddfe;
	uint32_t rval = QLA_SUCCESS;

	rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr;
	addr1 = le32_to_cpu(rddfe->addr_1);
	value = le32_to_cpu(rddfe->value);
	stride = le32_to_cpu(rddfe->stride);
	stride2 = le32_to_cpu(rddfe->stride2);
	count = le32_to_cpu(rddfe->count);

	poll = le32_to_cpu(rddfe->poll);
	mask = le32_to_cpu(rddfe->mask);
	modify_mask = le32_to_cpu(rddfe->modify_mask);
	data_size = le32_to_cpu(rddfe->data_size);

	addr2 = addr1 + stride;

	for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
		ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value));

		wait_count = 0;
		while (wait_count < poll) {
			ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
			if ((temp & mask) != 0)
				break;
			wait_count++;
		}

		if (wait_count == poll) {
			ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
			rval = QLA_ERROR;
			goto exit_process_rddfe;
		} else {
			ha->isp_ops->rd_reg_indirect(ha, addr2, &temp);
			temp = temp & modify_mask;
			temp = (temp | ((loop_cnt << 16) | loop_cnt));
			wrval = ((temp << 16) | temp);

			ha->isp_ops->wr_reg_indirect(ha, addr2, wrval);
			ha->isp_ops->wr_reg_indirect(ha, addr1, value);

			wait_count = 0;
			while (wait_count < poll) {
				ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
				if ((temp & mask) != 0)
					break;
				wait_count++;
			}
			if (wait_count == poll) {
				ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
					   __func__);
				rval = QLA_ERROR;
				goto exit_process_rddfe;
			}

			ha->isp_ops->wr_reg_indirect(ha, addr1,
						     ((0x40000000 | value) +
						     stride2));
			wait_count = 0;
			while (wait_count < poll) {
				ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
				if ((temp & mask) != 0)
					break;
				wait_count++;
			}

			if (wait_count == poll) {
				ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
					   __func__);
				rval = QLA_ERROR;
				goto exit_process_rddfe;
			}

			ha->isp_ops->rd_reg_indirect(ha, addr2, &data);

			*data_ptr++ = cpu_to_le32(wrval);
			*data_ptr++ = cpu_to_le32(data);
		}
	}

	*d_ptr = data_ptr;
exit_process_rddfe:
	return rval;
}

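/*
 * Minidump RDMDIO entry (ISP8044): for each step, wait for the MDIO bus to
 * go idle, program the address/command registers, then read and record the
 * selected PHY register.
 */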
static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	int rval = QLA_SUCCESS;
	uint32_t addr1, addr2, value1, value2, data, selval;
	uint8_t stride1, stride2;
	uint32_t addr3, addr4, addr5, addr6, addr7;
	uint16_t count, loop_cnt;
	uint32_t poll, mask;
	uint32_t *data_ptr = *d_ptr;
	struct qla8044_minidump_entry_rdmdio *rdmdio;

	rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr;
	addr1 = le32_to_cpu(rdmdio->addr_1);
	addr2 = le32_to_cpu(rdmdio->addr_2);
	value1 = le32_to_cpu(rdmdio->value_1);
	stride1 = le32_to_cpu(rdmdio->stride_1);
	stride2 = le32_to_cpu(rdmdio->stride_2);
	count = le32_to_cpu(rdmdio->count);

	poll = le32_to_cpu(rdmdio->poll);
	mask = le32_to_cpu(rdmdio->mask);
	value2 = le32_to_cpu(rdmdio->value_2);

	addr3 = addr1 + stride1;

	for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
		rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
							 addr3, mask);
		if (rval)
			goto exit_process_rdmdio;

		addr4 = addr2 - stride1;
		rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4,
					     value2);
		if (rval)
			goto exit_process_rdmdio;

		addr5 = addr2 - (2 * stride1);
		rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5,
					     value1);
		if (rval)
			goto exit_process_rdmdio;

		addr6 = addr2 - (3 * stride1);
		rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask,
					     addr6, 0x2);
		if (rval)
			goto exit_process_rdmdio;

		rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
							 addr3, mask);
		if (rval)
			goto exit_process_rdmdio;

		addr7 = addr2 - (4 * stride1);
		rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3,
						      mask, addr7, &data);
		if (rval)
			goto exit_process_rdmdio;

		selval = (value2 << 18) | (value1 << 2) | 2;

		*data_ptr++ = cpu_to_le32(selval);
		*data_ptr++ = cpu_to_le32(data);

		value1 = value1 + stride2;
		*d_ptr = data_ptr;
	}

exit_process_rdmdio:
	return rval;
}

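/*
 * Minidump POLLWR entry (ISP8044): poll addr_1 for readiness, perform the
 * two template writes, then poll again for completion.  No data is captured.
 */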
static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
	struct qla8044_minidump_entry_pollwr *pollwr_hdr;
	uint32_t wait_count = 0;
	uint32_t rval = QLA_SUCCESS;

	pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
	addr1 = le32_to_cpu(pollwr_hdr->addr_1);
	addr2 = le32_to_cpu(pollwr_hdr->addr_2);
	value1 = le32_to_cpu(pollwr_hdr->value_1);
	value2 = le32_to_cpu(pollwr_hdr->value_2);

	poll = le32_to_cpu(pollwr_hdr->poll);
	mask = le32_to_cpu(pollwr_hdr->mask);

	while (wait_count < poll) {
		ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);

		/* Test against the template's poll mask, not the poll count */
		if ((r_value & mask) != 0)
			break;

		wait_count++;
	}

	if (wait_count == poll) {
		ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
		rval = QLA_ERROR;
		goto exit_process_pollwr;
	}

	ha->isp_ops->wr_reg_indirect(ha, addr2, value2);
	ha->isp_ops->wr_reg_indirect(ha, addr1, value1);

	wait_count = 0;
	while (wait_count < poll) {
		ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);

		if ((r_value & mask) != 0)
			break;
		wait_count++;
	}

exit_process_pollwr:
	return rval;
}

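/*
 * Minidump RDMUX2 entry: like RDMUX, but with two select values per step,
 * recording a masked-select/data pair for each.
 */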
static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	uint32_t sel_val1, sel_val2, t_sel_val, data, i;
	uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
	struct qla83xx_minidump_entry_rdmux2 *rdmux2_hdr;
	uint32_t *data_ptr = *d_ptr;

	rdmux2_hdr = (struct qla83xx_minidump_entry_rdmux2 *)entry_hdr;
	sel_val1 = le32_to_cpu(rdmux2_hdr->select_value_1);
	sel_val2 = le32_to_cpu(rdmux2_hdr->select_value_2);
	sel_addr1 = le32_to_cpu(rdmux2_hdr->select_addr_1);
	sel_addr2 = le32_to_cpu(rdmux2_hdr->select_addr_2);
	sel_val_mask = le32_to_cpu(rdmux2_hdr->select_value_mask);
	read_addr = le32_to_cpu(rdmux2_hdr->read_addr);

	for (i = 0; i < rdmux2_hdr->op_count; i++) {
		ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val1);
		t_sel_val = sel_val1 & sel_val_mask;
		*data_ptr++ = cpu_to_le32(t_sel_val);

		ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
		ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);

		*data_ptr++ = cpu_to_le32(data);

		ha->isp_ops->wr_reg_indirect(ha, sel_addr1, sel_val2);
		t_sel_val = sel_val2 & sel_val_mask;
		*data_ptr++ = cpu_to_le32(t_sel_val);

		ha->isp_ops->wr_reg_indirect(ha, sel_addr2, t_sel_val);
		ha->isp_ops->rd_reg_indirect(ha, read_addr, &data);

		*data_ptr++ = cpu_to_le32(data);

		sel_val1 += rdmux2_hdr->select_value_stride;
		sel_val2 += rdmux2_hdr->select_value_stride;
	}

	*d_ptr = data_ptr;
}

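/*
 * Minidump POLLRDMWR entry: poll, read-modify-write the data register,
 * poll again, then record the address/data pair.
 */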
static uint32_t qla83xx_minidump_process_pollrdmwr(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	uint32_t poll_wait, poll_mask, r_value, data;
	uint32_t addr_1, addr_2, value_1, value_2;
	uint32_t *data_ptr = *d_ptr;
	uint32_t rval = QLA_SUCCESS;
	struct qla83xx_minidump_entry_pollrdmwr *poll_hdr;

	poll_hdr = (struct qla83xx_minidump_entry_pollrdmwr *)entry_hdr;
	addr_1 = le32_to_cpu(poll_hdr->addr_1);
	addr_2 = le32_to_cpu(poll_hdr->addr_2);
	value_1 = le32_to_cpu(poll_hdr->value_1);
	value_2 = le32_to_cpu(poll_hdr->value_2);
	poll_mask = le32_to_cpu(poll_hdr->poll_mask);

	ha->isp_ops->wr_reg_indirect(ha, addr_1, value_1);

	poll_wait = le32_to_cpu(poll_hdr->poll_wait);
	while (1) {
		ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);

		if ((r_value & poll_mask) != 0) {
			break;
		} else {
			msleep(1);
			if (--poll_wait == 0) {
				ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_1\n",
					   __func__);
				rval = QLA_ERROR;
				goto exit_process_pollrdmwr;
			}
		}
	}

	ha->isp_ops->rd_reg_indirect(ha, addr_2, &data);
	data &= le32_to_cpu(poll_hdr->modify_mask);
	ha->isp_ops->wr_reg_indirect(ha, addr_2, data);
	ha->isp_ops->wr_reg_indirect(ha, addr_1, value_2);

	poll_wait = le32_to_cpu(poll_hdr->poll_wait);
	while (1) {
		ha->isp_ops->rd_reg_indirect(ha, addr_1, &r_value);

		if ((r_value & poll_mask) != 0) {
			break;
		} else {
			msleep(1);
			if (--poll_wait == 0) {
				ql4_printk(KERN_ERR, ha, "%s: TIMEOUT_2\n",
					   __func__);
				rval = QLA_ERROR;
				goto exit_process_pollrdmwr;
			}
		}
	}

	*data_ptr++ = cpu_to_le32(addr_2);
	*data_ptr++ = cpu_to_le32(data);
	*d_ptr = data_ptr;

exit_process_pollrdmwr:
	return rval;
}

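/*
 * Minidump RDROM entry (ISP8324/ISP8042): read flash via the lockless
 * 32-bit flash read helper.
 */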
static uint32_t qla4_83xx_minidump_process_rdrom(struct scsi_qla_host *ha,
				struct qla8xxx_minidump_entry_hdr *entry_hdr,
				uint32_t **d_ptr)
{
	uint32_t fl_addr, u32_count, rval;
	struct qla8xxx_minidump_entry_rdrom *rom_hdr;
	uint32_t *data_ptr = *d_ptr;

	rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr;
	fl_addr = le32_to_cpu(rom_hdr->read_addr);
	u32_count = le32_to_cpu(rom_hdr->read_data_size) / sizeof(uint32_t);

	DEBUG2(ql4_printk(KERN_INFO, ha, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
			  __func__, fl_addr, u32_count));

	rval = qla4_83xx_lockless_flash_read_u32(ha, fl_addr,
						 (u8 *)(data_ptr), u32_count);

	if (rval == QLA_ERROR) {
		ql4_printk(KERN_ERR, ha, "%s: Flash Read Error, Count=%d\n",
			   __func__, u32_count);
		goto exit_process_rdrom;
	}

	data_ptr += u32_count;
	*d_ptr = data_ptr;

exit_process_rdrom:
	return rval;
}

/**
 * qla4_8xxx_collect_md_data - Retrieve firmware minidump data.
 * @ha: pointer to adapter structure
 **/
static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
{
	int num_entry_hdr = 0;
	struct qla8xxx_minidump_entry_hdr *entry_hdr;
	struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
	uint32_t *data_ptr;
	uint32_t data_collected = 0;
	int i, rval = QLA_ERROR;
	uint64_t now;
	uint32_t timestamp;

	ha->fw_dump_skip_size = 0;
	if (!ha->fw_dump) {
		ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
			   __func__, ha->host_no);
		return rval;
	}

	tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
						ha->fw_dump_tmplt_hdr;
	data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
						ha->fw_dump_tmplt_size);
	data_collected += ha->fw_dump_tmplt_size;

	num_entry_hdr = tmplt_hdr->num_of_entries;
	ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
		   __func__, data_ptr);
	ql4_printk(KERN_INFO, ha,
		   "[%s]: no of entry headers in Template: 0x%x\n",
		   __func__, num_entry_hdr);
	ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
		   __func__, ha->fw_dump_capture_mask);
	ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
		   __func__, ha->fw_dump_size, ha->fw_dump_size);

	/* Update current timestamp before taking dump */
	now = get_jiffies_64();
	timestamp = (u32)(jiffies_to_msecs(now) / 1000);
	tmplt_hdr->driver_timestamp = timestamp;

	entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
					(((uint8_t *)ha->fw_dump_tmplt_hdr) +
					 tmplt_hdr->first_entry_offset);

	if (is_qla8032(ha) || is_qla8042(ha))
		tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] =
					tmplt_hdr->ocm_window_reg[ha->func_num];

	/* Walk through the entry headers - validate/perform required action */
	for (i = 0; i < num_entry_hdr; i++) {
		if (data_collected > ha->fw_dump_size) {
			ql4_printk(KERN_INFO, ha,
				   "Data collected: [0x%x], Total Dump size: [0x%x]\n",
				   data_collected, ha->fw_dump_size);
			return rval;
		}

		if (!(entry_hdr->d_ctrl.entry_capture_mask &
		      ha->fw_dump_capture_mask)) {
			entry_hdr->d_ctrl.driver_flags |=
						QLA8XXX_DBG_SKIPPED_FLAG;
			goto skip_nxt_entry;
		}

		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "Data collected: [0x%x], Dump size left:[0x%x]\n",
				  data_collected,
				  (ha->fw_dump_size - data_collected)));

		/* Decode the entry type and take required action to capture
		 * debug data
		 */
		switch (entry_hdr->entry_type) {
		case QLA8XXX_RDEND:
			qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
			break;
		case QLA8XXX_CNTRL:
			rval = qla4_8xxx_minidump_process_control(ha,
								  entry_hdr);
			if (rval != QLA_SUCCESS) {
				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
				goto md_failed;
			}
			break;
		case QLA8XXX_RDCRB:
			qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
							 &data_ptr);
			break;
		case QLA8XXX_RDMEM:
			rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
								&data_ptr);
			if (rval != QLA_SUCCESS) {
				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
				goto md_failed;
			}
			break;
		case QLA8XXX_BOARD:
		case QLA8XXX_RDROM:
			if (is_qla8022(ha)) {
				qla4_82xx_minidump_process_rdrom(ha, entry_hdr,
								 &data_ptr);
			} else if (is_qla8032(ha) || is_qla8042(ha)) {
				rval = qla4_83xx_minidump_process_rdrom(ha,
								    entry_hdr,
								    &data_ptr);
				if (rval != QLA_SUCCESS)
					qla4_8xxx_mark_entry_skipped(ha,
								     entry_hdr,
								     i);
			}
			break;
		case QLA8XXX_L2DTG:
		case QLA8XXX_L2ITG:
		case QLA8XXX_L2DAT:
		case QLA8XXX_L2INS:
			rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
								&data_ptr);
			if (rval != QLA_SUCCESS) {
				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
				goto md_failed;
			}
			break;
		case QLA8XXX_L1DTG:
		case QLA8XXX_L1ITG:
		case QLA8XXX_L1DAT:
		case QLA8XXX_L1INS:
			qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
							   &data_ptr);
			break;
		case QLA8XXX_RDOCM:
			qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
							 &data_ptr);
			break;
		case QLA8XXX_RDMUX:
			qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
							 &data_ptr);
			break;
		case QLA8XXX_QUEUE:
			qla4_8xxx_minidump_process_queue(ha, entry_hdr,
							 &data_ptr);
			break;
		case QLA83XX_POLLRD:
			if (is_qla8022(ha)) {
				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
				break;
			}
			rval = qla83xx_minidump_process_pollrd(ha, entry_hdr,
							       &data_ptr);
			if (rval != QLA_SUCCESS)
				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
			break;
		case QLA83XX_RDMUX2:
			if (is_qla8022(ha)) {
				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
				break;
			}
			qla83xx_minidump_process_rdmux2(ha, entry_hdr,
							&data_ptr);
			break;
		case QLA83XX_POLLRDMWR:
			if (is_qla8022(ha)) {
				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
				break;
			}
			rval = qla83xx_minidump_process_pollrdmwr(ha, entry_hdr,
								  &data_ptr);
			if (rval != QLA_SUCCESS)
				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
			break;
		case QLA8044_RDDFE:
			rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr,
								&data_ptr);
			if (rval != QLA_SUCCESS)
				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
			break;
		case QLA8044_RDMDIO:
			rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr,
								 &data_ptr);
			if (rval != QLA_SUCCESS)
				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
			break;
		case QLA8044_POLLWR:
			rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr,
								 &data_ptr);
			if (rval != QLA_SUCCESS)
				qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
			break;
		case QLA8XXX_RDNOP:
		default:
			qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
			break;
		}

		data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->fw_dump;
skip_nxt_entry:
		/* next entry in the template */
		entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
				(((uint8_t *)entry_hdr) +
				 entry_hdr->entry_size);
	}

	if ((data_collected + ha->fw_dump_skip_size) != ha->fw_dump_size) {
		ql4_printk(KERN_INFO, ha,
			   "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
			   data_collected, ha->fw_dump_size);
		rval = QLA_ERROR;
		goto md_failed;
	}

	DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
			  __func__, i));
md_failed:
	return rval;
}

/**
 * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
 * @ha: pointer to adapter structure
 * @code: uevent code to emit
 **/
static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
{
	char event_string[40];
	char *envp[] = { event_string, NULL };

	switch (code) {
	case QL4_UEVENT_CODE_FW_DUMP:
		snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
			 ha->host_no);
		break;
	default:
		/* do nothing */
		break;
	}

	kobject_uevent_env(&ha->pdev->dev.kobj, KOBJ_CHANGE, envp);
}

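/**
 * qla4_8xxx_get_minidump - Collect the minidump during firmware recovery
 * @ha: pointer to adapter structure
 *
 * Collects the dump once per recovery, provided capture is enabled via the
 * ql4xenablemd module parameter, and emits a uevent on success.
 **/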
void qla4_8xxx_get_minidump(struct scsi_qla_host *ha)
{
	if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
	    !test_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
		if (!qla4_8xxx_collect_md_data(ha)) {
			qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
			set_bit(AF_82XX_FW_DUMPED, &ha->flags);
		} else {
			ql4_printk(KERN_INFO, ha, "%s: Unable to collect minidump\n",
				   __func__);
		}
	}
}

/**
 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
 * @ha: pointer to adapter structure
 *
 * Note: IDC lock must be held upon entry
 **/
int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
{
	int rval = QLA_ERROR;
	int i;
	uint32_t old_count, count;
	int need_reset = 0;

	need_reset = ha->isp_ops->need_reset(ha);

	if (need_reset) {
		/* We are trying to perform a recovery here. */
		if (test_bit(AF_FW_RECOVERY, &ha->flags))
			ha->isp_ops->rom_lock_recovery(ha);
	} else {
		old_count = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
		for (i = 0; i < 10; i++) {
			msleep(200);
			count = qla4_8xxx_rd_direct(ha,
						    QLA8XXX_PEG_ALIVE_COUNTER);
			if (count != old_count) {
				rval = QLA_SUCCESS;
				goto dev_ready;
			}
		}
		ha->isp_ops->rom_lock_recovery(ha);
	}

	/* set to DEV_INITIALIZING */
	ql4_printk(KERN_INFO, ha, "HW State: INITIALIZING\n");
	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
			    QLA8XXX_DEV_INITIALIZING);

	ha->isp_ops->idc_unlock(ha);

	if (is_qla8022(ha))
		qla4_8xxx_get_minidump(ha);

	rval = ha->isp_ops->restart_firmware(ha);
	ha->isp_ops->idc_lock(ha);

	if (rval != QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha, "HW State: FAILED\n");
		qla4_8xxx_clear_drv_active(ha);
		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
				    QLA8XXX_DEV_FAILED);
		return rval;
	}

dev_ready:
	ql4_printk(KERN_INFO, ha, "HW State: READY\n");
	qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE, QLA8XXX_DEV_READY);

	return rval;
}

/**
 * qla4_82xx_need_reset_handler - Code to start reset sequence
 * @ha: pointer to adapter structure
 *
 * Note: IDC lock must be held upon entry
 **/
static void
qla4_82xx_need_reset_handler(struct scsi_qla_host *ha)
{
	uint32_t dev_state, drv_state, drv_active;
	uint32_t active_mask = 0xFFFFFFFF;
	unsigned long reset_timeout;

	ql4_printk(KERN_INFO, ha, "Performing ISP error recovery\n");

	if (test_and_clear_bit(AF_ONLINE, &ha->flags)) {
		qla4_82xx_idc_unlock(ha);
		ha->isp_ops->disable_intrs(ha);
		qla4_82xx_idc_lock(ha);
	}

	if (!test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
				  "%s(%ld): reset acknowledged\n",
				  __func__, ha->host_no));
		qla4_8xxx_set_rst_ready(ha);
	} else {
		active_mask = (~(1 << (ha->func_num * 4)));
	}

	/* wait for 10 seconds for reset ack from all functions */
	reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);

	drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
	drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);

	ql4_printk(KERN_INFO, ha,
		"%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
		__func__, ha->host_no, drv_state, drv_active);

	while (drv_state != (drv_active & active_mask)) {
		if (time_after_eq(jiffies, reset_timeout)) {
			ql4_printk(KERN_INFO, ha,
				   "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
				   DRIVER_NAME, drv_state, drv_active);
			break;
		}

		/*
		 * When reset_owner times out, check which functions
		 * acked/did not ack
		 */
		if (test_bit(AF_8XXX_RST_OWNER, &ha->flags)) {
			ql4_printk(KERN_INFO, ha,
				   "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
				   __func__, ha->host_no, drv_state,
				   drv_active);
		}
		qla4_82xx_idc_unlock(ha);
		msleep(1000);
		qla4_82xx_idc_lock(ha);

		drv_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
		drv_active = qla4_82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
	}

	/* Clear RESET OWNER as we are not going to use it any further */
	clear_bit(AF_8XXX_RST_OWNER, &ha->flags);

	dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
	ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
		   dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");

	/* Force to DEV_COLD unless someone else is starting a reset */
	if (dev_state != QLA8XXX_DEV_INITIALIZING) {
		ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
		qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
		qla4_8xxx_set_rst_ready(ha);
	}
}

/**
 * qla4_8xxx_need_qsnt_handler - Code to start qsnt
 * @ha: pointer to adapter structure
 **/
void
qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha)
{
	ha->isp_ops->idc_lock(ha);
	qla4_8xxx_set_qsnt_ready(ha);
	ha->isp_ops->idc_unlock(ha);
}

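/* First function to load sets the IDC version; later ones validate it. */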
static void qla4_82xx_set_idc_ver(struct scsi_qla_host *ha)
{
	int idc_ver;
	uint32_t drv_active;

	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
	if (drv_active == (1 << (ha->func_num * 4))) {
		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION,
				    QLA82XX_IDC_VERSION);
		ql4_printk(KERN_INFO, ha,
			   "%s: IDC version updated to %d\n", __func__,
			   QLA82XX_IDC_VERSION);
	} else {
		idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
		if (idc_ver != QLA82XX_IDC_VERSION) {
			ql4_printk(KERN_INFO, ha,
				   "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
				   __func__, QLA82XX_IDC_VERSION, idc_ver);
		}
	}
}

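/*
 * First function to load sets the major IDC version; later ones must match
 * it.  Each function also publishes its minor version in its own 2-bit field.
 */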
static int qla4_83xx_set_idc_ver(struct scsi_qla_host *ha)
{
	int idc_ver;
	uint32_t drv_active;
	int rval = QLA_SUCCESS;

	drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
	if (drv_active == (1 << ha->func_num)) {
		idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
		idc_ver &= (~0xFF);
		idc_ver |= QLA83XX_IDC_VER_MAJ_VALUE;
		qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION, idc_ver);
		ql4_printk(KERN_INFO, ha,
			   "%s: IDC version updated to %d\n", __func__,
			   idc_ver);
	} else {
		idc_ver = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_IDC_VERSION);
		idc_ver &= 0xFF;
		if (idc_ver != QLA83XX_IDC_VER_MAJ_VALUE) {
			ql4_printk(KERN_INFO, ha,
				   "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n",
				   __func__, QLA83XX_IDC_VER_MAJ_VALUE,
				   idc_ver);
			rval = QLA_ERROR;
			goto exit_set_idc_ver;
		}
	}

	/* Update IDC_MINOR_VERSION */
	idc_ver = qla4_83xx_rd_reg(ha, QLA83XX_CRB_IDC_VER_MINOR);
	idc_ver &= ~(0x03 << (ha->func_num * 2));
	idc_ver |= (QLA83XX_IDC_VER_MIN_VALUE << (ha->func_num * 2));
	qla4_83xx_wr_reg(ha, QLA83XX_CRB_IDC_VER_MINOR, idc_ver);

exit_set_idc_ver:
	return rval;
}

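/**
 * qla4_8xxx_update_idc_reg - Set drv_active and IDC version on first init
 * @ha: pointer to adapter structure
 **/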
int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
{
	uint32_t drv_active;
	int rval = QLA_SUCCESS;

	if (test_bit(AF_INIT_DONE, &ha->flags))
		goto exit_update_idc_reg;

	ha->isp_ops->idc_lock(ha);
	qla4_8xxx_set_drv_active(ha);

	/*
	 * If we are the first driver to load and
	 * ql4xdontresethba is not set, clear IDC_CTRL BIT0.
	 */
	if (is_qla8032(ha) || is_qla8042(ha)) {
		drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
		if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba)
			qla4_83xx_clear_idc_dontreset(ha);
	}

	if (is_qla8022(ha)) {
		qla4_82xx_set_idc_ver(ha);
	} else if (is_qla8032(ha) || is_qla8042(ha)) {
		rval = qla4_83xx_set_idc_ver(ha);
		if (rval == QLA_ERROR)
			qla4_8xxx_clear_drv_active(ha);
	}

	ha->isp_ops->idc_unlock(ha);

exit_update_idc_reg:
	return rval;
}

/**
 * qla4_8xxx_device_state_handler - Adapter state machine
 * @ha: pointer to host adapter structure.
 *
 * Note: IDC lock must be UNLOCKED upon entry
 **/
int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
{
	uint32_t dev_state;
	int rval = QLA_SUCCESS;
	unsigned long dev_init_timeout;

	rval = qla4_8xxx_update_idc_reg(ha);
	if (rval == QLA_ERROR)
		goto exit_state_handler;

	dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
	DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
			  dev_state, dev_state < MAX_STATES ?
			  qdev_state[dev_state] : "Unknown"));

	/* wait for 30 seconds for device to go ready */
	dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);

	ha->isp_ops->idc_lock(ha);
	while (1) {
		if (time_after_eq(jiffies, dev_init_timeout)) {
			ql4_printk(KERN_WARNING, ha,
				   "%s: Device Init Failed 0x%x = %s\n",
				   DRIVER_NAME,
				   dev_state, dev_state < MAX_STATES ?
				   qdev_state[dev_state] : "Unknown");
			qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
					    QLA8XXX_DEV_FAILED);
		}

		dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
		ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
			   dev_state, dev_state < MAX_STATES ?
			   qdev_state[dev_state] : "Unknown");

		/* NOTE: Make sure idc unlocked upon exit of switch statement */
		switch (dev_state) {
		case QLA8XXX_DEV_READY:
			goto exit;
		case QLA8XXX_DEV_COLD:
			rval = qla4_8xxx_device_bootstrap(ha);
			goto exit;
		case QLA8XXX_DEV_INITIALIZING:
			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);
			break;
		case QLA8XXX_DEV_NEED_RESET:
			/*
			 * For ISP8324 and ISP8042, if NEED_RESET is set by any
			 * driver, it should be honored, irrespective of
			 * IDC_CTRL DONTRESET_BIT0
			 */
			if (is_qla8032(ha) || is_qla8042(ha)) {
				qla4_83xx_need_reset_handler(ha);
			} else if (is_qla8022(ha)) {
				if (!ql4xdontresethba) {
					qla4_82xx_need_reset_handler(ha);
					/* Update timeout value after need
					 * reset handler */
					dev_init_timeout = jiffies +
						(ha->nx_dev_init_timeout * HZ);
				} else {
					ha->isp_ops->idc_unlock(ha);
					msleep(1000);
					ha->isp_ops->idc_lock(ha);
				}
			}
			break;
		case QLA8XXX_DEV_NEED_QUIESCENT:
			/* idc locked/unlocked in handler */
			qla4_8xxx_need_qsnt_handler(ha);
			break;
		case QLA8XXX_DEV_QUIESCENT:
			ha->isp_ops->idc_unlock(ha);
			msleep(1000);
			ha->isp_ops->idc_lock(ha);
			break;
		case QLA8XXX_DEV_FAILED:
			ha->isp_ops->idc_unlock(ha);
			qla4xxx_dead_adapter_cleanup(ha);
			rval = QLA_ERROR;
			ha->isp_ops->idc_lock(ha);
			goto exit;
		default:
			ha->isp_ops->idc_unlock(ha);
			qla4xxx_dead_adapter_cleanup(ha);
			rval = QLA_ERROR;
			ha->isp_ops->idc_lock(ha);
			goto exit;
		}
	}
exit:
	ha->isp_ops->idc_unlock(ha);
exit_state_handler:
	return rval;
}

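/**
 * qla4_8xxx_load_risc - Bring the device to READY state, set up rings/IRQs
 * @ha: pointer to adapter structure
 **/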
int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
{
	int retval;

	/* clear the interrupt */
	if (is_qla8032(ha) || is_qla8042(ha)) {
		writel(0, &ha->qla4_83xx_reg->risc_intr);
		readl(&ha->qla4_83xx_reg->risc_intr);
	} else if (is_qla8022(ha)) {
		writel(0, &ha->qla4_82xx_reg->host_int);
		readl(&ha->qla4_82xx_reg->host_int);
	}

	retval = qla4_8xxx_device_state_handler(ha);

	/* Initialize request and response queues. */
	if (retval == QLA_SUCCESS)
		qla4xxx_init_rings(ha);

	if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
		retval = qla4xxx_request_irqs(ha);

	return retval;
}

/*****************************************************************************/
/* Flash Manipulation Routines                                               */
/*****************************************************************************/

#define OPTROM_BURST_SIZE       0x1000
#define OPTROM_BURST_DWORDS     (OPTROM_BURST_SIZE / 4)

#define FARX_DATA_FLAG	BIT_31
#define FARX_ACCESS_FLASH_CONF	0x7FFD0000
#define FARX_ACCESS_FLASH_DATA	0x7FF00000

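/*
 * Form the indirect flash addresses used for flash configuration and
 * flash data access by OR-ing the byte offset into the FARX base.
 */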
static inline uint32_t
flash_conf_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
{
	return hw->flash_conf_off | faddr;
}

static inline uint32_t
flash_data_addr(struct ql82xx_hw_data *hw, uint32_t faddr)
{
	return hw->flash_data_off | faddr;
}

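/*
 * Read 'length' bytes of flash, a dword at a time, starting at byte
 * address 'faddr' into 'dwptr' while holding the ROM lock.  Data is
 * stored little-endian.  On lock acquisition or read failure the buffer
 * may be only partially filled.
 */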
static uint32_t *
qla4_82xx_read_flash_data(struct scsi_qla_host *ha, uint32_t *dwptr,
    uint32_t faddr, uint32_t length)
{
	uint32_t i;
	uint32_t val;
	int loops = 0;

	while ((qla4_82xx_rom_lock(ha) != 0) && (loops < 50000)) {
		udelay(100);
		cond_resched();
		loops++;
	}
	if (loops >= 50000) {
		ql4_printk(KERN_WARNING, ha, "ROM lock failed\n");
		return dwptr;
	}

	/* Dword reads to flash. */
	for (i = 0; i < length/4; i++, faddr += 4) {
		if (qla4_82xx_do_rom_fast_read(ha, faddr, &val)) {
			ql4_printk(KERN_WARNING, ha,
			    "ROM fast read failed\n");
			goto done_read;
		}
		dwptr[i] = cpu_to_le32(val);
	}

done_read:
	qla4_82xx_rom_unlock(ha);
	return dwptr;
}

/*
 * Address and length are byte addresses.
 */
static uint8_t *
qla4_82xx_read_optrom_data(struct scsi_qla_host *ha, uint8_t *buf,
		uint32_t offset, uint32_t length)
{
	qla4_82xx_read_flash_data(ha, (uint32_t *)buf, offset, length);
	return buf;
}

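/*
 * Locate the start of the Flash Layout Table.  ISP82xx parts use a
 * fixed, well-known flash address, so the default always applies.
 */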
static int
qla4_8xxx_find_flt_start(struct scsi_qla_host *ha, uint32_t *start)
{
	const char *loc, *locations[] = { "DEF", "PCI" };

	/*
	 * FLT-location structure resides after the last PCI region.
	 */

	/* Begin with sane defaults. */
	loc = locations[0];
	*start = FA_FLASH_LAYOUT_ADDR_82;

	DEBUG2(ql4_printk(KERN_INFO, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
	return QLA_SUCCESS;
}

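/*
 * Parse the Flash Layout Table at dword offset 'flt_addr' and cache the
 * region start offsets (FDT, boot, firmware, bootload, iSCSI CHAP/DDB)
 * in ha->hw.  Hardcoded defaults are used if the table is missing,
 * unsupported, or fails its checksum.
 */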
static void
qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
{
	const char *loc, *locations[] = { "DEF", "FLT" };
	uint16_t *wptr;
	uint16_t cnt, chksum;
	uint32_t start, status;
	struct qla_flt_header *flt;
	struct qla_flt_region *region;
	struct ql82xx_hw_data *hw = &ha->hw;

	hw->flt_region_flt = flt_addr;
	wptr = (uint16_t *)ha->request_ring;
	flt = (struct qla_flt_header *)ha->request_ring;
	region = (struct qla_flt_region *)&flt[1];

	if (is_qla8022(ha)) {
		qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
					   flt_addr << 2, OPTROM_BURST_SIZE);
	} else if (is_qla8032(ha) || is_qla8042(ha)) {
		status = qla4_83xx_flash_read_u32(ha, flt_addr << 2,
						  (uint8_t *)ha->request_ring,
						  0x400);
		if (status != QLA_SUCCESS)
			goto no_flash_data;
	}

	if (*wptr == cpu_to_le16(0xffff))
		goto no_flash_data;
	if (flt->version != cpu_to_le16(1)) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "Unsupported FLT detected: "
			"version=0x%x length=0x%x checksum=0x%x.\n",
			le16_to_cpu(flt->version), le16_to_cpu(flt->length),
			le16_to_cpu(flt->checksum)));
		goto no_flash_data;
	}

	cnt = (sizeof(struct qla_flt_header) + le16_to_cpu(flt->length)) >> 1;
	for (chksum = 0; cnt; cnt--)
		chksum += le16_to_cpu(*wptr++);
	if (chksum) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
			"version=0x%x length=0x%x checksum=0x%x.\n",
			le16_to_cpu(flt->version), le16_to_cpu(flt->length),
			chksum));
		goto no_flash_data;
	}

	loc = locations[1];
	cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region);
	for ( ; cnt; cnt--, region++) {
		/* Store addresses as DWORD offsets. */
		start = le32_to_cpu(region->start) >> 2;

		DEBUG3(ql4_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
		    "end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
		    le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));

		switch (le32_to_cpu(region->code) & 0xff) {
		case FLT_REG_FDT:
			hw->flt_region_fdt = start;
			break;
		case FLT_REG_BOOT_CODE_82:
			hw->flt_region_boot = start;
			break;
		case FLT_REG_FW_82:
		case FLT_REG_FW_82_1:
			hw->flt_region_fw = start;
			break;
		case FLT_REG_BOOTLOAD_82:
			hw->flt_region_bootload = start;
			break;
		case FLT_REG_ISCSI_PARAM:
			hw->flt_iscsi_param = start;
			break;
		case FLT_REG_ISCSI_CHAP:
			hw->flt_region_chap = start;
			hw->flt_chap_size = le32_to_cpu(region->size);
			break;
		case FLT_REG_ISCSI_DDB:
			hw->flt_region_ddb = start;
			hw->flt_ddb_size = le32_to_cpu(region->size);
			break;
		}
	}
	goto done;

no_flash_data:
	/* Use hardcoded defaults. */
	loc = locations[0];

	hw->flt_region_fdt      = FA_FLASH_DESCR_ADDR_82;
	hw->flt_region_boot     = FA_BOOT_CODE_ADDR_82;
	hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82;
	hw->flt_region_fw       = FA_RISC_CODE_ADDR_82;
	hw->flt_region_chap	= FA_FLASH_ISCSI_CHAP >> 2;
	hw->flt_chap_size	= FA_FLASH_CHAP_SIZE;
	hw->flt_region_ddb	= FA_FLASH_ISCSI_DDB >> 2;
	hw->flt_ddb_size	= FA_FLASH_DDB_SIZE;

done:
	DEBUG2(ql4_printk(KERN_INFO, ha,
			  "FLT[%s]: flt=0x%x fdt=0x%x boot=0x%x bootload=0x%x fw=0x%x chap=0x%x chap_size=0x%x ddb=0x%x ddb_size=0x%x\n",
			  loc, hw->flt_region_flt, hw->flt_region_fdt,
			  hw->flt_region_boot, hw->flt_region_bootload,
			  hw->flt_region_fw, hw->flt_region_chap,
			  hw->flt_chap_size, hw->flt_region_ddb,
			  hw->flt_ddb_size));
}

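/*
 * Read and validate the Flash Descriptor Table to determine the flash
 * part's erase/protect commands, write-disable bits and block size.
 * Falls back to a 64K block size if no valid FDT is present.
 */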
static void
qla4_82xx_get_fdt_info(struct scsi_qla_host *ha)
{
#define FLASH_BLK_SIZE_4K       0x1000
#define FLASH_BLK_SIZE_32K      0x8000
#define FLASH_BLK_SIZE_64K      0x10000
	const char *loc, *locations[] = { "MID", "FDT" };
	uint16_t cnt, chksum;
	uint16_t *wptr;
	struct qla_fdt_layout *fdt;
	uint16_t mid = 0;
	uint16_t fid = 0;
	struct ql82xx_hw_data *hw = &ha->hw;

	hw->flash_conf_off = FARX_ACCESS_FLASH_CONF;
	hw->flash_data_off = FARX_ACCESS_FLASH_DATA;

	wptr = (uint16_t *)ha->request_ring;
	fdt = (struct qla_fdt_layout *)ha->request_ring;
	qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
	    hw->flt_region_fdt << 2, OPTROM_BURST_SIZE);

	if (*wptr == cpu_to_le16(0xffff))
		goto no_flash_data;

	if (fdt->sig[0] != 'Q' || fdt->sig[1] != 'L' || fdt->sig[2] != 'I' ||
	    fdt->sig[3] != 'D')
		goto no_flash_data;

	for (cnt = 0, chksum = 0; cnt < sizeof(struct qla_fdt_layout) >> 1;
	    cnt++)
		chksum += le16_to_cpu(*wptr++);

	if (chksum) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "Inconsistent FDT detected: "
		    "checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0],
		    le16_to_cpu(fdt->version)));
		goto no_flash_data;
	}

	loc = locations[1];
	mid = le16_to_cpu(fdt->man_id);
	fid = le16_to_cpu(fdt->id);
	hw->fdt_wrt_disable = fdt->wrt_disable_bits;
	hw->fdt_erase_cmd = flash_conf_addr(hw, 0x0300 | fdt->erase_cmd);
	hw->fdt_block_size = le32_to_cpu(fdt->block_size);

	if (fdt->unprotect_sec_cmd) {
		hw->fdt_unprotect_sec_cmd = flash_conf_addr(hw, 0x0300 |
		    fdt->unprotect_sec_cmd);
		hw->fdt_protect_sec_cmd = fdt->protect_sec_cmd ?
		    flash_conf_addr(hw, 0x0300 | fdt->protect_sec_cmd) :
		    flash_conf_addr(hw, 0x0336);
	}
	goto done;

no_flash_data:
	loc = locations[0];
	hw->fdt_block_size = FLASH_BLK_SIZE_64K;
done:
	DEBUG2(ql4_printk(KERN_INFO, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
		"pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
		hw->fdt_erase_cmd, hw->fdt_protect_sec_cmd,
		hw->fdt_unprotect_sec_cmd, hw->fdt_wrt_disable,
		hw->fdt_block_size));
}

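/*
 * Fetch the IDC timeout parameters (device-init and reset-ack timeouts,
 * in seconds) from flash; fall back to the ROM defaults if the flash
 * words are unprogrammed (all ones).
 */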
static void
qla4_82xx_get_idc_param(struct scsi_qla_host *ha)
{
#define QLA82XX_IDC_PARAM_ADDR      0x003e885c
	uint32_t *wptr;

	if (!is_qla8022(ha))
		return;

	wptr = (uint32_t *)ha->request_ring;
	qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
			QLA82XX_IDC_PARAM_ADDR, 8);

	if (*wptr == cpu_to_le32(0xffffffff)) {
		ha->nx_dev_init_timeout = ROM_DEV_INIT_TIMEOUT;
		ha->nx_reset_timeout = ROM_DRV_RESET_ACK_TIMEOUT;
	} else {
		ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
		ha->nx_reset_timeout = le32_to_cpu(*wptr);
	}

	DEBUG2(ql4_printk(KERN_DEBUG, ha,
		"ha->nx_dev_init_timeout = %d\n", ha->nx_dev_init_timeout));
	DEBUG2(ql4_printk(KERN_DEBUG, ha,
		"ha->nx_reset_timeout = %d\n", ha->nx_reset_timeout));
}

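/**
 * qla4_82xx_queue_mbox_cmd - load a mailbox command into the hardware
 * @ha: pointer to host adapter structure.
 * @mbx_cmd: mailbox command registers to load.
 * @in_count: number of mailbox registers to load.
 *
 * Mailbox 0 is written last, since writing it (followed by the hint
 * register) signals the firmware that a command is pending.
 **/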
void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
			      int in_count)
{
	int i;

	/* Load all mailbox registers, except mailbox 0. */
	for (i = 1; i < in_count; i++)
		writel(mbx_cmd[i], &ha->qla4_82xx_reg->mailbox_in[i]);

	/* Wake up firmware. */
	writel(mbx_cmd[0], &ha->qla4_82xx_reg->mailbox_in[0]);
	readl(&ha->qla4_82xx_reg->mailbox_in[0]);
	writel(HINT_MBX_INT_PENDING, &ha->qla4_82xx_reg->hint);
	readl(&ha->qla4_82xx_reg->hint);
}

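/**
 * qla4_82xx_process_mbox_intr - poll for and service a mailbox interrupt
 * @ha: pointer to host adapter structure.
 * @out_count: number of mailbox status registers expected back.
 *
 * If the RISC interrupt is pending, runs the interrupt service routine
 * and re-enables the legacy interrupt mask when INTx is in use.
 **/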
void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
{
	int intr_status;

	intr_status = readl(&ha->qla4_82xx_reg->host_int);
	if (intr_status & ISRX_82XX_RISC_INT) {
		ha->mbox_status_count = out_count;
		intr_status = readl(&ha->qla4_82xx_reg->host_status);
		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		if (test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
		    test_bit(AF_INTx_ENABLED, &ha->flags))
			qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
					0xfbff);
	}
}

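/**
 * qla4_8xxx_get_flash_info - gather flash layout, FDT and IDC parameters
 * @ha: pointer to host adapter structure.
 **/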
int
qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
{
	int ret;
	uint32_t flt_addr;

	ret = qla4_8xxx_find_flt_start(ha, &flt_addr);
	if (ret != QLA_SUCCESS)
		return ret;

	qla4_8xxx_get_flt_info(ha, flt_addr);
	if (is_qla8022(ha)) {
		qla4_82xx_get_fdt_info(ha);
		qla4_82xx_get_idc_param(ha);
	} else if (is_qla8032(ha) || is_qla8042(ha)) {
		qla4_83xx_get_idc_param(ha);
	}

	return QLA_SUCCESS;
}

/**
 * qla4_8xxx_stop_firmware - stops firmware on specified adapter instance
 * @ha: pointer to host adapter structure.
 *
 * Remarks:
 * For iSCSI, throws away all I/O and AENs into bit bucket, so they will
 * not be available after successful return.  Driver must clean up
 * potential outstanding I/Os after calling this function.
 **/
int
qla4_8xxx_stop_firmware(struct scsi_qla_host *ha)
{
	int status;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_STOP_FW;
	status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1,
	    &mbox_cmd[0], &mbox_sts[0]);

	DEBUG2(printk("scsi%ld: %s: status = %d\n", ha->host_no,
	    __func__, status));
	return status;
}

/**
 * qla4_82xx_isp_reset - Resets ISP and aborts all outstanding commands.
 * @ha: pointer to host adapter structure.
 **/
int
qla4_82xx_isp_reset(struct scsi_qla_host *ha)
{
	int rval;
	uint32_t dev_state;

	qla4_82xx_idc_lock(ha);
	dev_state = qla4_82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);

	if (dev_state == QLA8XXX_DEV_READY) {
		ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
		qla4_82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
		    QLA8XXX_DEV_NEED_RESET);
		set_bit(AF_8XXX_RST_OWNER, &ha->flags);
	} else {
		ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
	}

	qla4_82xx_idc_unlock(ha);

	rval = qla4_8xxx_device_state_handler(ha);

	qla4_82xx_idc_lock(ha);
	qla4_8xxx_clear_rst_ready(ha);
	qla4_82xx_idc_unlock(ha);

	if (rval == QLA_SUCCESS) {
		ql4_printk(KERN_INFO, ha,
			   "Clearing AF_FW_RECOVERY in qla4_82xx_isp_reset\n");
		clear_bit(AF_FW_RECOVERY, &ha->flags);
	}

	return rval;
}

4039
4040/**
4041 * qla4_8xxx_get_sys_info - get adapter MAC address(es) and serial number
4042 * @ha: pointer to host adapter structure.
4043 *
4044 **/
4045int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
4046{
4047	uint32_t mbox_cmd[MBOX_REG_COUNT];
4048	uint32_t mbox_sts[MBOX_REG_COUNT];
4049	struct mbx_sys_info *sys_info;
4050	dma_addr_t sys_info_dma;
4051	int status = QLA_ERROR;
4052
4053	sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info),
4054				      &sys_info_dma, GFP_KERNEL);
4055	if (sys_info == NULL) {
4056		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
4057		    ha->host_no, __func__));
4058		return status;
4059	}
4060
4061	memset(sys_info, 0, sizeof(*sys_info));
4062	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
4063	memset(&mbox_sts, 0, sizeof(mbox_sts));
4064
4065	mbox_cmd[0] = MBOX_CMD_GET_SYS_INFO;
4066	mbox_cmd[1] = LSDW(sys_info_dma);
4067	mbox_cmd[2] = MSDW(sys_info_dma);
4068	mbox_cmd[4] = sizeof(*sys_info);
4069
4070	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 6, &mbox_cmd[0],
4071	    &mbox_sts[0]) != QLA_SUCCESS) {
4072		DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO failed\n",
4073		    ha->host_no, __func__));
4074		goto exit_validate_mac82;
4075	}
4076
4077	/* Make sure we receive the minimum required data to cache internally */
4078	if (((is_qla8032(ha) || is_qla8042(ha)) ? mbox_sts[3] : mbox_sts[4]) <
4079	    offsetof(struct mbx_sys_info, reserved)) {
4080		DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
4081		    " error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
4082		goto exit_validate_mac82;
4083	}
4084
4085	/* Save M.A.C. address & serial_number */
4086	ha->port_num = sys_info->port_num;
4087	memcpy(ha->my_mac, &sys_info->mac_addr[0],
4088	    min(sizeof(ha->my_mac), sizeof(sys_info->mac_addr)));
4089	memcpy(ha->serial_number, &sys_info->serial_number,
4090	    min(sizeof(ha->serial_number), sizeof(sys_info->serial_number)));
4091	memcpy(ha->model_name, &sys_info->board_id_str,
4092	       min(sizeof(ha->model_name), sizeof(sys_info->board_id_str)));
4093	ha->phy_port_cnt = sys_info->phys_port_cnt;
4094	ha->phy_port_num = sys_info->port_num;
4095	ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt;
4096
4097	DEBUG2(printk("scsi%ld: %s: "
4098	    "mac %02x:%02x:%02x:%02x:%02x:%02x "
4099	    "serial %s\n", ha->host_no, __func__,
4100	    ha->my_mac[0], ha->my_mac[1], ha->my_mac[2],
4101	    ha->my_mac[3], ha->my_mac[4], ha->my_mac[5],
4102	    ha->serial_number));
4103
4104	status = QLA_SUCCESS;
4105
4106exit_validate_mac82:
4107	dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info,
4108			  sys_info_dma);
4109	return status;
4110}
4111
4112/* Interrupt handling helpers. */
4113
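/**
 * qla4_8xxx_intr_enable - ask the firmware to enable interrupt delivery
 * @ha: pointer to host adapter structure.
 **/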
int qla4_8xxx_intr_enable(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__));

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS;
	mbox_cmd[1] = INTR_ENABLE;
	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
		&mbox_sts[0]) != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n",
		    __func__, mbox_sts[0]));
		return QLA_ERROR;
	}
	return QLA_SUCCESS;
}

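/**
 * qla4_8xxx_intr_disable - ask the firmware to disable interrupt delivery
 * @ha: pointer to host adapter structure.
 *
 * Issues MBOX_CMD_ENABLE_INTRS with the INTR_DISABLE parameter.
 **/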
int qla4_8xxx_intr_disable(struct scsi_qla_host *ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	DEBUG2(ql4_printk(KERN_INFO, ha, "%s\n", __func__));

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_ENABLE_INTRS;
	mbox_cmd[1] = INTR_DISABLE;
	if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0],
	    &mbox_sts[0]) != QLA_SUCCESS) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
			"%s: MBOX_CMD_ENABLE_INTRS (INTR_DISABLE) failed (0x%04x)\n",
			__func__, mbox_sts[0]));
		return QLA_ERROR;
	}

	return QLA_SUCCESS;
}

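/*
 * Enable interrupts at both the firmware and the legacy interrupt mask
 * register (clearing bit 10 unmasks the RISC interrupt).
 */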
void
qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
{
	qla4_8xxx_intr_enable(ha);

	spin_lock_irq(&ha->hardware_lock);
	/* BIT 10 - reset */
	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
	spin_unlock_irq(&ha->hardware_lock);
	set_bit(AF_INTERRUPTS_ON, &ha->flags);
}

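/*
 * Disable interrupts: notify the firmware only if interrupts were on,
 * then mask the legacy interrupt (setting bit 10 masks the RISC
 * interrupt).
 */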
void
qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
{
	if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
		qla4_8xxx_intr_disable(ha);

	spin_lock_irq(&ha->hardware_lock);
	/* BIT 10 - set */
	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
	spin_unlock_irq(&ha->hardware_lock);
}

struct ql4_init_msix_entry {
	uint16_t entry;
	uint16_t index;
	const char *name;
	irq_handler_t handler;
};

static struct ql4_init_msix_entry qla4_8xxx_msix_entries[QLA_MSIX_ENTRIES] = {
	{ QLA_MSIX_DEFAULT, QLA_MIDX_DEFAULT,
	    "qla4xxx (default)",
	    (irq_handler_t)qla4_8xxx_default_intr_handler },
	{ QLA_MSIX_RSP_Q, QLA_MIDX_RSP_Q,
	    "qla4xxx (rsp_q)", (irq_handler_t)qla4_8xxx_msix_rsp_q },
};

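/*
 * Free all registered MSI-X vectors and disable MSI-X on the PCI device.
 */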
void
qla4_8xxx_disable_msix(struct scsi_qla_host *ha)
{
	int i;
	struct ql4_msix_entry *qentry;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
		qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
		if (qentry->have_irq) {
			free_irq(qentry->msix_vector, ha);
			DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
				__func__, qla4_8xxx_msix_entries[i].name));
		}
	}
	pci_disable_msix(ha->pdev);
	clear_bit(AF_MSIX_ENABLED, &ha->flags);
}

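/**
 * qla4_8xxx_enable_msix - allocate MSI-X vectors and register handlers
 * @ha: pointer to host adapter structure.
 *
 * Returns 0 on success.  On a request_irq() failure, previously
 * registered vectors are released via qla4_8xxx_disable_msix() and the
 * error code is returned.
 **/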
int
qla4_8xxx_enable_msix(struct scsi_qla_host *ha)
{
	int i, ret;
	struct msix_entry entries[QLA_MSIX_ENTRIES];
	struct ql4_msix_entry *qentry;

	for (i = 0; i < QLA_MSIX_ENTRIES; i++)
		entries[i].entry = qla4_8xxx_msix_entries[i].entry;

	ret = pci_enable_msix_exact(ha->pdev, entries, ARRAY_SIZE(entries));
	if (ret) {
		ql4_printk(KERN_WARNING, ha,
		    "MSI-X: Failed to enable support -- %d/%d\n",
		    QLA_MSIX_ENTRIES, ret);
		goto msix_out;
	}
	set_bit(AF_MSIX_ENABLED, &ha->flags);

	for (i = 0; i < QLA_MSIX_ENTRIES; i++) {
		qentry = &ha->msix_entries[qla4_8xxx_msix_entries[i].index];
		qentry->msix_vector = entries[i].vector;
		qentry->msix_entry = entries[i].entry;
		qentry->have_irq = 0;
		ret = request_irq(qentry->msix_vector,
		    qla4_8xxx_msix_entries[i].handler, 0,
		    qla4_8xxx_msix_entries[i].name, ha);
		if (ret) {
			ql4_printk(KERN_WARNING, ha,
			    "MSI-X: Unable to register handler -- %x/%d.\n",
			    qla4_8xxx_msix_entries[i].index, ret);
			qla4_8xxx_disable_msix(ha);
			goto msix_out;
		}
		qentry->have_irq = 1;
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %s\n",
			__func__, qla4_8xxx_msix_entries[i].name));
	}
msix_out:
	return ret;
}

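/**
 * qla4_8xxx_check_init_adapter_retry - decide whether adapter init may be
 *	retried
 * @ha: pointer to host adapter structure.
 *
 * Returns QLA_ERROR when IRQs were never attached, since a retry cannot
 * succeed without them; otherwise frees the IRQs (they are re-registered
 * by start_firmware on 8xxx) and returns QLA_SUCCESS.
 **/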
int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha)
{
	int status = QLA_SUCCESS;

	/* Don't retry adapter initialization if IRQ allocation failed */
	if (!test_bit(AF_IRQ_ATTACHED, &ha->flags)) {
		ql4_printk(KERN_WARNING, ha, "%s: Skipping retry of adapter initialization as IRQs are not attached\n",
			   __func__);
		status = QLA_ERROR;
		goto exit_init_adapter_failure;
	}

	/*
	 * Since interrupts are registered in start_firmware for 8xxx,
	 * release them here if initialize_adapter fails and retry
	 * adapter initialization.
	 */
	qla4xxx_free_irqs(ha);

exit_init_adapter_failure:
	return status;
}