/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#include "bfa_ioc.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

/* IOC local definitions */

#define bfa_ioc_state_disabled(__sm)			\
	(((__sm) == BFI_IOC_UNINIT) ||			\
	 ((__sm) == BFI_IOC_INITING) ||			\
	 ((__sm) == BFI_IOC_HWINIT) ||			\
	 ((__sm) == BFI_IOC_DISABLED) ||		\
	 ((__sm) == BFI_IOC_FAIL) ||			\
	 ((__sm) == BFI_IOC_CFG_DISABLED))

/* Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. */

#define bfa_ioc_firmware_lock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
#define bfa_ioc_firmware_unlock(__ioc)			\
			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
#define bfa_ioc_notify_fail(__ioc)			\
			((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
#define bfa_ioc_sync_start(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
#define bfa_ioc_sync_join(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
#define bfa_ioc_sync_leave(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
#define bfa_ioc_sync_ack(__ioc)				\
			((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
#define bfa_ioc_sync_complete(__ioc)			\
			((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
#define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate)		\
			((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_cur_ioc_fwstate(__ioc)		\
			((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
#define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate)		\
		((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
#define bfa_ioc_get_alt_ioc_fwstate(__ioc)		\
			((__ioc)->ioc_hwif->ioc_get_alt_fwstate(__ioc))

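/*
 * A mailbox command is considered pending if either a request is still
 * queued in software (mbox_mod.cmd_q) or the previous request has not yet
 * been fetched by firmware (the hfn_mbox_cmd register reads non-zero).
 */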
#define bfa_ioc_mbox_cmd_pending(__ioc)		\
			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
			readl((__ioc)->ioc_regs.hfn_mbox_cmd))

static bool bfa_nw_auto_recover = true;

/*
 * forward declarations
 */
static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
static void bfa_ioc_recover(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
			enum bfi_fwboot_type boot_type, u32 boot_param);
static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
						char *serial_num);
static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
						char *fw_ver);
static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
						char *chip_rev);
static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
						char *optrom_ver);
static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
						char *manufacturer);
static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);

/* IOC state machine definitions/declarations */
enum ioc_event {
	IOC_E_RESET		= 1,	/*!< IOC reset request		*/
	IOC_E_ENABLE		= 2,	/*!< IOC enable request		*/
	IOC_E_DISABLE		= 3,	/*!< IOC disable request	*/
	IOC_E_DETACH		= 4,	/*!< driver detach cleanup	*/
	IOC_E_ENABLED		= 5,	/*!< f/w enabled		*/
	IOC_E_FWRSP_GETATTR	= 6,	/*!< IOC get attribute response	*/
	IOC_E_DISABLED		= 7,	/*!< f/w disabled		*/
	IOC_E_PFFAILED		= 8,	/*!< failure notice by iocpf sm	*/
	IOC_E_HBFAIL		= 9,	/*!< heartbeat failure		*/
	IOC_E_HWERROR		= 10,	/*!< hardware error interrupt	*/
	IOC_E_TIMEOUT		= 11,	/*!< timeout			*/
	IOC_E_HWFAILED		= 12,	/*!< PCI mapping failure notice	*/
};

bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);

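/*
 * Maps each state handler to the externally visible BFA_IOC_* state code,
 * so callers can query the current state (via bfa_sm_to_state()) without
 * knowledge of the individual handler functions.
 */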
static struct bfa_sm_table ioc_sm_table[] = {
	{BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
	{BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
	{BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
	{BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
	{BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
	{BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
	{BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
	{BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
	{BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
};

/*
 * Forward declarations for iocpf state machine
 */
static void bfa_iocpf_enable(struct bfa_ioc *ioc);
static void bfa_iocpf_disable(struct bfa_ioc *ioc);
static void bfa_iocpf_fail(struct bfa_ioc *ioc);
static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
static void bfa_iocpf_stop(struct bfa_ioc *ioc);

/* IOCPF state machine events */
enum iocpf_event {
	IOCPF_E_ENABLE		= 1,	/*!< IOCPF enable request	*/
	IOCPF_E_DISABLE		= 2,	/*!< IOCPF disable request	*/
	IOCPF_E_STOP		= 3,	/*!< stop on driver detach	*/
	IOCPF_E_FWREADY		= 4,	/*!< f/w initialization done	*/
	IOCPF_E_FWRSP_ENABLE	= 5,	/*!< enable f/w response	*/
	IOCPF_E_FWRSP_DISABLE	= 6,	/*!< disable f/w response	*/
	IOCPF_E_FAIL		= 7,	/*!< failure notice by ioc sm	*/
	IOCPF_E_INITFAIL	= 8,	/*!< init fail notice by ioc sm	*/
	IOCPF_E_GETATTRFAIL	= 9,	/*!< getattr fail notice by ioc sm */
	IOCPF_E_SEMLOCKED	= 10,	/*!< h/w semaphore is locked	*/
	IOCPF_E_TIMEOUT		= 11,	/*!< f/w response timeout	*/
	IOCPF_E_SEM_ERROR	= 12,	/*!< h/w sem mapping error	*/
};

/* IOCPF states */
enum bfa_iocpf_state {
	BFA_IOCPF_RESET		= 1,	/*!< IOC is in reset state */
	BFA_IOCPF_SEMWAIT	= 2,	/*!< Waiting for IOC h/w semaphore */
	BFA_IOCPF_HWINIT	= 3,	/*!< IOC h/w is being initialized */
	BFA_IOCPF_READY		= 4,	/*!< IOCPF is initialized */
	BFA_IOCPF_INITFAIL	= 5,	/*!< IOCPF initialization failed */
	BFA_IOCPF_FAIL		= 6,	/*!< IOCPF failed */
	BFA_IOCPF_DISABLING	= 7,	/*!< IOCPF is being disabled */
	BFA_IOCPF_DISABLED	= 8,	/*!< IOCPF is disabled */
	BFA_IOCPF_FWMISMATCH	= 9,	/*!< IOC f/w different from driver's */
};

bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
						enum iocpf_event);
bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);

static struct bfa_sm_table iocpf_sm_table[] = {
	{BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
	{BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
	{BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
	{BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
	{BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
	{BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
	{BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
	{BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
};

/* IOC State Machine */

/* Beginning state. IOC uninit state. */
static void
bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
{
}

/* IOC is in uninit state. */
static void
bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_RESET:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Reset entry actions -- initialize state machine */
static void
bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
{
	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
}

/* IOC is in reset state. */
static void
bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		bfa_ioc_disable_comp(ioc);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_enable(ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
		/* !!! fall through !!! */
	case IOC_E_HWERROR:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Query IOC attributes from firmware, guarded by a timeout. */
static void
bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
{
	mod_timer(&ioc->ioc_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_getattr(ioc);
}

/* IOC configuration in progress. Timer is active. */
static void
bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_FWRSP_GETATTR:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		del_timer(&ioc->ioc_timer);
		/* fall through */
	case IOC_E_TIMEOUT:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_getattrfail(ioc);
		break;

	case IOC_E_DISABLE:
		del_timer(&ioc->ioc_timer);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
{
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
	bfa_ioc_hb_monitor(ioc);
}

static void
bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_ioc_hb_stop(ioc);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		bfa_ioc_hb_stop(ioc);
		/* !!! fall through !!! */
	case IOC_E_HBFAIL:
		if (ioc->iocpf.auto_recover)
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
		else
			bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);

		bfa_ioc_fail_notify(ioc);

		if (event != IOC_E_PFFAILED)
			bfa_iocpf_fail(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
{
	bfa_iocpf_disable(ioc);
}

/* IOC is being disabled */
static void
bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_DISABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
		break;

	case IOC_E_HWERROR:
		/*
		 * No state change.  Will move to disabled state
		 * after iocpf sm completes failure processing and
		 * moves to disabled state.
		 */
		bfa_iocpf_fail(ioc);
		break;

	case IOC_E_HWFAILED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		bfa_ioc_disable_comp(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOC disable completion entry. */
static void
bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
{
	bfa_ioc_disable_comp(ioc);
}

static void
bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
{
}

/* Hardware initialization retry. */
static void
bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLED:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
		break;

	case IOC_E_PFFAILED:
	case IOC_E_HWERROR:
		/**
		 * Initialization retry failed.
		 */
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
		if (event != IOC_E_PFFAILED)
			bfa_iocpf_initfail(ioc);
		break;

	case IOC_E_HWFAILED:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
		break;

	case IOC_E_ENABLE:
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
{
}

/* IOC failure. */
static void
bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		bfa_iocpf_stop(ioc);
		break;

	case IOC_E_HWERROR:
		/* HB failure notification, ignore. */
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
{
}

/* IOC hardware failure. */
static void
bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
{
	switch (event) {
	case IOC_E_ENABLE:
		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
		break;

	case IOC_E_DISABLE:
		ioc->cbfn->disable_cbfn(ioc->bfa);
		break;

	case IOC_E_DETACH:
		bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOCPF State Machine */
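
/*
 * The IOCPF ("IOC per function") state machine runs beneath the IOC state
 * machine: it owns the h/w semaphore handshake, the firmware version
 * check, h/w initialization and the f/w enable/disable mailbox exchange,
 * and reports results upward through bfa_ioc_pf_enabled()/_disabled()/
 * _failed()/_hwfailed().
 */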

/* Reset entry actions -- initialize state machine */
static void
bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
{
	iocpf->fw_mismatch_notified = false;
	iocpf->auto_recover = bfa_nw_auto_recover;
}

/* Beginning state. IOC is in reset state. */
static void
bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_STOP:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Semaphore should be acquired for version check. */
static void
bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_init(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting h/w semaphore to continue with version check. */
static void
bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_firmware_lock(ioc)) {
			if (bfa_ioc_sync_start(ioc)) {
				bfa_ioc_sync_join(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			} else {
				bfa_ioc_firmware_unlock(ioc);
				bfa_nw_ioc_hw_sem_release(ioc);
				mod_timer(&ioc->sem_timer, jiffies +
					msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
			}
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Notify f/w mismatch to the driver (once) and keep re-checking. */
static void
bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
{
	/* Call only the first time sm enters fwmismatch state. */
	if (!iocpf->fw_mismatch_notified)
		bfa_ioc_pf_fwmismatch(iocpf->ioc);

	iocpf->fw_mismatch_notified = true;
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
}

/* Awaiting firmware version match. */
static void
bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_TIMEOUT:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		bfa_ioc_pf_disabled(ioc);
		break;

	case IOCPF_E_STOP:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* Request for semaphore. */
static void
bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Awaiting semaphore for h/w initialization. */
static void
bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		if (bfa_ioc_sync_complete(ioc)) {
			bfa_ioc_sync_join(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
		} else {
			bfa_nw_ioc_hw_sem_release(ioc);
			mod_timer(&ioc->sem_timer, jiffies +
				msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
{
	iocpf->poll_time = 0;
	bfa_ioc_reset(iocpf->ioc, false);
}

/* Hardware is being initialized. Interrupts are enabled.
 * Holding hardware semaphore lock.
 */
static void
bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWREADY:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
		break;

	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	/**
	 * Enable Interrupts before sending fw IOC ENABLE cmd.
	 */
	iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
	bfa_ioc_send_enable(iocpf->ioc);
}

/* Host IOC function is being enabled, awaiting response from firmware.
 * Semaphore is acquired.
 */
static void
bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_ENABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
		break;

	case IOCPF_E_INITFAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */
	case IOCPF_E_TIMEOUT:
		bfa_nw_ioc_hw_sem_release(ioc);
		if (event == IOCPF_E_TIMEOUT)
			bfa_ioc_pf_failed(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_pf_enabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
		break;

	case IOCPF_E_GETATTRFAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
		break;

	case IOCPF_E_FAIL:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
{
	mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_TOV));
	bfa_ioc_send_disable(iocpf->ioc);
}

/* IOC is being disabled */
static void
bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_FWRSP_DISABLE:
		del_timer(&ioc->iocpf_timer);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		del_timer(&ioc->iocpf_timer);
		/*
		 * !!! fall through !!!
		 */

	case IOCPF_E_TIMEOUT:
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FWRSP_ENABLE:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC hb ack request is being removed. */
static void
bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_leave(ioc);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* IOC disable completion entry. */
static void
bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
{
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_pf_disabled(iocpf->ioc);
}

static void
bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_ENABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
	bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_notify_fail(ioc);
		bfa_ioc_sync_leave(ioc);
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
		bfa_nw_ioc_hw_sem_release(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
{
}

/* Hardware initialization failed. */
static void
bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	case IOCPF_E_STOP:
		bfa_ioc_firmware_unlock(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
{
	/**
	 * Mark IOC as failed in hardware and stop firmware.
	 */
	bfa_ioc_lpu_stop(iocpf->ioc);

	/**
	 * Flush any queued up mailbox requests.
	 */
	bfa_ioc_mbox_flush(iocpf->ioc);
	bfa_ioc_hw_sem_get(iocpf->ioc);
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	struct bfa_ioc *ioc = iocpf->ioc;

	switch (event) {
	case IOCPF_E_SEMLOCKED:
		bfa_ioc_sync_ack(ioc);
		bfa_ioc_notify_fail(ioc);
		if (!iocpf->auto_recover) {
			bfa_ioc_sync_leave(ioc);
			bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
			bfa_nw_ioc_hw_sem_release(ioc);
			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		} else {
			if (bfa_ioc_sync_complete(ioc))
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
			else {
				bfa_nw_ioc_hw_sem_release(ioc);
				bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
			}
		}
		break;

	case IOCPF_E_SEM_ERROR:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
		bfa_ioc_pf_hwfailed(ioc);
		break;

	case IOCPF_E_DISABLE:
		bfa_ioc_hw_sem_get_cancel(ioc);
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
		break;

	case IOCPF_E_FAIL:
		break;

	default:
		bfa_sm_fault(event);
	}
}

static void
bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
{
}

/* IOC is in failed state. */
static void
bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
{
	switch (event) {
	case IOCPF_E_DISABLE:
		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
		break;

	default:
		bfa_sm_fault(event);
	}
}

/* BFA IOC private functions */

/* Notify common modules registered for notification. */
static void
bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
{
	struct bfa_ioc_notify *notify;
	struct list_head *qe;

	list_for_each(qe, &ioc->notify_q) {
		notify = (struct bfa_ioc_notify *)qe;
		notify->cbfn(notify->cbarg, event);
	}
}

static void
bfa_ioc_disable_comp(struct bfa_ioc *ioc)
{
	ioc->cbfn->disable_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
}

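/*
 * Hardware semaphore polarity: the register reads 0 on the read that
 * acquires the lock and 1 while the lock is held; release by writing 1.
 * Spin up to BFA_SEM_SPINCNT times, 2us apart, before giving up.
 */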
bool
bfa_nw_ioc_sem_get(void __iomem *sem_reg)
{
	u32 r32;
	int cnt = 0;
#define BFA_SEM_SPINCNT	3000

	r32 = readl(sem_reg);

	while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
		cnt++;
		udelay(2);
		r32 = readl(sem_reg);
	}

	if (!(r32 & 1))
		return true;

	return false;
}

void
bfa_nw_ioc_sem_release(void __iomem *sem_reg)
{
	readl(sem_reg);
	writel(1, sem_reg);
}

/* Clear fwver hdr */
static void
bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
{
	u32 pgnum, pgoff, loff = 0;
	int i;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
	pgoff = PSS_SMEM_PGOFF(loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
		writel(0, ioc->ioc_regs.smem_page_start + loff);
		loff += sizeof(u32);
	}
}

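/*
 * If a previous driver instance left firmware in a state other than
 * UNINIT with a boot image other than NORMAL, scrub the f/w version
 * header and both fwstates so the next h/w init starts clean.  The init
 * semaphore serializes this against the other function sharing the ASIC.
 */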
static void
bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
{
	struct bfi_ioc_image_hdr fwhdr;
	u32 fwstate, r32;

	/* Spin on init semaphore to serialize. */
	r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	while (r32 & 0x1) {
		udelay(20);
		r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
	}

	fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
	if (fwstate == BFI_IOC_UNINIT) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);

	if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
		writel(1, ioc->ioc_regs.ioc_init_sem_reg);
		return;
	}

	bfa_ioc_fwver_clear(ioc);
	bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
	bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);

	/*
	 * Try to lock and then unlock the semaphore.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	writel(1, ioc->ioc_regs.ioc_sem_reg);

	/* Unlock init semaphore */
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
}

static void
bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
{
	u32	r32;

	/**
	 * First read to the semaphore register will return 0, subsequent reads
	 * will return 1. Semaphore is released by writing 1 to the register
	 */
	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
	if (r32 == ~0) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
		return;
	}
	if (!(r32 & 1)) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
		return;
	}

	mod_timer(&ioc->sem_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
}

void
bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
{
	writel(1, ioc->ioc_regs.ioc_sem_reg);
}

static void
bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
{
	del_timer(&ioc->sem_timer);
}

/* Initialize LPU local memory (aka secondary memory / SRAM) */
static void
bfa_ioc_lmem_init(struct bfa_ioc *ioc)
{
	u32	pss_ctl;
	int	i;
#define PSS_LMEM_INIT_TIME  10000

	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LMEM_RESET;
	pss_ctl |= __PSS_LMEM_INIT_EN;

	/*
	 * i2c workaround 12.5khz clock
	 */
	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);

	/**
	 * wait for memory initialization to be complete
	 */
	i = 0;
	do {
		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
		i++;
	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));

	/**
	 * If memory initialization is not successful, IOC timeout will catch
	 * such failures.
	 */
	BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));

	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_start(struct bfa_ioc *ioc)
{
	u32	pss_ctl;

	/**
	 * Take processor out of reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl &= ~__PSS_LPU0_RESET;

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

static void
bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
{
	u32	pss_ctl;

	/**
	 * Put processors in reset.
	 */
	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);

	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
}

/* Get driver and firmware versions. */
void
bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	u32	pgnum;
	u32	loff = 0;
	int	i;
	u32	*fwsig = (u32 *) fwhdr;

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
	     i++) {
		fwsig[i] =
			swab32(readl(ioc->ioc_regs.smem_page_start + loff));
		loff += sizeof(u32);
	}
}

static bool
bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
			struct bfi_ioc_image_hdr *fwhdr_2)
{
	int i;

	for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
		if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
			return false;
	}

	return true;
}

/* Returns TRUE if the f/w signature and the major, minor and maintenance
 * versions match. When patch, phase and build also match, the MD5
 * checksums must match as well.
 */
static bool
bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
			  struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
		return false;
	if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
		return false;
	if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
		return false;
	if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
		return false;
	if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
	    drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
	    drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
		return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);

	return true;
}

static bool
bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
{
	if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
		return false;

	return true;
}

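/*
 * A header is GA (generally available) when both the phase and build
 * numbers are zero; internal builds carry non-zero phase and/or build.
 */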
static bool
fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
{
	if (fwhdr->fwver.phase == 0 &&
	    fwhdr->fwver.build == 0)
		return true;

	return false;
}

/* Compare the patch-level versions of two compatible headers.  Precedence:
 * patch number first, then GA vs internal build, then phase, then build.
 */
static enum bfi_ioc_img_ver_cmp
bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
			 struct bfi_ioc_image_hdr *fwhdr_to_cmp)
{
	if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
		return BFI_IOC_IMG_VER_INCOMP;

	if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
		return BFI_IOC_IMG_VER_OLD;

	/* GA takes priority over internal builds of the same patch stream.
	 * At this point major, minor, maint and patch numbers are the same.
	 */
	if (fwhdr_is_ga(base_fwhdr)) {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_SAME;
		else
			return BFI_IOC_IMG_VER_OLD;
	} else {
		if (fwhdr_is_ga(fwhdr_to_cmp))
			return BFI_IOC_IMG_VER_BETTER;
	}

	if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
		return BFI_IOC_IMG_VER_OLD;

	if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_BETTER;
	else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
		return BFI_IOC_IMG_VER_OLD;

	/* All version numbers are equal.
	 * The MD5 check was already done as part of the compatibility check.
	 */
	return BFI_IOC_IMG_VER_SAME;
}

/* register definitions */
#define FLI_CMD_REG			0x0001d000
#define FLI_WRDATA_REG			0x0001d00c
#define FLI_RDDATA_REG			0x0001d010
#define FLI_ADDR_REG			0x0001d004
#define FLI_DEV_STATUS_REG		0x0001d014

#define BFA_FLASH_FIFO_SIZE		128	/* fifo size */
#define BFA_FLASH_CHECK_MAX		10000	/* max # of status check */
#define BFA_FLASH_BLOCKING_OP_MAX	1000000	/* max # of blocking op check */
#define BFA_FLASH_WIP_MASK		0x01	/* write in progress bit mask */

#define NFC_STATE_RUNNING		0x20000001
#define NFC_STATE_PAUSED		0x00004560
#define NFC_VER_VALID			0x147

enum bfa_flash_cmd {
	BFA_FLASH_FAST_READ	= 0x0b,	/* fast read */
	BFA_FLASH_WRITE_ENABLE	= 0x06,	/* write enable */
	BFA_FLASH_SECTOR_ERASE	= 0xd8,	/* sector erase */
	BFA_FLASH_WRITE		= 0x02,	/* write */
	BFA_FLASH_READ_STATUS	= 0x05,	/* read status */
};

/* hardware error definition */
enum bfa_flash_err {
	BFA_FLASH_NOT_PRESENT	= -1,	/*!< flash not present */
	BFA_FLASH_UNINIT	= -2,	/*!< flash not initialized */
	BFA_FLASH_BAD		= -3,	/*!< flash bad */
	BFA_FLASH_BUSY		= -4,	/*!< flash busy */
	BFA_FLASH_ERR_CMD_ACT	= -5,	/*!< command active never cleared */
	BFA_FLASH_ERR_FIFO_CNT	= -6,	/*!< fifo count never cleared */
	BFA_FLASH_ERR_WIP	= -7,	/*!< write-in-progress never cleared */
	BFA_FLASH_ERR_TIMEOUT	= -8,	/*!< fli timeout */
	BFA_FLASH_ERR_LEN	= -9,	/*!< invalid length */
};

/* flash command register data structure */
union bfa_flash_cmd_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	act:1;
		u32	rsv:1;
		u32	write_cnt:9;
		u32	read_cnt:9;
		u32	addr_cnt:4;
		u32	cmd:8;
#else
		u32	cmd:8;
		u32	addr_cnt:4;
		u32	read_cnt:9;
		u32	write_cnt:9;
		u32	rsv:1;
		u32	act:1;
#endif
	} r;
	u32	i;
};

/* flash device status register data structure */
union bfa_flash_dev_status_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	rsv:21;
		u32	fifo_cnt:6;
		u32	busy:1;
		u32	init_status:1;
		u32	present:1;
		u32	bad:1;
		u32	good:1;
#else
		u32	good:1;
		u32	bad:1;
		u32	present:1;
		u32	init_status:1;
		u32	busy:1;
		u32	fifo_cnt:6;
		u32	rsv:21;
#endif
	} r;
	u32	i;
};

/* flash address register data structure */
union bfa_flash_addr_reg {
	struct {
#ifdef __BIG_ENDIAN
		u32	addr:24;
		u32	dummy:8;
#else
		u32	dummy:8;
		u32	addr:24;
#endif
	} r;
	u32	i;
};

/* Flash raw private functions */
static void
bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
		  u8 rd_cnt, u8 ad_cnt, u8 op)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = 0;
	cmd.r.act = 1;
	cmd.r.write_cnt = wr_cnt;
	cmd.r.read_cnt = rd_cnt;
	cmd.r.addr_cnt = ad_cnt;
	cmd.r.cmd = op;
	writel(cmd.i, (pci_bar + FLI_CMD_REG));
}

static void
bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
{
	union bfa_flash_addr_reg addr;

	addr.r.addr = address & 0x00ffffff;
	addr.r.dummy = 0;
	writel(addr.i, (pci_bar + FLI_ADDR_REG));
}

static int
bfa_flash_cmd_act_check(void __iomem *pci_bar)
{
	union bfa_flash_cmd_reg cmd;

	cmd.i = readl(pci_bar + FLI_CMD_REG);

	if (cmd.r.act)
		return BFA_FLASH_ERR_CMD_ACT;

	return 0;
}

/* Flush FLI data fifo. Returns s32 so negative error codes survive the
 * "status < 0" checks in the callers.
 */
static s32
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
	u32 i;
	union bfa_flash_dev_status_reg dev_status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);

	if (!dev_status.r.fifo_cnt)
		return 0;

	/* fifo counter in terms of words */
	for (i = 0; i < dev_status.r.fifo_cnt; i++)
		(void) readl(pci_bar + FLI_RDDATA_REG);

	/* Check the device status. It may take some time. */
	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
		if (!dev_status.r.fifo_cnt)
			break;
	}

	if (dev_status.r.fifo_cnt)
		return BFA_FLASH_ERR_FIFO_CNT;

	return 0;
}

/* Read flash status. */
static s32
bfa_flash_status_read(void __iomem *pci_bar)
{
	union bfa_flash_dev_status_reg	dev_status;
	s32	status;
	u32	ret_status;
	int	i;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);

	for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
		status = bfa_flash_cmd_act_check(pci_bar);
		if (!status)
			break;
	}

	if (status)
		return status;

	dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
	if (!dev_status.r.fifo_cnt)
		return BFA_FLASH_BUSY;

	ret_status = readl(pci_bar + FLI_RDDATA_REG);
	ret_status >>= 24;

	status = bfa_flash_fifo_flush(pci_bar);
	if (status < 0)
		return status;

	return ret_status;
}

/* Start flash read operation. */
static s32
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
		     char *buf)
{
	s32 status;

	/* len must be a multiple of 4 and must not exceed the fifo size */
	if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
		return BFA_FLASH_ERR_LEN;

	/* check status */
	status = bfa_flash_status_read(pci_bar);
	if (status == BFA_FLASH_BUSY)
		status = bfa_flash_status_read(pci_bar);

	if (status < 0)
		return status;

	/* check if write-in-progress bit is cleared */
	if (status & BFA_FLASH_WIP_MASK)
		return BFA_FLASH_ERR_WIP;

	bfa_flash_set_addr(pci_bar, offset);

	bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);

	return 0;
}

/* Check flash read operation. */
static u32
bfa_flash_read_check(void __iomem *pci_bar)
{
	if (bfa_flash_cmd_act_check(pci_bar))
		return 1;

	return 0;
}

/* End flash read operation. */
static void
bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
{
	u32 i;

	/* read data fifo up to 32 words */
	for (i = 0; i < len; i += 4) {
		u32 w = readl(pci_bar + FLI_RDDATA_REG);
		*((u32 *)(buf + i)) = swab32(w);
	}

	bfa_flash_fifo_flush(pci_bar);
}

#define FLASH_BLOCKING_OP_MAX	500
#define FLASH_SEM_LOCK_REG	0x18820

static int
bfa_raw_sem_get(void __iomem *bar)
{
	int	locked;

	locked = readl(bar + FLASH_SEM_LOCK_REG);

	return !locked;
}

static enum bfa_status
bfa_flash_sem_get(void __iomem *bar)
{
	u32 n = FLASH_BLOCKING_OP_MAX;

	while (!bfa_raw_sem_get(bar)) {
		if (--n <= 0)
			return BFA_STATUS_BADFLASH;
		mdelay(10);
	}
	return BFA_STATUS_OK;
}

static void
bfa_flash_sem_put(void __iomem *bar)
{
	writel(0, bar + FLASH_SEM_LOCK_REG);
}

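/*
 * Perform a raw flash read of 'len' bytes at 'offset'.  The transfer is
 * split on FIFO-size boundaries: with the 128-byte FIFO, offset=100 and
 * len=200 is issued as reads of 28, 128 and 44 bytes, so that no single
 * read crosses a 128-byte boundary.
 */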
static enum bfa_status
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
		   u32 len)
{
	u32 n, off, l, s, residue, fifo_sz;
	s32 status;

	residue = len;
	off = 0;
	fifo_sz = BFA_FLASH_FIFO_SIZE;
	status = bfa_flash_sem_get(pci_bar);
	if (status != BFA_STATUS_OK)
		return status;

	while (residue) {
		s = offset + off;
		n = s / fifo_sz;
		l = (n + 1) * fifo_sz - s;
		if (l > residue)
			l = residue;

		status = bfa_flash_read_start(pci_bar, offset + off, l,
								&buf[off]);
		if (status < 0) {
			bfa_flash_sem_put(pci_bar);
			return BFA_STATUS_FAILED;
		}

		n = BFA_FLASH_BLOCKING_OP_MAX;
		while (bfa_flash_read_check(pci_bar)) {
			if (--n <= 0) {
				bfa_flash_sem_put(pci_bar);
				return BFA_STATUS_FAILED;
			}
		}

		bfa_flash_read_end(pci_bar, l, &buf[off]);

		residue -= l;
		off += l;
	}
	bfa_flash_sem_put(pci_bar);

	return BFA_STATUS_OK;
}

#define BFA_FLASH_PART_FWIMG_ADDR	0x100000 /* fw image address */

static enum bfa_status
bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
			      u32 *fwimg)
{
	return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
			BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
			(char *)fwimg, BFI_FLASH_CHUNK_SZ);
}

static enum bfi_ioc_img_ver_cmp
bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
			struct bfi_ioc_image_hdr *base_fwhdr)
{
	struct bfi_ioc_image_hdr *flash_fwhdr;
	enum bfa_status status;
	u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];

	status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
	if (status != BFA_STATUS_OK)
		return BFI_IOC_IMG_VER_INCOMP;

	flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
	if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
		return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
	else
		return BFI_IOC_IMG_VER_INCOMP;
}

/**
 * Returns TRUE if the driver is willing to work with the current smem
 * f/w version.
 */
bool
bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;

	drv_fwhdr = (struct bfi_ioc_image_hdr *)
		bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);

	/* If smem is incompatible or old, driver should not work with it. */
	drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
	if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
	    drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
		return false;
	}

	/* If flash has a better f/w than smem, do not work with smem.
	 * If smem f/w == flash f/w, work with it since it is not old or
	 * incompatible. If flash is old or incompatible, work with smem
	 * only if smem f/w == drv f/w.
	 */
	smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);

	if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
		return false;
	else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
		return true;
	else
		return drv_smem_cmp == BFI_IOC_IMG_VER_SAME;
}

/* Return true if current running version is valid. Firmware signature and
 * execution context (driver/bios) must match.
 */
static bool
bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
{
	struct bfi_ioc_image_hdr fwhdr;

	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (swab32(fwhdr.bootenv) != boot_env)
		return false;

	return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
}

/* Conditionally flush any pending message from firmware at start. */
static void
bfa_ioc_msgflush(struct bfa_ioc *ioc)
{
	u32	r32;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if (r32)
		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
}

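/*
 * Decide how to bring up firmware: boot a fresh image if the current one
 * is invalid, wait for the other function if initialization is already in
 * progress, or simply resync with an already-running firmware.
 */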
static void
bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
{
	enum bfi_ioc_state ioc_fwstate;
	bool fwvalid;
	u32 boot_env;

	ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (force)
		ioc_fwstate = BFI_IOC_UNINIT;

	boot_env = BFI_FWBOOT_ENV_OS;

	/**
	 * check if firmware is valid
	 */
	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
		false : bfa_ioc_fwver_valid(ioc, boot_env);

	if (!fwvalid) {
		if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
								BFA_STATUS_OK)
			bfa_ioc_poll_fwinit(ioc);

		return;
	}

	/**
	 * If hardware initialization is in progress (initialized by other IOC),
	 * just wait for an initialization completion interrupt.
	 */
	if (ioc_fwstate == BFI_IOC_INITING) {
		bfa_ioc_poll_fwinit(ioc);
		return;
	}

	/**
	 * If IOC function is disabled and firmware version is same,
	 * just re-enable IOC.
	 */
	if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
		/**
		 * When using MSI-X any pending firmware ready event should
		 * be flushed. Otherwise MSI-X interrupts are not delivered.
		 */
		bfa_ioc_msgflush(ioc);
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	/**
	 * Initialize the h/w for any other states.
	 */
	if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
							BFA_STATUS_OK)
		bfa_ioc_poll_fwinit(ioc);
}

void
bfa_nw_ioc_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
}

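/*
 * Copy a host-to-firmware message into the mailbox registers (at most
 * BFI_IOC_MSGLEN_MAX bytes), zero-pad the remainder, then ring the
 * mailbox doorbell; the trailing read flushes the posted writes.
 */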
static void
bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
{
	u32 *msgp = (u32 *) ioc_msg;
	u32 i;

	BUG_ON(len > BFI_IOC_MSGLEN_MAX);

	/*
	 * first write msg to mailbox registers
	 */
	for (i = 0; i < len / sizeof(u32); i++)
		writel(cpu_to_le32(msgp[i]),
		       ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));

	/*
	 * write 1 to mailbox CMD to trigger LPU event
	 */
	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
}

static void
bfa_ioc_send_enable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req enable_req;
	struct timeval tv;

	bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
		    bfa_ioc_portid(ioc));
	enable_req.clscode = htons(ioc->clscode);
	do_gettimeofday(&tv);
	enable_req.tv_sec = ntohl(tv.tv_sec);
	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_disable(struct bfa_ioc *ioc)
{
	struct bfi_ioc_ctrl_req disable_req;

	bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
		    bfa_ioc_portid(ioc));
	bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
}

static void
bfa_ioc_send_getattr(struct bfa_ioc *ioc)
{
	struct bfi_ioc_getattr_req attr_req;

	bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
		    bfa_ioc_portid(ioc));
	bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
	bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
}

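/*
 * Heartbeat monitor: firmware periodically increments the heartbeat
 * register; if the count has not advanced since the previous poll, the
 * IOC is assumed dead and recovery starts.  The same timer tick also
 * drains any queued mailbox commands.
 */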
void
bfa_nw_ioc_hb_check(void *cbarg)
{
	struct bfa_ioc *ioc = cbarg;
	u32	hb_count;

	hb_count = readl(ioc->ioc_regs.heartbeat);
	if (ioc->hb_count == hb_count) {
		bfa_ioc_recover(ioc);
		return;
	}
	ioc->hb_count = hb_count;

	bfa_ioc_mbox_poll(ioc);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
{
	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
	mod_timer(&ioc->hb_timer, jiffies +
		msecs_to_jiffies(BFA_IOC_HB_TOV));
}

static void
bfa_ioc_hb_stop(struct bfa_ioc *ioc)
{
	del_timer(&ioc->hb_timer);
}

/* Initiate a full firmware download: copy the image into SMEM chunk by
 * chunk, then record boot type, environment and device mode at the fixed
 * SMEM offsets read by the bootloader.
 */
static enum bfa_status
bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
		    u32 boot_env)
{
	u32 *fwimg;
	u32 pgnum;
	u32 loff = 0;
	u32 chunkno = 0;
	u32 i;
	u32 asicmode;
	u32 fwimg_size;
	u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
	enum bfa_status status;

	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH) {
		fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);

		status = bfa_nw_ioc_flash_img_get_chnk(ioc,
			BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
		if (status != BFA_STATUS_OK)
			return status;

		fwimg = fwimg_buf;
	} else {
		fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
		fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
	}

	pgnum = bfa_ioc_smem_pgnum(ioc, loff);

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	for (i = 0; i < fwimg_size; i++) {
		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
			if (boot_env == BFI_FWBOOT_ENV_OS &&
			    boot_type == BFI_FWBOOT_TYPE_FLASH) {
				status = bfa_nw_ioc_flash_img_get_chnk(ioc,
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
					fwimg_buf);
				if (status != BFA_STATUS_OK)
					return status;

				fwimg = fwimg_buf;
			} else {
				fwimg = bfa_cb_image_get_chunk(
					bfa_ioc_asic_gen(ioc),
					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
			}
		}

		/**
		 * write smem
		 */
		writel(swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]),
		       ioc->ioc_regs.smem_page_start + loff);

		loff += sizeof(u32);

		/**
		 * handle page offset wrap around
		 */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(bfa_ioc_smem_pgnum(ioc, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * Set boot type, env and device mode at the end.
	 */
	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_FLASH)
		boot_type = BFI_FWBOOT_TYPE_NORMAL;

	asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
					ioc->port0_mode, ioc->port1_mode);
	writel(asicmode, ioc->ioc_regs.smem_page_start
			+ BFI_FWBOOT_DEVMODE_OFF);
	writel(boot_type, ioc->ioc_regs.smem_page_start
			+ BFI_FWBOOT_TYPE_OFF);
	writel(boot_env, ioc->ioc_regs.smem_page_start
			+ BFI_FWBOOT_ENV_OFF);
	return BFA_STATUS_OK;
}

static void
bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
{
	bfa_ioc_hwinit(ioc, force);
}

/* BFA ioc enable reply by firmware */
static void
bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
			u8 cap_bm)
{
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	ioc->port_mode = ioc->port_mode_cfg = port_mode;
	ioc->ad_cap_bm = cap_bm;
	bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
}

/* Update BFA configuration from firmware configuration. */
static void
bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
{
	struct bfi_ioc_attr *attr = ioc->attr;

	attr->adapter_prop  = ntohl(attr->adapter_prop);
	attr->card_type     = ntohl(attr->card_type);
	attr->maxfrsize	    = ntohs(attr->maxfrsize);

	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
}

/* Attach time initialization of mbox logic. */
static void
bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	int	mc;

	INIT_LIST_HEAD(&mod->cmd_q);
	for (mc = 0; mc < BFI_MC_MAX; mc++) {
		mod->mbhdlr[mc].cbfn = NULL;
		mod->mbhdlr[mc].cbarg = ioc->bfa;
	}
}

/* Mbox poll timer -- restarts any pending mailbox requests. */
static void
bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;
	bfa_mbox_cmd_cbfn_t cbfn;
	void *cbarg;
	u32 stat;

	/* If no command pending, do nothing */
	if (list_empty(&mod->cmd_q))
		return;

	/* If previous command is not yet fetched by firmware, do nothing */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat)
		return;

	/* Enqueue command to firmware. */
	bfa_q_deq(&mod->cmd_q, &cmd);
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	/* Give a callback to the client, indicating that the command is sent */
	if (cmd->cbfn) {
		cbfn = cmd->cbfn;
		cbarg = cmd->cbarg;
		cmd->cbfn = NULL;
		cbfn(cbarg);
	}
}

/* Cleanup any pending requests. */
static void
bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfa_mbox_cmd *cmd;

	while (!list_empty(&mod->cmd_q))
		bfa_q_deq(&mod->cmd_q, &cmd);
}

/**
 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
 *
 * @ioc:     IOC instance
 * @tbuf:    host buffer to store the data read from smem
 * @soff:    smem offset
 * @sz:      size of the read, in bytes
 */
static int
bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
{
	u32 pgnum, loff, r32;
	int i, len;
	u32 *buf = tbuf;

	pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
	loff = PSS_SMEM_PGOFF(soff);

	/*
	 * Hold semaphore to serialize pll init and fwtrc.
	 */
	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
		return 1;

	writel(pgnum, ioc->ioc_regs.host_page_num_fn);

	len = sz/sizeof(u32);
	for (i = 0; i < len; i++) {
		r32 = swab32(readl(loff + ioc->ioc_regs.smem_page_start));
		buf[i] = be32_to_cpu(r32);
		loff += sizeof(u32);

		/* handle page offset wrap around */
		loff = PSS_SMEM_PGOFF(loff);
		if (loff == 0) {
			pgnum++;
			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
		}
	}

	writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
	       ioc->ioc_regs.host_page_num_fn);

	/*
	 * release semaphore
	 */
	readl(ioc->ioc_regs.ioc_init_sem_reg);
	writel(1, ioc->ioc_regs.ioc_init_sem_reg);
	return 0;
}

/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
	u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
	int tlen, status = 0;

	tlen = *trclen;
	if (tlen > BNA_DBG_FWTRC_LEN)
		tlen = BNA_DBG_FWTRC_LEN;

	status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
	*trclen = tlen;
	return status;
}

/* Save firmware trace if configured. */
static void
bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
{
	int tlen;

	if (ioc->dbg_fwsave_once) {
		ioc->dbg_fwsave_once = 0;
		if (ioc->dbg_fwsave_len) {
			tlen = ioc->dbg_fwsave_len;
			bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
		}
	}
}

/* Retrieve saved firmware trace from a prior IOC failure. */
int
bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
{
	int tlen;

	if (ioc->dbg_fwsave_len == 0)
		return BFA_STATUS_ENOFSAVE;

	tlen = *trclen;
	if (tlen > ioc->dbg_fwsave_len)
		tlen = ioc->dbg_fwsave_len;

	memcpy(trcdata, ioc->dbg_fwsave, tlen);
	*trclen = tlen;
	return BFA_STATUS_OK;
}

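/* Notify the driver and registered modules of an IOC failure; save fwtrace. */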
static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
	/*
	 * Notify driver and common modules registered for notification.
	 */
	ioc->cbfn->hbfail_cbfn(ioc->bfa);
	bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
	bfa_nw_ioc_debug_save_ftrc(ioc);
}

/* IOCPF to IOC interface */
static void
bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_ENABLED);
}

static void
bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DISABLED);
}

static void
bfa_ioc_pf_failed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
}

static void
bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
}

static void
bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
{
	/*
	 * Provide enable completion callback and AEN notification.
	 */
	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
}

/* IOC public */
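/* Initialize the chip PLL and LMEM while holding the init semaphore. */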
static enum bfa_status
bfa_ioc_pll_init(struct bfa_ioc *ioc)
{
	/*
	 * Hold semaphore so that nobody can access the chip during init.
	 */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);

	bfa_ioc_pll_init_asic(ioc);

	ioc->pllinit = true;

	/* Initialize LMEM */
	bfa_ioc_lmem_init(ioc);

	/*
	 * release semaphore.
	 */
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);

	return BFA_STATUS_OK;
}

/* Interface used by diag module to do firmware boot with memory test
 * as the entry vector.
 */
static enum bfa_status
bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
		u32 boot_env)
{
	struct bfi_ioc_image_hdr *drv_fwhdr;
	enum bfa_status status;

	bfa_ioc_stats(ioc, ioc_boots);

	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
		return BFA_STATUS_FAILED;
	if (boot_env == BFI_FWBOOT_ENV_OS &&
	    boot_type == BFI_FWBOOT_TYPE_NORMAL) {
		drv_fwhdr = (struct bfi_ioc_image_hdr *)
			bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
		/* Boot from flash iff the flash f/w is better than the
		 * driver's f/w. Otherwise push the driver's firmware.
		 */
		if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
			BFI_IOC_IMG_VER_BETTER)
			boot_type = BFI_FWBOOT_TYPE_FLASH;
	}

	/*
	 * Initialize IOC state of all functions on a chip reset.
	 */
	if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
	} else {
		bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
		bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
	}

	bfa_ioc_msgflush(ioc);
	status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
	if (status == BFA_STATUS_OK)
		bfa_ioc_lpu_start(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);

	return status;
}

/* Enable/disable IOC failure auto recovery. */
void
bfa_nw_ioc_auto_recover(bool auto_recover)
{
	bfa_nw_auto_recover = auto_recover;
}

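/* Read one message out of the firmware-to-host (LPU) mailbox, if present. */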
static bool
bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
{
	u32	*msgp = mbmsg;
	u32	r32;
	int	i;

	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
	if ((r32 & 1) == 0)
		return false;

	/* read the MBOX msg */
	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
	     i++) {
		r32 = readl(ioc->ioc_regs.lpu_mbox +
				   i * sizeof(u32));
		msgp[i] = htonl(r32);
	}

	/* turn off mailbox interrupt by clearing mailbox status */
	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
	readl(ioc->ioc_regs.lpu_mbox_cmd);

	return true;
}

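/* Dispatch IOC-class (BFI_MC_IOC) mailbox messages from the firmware. */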
static void
bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
{
	union bfi_ioc_i2h_msg_u	*msg;
	struct bfa_iocpf *iocpf = &ioc->iocpf;

	msg = (union bfi_ioc_i2h_msg_u *) m;

	bfa_ioc_stats(ioc, ioc_isrs);

	switch (msg->mh.msg_id) {
	case BFI_IOC_I2H_HBEAT:
		break;

	case BFI_IOC_I2H_ENABLE_REPLY:
		bfa_ioc_enable_reply(ioc,
			(enum bfa_mode)msg->fw_event.port_mode,
			msg->fw_event.cap_bm);
		break;

	case BFI_IOC_I2H_DISABLE_REPLY:
		bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
		break;

	case BFI_IOC_I2H_GETATTR_REPLY:
		bfa_ioc_getattr_reply(ioc);
		break;

	default:
		BUG_ON(1);
	}
}

/**
 * bfa_nw_ioc_attach - IOC attach time initialization and setup.
 *
 * @ioc:	memory for IOC
 * @bfa:	driver instance structure
 * @cbfn:	callbacks into the driver
 */
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
{
	ioc->bfa	= bfa;
	ioc->cbfn	= cbfn;
	ioc->fcmode	= false;
	ioc->pllinit	= false;
	ioc->dbg_fwsave_once = true;
	ioc->iocpf.ioc  = ioc;

	bfa_ioc_mbox_attach(ioc);
	INIT_LIST_HEAD(&ioc->notify_q);

	bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
	bfa_fsm_send_event(ioc, IOC_E_RESET);
}

/* Driver detach time IOC cleanup. */
void
bfa_nw_ioc_detach(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(ioc, IOC_E_DETACH);

	/* Done with detach, empty the notify_q. */
	INIT_LIST_HEAD(&ioc->notify_q);
}

/**
 * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
 *
 * @ioc:	IOC instance
 * @pcidev:	PCI device information for this IOC
 * @clscode:	PCI function class code
 */
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
		 enum bfi_pcifn_class clscode)
{
	ioc->clscode	= clscode;
	ioc->pcidev	= *pcidev;

	/*
	 * Initialize IOC and device personality
	 */
	ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
	ioc->asic_mode  = BFI_ASIC_MODE_FC;

	switch (pcidev->device_id) {
	case PCI_DEVICE_ID_BROCADE_CT:
		ioc->asic_gen = BFI_ASIC_GEN_CT;
		ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
		ioc->asic_mode  = BFI_ASIC_MODE_ETH;
		ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
		ioc->ad_cap_bm = BFA_CM_CNA;
		break;

	case BFA_PCI_DEVICE_ID_CT2:
		ioc->asic_gen = BFI_ASIC_GEN_CT2;
		if (clscode == BFI_PCIFN_CLASS_FC &&
			pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
			ioc->asic_mode  = BFI_ASIC_MODE_FC16;
			ioc->fcmode = true;
			ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
			ioc->ad_cap_bm = BFA_CM_HBA;
		} else {
			ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
			ioc->asic_mode  = BFI_ASIC_MODE_ETH;
			if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_CNA;
				ioc->ad_cap_bm = BFA_CM_CNA;
			} else {
				ioc->port_mode =
				ioc->port_mode_cfg = BFA_MODE_NIC;
				ioc->ad_cap_bm = BFA_CM_NIC;
			}
		}
		break;

	default:
		BUG_ON(1);
	}

	/*
	 * Set asic specific interfaces.
	 */
	if (ioc->asic_gen == BFI_ASIC_GEN_CT)
		bfa_nw_ioc_set_ct_hwif(ioc);
	else {
		WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
		bfa_nw_ioc_set_ct2_hwif(ioc);
		bfa_nw_ioc_ct2_poweron(ioc);
	}

	bfa_ioc_map_port(ioc);
	bfa_ioc_reg_init(ioc);
}

/**
 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
 *
 * @ioc:	IOC instance
 * @dm_kva:	kernel virtual address of IOC dma memory
 * @dm_pa:	physical address of IOC dma memory
 */
void
bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
{
	/*
	 * dma memory for firmware attribute
	 */
	ioc->attr_dma.kva = dm_kva;
	ioc->attr_dma.pa = dm_pa;
	ioc->attr = (struct bfi_ioc_attr *) dm_kva;
}

/* Return size of dma memory required. */
u32
bfa_nw_ioc_meminfo(void)
{
	return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
}

void
bfa_nw_ioc_enable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_enables);
	ioc->dbg_fwsave_once = true;

	bfa_fsm_send_event(ioc, IOC_E_ENABLE);
}

void
bfa_nw_ioc_disable(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_disables);
	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}

/* Initialize memory for saving firmware trace. */
void
bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
{
	ioc->dbg_fwsave = dbg_fwsave;
	ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
}

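/* Convert an SMEM flash offset to a host page number. */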
static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
	return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
}

/* Register mailbox message handler function, to be called by common modules */
void
bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
		    bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;

	mod->mbhdlr[mc].cbfn	= cbfn;
	mod->mbhdlr[mc].cbarg = cbarg;
}

/**
 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
 *
 * @ioc:	IOC instance
 * @cmd:	Mailbox command
 * @cbfn:	callback invoked once the command has been sent
 * @cbarg:	argument passed to @cbfn
 *
 * Queues the command if the mailbox is busy. It is the caller's
 * responsibility to serialize requests.
 */
bool
bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
			bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	u32			stat;

	cmd->cbfn = cbfn;
	cmd->cbarg = cbarg;

	/* If a previous command is pending, queue new command */
	if (!list_empty(&mod->cmd_q)) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/* If mailbox is busy, queue command for poll timer */
	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
	if (stat) {
		list_add_tail(&cmd->qe, &mod->cmd_q);
		return true;
	}

	/* mailbox is free -- queue command to firmware */
	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));

	return false;
}

/* Handle mailbox interrupts */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfi_mbmsg m;
	int	mc;

	if (bfa_ioc_msgget(ioc, &m)) {
		/* Treat IOC message class as special. */
		mc = m.mh.msg_class;
		if (mc == BFI_MC_IOC) {
			bfa_ioc_isr(ioc, &m);
			return;
		}

		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
			return;

		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
	}

	bfa_ioc_lpu_read_stat(ioc);

	/* Try to send pending mailbox commands */
	bfa_ioc_mbox_poll(ioc);
}

void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}

/* return true if IOC is disabled */
bool
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}

/* return true if IOC is operational */
bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}

/* Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
void
bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
			struct bfa_ioc_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->notify_q);
}

#define BFA_MFG_NAME "QLogic"
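/* Fill in adapter attributes from the firmware-reported IOC attributes. */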
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
			 struct bfa_adapter_attr *ad_attr)
{
	struct bfi_ioc_attr *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
	       sizeof(struct bfa_mfg_vpd));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, model descr uses same model string */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	ad_attr->mac  = bfa_nw_ioc_get_mac(ioc);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
}

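/* Derive the IOC type (LL/FC/FCoE) from the PCI class code and port mode. */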
static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}

static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	memcpy(serial_num,
			(void *)ioc->attr->brcd_serialnum,
			BFA_ADAPTER_SERIAL_NUM_LEN);
}

static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}

static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
	BUG_ON(!(chip_rev));

	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}

static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memcpy(optrom_ver, ioc->attr->optrom_version,
	       BFA_VERSION_LEN);
}

static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	BUG_ON(!(model));
	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		BFA_MFG_NAME, ioc_attr->card_type);
}

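/* Derive the externally visible IOC state, refined by the IOCPF state. */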
static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}
	return ioc_st;
}

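/* Snapshot IOC, PCI and adapter attributes for the caller. */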
void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = bfa_ioc_portid(ioc);
	ioc_attr->port_mode = ioc->port_mode;

	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
	ioc_attr->def_fn = bfa_ioc_is_default(ioc);
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}

/* WWN public */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}

mac_t
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
{
	return ioc->attr->mac;
}

/* Firmware failure detected. Start recovery actions. */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	pr_crit("Heart Beat of IOC has failed\n");
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}

/* BFA IOC PF private functions */

static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}

static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}

static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}

static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}

static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}

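/* IOCPF timer callback: poll fwinit progress or escalate a timeout event. */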
void
bfa_nw_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
	enum bfa_iocpf_state iocpf_st;

	iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

	if (iocpf_st == BFA_IOCPF_HWINIT)
		bfa_ioc_poll_fwinit(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}

void
bfa_nw_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}

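/* Poll firmware init progress, for up to BFA_IOC_TOV in total. */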
static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
	} else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		mod_timer(&ioc->iocpf_timer, jiffies +
			msecs_to_jiffies(BFA_IOC_POLL_TOV));
	}
}

/*
 *	Flash module specific
 */

/*
 * The flash DMA buffer should be big enough to hold both the MFG block and
 * the ASIC block (64k) at the same time, and it should be 2k aligned to
 * avoid having a write segment cross a sector boundary.
 */
#define BFA_FLASH_SEG_SZ	2048
#define BFA_FLASH_DMA_BUF_SZ	\
	roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)

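/* Complete the current flash operation and invoke the caller's callback. */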
static void
bfa_flash_cb(struct bfa_flash *flash)
{
	flash->op_busy = 0;
	if (flash->cbfn)
		flash->cbfn(flash->cbarg, flash->status);
}

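/* IOC event handler: fail any in-progress flash operation on IOC failure. */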
static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_flash *flash = cbarg;

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (flash->op_busy) {
			flash->status = BFA_STATUS_IOC_FAILURE;
			flash->cbfn(flash->cbarg, flash->status);
			flash->op_busy = 0;
		}
		break;
	default:
		break;
	}
}

/*
 * Send flash write request.
 */
static void
bfa_flash_write_send(struct bfa_flash *flash)
{
	struct bfi_flash_write_req *msg =
			(struct bfi_flash_write_req *) flash->mb.msg;
	u32	len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	       flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);

	/* indicate if it's the last msg of the whole write operation */
	msg->last = (len == flash->residue) ? 1 : 0;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

	flash->residue -= len;
	flash->offset += len;
}

/**
 * bfa_flash_read_send - Send flash read request.
 *
 * @cbarg: callback argument
 */
static void
bfa_flash_read_send(void *cbarg)
{
	struct bfa_flash *flash = cbarg;
	struct bfi_flash_read_req *msg =
			(struct bfi_flash_read_req *) flash->mb.msg;
	u32	len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	       flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
}

/**
 * bfa_flash_intr - Process flash response messages upon receiving interrupts.
 *
 * @flasharg: flash structure
 * @msg: message structure
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
{
	struct bfa_flash *flash = flasharg;
	u32	status;

	union {
		struct bfi_flash_query_rsp *query;
		struct bfi_flash_write_rsp *write;
		struct bfi_flash_read_rsp *read;
		struct bfi_mbmsg   *msg;
	} m;

	m.msg = msg;

	/* receiving response after ioc failure */
	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
		return;

	switch (msg->mh.msg_id) {
	case BFI_FLASH_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		if (status == BFA_STATUS_OK) {
			u32	i;
			struct bfa_flash_attr *attr, *f;

			attr = (struct bfa_flash_attr *) flash->ubuf;
			f = (struct bfa_flash_attr *) flash->dbuf_kva;
			attr->status = be32_to_cpu(f->status);
			attr->npart = be32_to_cpu(f->npart);
			for (i = 0; i < attr->npart; i++) {
				attr->part[i].part_type =
					be32_to_cpu(f->part[i].part_type);
				attr->part[i].part_instance =
					be32_to_cpu(f->part[i].part_instance);
				attr->part[i].part_off =
					be32_to_cpu(f->part[i].part_off);
				attr->part[i].part_size =
					be32_to_cpu(f->part[i].part_size);
				attr->part[i].part_len =
					be32_to_cpu(f->part[i].part_len);
				attr->part[i].part_status =
					be32_to_cpu(f->part[i].part_status);
			}
		}
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		if (status != BFA_STATUS_OK || flash->residue == 0) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			bfa_flash_write_send(flash);
		}
		break;
	case BFI_FLASH_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		if (status != BFA_STATUS_OK) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			u32 len = be32_to_cpu(m.read->length);

			memcpy(flash->ubuf + flash->offset,
			       flash->dbuf_kva, len);
			flash->residue -= len;
			flash->offset += len;
			if (flash->residue == 0) {
				flash->status = status;
				bfa_flash_cb(flash);
			} else {
				bfa_flash_read_send(flash);
			}
		}
		break;
	case BFI_FLASH_I2H_BOOT_VER_RSP:
	case BFI_FLASH_I2H_EVENT:
		break;
	default:
		WARN_ON(1);
	}
}

/*
 * Flash memory info API.
 */
u32
bfa_nw_flash_meminfo(void)
{
	return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/**
 * bfa_nw_flash_attach - Flash attach API.
 *
 * @flash: flash structure
 * @ioc: ioc structure
 * @dev: device structure
 */
void
bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
{
	flash->ioc = ioc;
	flash->cbfn = NULL;
	flash->cbarg = NULL;
	flash->op_busy = 0;

	bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
	bfa_q_qe_init(&flash->ioc_notify);
	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}

/**
 * bfa_nw_flash_memclaim - Claim memory for flash
 *
 * @flash: flash structure
 * @dm_kva: pointer to virtual memory address
 * @dm_pa: physical memory address
 */
void
bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
{
	flash->dbuf_kva = dm_kva;
	flash->dbuf_pa = dm_pa;
	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
	dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
	dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}

/**
 * bfa_nw_flash_get_attr - Get flash attribute.
 *
 * @flash: flash structure
 * @attr: flash attribute structure
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
		      bfa_cb_flash cbfn, void *cbarg)
{
	struct bfi_flash_query_req *msg =
			(struct bfi_flash_query_req *) flash->mb.msg;

	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->ubuf = (u8 *) attr;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

	return BFA_STATUS_OK;
}

/**
 * bfa_nw_flash_update_part - Update flash partition.
 *
 * @flash: flash structure
 * @type: flash partition type
 * @instance: flash partition instance
 * @buf: update data buffer
 * @len: data buffer length
 * @offset: offset relative to the partition starting address
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
			 void *buf, u32 len, u32 offset,
			 bfa_cb_flash cbfn, void *cbarg)
{
	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be on a word (4-byte) boundary
	 */
	if (!len || (len & 0x03))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (type == BFA_FLASH_PART_MFG)
		return BFA_STATUS_EINVAL;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_write_send(flash);

	return BFA_STATUS_OK;
}

/**
 * bfa_nw_flash_read_part - Read flash partition.
 *
 * @flash: flash structure
 * @type: flash partition type
 * @instance: flash partition instance
 * @buf: read data buffer
 * @len: data buffer length
 * @offset: offset relative to the partition starting address
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
		       void *buf, u32 len, u32 offset,
		       bfa_cb_flash cbfn, void *cbarg)
{
	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be on a word (4-byte) boundary
	 */
	if (!len || (len & 0x03))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_read_send(flash);

	return BFA_STATUS_OK;
}

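/*
 * Illustrative caller of the flash read API (hypothetical sketch, not part
 * of this driver): supply a bfa_cb_flash completion callback and wait for it
 * to fire once all chunks have been DMAed into 'buf'. The names
 * 'my_read_done' and 'done' are made up for the example.
 *
 *	static void my_read_done(void *cbarg, enum bfa_status status)
 *	{
 *		complete((struct completion *)cbarg);
 *	}
 *
 *	...
 *	init_completion(&done);
 *	status = bfa_nw_flash_read_part(flash, BFA_FLASH_PART_FWIMG, 0,
 *					buf, len, 0, my_read_done, &done);
 *	if (status == BFA_STATUS_OK)
 *		wait_for_completion(&done);
 */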