1/*
2 * Linux network driver for QLogic BR-series Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15 * Copyright (c) 2014-2015 QLogic Corporation
16 * All rights reserved
17 * www.qlogic.com
18 */
19#include "bna.h"
20
21static inline int
22ethport_can_be_up(struct bna_ethport *ethport)
23{
24	int ready = 0;
25	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
26		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
27			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
28			 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
29	else
30		ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
31			 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
32			 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
33	return ready;
34}
35
/* An ethport currently up satisfies the same conditions as "can be up". */
#define ethport_is_up ethport_can_be_up
37
/* Events fed into the ethport state machine. */
enum bna_ethport_event {
	ETHPORT_E_START			= 1,
	ETHPORT_E_STOP			= 2,
	ETHPORT_E_FAIL			= 3,
	ETHPORT_E_UP			= 4,
	ETHPORT_E_DOWN			= 5,
	ETHPORT_E_FWRESP_UP_OK		= 6,
	ETHPORT_E_FWRESP_DOWN		= 7,
	ETHPORT_E_FWRESP_UP_FAIL	= 8,
};
48
/* Events fed into the enet state machine. */
enum bna_enet_event {
	ENET_E_START			= 1,
	ENET_E_STOP			= 2,
	ENET_E_FAIL			= 3,
	ENET_E_PAUSE_CFG		= 4,
	ENET_E_MTU_CFG			= 5,
	ENET_E_FWRESP_PAUSE		= 6,
	ENET_E_CHLD_STOPPED		= 7,
};
58
/* Events fed into the ioceth state machine. */
enum bna_ioceth_event {
	IOCETH_E_ENABLE			= 1,
	IOCETH_E_DISABLE		= 2,
	IOCETH_E_IOC_RESET		= 3,
	IOCETH_E_IOC_FAILED		= 4,
	IOCETH_E_IOC_READY		= 5,
	IOCETH_E_ENET_ATTR_RESP		= 6,
	IOCETH_E_ENET_STOPPED		= 7,
	IOCETH_E_IOC_DISABLED		= 8,
};
69
/*
 * Copy one hardware stats section from the DMA-able area into the driver's
 * stats structure, byte-swapping each 64-bit counter from big endian.
 * Relies on locals "i", "count", "stats_src", "stats_dst" and "bna" being
 * in scope at the expansion site (see bna_bfi_stats_get_rsp()).
 *
 * Fix: dropped the stray trailing '\' after "} while (0)", which spliced
 * the following (blank) line into the macro definition.
 */
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
78
79/*
80 * FW response handlers
81 */
82
83static void
84bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
85				struct bfi_msgq_mhdr *msghdr)
86{
87	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
88
89	if (ethport_can_be_up(ethport))
90		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
91}
92
93static void
94bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
95				struct bfi_msgq_mhdr *msghdr)
96{
97	int ethport_up = ethport_is_up(ethport);
98
99	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
100
101	if (ethport_up)
102		bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
103}
104
105static void
106bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
107				struct bfi_msgq_mhdr *msghdr)
108{
109	struct bfi_enet_enable_req *admin_req =
110		&ethport->bfi_enet_cmd.admin_req;
111	struct bfi_enet_rsp *rsp =
112		container_of(msghdr, struct bfi_enet_rsp, mh);
113
114	switch (admin_req->enable) {
115	case BNA_STATUS_T_ENABLED:
116		if (rsp->error == BFI_ENET_CMD_OK)
117			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
118		else {
119			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
120			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
121		}
122		break;
123
124	case BNA_STATUS_T_DISABLED:
125		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
126		ethport->link_status = BNA_LINK_DOWN;
127		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
128		break;
129	}
130}
131
132static void
133bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
134				struct bfi_msgq_mhdr *msghdr)
135{
136	struct bfi_enet_diag_lb_req *diag_lb_req =
137		&ethport->bfi_enet_cmd.lpbk_req;
138	struct bfi_enet_rsp *rsp =
139		container_of(msghdr, struct bfi_enet_rsp, mh);
140
141	switch (diag_lb_req->enable) {
142	case BNA_STATUS_T_ENABLED:
143		if (rsp->error == BFI_ENET_CMD_OK)
144			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
145		else {
146			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
147			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
148		}
149		break;
150
151	case BNA_STATUS_T_DISABLED:
152		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
153		break;
154	}
155}
156
/* Firmware acknowledged a pause configuration request; advance the FSM. */
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
162
163static void
164bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
165			struct bfi_msgq_mhdr *msghdr)
166{
167	struct bfi_enet_attr_rsp *rsp =
168		container_of(msghdr, struct bfi_enet_attr_rsp, mh);
169
170	/**
171	 * Store only if not set earlier, since BNAD can override the HW
172	 * attributes
173	 */
174	if (!ioceth->attr.fw_query_complete) {
175		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
176		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
177		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
178		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
179		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
180		ioceth->attr.fw_query_complete = true;
181	}
182
183	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
184}
185
/* Process a firmware stats response: byte-swap and scatter the HW stats
 * into the driver's stats area, then complete the pending stats get.
 * The bna_stats_copy() expansions rely on the locals declared below.
 */
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	/* Fixed sections; note rlb shares the rad stats layout. */
	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
				sizeof(u64);
			/* HW packs only the selected rxf sections. */
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
				sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Allow the next stats request, then notify the requester. */
	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
239
240static void
241bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
242			struct bfi_msgq_mhdr *msghdr)
243{
244	ethport->link_status = BNA_LINK_UP;
245
246	/* Dispatch events */
247	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
248}
249
250static void
251bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
252				struct bfi_msgq_mhdr *msghdr)
253{
254	ethport->link_status = BNA_LINK_DOWN;
255
256	/* Dispatch events */
257	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
258}
259
/* Handle an error interrupt: clear a latched halt condition first, then
 * run the IOC error ISR.
 */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}
268
269void
270bna_mbox_handler(struct bna *bna, u32 intr_status)
271{
272	if (BNA_IS_ERR_INTR(bna, intr_status)) {
273		bna_err_handler(bna, intr_status);
274		return;
275	}
276	if (BNA_IS_MBOX_INTR(bna, intr_status))
277		bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
278}
279
/* Dispatch a firmware message-queue response/AEN to the owning object
 * (Tx, Rx, ethport, enet, ioceth or stats), looked up by enet_id where
 * applicable. Unknown message ids are silently ignored.
 */
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	/* Generic rxf configuration responses share one handler. */
	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_ucast_set_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	/* Ethport, enet, ioceth and stats responses/AENs. */
	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
389
390/* ETHPORT */
391
/* Invoke and clear the pending ethport stop callback, if any. The callback
 * is cleared before it is called so it fires exactly once.
 */
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)
401
/* Invoke and clear the pending admin-up callback with the given status. */
#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
411
412static void
413bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
414{
415	struct bfi_enet_enable_req *admin_up_req =
416		&ethport->bfi_enet_cmd.admin_req;
417
418	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
419		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
420	admin_up_req->mh.num_entries = htons(
421		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
422	admin_up_req->enable = BNA_STATUS_T_ENABLED;
423
424	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
425		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
426	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
427}
428
429static void
430bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
431{
432	struct bfi_enet_enable_req *admin_down_req =
433		&ethport->bfi_enet_cmd.admin_req;
434
435	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
436		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
437	admin_down_req->mh.num_entries = htons(
438		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
439	admin_down_req->enable = BNA_STATUS_T_DISABLED;
440
441	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
442		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
443	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
444}
445
446static void
447bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
448{
449	struct bfi_enet_diag_lb_req *lpbk_up_req =
450		&ethport->bfi_enet_cmd.lpbk_req;
451
452	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
453		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
454	lpbk_up_req->mh.num_entries = htons(
455		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
456	lpbk_up_req->mode = (ethport->bna->enet.type ==
457				BNA_ENET_T_LOOPBACK_INTERNAL) ?
458				BFI_ENET_DIAG_LB_OPMODE_EXT :
459				BFI_ENET_DIAG_LB_OPMODE_CBL;
460	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;
461
462	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
463		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
464	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
465}
466
467static void
468bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
469{
470	struct bfi_enet_diag_lb_req *lpbk_down_req =
471		&ethport->bfi_enet_cmd.lpbk_req;
472
473	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
474		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
475	lpbk_down_req->mh.num_entries = htons(
476		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
477	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;
478
479	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
480		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
481	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
482}
483
484static void
485bna_bfi_ethport_up(struct bna_ethport *ethport)
486{
487	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
488		bna_bfi_ethport_admin_up(ethport);
489	else
490		bna_bfi_ethport_lpbk_up(ethport);
491}
492
493static void
494bna_bfi_ethport_down(struct bna_ethport *ethport)
495{
496	if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
497		bna_bfi_ethport_admin_down(ethport);
498	else
499		bna_bfi_ethport_lpbk_down(ethport);
500}
501
/* Ethport state machine states (entry function + event handler each). */
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
514
/* Entering stopped: complete any pending stop callback. */
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}
520
/* Stopped state: only START moves us forward; STOP re-acks the caller. */
static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
547
/* Entering down: nothing to do. */
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}
552
553static void
554bna_ethport_sm_down(struct bna_ethport *ethport,
555			enum bna_ethport_event event)
556{
557	switch (event) {
558	case ETHPORT_E_STOP:
559		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
560		break;
561
562	case ETHPORT_E_FAIL:
563		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
564		break;
565
566	case ETHPORT_E_UP:
567		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
568		bna_bfi_ethport_up(ethport);
569		break;
570
571	default:
572		bfa_sm_fault(event);
573	}
574}
575
/* Entering up_resp_wait: the up request was already posted by the caller. */
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}
580
/* Waiting for firmware to acknowledge an up request. */
static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
619
/* Entering down_resp_wait. */
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}
629
/* Waiting for firmware to acknowledge a down request. */
static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
661
/* Entering up: nothing to do. */
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}
666
/* Up state: STOP and DOWN both issue a down request to firmware first. */
static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
690
/* Entering last_resp_wait: nothing to do. */
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}
695
/* Stopping: waiting for the final firmware response before going stopped. */
static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_E_STOP */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
727
728static void
729bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
730{
731	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
732	ethport->bna = bna;
733
734	ethport->link_status = BNA_LINK_DOWN;
735	ethport->link_cbfn = bnad_cb_ethport_link_status;
736
737	ethport->rx_started_count = 0;
738
739	ethport->stop_cbfn = NULL;
740	ethport->adminup_cbfn = NULL;
741
742	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
743}
744
745static void
746bna_ethport_uninit(struct bna_ethport *ethport)
747{
748	ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
749	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
750
751	ethport->bna = NULL;
752}
753
/* Kick the ethport state machine out of stopped. */
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}
759
/* Ethport-stopped callback: drop one count off the enet's child wait. */
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
765
/* Request an ethport stop; completion is reported via stop_cbfn. */
static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
772
/* IOC failure path: reset state, report link down if needed, fail the FSM. */
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}
785
786/* Should be called only when ethport is disabled */
787void
788bna_ethport_cb_rx_started(struct bna_ethport *ethport)
789{
790	ethport->rx_started_count++;
791
792	if (ethport->rx_started_count == 1) {
793		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;
794
795		if (ethport_can_be_up(ethport))
796			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
797	}
798}
799
800void
801bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
802{
803	int ethport_up = ethport_is_up(ethport);
804
805	ethport->rx_started_count--;
806
807	if (ethport->rx_started_count == 0) {
808		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;
809
810		if (ethport_up)
811			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
812	}
813}
814
815/* ENET */
816
/* Start all enet children (ethport, Tx mod, Rx mod) with the Tx/Rx type
 * derived from the enet type (regular vs. loopback).
 */
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
829
/* Stop all enet children, tracking their completions with a wait counter;
 * bna_enet_cb_chld_stopped() fires when the counter drains.
 */
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
847
/* Fail all enet children immediately (no completion tracking). */
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)
854
/* (Re)start only the Rx module, e.g. after an MTU reconfiguration. */
#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)
862
/* Stop only the Rx module, tracked with the child-stop wait counter. */
#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
873
/* Invoke and clear the pending enet stop callback (with its arg), if any. */
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)
886
/* Invoke and clear the pending pause-config-done callback, if any. */
#define call_enet_pause_cbfn(enet)					\
do {									\
	if ((enet)->pause_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->pause_cbfn;				\
		(enet)->pause_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
896
/* Invoke and clear the pending MTU-config-done callback, if any. */
#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
906
/* Forward declarations needed by the enet state machine below. */
static void bna_enet_cb_chld_stopped(void *arg);
static void bna_bfi_pause_set(struct bna_enet *enet);

/* Enet state machine states (entry function + event handler each). */
bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);
924
/* Entering stopped: flush every pending completion callback. */
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}
932
/* Stopped state: config events complete immediately; START moves on. */
static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
969
/* Entering pause_init_wait: push the initial pause config to firmware. */
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}
975
/* Waiting for the initial pause config response before starting children. */
static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		/* Remember the change; reissue once the response arrives. */
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1013
/* Entering last_resp_wait: pending pause changes are irrelevant now. */
static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}
1019
/* Stopping: waiting for the final pause response before going stopped. */
static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1034
/* Entering started: complete any pending config callbacks. */
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}
1045
/* Started state: handle config changes and stop/fail requests. */
static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		/* MTU change requires the Rx path to be restarted. */
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1074
/* Entering cfg_wait: nothing to do. */
static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}
1079
/* Waiting for an in-flight config operation (pause set or Rx restart).
 * Further config requests arriving meanwhile are latched in flags and
 * replayed one at a time when the current operation completes.
 */
static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1125
1126static void
1127bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
1128{
1129	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
1130	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
1131}
1132
/* Stopping while a config operation is in flight; wait for it to finish. */
static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1152
/* Entering chld_stop_wait: stop all children and wait for completions. */
static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}
1158
/* Waiting for all children to report stopped. */
static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
				enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1177
/* Build a BFI_ENET_H2I_SET_PAUSE_REQ message from the current
 * pause_config and post it to the firmware message queue. The FSM
 * expects an ENET_E_FWRESP_PAUSE event when firmware responds.
 */
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	/* num_entries is carried big-endian on the wire */
	pause_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}
1194
1195static void
1196bna_enet_cb_chld_stopped(void *arg)
1197{
1198	struct bna_enet *enet = (struct bna_enet *)arg;
1199
1200	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
1201}
1202
/* Initialize the enet object: clear flags/MTU, reset all completion
 * callbacks and put the FSM into the stopped state.
 */
static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->pause_cbfn = NULL;

	enet->mtu_cbfn = NULL;

	/* Must come last: the stopped-state entry action may consult the
	 * callback fields cleared above.
	 */
	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}
1220
/* Tear down the enet object; clears flags and drops the back-pointer. */
static void
bna_enet_uninit(struct bna_enet *enet)
{
	enet->flags = 0;

	enet->bna = NULL;
}
1228
/* Called when the IOC becomes ready; kick the FSM only if the enet has
 * also been administratively enabled.
 */
static void
bna_enet_start(struct bna_enet *enet)
{
	enet->flags |= BNA_ENET_F_IOCETH_READY;
	if (enet->flags & BNA_ENET_F_ENABLED)
		bfa_fsm_send_event(enet, ENET_E_START);
}
1236
1237static void
1238bna_ioceth_cb_enet_stopped(void *arg)
1239{
1240	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1241
1242	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
1243}
1244
/* Stop the enet on behalf of the ioceth; completion is delivered to the
 * ioceth via bna_ioceth_cb_enet_stopped().
 */
static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}
1254
/* Propagate an IOC failure into the enet state machine. */
static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}
1261
/* A Tx child finished stopping: drop one count off the wait counter
 * that gates the CHLD_STOPPED event.
 */
void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
1267
/* An Rx child finished stopping: drop one count off the wait counter
 * that gates the CHLD_STOPPED event.
 */
void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
1273
/* Return the currently configured MTU (0 until bna_enet_mtu_set()). */
int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}
1279
/* Administratively enable the enet. Only legal from the stopped state;
 * actually starts the FSM only if the IOC is already ready.
 */
void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}
1291
/* Administratively disable the enet. For BNA_SOFT_CLEANUP the hardware
 * is left untouched and the completion callback fires immediately;
 * otherwise a full FSM stop is initiated and @cbfn is invoked with the
 * bnad when the stop completes.
 */
void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}
1308
/* Record a new pause configuration and ask the FSM to push it to
 * firmware; @cbfn is invoked when the configuration takes effect.
 */
void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *))
{
	enet->pause_config = *pause_config;

	enet->pause_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}
1320
/* Record a new MTU and ask the FSM to apply it (may require an Rx
 * restart); @cbfn is invoked when the change takes effect.
 */
void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}
1331
/* Fetch the adapter's factory (permanent) MAC address from the IOC. */
void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}
1337
1338/* IOCETH */
1339
/* Clear any latched mailbox interrupt status, then enable mailbox
 * interrupts at both the bnad and hardware level.
 */
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

/* Disable mailbox interrupts in the reverse order of enabling. */
#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)

/* Invoke and clear the one-shot ioceth stop-completion callback.
 * The callback fields are cleared before the call so re-registration
 * from within the callback is safe.
 */
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* The stats module needs no teardown; kept as a macro for symmetry. */
#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

/* Mark the stats module usable once the IOC is up. */
#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

/* Mark the stats module unusable during an orderly stop. */
#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

/* On IOC failure also clear any in-flight stats get/clear requests,
 * since no firmware response will ever arrive for them.
 */
#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)
1387
/* Forward declaration: used by the enet_attr_wait entry action below. */
static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

/* State declarations for the ioceth FSM; each expands to the state's
 * entry-action and event-handler prototypes plus a state-change helper.
 */
bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);
1406
/* Entry action for stopped: complete any pending disable request. */
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}
1412
/* stopped state: idle; ENABLE starts the IOC, IOC events may still
 * arrive here (e.g. reset/fail notifications racing with a stop).
 */
static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		/* Already stopped; re-enter to fire the stop callback */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1440
/* Entry action for ioc_ready_wait: intentionally empty. */
static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}
1449
/* ioc_ready_wait state: IOC enable is in progress; wait for the ready
 * (or failure) notification from the IOC layer.
 */
static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		/* IOC is up; next fetch the enet attributes from FW */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1477
/* Entry action for enet_attr_wait: request enet attributes from FW. */
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}
1483
/* enet_attr_wait state: attribute query outstanding to firmware. */
static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		/* Must let the outstanding query finish before disabling */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1506
/* Entry action for ready: bring up the enet and stats module, then
 * notify the driver layer that the ioceth is operational.
 */
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}
1514
/* ready state: fully operational; handle disable requests and IOC
 * failures.
 */
static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		/* Fail children before entering the failed state */
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1534
/* Entry action for last_resp_wait: nothing to do; we only wait for the
 * outstanding firmware response (or an IOC failure).
 */
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}
1539
/* last_resp_wait state: a disable was requested while the attribute
 * query was outstanding; wait for its response (or IOC failure) before
 * disabling the IOC.
 */
static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1560
/* Entry action for enet_stop_wait: quiesce stats and stop the enet;
 * completion arrives as IOCETH_E_ENET_STOPPED.
 */
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}
1567
/* enet_stop_wait state: waiting for the enet to stop before disabling
 * the IOC.
 */
static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		/* Force-fail children instead of waiting for their stop */
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1590
/* Entry action for ioc_disable_wait: nothing to do; bfa_nw_ioc_disable()
 * was already issued by the transitioning state.
 */
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}
1595
/* ioc_disable_wait state: waiting for the IOC-disabled notification. */
static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
				enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
1615
/* Entry action for failed: inform the driver layer of the failure. */
static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}
1621
/* failed state: IOC failed; wait for either a disable request or an IOC
 * reset (recovery) notification.
 */
static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		/* IOC recovered; resume the normal bring-up path */
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		/* Repeated failure notification; already in failed */
		break;

	default:
		bfa_sm_fault(event);
	}
}
1644
/* Build a BFI_ENET_H2I_GET_ATTR_REQ message and post it to firmware.
 * The response arrives as IOCETH_E_ENET_ATTR_RESP in the FSM.
 */
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	/* num_entries is carried big-endian on the wire */
	attr_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
1658
1659/* IOC callback functions */
1660
1661static void
1662bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1663{
1664	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1665
1666	if (error)
1667		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1668	else
1669		bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
1670}
1671
1672static void
1673bna_cb_ioceth_disable(void *arg)
1674{
1675	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1676
1677	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
1678}
1679
1680static void
1681bna_cb_ioceth_hbfail(void *arg)
1682{
1683	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1684
1685	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1686}
1687
1688static void
1689bna_cb_ioceth_reset(void *arg)
1690{
1691	struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1692
1693	bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
1694}
1695
/* IOC callback vector registered with bfa_nw_ioc_attach(); order
 * follows the bfa_ioc_cbfn member layout: enable, disable, heartbeat
 * failure, reset.
 */
static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	bna_cb_ioceth_enable,
	bna_cb_ioceth_disable,
	bna_cb_ioceth_hbfail,
	bna_cb_ioceth_reset
};
1702
/* Seed the ioceth attributes with compile-time defaults; overwritten by
 * the firmware attribute query once it completes.
 */
static void bna_attr_init(struct bna_ioceth *ioceth)
{
	ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
	ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
	ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
	ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
	ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
	ioceth->attr.fw_query_complete = false;
}
1712
/* Initialize the ioceth: attach the IOC, carve the preallocated DMA and
 * kernel memory among the common sub-modules (CEE, flash, msgq), and
 * start the FSM in the stopped state. The kva/dma cursors walk through
 * the single BNA_RES_MEM_T_COM allocation in sub-module order, so the
 * claim sequence below must match the size sum in bna_res_req().
 */
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/**
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	/* Final cursor advance; kva/dma are not used past this point */
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
1768
/* Detach the IOC and drop the back-pointer. */
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}
1776
/* Enable the ioceth. If it is already up, just re-notify the driver
 * layer; otherwise start the FSM (only legal from the stopped state).
 */
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}
1788
/* Disable the ioceth. For BNA_SOFT_CLEANUP the hardware is left alone
 * and the driver is notified immediately; otherwise a full FSM stop is
 * initiated with bnad_cb_ioceth_disabled() as the completion callback.
 */
void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}
1802
1803static void
1804bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
1805		  struct bna_res_info *res_info)
1806{
1807	int i;
1808
1809	ucam_mod->ucmac = (struct bna_mac *)
1810	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1811
1812	INIT_LIST_HEAD(&ucam_mod->free_q);
1813	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
1814		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
1815		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
1816	}
1817
1818	/* A separate queue to allow synchronous setting of a list of MACs */
1819	INIT_LIST_HEAD(&ucam_mod->del_q);
1820	for (i = i; i < (bna->ioceth.attr.num_ucmac * 2); i++) {
1821		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
1822		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->del_q);
1823	}
1824
1825	ucam_mod->bna = bna;
1826}
1827
1828static void
1829bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
1830{
1831	struct list_head *qe;
1832	int i;
1833
1834	i = 0;
1835	list_for_each(qe, &ucam_mod->free_q)
1836		i++;
1837
1838	i = 0;
1839	list_for_each(qe, &ucam_mod->del_q)
1840		i++;
1841
1842	ucam_mod->bna = NULL;
1843}
1844
1845static void
1846bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
1847		  struct bna_res_info *res_info)
1848{
1849	int i;
1850
1851	mcam_mod->mcmac = (struct bna_mac *)
1852	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;
1853
1854	INIT_LIST_HEAD(&mcam_mod->free_q);
1855	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1856		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
1857		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
1858	}
1859
1860	mcam_mod->mchandle = (struct bna_mcam_handle *)
1861	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;
1862
1863	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
1864	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
1865		bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
1866		list_add_tail(&mcam_mod->mchandle[i].qe,
1867				&mcam_mod->free_handle_q);
1868	}
1869
1870	/* A separate queue to allow synchronous setting of a list of MACs */
1871	INIT_LIST_HEAD(&mcam_mod->del_q);
1872	for (i = i; i < (bna->ioceth.attr.num_mcmac * 2); i++) {
1873		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
1874		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->del_q);
1875	}
1876
1877	mcam_mod->bna = bna;
1878}
1879
1880static void
1881bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
1882{
1883	struct list_head *qe;
1884	int i;
1885
1886	i = 0;
1887	list_for_each(qe, &mcam_mod->free_q) i++;
1888
1889	i = 0;
1890	list_for_each(qe, &mcam_mod->del_q) i++;
1891
1892	i = 0;
1893	list_for_each(qe, &mcam_mod->free_handle_q) i++;
1894
1895	mcam_mod->bna = NULL;
1896}
1897
/* Build a BFI_ENET_H2I_STATS_GET_REQ covering all stats for every
 * active Tx/Rx function and post it to firmware; results DMA into the
 * host buffer set up in bna_init(). Marks stats_get_busy until the
 * response handler clears it.
 */
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	/* Multi-byte fields are carried big-endian on the wire */
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
1919
/* Fill in the fixed (attribute-independent) resource requirements:
 * DMA memory for the common modules, IOC attributes and stats, plus
 * kernel memory for the firmware trace buffer.
 */
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	/* Size must match the claim order in bna_ioceth_init() */
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				 bfa_nw_flash_meminfo() +
				 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retreiving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retreiving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
				ALIGN(sizeof(struct bfi_enet_stats),
					PAGE_SIZE);
}
1953
/* Fill in the per-module resource requirements that depend on the
 * (possibly firmware-updated) ioceth attributes: object arrays for the
 * Tx, Rx, ucam and mcam modules. The *2 factors for RxQ and the CAM
 * arrays mirror the split consumption in the respective module inits.
 */
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_ucmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		(attr->num_mcmac * 2) * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
2023
/* First-stage BNA initialization: record the driver and PCI context,
 * wire up the stats DMA buffer, initialize register addresses and
 * bring up the ioceth, enet and ethport objects. Module arrays are set
 * up later by bna_mod_init() once firmware attributes are known.
 */
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
2046
/* Second-stage BNA initialization: set up the Tx/Rx/ucam/mcam modules
 * from the attribute-sized resource arrays and mark init done so
 * bna_uninit() knows to tear the modules down.
 */
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}
2063
/* Tear down the BNA in reverse order of initialization; the module
 * teardown is skipped if bna_mod_init() never ran.
 */
void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
2083
2084int
2085bna_num_txq_set(struct bna *bna, int num_txq)
2086{
2087	if (bna->ioceth.attr.fw_query_complete &&
2088		(num_txq <= bna->ioceth.attr.num_txq)) {
2089		bna->ioceth.attr.num_txq = num_txq;
2090		return BNA_CB_SUCCESS;
2091	}
2092
2093	return BNA_CB_FAIL;
2094}
2095
2096int
2097bna_num_rxp_set(struct bna *bna, int num_rxp)
2098{
2099	if (bna->ioceth.attr.fw_query_complete &&
2100		(num_rxp <= bna->ioceth.attr.num_rxp)) {
2101		bna->ioceth.attr.num_rxp = num_rxp;
2102		return BNA_CB_SUCCESS;
2103	}
2104
2105	return BNA_CB_FAIL;
2106}
2107
/* Pop one MAC entry off the given CAM free list, or NULL if empty. */
struct bna_mac *
bna_cam_mod_mac_get(struct list_head *head)
{
	struct list_head *qe;

	if (list_empty(head))
		return NULL;

	bfa_q_deq(head, &qe);
	return (struct bna_mac *)qe;
}
2119
/* Return a MAC entry to the given CAM list. */
void
bna_cam_mod_mac_put(struct list_head *tail, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, tail);
}
2125
/* Pop one multicast handle off the free list, or NULL if exhausted. */
struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
{
	struct list_head *qe;

	if (list_empty(&mcam_mod->free_handle_q))
		return NULL;

	bfa_q_deq(&mcam_mod->free_handle_q, &qe);

	return (struct bna_mcam_handle *)qe;
}
2138
/* Return a multicast handle to the free list. */
void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
2145
2146void
2147bna_hw_stats_get(struct bna *bna)
2148{
2149	if (!bna->stats_mod.ioc_ready) {
2150		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2151		return;
2152	}
2153	if (bna->stats_mod.stats_get_busy) {
2154		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
2155		return;
2156	}
2157
2158	bna_bfi_stats_get(bna);
2159}
2160