/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include "qlcnic_sriov.h"
#include "qlcnic.h"
#include "qlcnic_83xx_hw.h"
#include <linux/types.h>

#define QLC_BC_COMMAND	0
#define QLC_BC_RESPONSE	1

#define QLC_MBOX_RESP_TIMEOUT		(10 * HZ)
#define QLC_MBOX_CH_FREE_TIMEOUT	(10 * HZ)

#define QLC_BC_MSG		0
#define QLC_BC_CFREE		1
#define QLC_BC_FLR		2
#define QLC_BC_HDR_SZ		16
#define QLC_BC_PAYLOAD_SZ	(1024 - QLC_BC_HDR_SZ)

#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF		2048
#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF	512

#define QLC_83XX_VF_RESET_FAIL_THRESH	8
#define QLC_BC_CMD_MAX_RETRY_CNT	5

static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
				  struct qlcnic_cmd_args *);
static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
					struct qlcnic_cmd_args *);

static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
	.read_crb			= qlcnic_83xx_read_crb,
	.write_crb			= qlcnic_83xx_write_crb,
	.read_reg			= qlcnic_83xx_rd_reg_indirect,
	.write_reg			= qlcnic_83xx_wrt_reg_indirect,
	.get_mac_address		= qlcnic_83xx_get_mac_address,
	.setup_intr			= qlcnic_83xx_setup_intr,
	.alloc_mbx_args			= qlcnic_83xx_alloc_mbx_args,
	.mbx_cmd			= qlcnic_sriov_issue_cmd,
	.get_func_no			= qlcnic_83xx_get_func_no,
	.api_lock			= qlcnic_83xx_cam_lock,
	.api_unlock			= qlcnic_83xx_cam_unlock,
	.process_lb_rcv_ring_diag	= qlcnic_83xx_process_rcv_ring_diag,
	.create_rx_ctx			= qlcnic_83xx_create_rx_ctx,
	.create_tx_ctx			= qlcnic_83xx_create_tx_ctx,
	.del_rx_ctx			= qlcnic_83xx_del_rx_ctx,
	.del_tx_ctx			= qlcnic_83xx_del_tx_ctx,
	.setup_link_event		= qlcnic_83xx_setup_link_event,
	.get_nic_info			= qlcnic_83xx_get_nic_info,
	.get_pci_info			= qlcnic_83xx_get_pci_info,
	.set_nic_info			= qlcnic_83xx_set_nic_info,
	.change_macvlan			= qlcnic_83xx_sre_macaddr_change,
	.napi_enable			= qlcnic_83xx_napi_enable,
	.napi_disable			= qlcnic_83xx_napi_disable,
	.config_intr_coal		= qlcnic_83xx_config_intr_coal,
	.config_rss			= qlcnic_83xx_config_rss,
	.config_hw_lro			= qlcnic_83xx_config_hw_lro,
	.config_promisc_mode		= qlcnic_83xx_nic_set_promisc,
	.change_l2_filter		= qlcnic_83xx_change_l2_filter,
	.get_board_info			= qlcnic_83xx_get_port_info,
	.free_mac_list			= qlcnic_sriov_vf_free_mac_list,
	.enable_sds_intr		= qlcnic_83xx_enable_sds_intr,
	.disable_sds_intr		= qlcnic_83xx_disable_sds_intr,
};

static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
	.config_bridged_mode	= qlcnic_config_bridged_mode,
	.config_led		= qlcnic_config_led,
	.cancel_idc_work	= qlcnic_sriov_vf_cancel_fw_work,
	.napi_add		= qlcnic_83xx_napi_add,
	.napi_del		= qlcnic_83xx_napi_del,
	.shutdown		= qlcnic_sriov_vf_shutdown,
	.resume			= qlcnic_sriov_vf_resume,
	.config_ipaddr		= qlcnic_83xx_config_ipaddr,
	.clear_legacy_intr	= qlcnic_83xx_clear_legacy_intr,
};

static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
	{QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
	{QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
	{QLCNIC_BC_CMD_GET_ACL, 3, 14},
	{QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
};

static inline bool qlcnic_sriov_bc_msg_check(u32 val)
{
	return (val & (1 << QLC_BC_MSG)) ? true : false;
}

static inline bool qlcnic_sriov_channel_free_check(u32 val)
{
	return (val & (1 << QLC_BC_CFREE)) ? true : false;
}

static inline bool qlcnic_sriov_flr_check(u32 val)
{
	return (val & (1 << QLC_BC_FLR)) ? true : false;
}

static inline u8 qlcnic_sriov_target_func_id(u32 val)
{
	return (val >> 4) & 0xff;
}

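/* Map a VF index to its virtual PCI function number using the VF offset
 * and stride from the PF's SR-IOV extended capability. A VF has no view
 * of the SR-IOV capability, so return 0 when running as a VF.
 */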
static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
{
	struct pci_dev *dev = adapter->pdev;
	int pos;
	u16 stride, offset;

	if (qlcnic_sriov_vf_check(adapter))
		return 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);

	return (dev->devfn + offset + stride * vf_id) & 0xff;
}

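/* Allocate the SR-IOV context: per-VF state, the back-channel transaction
 * and async workqueues and, on the PF only, a vport with a random MAC
 * address for each VF. On failure the allocations are unwound in reverse
 * order.
 */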
int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
{
	struct qlcnic_sriov *sriov;
	struct qlcnic_back_channel *bc;
	struct workqueue_struct *wq;
	struct qlcnic_vport *vp;
	struct qlcnic_vf_info *vf;
	int err, i;

	if (!qlcnic_sriov_enable_check(adapter))
		return -EIO;

	sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
	if (!sriov)
		return -ENOMEM;

	adapter->ahw->sriov = sriov;
	sriov->num_vfs = num_vfs;
	bc = &sriov->bc;
	sriov->vf_info = kcalloc(num_vfs, sizeof(struct qlcnic_vf_info),
				 GFP_KERNEL);
	if (!sriov->vf_info) {
		err = -ENOMEM;
		goto qlcnic_free_sriov;
	}

	wq = create_singlethread_workqueue("bc-trans");
	if (wq == NULL) {
		err = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Cannot create bc-trans workqueue\n");
		goto qlcnic_free_vf_info;
	}

	bc->bc_trans_wq = wq;

	wq = create_singlethread_workqueue("async");
	if (wq == NULL) {
		err = -ENOMEM;
		dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
		goto qlcnic_destroy_trans_wq;
	}

	bc->bc_async_wq = wq;
	INIT_LIST_HEAD(&bc->async_list);

	for (i = 0; i < num_vfs; i++) {
		vf = &sriov->vf_info[i];
		vf->adapter = adapter;
		vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
		mutex_init(&vf->send_cmd_lock);
		spin_lock_init(&vf->vlan_list_lock);
		INIT_LIST_HEAD(&vf->rcv_act.wait_list);
		INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
		spin_lock_init(&vf->rcv_act.lock);
		spin_lock_init(&vf->rcv_pend.lock);
		init_completion(&vf->ch_free_cmpl);

		INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);

		if (qlcnic_sriov_pf_check(adapter)) {
			vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
			if (!vp) {
				err = -ENOMEM;
				goto qlcnic_destroy_async_wq;
			}
			sriov->vf_info[i].vp = vp;
			vp->vlan_mode = QLC_GUEST_VLAN_MODE;
			vp->max_tx_bw = MAX_BW;
			vp->min_tx_bw = MIN_BW;
			vp->spoofchk = false;
			random_ether_addr(vp->mac);
			dev_info(&adapter->pdev->dev,
				 "MAC Address %pM is configured for VF %d\n",
				 vp->mac, i);
		}
	}

	return 0;

qlcnic_destroy_async_wq:
	destroy_workqueue(bc->bc_async_wq);

qlcnic_destroy_trans_wq:
	destroy_workqueue(bc->bc_trans_wq);

qlcnic_free_vf_info:
	kfree(sriov->vf_info);

qlcnic_free_sriov:
	kfree(adapter->ahw->sriov);
	return err;
}

void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_cmd_args cmd;
	unsigned long flags;

	spin_lock_irqsave(&t_list->lock, flags);

	while (!list_empty(&t_list->wait_list)) {
		trans = list_first_entry(&t_list->wait_list,
					 struct qlcnic_bc_trans, list);
		list_del(&trans->list);
		t_list->count--;
		cmd.req.arg = (u32 *)trans->req_pay;
		cmd.rsp.arg = (u32 *)trans->rsp_pay;
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
	}

	spin_unlock_irqrestore(&t_list->lock, flags);
}

void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_back_channel *bc = &sriov->bc;
	struct qlcnic_vf_info *vf;
	int i;

	if (!qlcnic_sriov_enable_check(adapter))
		return;

	qlcnic_sriov_cleanup_async_list(bc);
	destroy_workqueue(bc->bc_async_wq);

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		qlcnic_sriov_cleanup_list(&vf->rcv_pend);
		cancel_work_sync(&vf->trans_work);
		qlcnic_sriov_cleanup_list(&vf->rcv_act);
	}

	destroy_workqueue(bc->bc_trans_wq);

	for (i = 0; i < sriov->num_vfs; i++)
		kfree(sriov->vf_info[i].vp);

	kfree(sriov->vf_info);
	kfree(adapter->ahw->sriov);
}

static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
{
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	__qlcnic_sriov_cleanup(adapter);
}

void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
{
	if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
		return;

	qlcnic_sriov_free_vlans(adapter);

	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_cleanup(adapter);

	if (qlcnic_sriov_vf_check(adapter))
		qlcnic_sriov_vf_cleanup(adapter);
}

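/* Post a single back-channel message fragment (16-byte header plus
 * payload) to the firmware mailbox and wait for the command completion.
 */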
static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
				    u32 *pay, u8 pci_func, u8 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct qlcnic_cmd_args cmd;
	unsigned long timeout;
	int err;

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	cmd.hdr = hdr;
	cmd.pay = pay;
	cmd.pay_size = size;
	cmd.func_num = pci_func;
	cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
	cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;

	err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		return err;
	}

	if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
		dev_err(&adapter->pdev->dev,
			"%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
			__func__, cmd.cmd_op, cmd.type, ahw->pci_func,
			ahw->op_mode);
		flush_workqueue(mbx->work_q);
	}

	return cmd.rsp_opcode;
}

static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
{
	adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
	adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
	adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
	adapter->num_txd = MAX_CMD_DESCRIPTORS;
	adapter->max_rds_rings = MAX_RDS_RINGS;
}

int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
				   struct qlcnic_info *npar_info, u16 vport_id)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	int err;
	u32 status;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	if (err)
		return err;

	cmd.req.arg[1] = vport_id << 16 | 0x1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to get vport info, err=%d\n", err);
		qlcnic_free_mbx_args(&cmd);
		return err;
	}

	status = cmd.rsp.arg[2] & 0xffff;
	if (status & BIT_0)
		npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
	if (status & BIT_1)
		npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
	if (status & BIT_2)
		npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
	if (status & BIT_3)
		npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
	if (status & BIT_4)
		npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
	if (status & BIT_5)
		npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
	if (status & BIT_6)
		npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
	if (status & BIT_7)
		npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
	if (status & BIT_8)
		npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
	if (status & BIT_9)
		npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);

	npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
	npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
	npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
	npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);

	dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
		 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
		 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
		 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
		 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
		 "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
		 npar_info->min_tx_bw, npar_info->max_tx_bw,
		 npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
		 npar_info->max_rx_mcast_mac_filters,
		 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
		 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
		 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
		 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
		 npar_info->max_remote_ipv6_addrs);

	qlcnic_free_mbx_args(&cmd);
	return err;
}

static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
				      struct qlcnic_cmd_args *cmd)
{
	adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
	adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
	return 0;
}

static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
					    struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	int i, num_vlans;
	u16 *vlans;

	if (sriov->allowed_vlans)
		return 0;

	sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
	sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
	dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
		 sriov->num_allowed_vlans);

	qlcnic_sriov_alloc_vlans(adapter);

	if (!sriov->any_vlan)
		return 0;

	num_vlans = sriov->num_allowed_vlans;
	sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
	if (!sriov->allowed_vlans)
		return -ENOMEM;

	vlans = (u16 *)&cmd->rsp.arg[3];
	for (i = 0; i < num_vlans; i++)
		sriov->allowed_vlans[i] = vlans[i];

	return 0;
}

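/* Fetch the VF's ACL from the PF over the back channel and apply the
 * VLAN mode (guest VLAN or PVID) that it reports.
 */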
static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_cmd_args cmd;
	int ret = 0;

	memset(&cmd, 0, sizeof(cmd));
	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
	if (ret)
		return ret;

	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
			ret);
	} else {
		sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
		switch (sriov->vlan_mode) {
		case QLC_GUEST_VLAN_MODE:
			ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
			break;
		case QLC_PVID_MODE:
			ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
			break;
		}
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}

static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_info nic_info;
	int err;

	err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
	if (err)
		return err;

	ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;

	err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
	if (err)
		return -EIO;

	if (qlcnic_83xx_get_port_info(adapter))
		return -EIO;

	qlcnic_sriov_vf_cfg_buff_desc(adapter);
	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
	dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
		 adapter->ahw->fw_hal_version);

	ahw->physical_port = (u8) nic_info.phys_port;
	ahw->switch_mode = nic_info.switch_mode;
	ahw->max_mtu = nic_info.max_mtu;
	ahw->op_mode = nic_info.op_mode;
	ahw->capabilities = nic_info.capabilities;
	return 0;
}

static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
				 int pci_using_dac)
{
	int err;

	adapter->flags |= QLCNIC_VLAN_FILTERING;
	adapter->ahw->total_nic_func = 1;
	INIT_LIST_HEAD(&adapter->vf_mc_list);
	if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
		dev_warn(&adapter->pdev->dev,
			 "Device does not support MSI interrupts\n");

	/* compute and set default and max tx/sds rings */
	qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
	qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);

	err = qlcnic_setup_intr(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
		goto err_out_disable_msi;
	}

	err = qlcnic_83xx_setup_mbx_intr(adapter);
	if (err)
		goto err_out_disable_msi;

	err = qlcnic_sriov_init(adapter, 1);
	if (err)
		goto err_out_disable_mbx_intr;

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		goto err_out_cleanup_sriov;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_disable_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_sriov_get_vf_acl(adapter);
	if (err)
		goto err_out_send_channel_term;

	err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
	if (err)
		goto err_out_send_channel_term;

	pci_set_drvdata(adapter->pdev, adapter);
	dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
		 adapter->netdev->name);

	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
			     adapter->ahw->idc.delay);
	return 0;

err_out_send_channel_term:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_disable_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);

err_out_cleanup_sriov:
	__qlcnic_sriov_cleanup(adapter);

err_out_disable_mbx_intr:
	qlcnic_83xx_free_mbx_intr(adapter);

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);
	return err;
}

static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
{
	u32 state;

	do {
		msleep(20);
		if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
			return -EIO;
		state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
	} while (state != QLC_83XX_IDC_DEV_READY);

	return 0;
}

int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int err;

	set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
	ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
	ahw->reset_context = 0;
	adapter->fw_fail_cnt = 0;
	ahw->msix_supported = 1;
	adapter->need_fw_reset = 0;
	adapter->flags |= QLCNIC_TX_INTR_SHARED;

	err = qlcnic_sriov_check_dev_ready(adapter);
	if (err)
		return err;

	err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
	if (err)
		return err;

	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");

	INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}

void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;

	ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
	dev_info(&adapter->pdev->dev,
		 "HAL Version: %d Non Privileged SRIOV function\n",
		 ahw->fw_hal_version);
	adapter->nic_ops = &qlcnic_sriov_vf_ops;
	set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
}

void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
{
	ahw->hw_ops		= &qlcnic_sriov_vf_hw_ops;
	ahw->reg_tbl		= (u32 *)qlcnic_83xx_reg_tbl;
	ahw->ext_reg_tbl	= (u32 *)qlcnic_83xx_ext_reg_tbl;
}

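/* Return the size in bytes of the given payload fragment: a full
 * QLC_BC_PAYLOAD_SZ for every fragment except the last, which carries
 * the remainder.
 */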
static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
{
	u32 pay_size;

	pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);

	if (pay_size)
		pay_size = QLC_BC_PAYLOAD_SZ;
	else
		pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;

	return pay_size;
}

int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
{
	struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
	u8 i;

	if (qlcnic_sriov_vf_check(adapter))
		return 0;

	for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
		if (vf_info[i].pci_func == pci_func)
			return i;
	}

	return -EINVAL;
}

static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
{
	*trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
	if (!*trans)
		return -ENOMEM;

	init_completion(&(*trans)->resp_cmpl);
	return 0;
}

static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
					    u32 size)
{
	*hdr = kcalloc(size, sizeof(struct qlcnic_bc_hdr), GFP_ATOMIC);
	if (!*hdr)
		return -ENOMEM;

	return 0;
}

static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
{
	const struct qlcnic_mailbox_metadata *mbx_tbl;
	int i, size;

	mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
	size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);

	for (i = 0; i < size; i++) {
		if (type == mbx_tbl[i].cmd) {
			mbx->op_type = QLC_BC_CMD;
			mbx->req.num = mbx_tbl[i].in_args;
			mbx->rsp.num = mbx_tbl[i].out_args;
			mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
					       GFP_ATOMIC);
			if (!mbx->req.arg)
				return -ENOMEM;
			mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
					       GFP_ATOMIC);
			if (!mbx->rsp.arg) {
				kfree(mbx->req.arg);
				mbx->req.arg = NULL;
				return -ENOMEM;
			}
			mbx->req.arg[0] = (type | (mbx->req.num << 16) |
					   (3 << 29));
			mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
			return 0;
		}
	}
	return -EINVAL;
}

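/* Build the per-fragment back-channel headers for a transaction. For a
 * command, allocate request and response header arrays sized from the
 * payload lengths; for a response, reuse the payload buffers already
 * attached to the transaction. Each fragment carries the same sequence
 * id and command opcode.
 */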
static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
				       struct qlcnic_cmd_args *cmd,
				       u16 seq, u8 msg_type)
{
	struct qlcnic_bc_hdr *hdr;
	int i;
	u32 num_regs, bc_pay_sz;
	u16 remainder;
	u8 cmd_op, num_frags, t_num_frags;

	bc_pay_sz = QLC_BC_PAYLOAD_SZ;
	if (msg_type == QLC_BC_COMMAND) {
		trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
		trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
		num_regs = cmd->req.num;
		trans->req_pay_size = (num_regs * 4);
		num_regs = cmd->rsp.num;
		trans->rsp_pay_size = (num_regs * 4);
		cmd_op = cmd->req.arg[0] & 0xff;
		remainder = (trans->req_pay_size) % (bc_pay_sz);
		num_frags = (trans->req_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		t_num_frags = num_frags;
		if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
			return -ENOMEM;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
			return -ENOMEM;
		num_frags = t_num_frags;
		hdr = trans->req_hdr;
	} else {
		cmd->req.arg = (u32 *)trans->req_pay;
		cmd->rsp.arg = (u32 *)trans->rsp_pay;
		cmd_op = cmd->req.arg[0] & 0xff;
		cmd->cmd_op = cmd_op;
		remainder = (trans->rsp_pay_size) % (bc_pay_sz);
		num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
		if (remainder)
			num_frags++;
		cmd->req.num = trans->req_pay_size / 4;
		cmd->rsp.num = trans->rsp_pay_size / 4;
		hdr = trans->rsp_hdr;
		cmd->op_type = trans->req_hdr->op_type;
	}

	trans->trans_id = seq;
	trans->cmd_id = cmd_op;
	for (i = 0; i < num_frags; i++) {
		hdr[i].version = 2;
		hdr[i].msg_type = msg_type;
		hdr[i].op_type = cmd->op_type;
		hdr[i].num_cmds = 1;
		hdr[i].num_frags = num_frags;
		hdr[i].frag_num = i + 1;
		hdr[i].cmd_op = cmd_op;
		hdr[i].seq_id = seq;
	}
	return 0;
}

static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
{
	if (!trans)
		return;
	kfree(trans->req_hdr);
	kfree(trans->rsp_hdr);
	kfree(trans);
}

static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
				    struct qlcnic_bc_trans *trans, u8 type)
{
	struct qlcnic_trans_list *t_list;
	unsigned long flags;
	int ret = 0;

	if (type == QLC_BC_RESPONSE) {
		t_list = &vf->rcv_act;
		spin_lock_irqsave(&t_list->lock, flags);
		t_list->count--;
		list_del(&trans->list);
		if (t_list->count > 0)
			ret = 1;
		spin_unlock_irqrestore(&t_list->lock, flags);
	}
	if (type == QLC_BC_COMMAND) {
		while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
			msleep(100);
		vf->send_cmd = NULL;
		clear_bit(QLC_BC_VF_SEND, &vf->state);
	}
	return ret;
}

static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
					 struct qlcnic_vf_info *vf,
					 work_func_t func)
{
	if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
	    vf->adapter->need_fw_reset)
		return;

	queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
}

static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
{
	struct completion *cmpl = &trans->resp_cmpl;

	if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
		trans->trans_state = QLC_END;
	else
		trans->trans_state = QLC_ABORT;
}

static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
					    u8 type)
{
	if (type == QLC_BC_RESPONSE) {
		trans->curr_rsp_frag++;
		if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
			trans->trans_state = QLC_INIT;
		else
			trans->trans_state = QLC_END;
	} else {
		trans->curr_req_frag++;
		if (trans->curr_req_frag < trans->req_hdr->num_frags)
			trans->trans_state = QLC_INIT;
		else
			trans->trans_state = QLC_WAIT_FOR_RESP;
	}
}

static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
					       u8 type)
{
	struct qlcnic_vf_info *vf = trans->vf;
	struct completion *cmpl = &vf->ch_free_cmpl;

	if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
		trans->trans_state = QLC_ABORT;
		return;
	}

	clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
	qlcnic_sriov_handle_multi_frags(trans, type);
}

static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
				     u32 *hdr, u32 *pay, u32 size)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	u32 fw_mbx;
	u8 i, max, hdr_size, j;

	hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
	max = (size / sizeof(u32)) + hdr_size;

	fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
	for (i = 2, j = 0; j < hdr_size; i++, j++)
		*(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
	for (; j < max; i++, j++)
		*(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
}

static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
{
	int ret = -EBUSY;
	u32 timeout = 10000;

	do {
		if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
			ret = 0;
			break;
		}
		mdelay(1);
	} while (--timeout);

	return ret;
}

static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
{
	struct qlcnic_vf_info *vf = trans->vf;
	u32 pay_size, hdr_size;
	u32 *hdr, *pay;
	int ret;
	u8 pci_func = trans->func_id;

	if (__qlcnic_sriov_issue_bc_post(vf))
		return -EBUSY;

	if (type == QLC_BC_COMMAND) {
		hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
		pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
		hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
		pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
						       trans->curr_req_frag);
		pay_size = (pay_size / sizeof(u32));
	} else {
		hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
		pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
		hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
		pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
						       trans->curr_rsp_frag);
		pay_size = (pay_size / sizeof(u32));
	}

	ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
				       pci_func, pay_size);
	return ret;
}

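/* Drive a back-channel transaction through its state machine: post each
 * fragment, wait for the channel-free event in between, then wait for
 * the response. An FLR or a pending firmware reset aborts the
 * transaction.
 */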
static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
				      struct qlcnic_vf_info *vf, u8 type)
{
	bool flag = true;
	int err = -EIO;

	while (flag) {
		if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
		    vf->adapter->need_fw_reset)
			trans->trans_state = QLC_ABORT;

		switch (trans->trans_state) {
		case QLC_INIT:
			trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
			if (qlcnic_sriov_issue_bc_post(trans, type))
				trans->trans_state = QLC_ABORT;
			break;
		case QLC_WAIT_FOR_CHANNEL_FREE:
			qlcnic_sriov_wait_for_channel_free(trans, type);
			break;
		case QLC_WAIT_FOR_RESP:
			qlcnic_sriov_wait_for_resp(trans);
			break;
		case QLC_END:
			err = 0;
			flag = false;
			break;
		case QLC_ABORT:
			err = -EIO;
			flag = false;
			clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
			break;
		default:
			err = -EIO;
			flag = false;
		}
	}
	return err;
}

static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
				    struct qlcnic_bc_trans *trans, int pci_func)
{
	struct qlcnic_vf_info *vf;
	int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);

	if (index < 0)
		return -EIO;

	vf = &adapter->ahw->sriov->vf_info[index];
	trans->vf = vf;
	trans->func_id = pci_func;

	if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
		if (qlcnic_sriov_pf_check(adapter))
			return -EIO;
		if (qlcnic_sriov_vf_check(adapter) &&
		    trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
			return -EIO;
	}

	mutex_lock(&vf->send_cmd_lock);
	vf->send_cmd = trans;
	err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
	qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
	mutex_unlock(&vf->send_cmd_lock);
	return err;
}

static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
					  struct qlcnic_bc_trans *trans,
					  struct qlcnic_cmd_args *cmd)
{
#ifdef CONFIG_QLCNIC_SRIOV
	if (qlcnic_sriov_pf_check(adapter)) {
		qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
		return;
	}
#endif
	cmd->rsp.arg[0] |= (0x9 << 25);
}

static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
{
	struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
						 trans_work);
	struct qlcnic_bc_trans *trans = NULL;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u8 req;

	if (adapter->need_fw_reset)
		return;

	if (test_bit(QLC_BC_VF_FLR, &vf->state))
		return;

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	trans = list_first_entry(&vf->rcv_act.wait_list,
				 struct qlcnic_bc_trans, list);

	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
					QLC_BC_RESPONSE))
		goto cleanup_trans;

	__qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
	trans->trans_state = QLC_INIT;
	__qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);

cleanup_trans:
	qlcnic_free_mbx_args(&cmd);
	req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
	qlcnic_sriov_cleanup_transaction(trans);
	if (req)
		qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
					     qlcnic_sriov_process_bc_cmd);
}

static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
					struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	u32 pay_size;

	if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
		return;

	trans = vf->send_cmd;

	if (trans == NULL)
		goto clear_send;

	if (trans->trans_id != hdr->seq_id)
		goto clear_send;

	pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
					       trans->curr_rsp_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
				 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
				 pay_size);
	if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
		goto clear_send;

	complete(&trans->resp_cmpl);

clear_send:
	clear_bit(QLC_BC_VF_SEND, &vf->state);
}

int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
				struct qlcnic_vf_info *vf,
				struct qlcnic_bc_trans *trans)
{
	struct qlcnic_trans_list *t_list = &vf->rcv_act;

	t_list->count++;
	list_add_tail(&trans->list, &t_list->wait_list);
	if (t_list->count == 1)
		qlcnic_sriov_schedule_bc_cmd(sriov, vf,
					     qlcnic_sriov_process_bc_cmd);
	return 0;
}

static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
				     struct qlcnic_vf_info *vf,
				     struct qlcnic_bc_trans *trans)
{
	struct qlcnic_trans_list *t_list = &vf->rcv_act;

	spin_lock(&t_list->lock);

	__qlcnic_sriov_add_act_list(sriov, vf, trans);

	spin_unlock(&t_list->lock);
	return 0;
}

static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
					      struct qlcnic_vf_info *vf,
					      struct qlcnic_bc_hdr *hdr)
{
	struct qlcnic_bc_trans *trans = NULL;
	struct list_head *node;
	u32 pay_size, curr_frag;
	u8 found = 0, active = 0;

	spin_lock(&vf->rcv_pend.lock);
	if (vf->rcv_pend.count > 0) {
		list_for_each(node, &vf->rcv_pend.wait_list) {
			trans = list_entry(node, struct qlcnic_bc_trans, list);
			if (trans->trans_id == hdr->seq_id) {
				found = 1;
				break;
			}
		}
	}

	if (found) {
		curr_frag = trans->curr_req_frag;
		pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
						       curr_frag);
		qlcnic_sriov_pull_bc_msg(vf->adapter,
					 (u32 *)(trans->req_hdr + curr_frag),
					 (u32 *)(trans->req_pay + curr_frag),
					 pay_size);
		trans->curr_req_frag++;
		if (trans->curr_req_frag >= hdr->num_frags) {
			vf->rcv_pend.count--;
			list_del(&trans->list);
			active = 1;
		}
	}
	spin_unlock(&vf->rcv_pend.lock);

	if (active)
		if (qlcnic_sriov_add_act_list(sriov, vf, trans))
			qlcnic_sriov_cleanup_transaction(trans);
}

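/* Handle the first fragment of an incoming back-channel command: set up
 * a transaction, pull the fragment out of the mailbox registers and
 * either queue the transaction for processing or park it on the pending
 * list until the remaining fragments arrive.
 */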
static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
				       struct qlcnic_bc_hdr *hdr,
				       struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_trans *trans;
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_cmd_args cmd;
	u32 pay_size;
	int err;
	u8 cmd_op;

	if (adapter->need_fw_reset)
		return;

	if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
	    hdr->op_type != QLC_BC_CMD &&
	    hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
		return;

	if (hdr->frag_num > 1) {
		qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
		return;
	}

	memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
	cmd_op = hdr->cmd_op;
	if (qlcnic_sriov_alloc_bc_trans(&trans))
		return;

	if (hdr->op_type == QLC_BC_CMD)
		err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
	else
		err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);

	if (err) {
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	cmd.op_type = hdr->op_type;
	if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
					QLC_BC_COMMAND)) {
		qlcnic_free_mbx_args(&cmd);
		qlcnic_sriov_cleanup_transaction(trans);
		return;
	}

	pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
					       trans->curr_req_frag);
	qlcnic_sriov_pull_bc_msg(vf->adapter,
				 (u32 *)(trans->req_hdr + trans->curr_req_frag),
				 (u32 *)(trans->req_pay + trans->curr_req_frag),
				 pay_size);
	trans->func_id = vf->pci_func;
	trans->vf = vf;
	trans->trans_id = hdr->seq_id;
	trans->curr_req_frag++;

	if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
		return;

	if (trans->curr_req_frag == trans->req_hdr->num_frags) {
		if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
			qlcnic_free_mbx_args(&cmd);
			qlcnic_sriov_cleanup_transaction(trans);
		}
	} else {
		spin_lock(&vf->rcv_pend.lock);
		list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
		vf->rcv_pend.count++;
		spin_unlock(&vf->rcv_pend.lock);
	}
}

static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
					  struct qlcnic_vf_info *vf)
{
	struct qlcnic_bc_hdr hdr;
	u32 *ptr = (u32 *)&hdr;
	u8 msg_type, i;

	for (i = 2; i < 6; i++)
		ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
	msg_type = hdr.msg_type;

	switch (msg_type) {
	case QLC_BC_COMMAND:
		qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
		break;
	case QLC_BC_RESPONSE:
		qlcnic_sriov_handle_bc_resp(&hdr, vf);
		break;
	}
}

static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
					  struct qlcnic_vf_info *vf)
{
	struct qlcnic_adapter *adapter = vf->adapter;

	if (qlcnic_sriov_pf_check(adapter))
		qlcnic_sriov_pf_handle_flr(sriov, vf);
	else
		dev_err(&adapter->pdev->dev,
			"Invalid event to VF. VF should not get FLR event\n");
}

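/* Dispatch a back-channel event from firmware to the target VF:
 * channel-free completions, FLR notifications and command/response
 * messages.
 */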
void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
{
	struct qlcnic_vf_info *vf;
	struct qlcnic_sriov *sriov;
	int index;
	u8 pci_func;

	sriov = adapter->ahw->sriov;
	pci_func = qlcnic_sriov_target_func_id(event);
	index = qlcnic_sriov_func_to_index(adapter, pci_func);

	if (index < 0)
		return;

	vf = &sriov->vf_info[index];
	vf->pci_func = pci_func;

	if (qlcnic_sriov_channel_free_check(event))
		complete(&vf->ch_free_cmpl);

	if (qlcnic_sriov_flr_check(event)) {
		qlcnic_sriov_handle_flr_event(sriov, vf);
		return;
	}

	if (qlcnic_sriov_bc_msg_check(event))
		qlcnic_sriov_handle_msg_event(sriov, vf);
}

int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
{
	struct qlcnic_cmd_args cmd;
	int err;

	if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
		return 0;

	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
		return -ENOMEM;

	if (enable)
		cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);

	err = qlcnic_83xx_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to %s bc events, err=%d\n",
			(enable ? "enable" : "disable"), err);
	}

	qlcnic_free_mbx_args(&cmd);
	return err;
}

static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
				     struct qlcnic_bc_trans *trans)
{
	u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
	u32 state;

	state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
	if (state == QLC_83XX_IDC_DEV_READY) {
		msleep(20);
		clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
		trans->trans_state = QLC_INIT;
		if (++adapter->fw_fail_cnt > max)
			return -EIO;
		else
			return 0;
	}

	return -EIO;
}

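/* Send a mailbox command from the VF to the PF over the back channel.
 * A timed-out CHANNEL_INIT request is retried while the device remains
 * in the READY state; any other timeout flags the context for reset.
 */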
static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
				    struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_bc_trans *trans;
	int err;
	u32 rsp_data, opcode, mbx_err_code, rsp;
	u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
	u8 func = ahw->pci_func;

	rsp = qlcnic_sriov_alloc_bc_trans(&trans);
	if (rsp)
		goto free_cmd;

	rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
	if (rsp)
		goto cleanup_transaction;

retry:
	if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
		rsp = -EIO;
		QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
		      QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
		goto err_out;
	}

	err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
	if (err) {
		dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
			(cmd->req.arg[0] & 0xffff), func);
		rsp = QLCNIC_RCODE_TIMEOUT;

		/* After an adapter reset the PF driver may take some time to
		 * respond to a VF's request, so retry the request up to the
		 * maximum retry count.
		 */
		if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
		    !qlcnic_sriov_retry_bc_cmd(adapter, trans))
			goto retry;

		goto err_out;
	}

	rsp_data = cmd->rsp.arg[0];
	mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
	opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);

	if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
	    (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
		rsp = QLCNIC_RCODE_SUCCESS;
	} else {
		if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
			rsp = QLCNIC_RCODE_SUCCESS;
		} else {
			rsp = mbx_err_code;
			if (!rsp)
				rsp = 1;

			dev_err(dev,
				"MBX command 0x%x failed with err:0x%x for VF %d\n",
				opcode, mbx_err_code, func);
		}
	}

err_out:
	if (rsp == QLCNIC_RCODE_TIMEOUT) {
		ahw->reset_context = 1;
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
	}

cleanup_transaction:
	qlcnic_sriov_cleanup_transaction(trans);

free_cmd:
	if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
		qlcnic_free_mbx_args(cmd);
		kfree(cmd);
	}

	return rsp;
}

static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
				  struct qlcnic_cmd_args *cmd)
{
	if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
		return qlcnic_sriov_async_issue_cmd(adapter, cmd);
	else
		return __qlcnic_sriov_issue_cmd(adapter, cmd);
}

static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter,
					u8 cmd_op)
{
	struct qlcnic_cmd_args cmd;
	struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
		return -ENOMEM;

	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev,
			"Failed bc channel %s %d\n", cmd_op ? "term" : "init",
			ret);
		goto out;
	}

	cmd_op = (cmd.rsp.arg[0] & 0xff);
	if (cmd.rsp.arg[0] >> 25 == 2) {
		ret = 2;
		goto out;
	}
	if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
		set_bit(QLC_BC_VF_STATE, &vf->state);
	else
		clear_bit(QLC_BC_VF_STATE, &vf->state);

out:
	qlcnic_free_mbx_args(&cmd);
	return ret;
}

static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
				  enum qlcnic_mac_type mac_type)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf;
	u16 vlan_id;
	int i;

	vf = &adapter->ahw->sriov->vf_info[0];

	if (!qlcnic_sriov_check_any_vlan(vf)) {
		qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
	} else {
		spin_lock(&vf->vlan_list_lock);
		for (i = 0; i < sriov->num_allowed_vlans; i++) {
			vlan_id = vf->sriov_vlans[i];
			if (vlan_id)
				qlcnic_nic_add_mac(adapter, mac, vlan_id,
						   mac_type);
		}
		spin_unlock(&vf->vlan_list_lock);
		if (qlcnic_84xx_check(adapter))
			qlcnic_nic_add_mac(adapter, mac, 0, mac_type);
	}
}

void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
{
	struct list_head *head = &bc->async_list;
	struct qlcnic_async_work_list *entry;

	flush_workqueue(bc->bc_async_wq);
	while (!list_empty(head)) {
		entry = list_entry(head->next, struct qlcnic_async_work_list,
				   list);
		cancel_work_sync(&entry->work);
		list_del(&entry->list);
		kfree(entry);
	}
}

void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	static const u8 bcast_addr[ETH_ALEN] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};
	struct netdev_hw_addr *ha;
	u32 mode = VPORT_MISS_MODE_DROP;

	if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
		return;

	if (netdev->flags & IFF_PROMISC) {
		if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
			mode = VPORT_MISS_MODE_ACCEPT_ALL;
	} else if ((netdev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(netdev) > ahw->max_mc_count)) {
		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
	} else {
		qlcnic_vf_add_mc_list(netdev, bcast_addr, QLCNIC_BROADCAST_MAC);
		if (!netdev_mc_empty(netdev)) {
			qlcnic_flush_mcast_mac(adapter);
			netdev_for_each_mc_addr(ha, netdev)
				qlcnic_vf_add_mc_list(netdev, ha->addr,
						      QLCNIC_MULTICAST_MAC);
		}
	}

	/* Configure the unicast MAC addresses; if there is not enough space
	 * to store all of them, enable promiscuous mode instead.
	 */
	if (netdev_uc_count(netdev) > ahw->max_uc_count) {
		mode = VPORT_MISS_MODE_ACCEPT_ALL;
	} else if (!netdev_uc_empty(netdev)) {
		netdev_for_each_uc_addr(ha, netdev)
			qlcnic_vf_add_mc_list(netdev, ha->addr,
					      QLCNIC_UNICAST_MAC);
	}

	if (adapter->pdev->is_virtfn) {
		if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
		    !adapter->fdb_mac_learn) {
			qlcnic_alloc_lb_filters_mem(adapter);
			adapter->drv_mac_learn = 1;
			adapter->rx_mac_learn = true;
		} else {
			adapter->drv_mac_learn = 0;
			adapter->rx_mac_learn = false;
		}
	}

	qlcnic_nic_set_promisc(adapter, mode);
}

static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
{
	struct qlcnic_async_work_list *entry;
	struct qlcnic_adapter *adapter;
	struct qlcnic_cmd_args *cmd;

	entry = container_of(work, struct qlcnic_async_work_list, work);
	adapter = entry->ptr;
	cmd = entry->cmd;
	__qlcnic_sriov_issue_cmd(adapter, cmd);
}

static struct qlcnic_async_work_list *
qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
{
	struct list_head *node;
	struct qlcnic_async_work_list *entry = NULL;
	u8 empty = 0;

	list_for_each(node, &bc->async_list) {
		entry = list_entry(node, struct qlcnic_async_work_list, list);
		if (!work_pending(&entry->work)) {
			empty = 1;
			break;
		}
	}

	if (!empty) {
		entry = kzalloc(sizeof(struct qlcnic_async_work_list),
				GFP_ATOMIC);
		if (entry == NULL)
			return NULL;
		list_add_tail(&entry->list, &bc->async_list);
	}

	return entry;
}

static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
					    work_func_t func, void *data,
					    struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_async_work_list *entry = NULL;

	entry = qlcnic_sriov_get_free_node_async_work(bc);
	if (!entry)
		return;

	entry->ptr = data;
	entry->cmd = cmd;
	INIT_WORK(&entry->work, func);
	queue_work(bc->bc_async_wq, &entry->work);
}

static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
					struct qlcnic_cmd_args *cmd)
{
	struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;

	if (adapter->need_fw_reset)
		return -EIO;

	qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
					adapter, cmd);
	return 0;
}

static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
{
	int err;

	adapter->need_fw_reset = 0;
	qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
	qlcnic_83xx_enable_mbx_interrupt(adapter);

	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		return err;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (err)
		goto err_out_cleanup_bc_intr;

	err = qlcnic_sriov_vf_init_driver(adapter);
	if (err)
		goto err_out_term_channel;

	return 0;

err_out_term_channel:
	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);

err_out_cleanup_bc_intr:
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	return err;
}

static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (netif_running(netdev)) {
		if (!qlcnic_up(adapter, netdev))
			qlcnic_restore_indev_addr(netdev, NETDEV_UP);
	}

	netif_device_attach(netdev);
}

static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
	struct net_device *netdev = adapter->netdev;
	u8 i, max_ints = ahw->num_msix - 1;

	netif_device_detach(netdev);
	qlcnic_83xx_detach_mailbox_work(adapter);
	qlcnic_83xx_disable_mbx_intr(adapter);

	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	for (i = 0; i < max_ints; i++) {
		intr_tbl[i].id = i;
		intr_tbl[i].enabled = 0;
		intr_tbl[i].src = 0;
	}
	ahw->reset_context = 0;
}

static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct device *dev = &adapter->pdev->dev;
	struct qlc_83xx_idc *idc = &ahw->idc;
	u8 func = ahw->pci_func;
	u32 state;

	if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
	    (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
		if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
			qlcnic_sriov_vf_attach(adapter);
			adapter->fw_fail_cnt = 0;
			dev_info(dev,
				 "%s: Reinitialization of VF 0x%x done after FW reset\n",
				 __func__, func);
		} else {
			dev_err(dev,
				"%s: Reinitialization of VF 0x%x failed after FW reset\n",
				__func__, func);
			state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
			dev_info(dev, "Current state 0x%x after FW reset\n",
				 state);
		}
	}

	return 0;
}

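/* Handle a context reset request from the IDC ready state. The first two
 * requests only flag the firmware for reset in case it is about to fail;
 * beyond QLC_83XX_VF_RESET_FAIL_THRESH resets the VF is detached for
 * good, otherwise the VF is detached and reinitialized.
 */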
static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_mailbox *mbx = ahw->mailbox;
	struct device *dev = &adapter->pdev->dev;
	struct qlc_83xx_idc *idc = &ahw->idc;
	u8 func = ahw->pci_func;
	u32 state;

	adapter->reset_ctx_cnt++;

	/* Skip the context reset and check if FW is hung */
	if (adapter->reset_ctx_cnt < 3) {
		adapter->need_fw_reset = 1;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
		dev_info(dev,
			 "Resetting context, wait here to check if FW is in failed state\n");
		return 0;
	}

	/* Check if the number of context resets exceeds the threshold.
	 * If it does, just fail the VF.
	 */
	if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
		clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
		adapter->tx_timeo_cnt = 0;
		adapter->fw_fail_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		qlcnic_sriov_vf_detach(adapter);
		dev_err(dev,
			"Device context resets have exceeded the threshold, device interface will be shutdown\n");
		return -EIO;
	}

	dev_info(dev, "Resetting context of VF 0x%x\n", func);
	dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
		 __func__, adapter->reset_ctx_cnt, func);
	set_bit(__QLCNIC_RESETTING, &adapter->state);
	adapter->need_fw_reset = 1;
	clear_bit(QLC_83XX_MBX_READY, &mbx->status);
	qlcnic_sriov_vf_detach(adapter);
	adapter->need_fw_reset = 0;

	if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
		qlcnic_sriov_vf_attach(adapter);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		adapter->fw_fail_cnt = 0;
		dev_info(dev, "Done resetting context for VF 0x%x\n", func);
	} else {
		dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
			__func__, func);
		state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
		dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
	}

	return 0;
}

static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	int ret = 0;

	if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
		ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
	else if (ahw->reset_context)
		ret = qlcnic_sriov_vf_handle_context_reset(adapter);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return ret;
}

static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
{
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;

	dev_err(&adapter->pdev->dev, "Device is in failed state\n");
	if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
		qlcnic_sriov_vf_detach(adapter);

	clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return -EIO;
}

static int
qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
{
	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;

	dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
	if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
		set_bit(__QLCNIC_RESETTING, &adapter->state);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
		qlcnic_sriov_vf_detach(adapter);
	}

	return 0;
}

static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
{
	struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
	u8 func = adapter->ahw->pci_func;

	if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
		dev_err(&adapter->pdev->dev,
			"Firmware hang detected by VF 0x%x\n", func);
		set_bit(__QLCNIC_RESETTING, &adapter->state);
		adapter->tx_timeo_cnt = 0;
		adapter->reset_ctx_cnt = 0;
		clear_bit(QLC_83XX_MBX_READY, &mbx->status);
		qlcnic_sriov_vf_detach(adapter);
	}
	return 0;
}

static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
{
	dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
	return 0;
}

static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
{
	if (adapter->fhash.fnum)
		qlcnic_prune_lb_filters(adapter);
}

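/* Periodic IDC poll: read the current device state from firmware, run
 * the handler for that state and reschedule the poll while the module
 * stays loaded.
 */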
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
{
	struct qlcnic_adapter *adapter;
	struct qlc_83xx_idc *idc;
	int ret = 0;

	adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
	idc = &adapter->ahw->idc;
	idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);

	switch (idc->curr_state) {
	case QLC_83XX_IDC_DEV_READY:
		ret = qlcnic_sriov_vf_idc_ready_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_NEED_RESET:
	case QLC_83XX_IDC_DEV_INIT:
		ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_NEED_QUISCENT:
		ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_FAILED:
		ret = qlcnic_sriov_vf_idc_failed_state(adapter);
		break;
	case QLC_83XX_IDC_DEV_QUISCENT:
		break;
	default:
		ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
	}

	idc->prev_state = idc->curr_state;
	qlcnic_sriov_vf_periodic_tasks(adapter);

	if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
		qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
				     idc->delay);
}

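/* Stop the IDC poll loop: take __QLCNIC_RESETTING to fence off a
 * concurrently running state handler, clear QLC_83XX_MODULE_LOADED so
 * the work does not reschedule itself, then cancel any pending
 * instance.
 */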
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
{
	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		msleep(20);

	clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	cancel_delayed_work_sync(&adapter->fw_work);
}

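/* Return 0 if @vlan_id is present in the VF's VLAN list, -EINVAL
 * otherwise; takes vf->vlan_list_lock internally.
 */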
static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
				      struct qlcnic_vf_info *vf, u16 vlan_id)
{
	int i, err = -EINVAL;

	if (!vf->sriov_vlans)
		return err;

	spin_lock_bh(&vf->vlan_list_lock);

	for (i = 0; i < sriov->num_allowed_vlans; i++) {
		if (vf->sriov_vlans[i] == vlan_id) {
			err = 0;
			break;
		}
	}

	spin_unlock_bh(&vf->vlan_list_lock);
	return err;
}

static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
					   struct qlcnic_vf_info *vf)
{
	int err = 0;

	spin_lock_bh(&vf->vlan_list_lock);

	if (vf->num_vlan >= sriov->num_allowed_vlans)
		err = -EINVAL;

	spin_unlock_bh(&vf->vlan_list_lock);
	return err;
}

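/* Validate a guest VLAN add/delete request: the adapter must be in
 * guest VLAN mode; an add must stay within the allowed VLAN count and,
 * when the PF enforces an allowed-VLAN list, match one of its entries
 * (an 83xx VF may also hold only one guest VLAN at a time); a delete
 * must reference a VLAN that is actually configured.
 */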
static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
					  u16 vid, u8 enable)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf;
	bool vlan_exist;
	u8 allowed = 0;
	int i;

	vf = &sriov->vf_info[0];
	vlan_exist = qlcnic_sriov_check_any_vlan(vf);
	if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
		return -EINVAL;

	if (enable) {
		if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
			return -EINVAL;

		if (qlcnic_sriov_validate_num_vlans(sriov, vf))
			return -EINVAL;

		if (sriov->any_vlan) {
			for (i = 0; i < sriov->num_allowed_vlans; i++) {
				if (sriov->allowed_vlans[i] == vid)
					allowed = 1;
			}

			if (!allowed)
				return -EINVAL;
		}
	} else {
		if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
			return -EINVAL;
	}

	return 0;
}

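/* Add or delete @vlan_id in the VF's VLAN list under
 * vf->vlan_list_lock; a no-op if the list was never allocated.
 */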
static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
					enum qlcnic_vlan_operations opcode)
{
	struct qlcnic_adapter *adapter = vf->adapter;
	struct qlcnic_sriov *sriov;

	sriov = adapter->ahw->sriov;

	if (!vf->sriov_vlans)
		return;

	spin_lock_bh(&vf->vlan_list_lock);

	switch (opcode) {
	case QLC_VLAN_ADD:
		qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
		break;
	case QLC_VLAN_DELETE:
		qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
		break;
	default:
		netdev_err(adapter->netdev, "Invalid VLAN operation\n");
	}

	spin_unlock_bh(&vf->vlan_list_lock);
}

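/* Configure a guest VLAN on the VF through the backchannel. Request
 * word 1 carries the enable flag in bit 0 and the VLAN ID in bits
 * 31:16, e.g. enable=1, vid=100 encodes as (100 << 16) | 1. On
 * success the MAC list is flushed and the filters are rebuilt so they
 * pick up the VLAN change; VLAN 0 is accepted silently with no
 * mailbox traffic.
 */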
int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
				   u16 vid, u8 enable)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_vf_info *vf;
	struct qlcnic_cmd_args cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	if (vid == 0)
		return 0;

	vf = &sriov->vf_info[0];
	ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
	if (ret)
		return ret;

	ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
					     QLCNIC_BC_CMD_CFG_GUEST_VLAN);
	if (ret)
		return ret;

	cmd.req.arg[1] = (enable & 1) | vid << 16;

	qlcnic_sriov_cleanup_async_list(&sriov->bc);
	ret = qlcnic_issue_cmd(adapter, &cmd);
	if (ret) {
		dev_err(&adapter->pdev->dev,
			"Failed to configure guest VLAN, err=%d\n", ret);
	} else {
		netif_addr_lock_bh(netdev);
		qlcnic_free_mac_list(adapter);
		netif_addr_unlock_bh(netdev);

		if (enable)
			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
		else
			qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);

		netif_addr_lock_bh(netdev);
		qlcnic_set_multi(netdev);
		netif_addr_unlock_bh(netdev);
	}

	qlcnic_free_mbx_args(&cmd);
	return ret;
}

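/* Walk adapter->mac_list, ask firmware to delete each MAC/VLAN filter
 * and free the list entries.
 */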
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
{
	struct list_head *head = &adapter->mac_list;
	struct qlcnic_mac_vlan_list *cur;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
		qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
					  QLCNIC_MAC_DEL);
		list_del(&cur->list);
		kfree(cur);
	}
}

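/* PCI shutdown path for a VF: detach the netdev, stop IDC work, bring
 * the interface down, terminate the backchannel, disable mailbox
 * interrupts and save PCI state.
 */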
static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	qlcnic_cancel_idc_work(adapter);

	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
	qlcnic_sriov_cfg_bc_intr(adapter, 0);
	qlcnic_83xx_disable_mbx_intr(adapter);
	cancel_delayed_work_sync(&adapter->idc_aen_work);

	return pci_save_state(pdev);
}

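/* Resume path for a VF: re-enable mailbox and backchannel interrupts,
 * re-establish the channel, bring the interface back up and restart
 * IDC state polling.
 */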
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
{
	struct qlc_83xx_idc *idc = &adapter->ahw->idc;
	struct net_device *netdev = adapter->netdev;
	int err;

	set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
	qlcnic_83xx_enable_mbx_interrupt(adapter);
	err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
	if (err)
		return err;

	err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
	if (!err) {
		if (netif_running(netdev)) {
			err = qlcnic_up(adapter, netdev);
			if (!err)
				qlcnic_restore_indev_addr(netdev, NETDEV_UP);
		}
	}

	netif_device_attach(netdev);
	qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
			     idc->delay);
	return err;
}

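/* Allocate each VF's guest VLAN array, sized by the number of VLANs
 * the PF allows.
 */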
void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf;
	int i;

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
					  sizeof(*vf->sriov_vlans), GFP_KERNEL);
	}
}

void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
{
	struct qlcnic_sriov *sriov = adapter->ahw->sriov;
	struct qlcnic_vf_info *vf;
	int i;

	for (i = 0; i < sriov->num_vfs; i++) {
		vf = &sriov->vf_info[i];
		kfree(vf->sriov_vlans);
		vf->sriov_vlans = NULL;
	}
}

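/* Record @vlan_id in the first free slot of the VF's VLAN array;
 * callers must hold vf->vlan_list_lock (see
 * qlcnic_sriov_vlan_operation()). qlcnic_sriov_del_vlan_id() below is
 * the inverse under the same locking rule.
 */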
void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
			      struct qlcnic_vf_info *vf, u16 vlan_id)
{
	int i;

	for (i = 0; i < sriov->num_allowed_vlans; i++) {
		if (!vf->sriov_vlans[i]) {
			vf->sriov_vlans[i] = vlan_id;
			vf->num_vlan++;
			return;
		}
	}
}

void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
			      struct qlcnic_vf_info *vf, u16 vlan_id)
{
	int i;

	for (i = 0; i < sriov->num_allowed_vlans; i++) {
		if (vf->sriov_vlans[i] == vlan_id) {
			vf->sriov_vlans[i] = 0;
			vf->num_vlan--;
			return;
		}
	}
}

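/* Return true if the VF currently has at least one guest VLAN
 * configured.
 */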
bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
{
	bool vlan_exist = false;

	spin_lock_bh(&vf->vlan_list_lock);

	if (vf->num_vlan)
		vlan_exist = true;

	spin_unlock_bh(&vf->vlan_list_lock);
	return vlan_exist;
}