/*
 * QLogic qlcnic NIC Driver
 * Copyright (c) 2009-2013 QLogic Corporation
 *
 * See LICENSE.qlcnic for copyright and licensing details.
 */

#include "qlcnic.h"

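/*
 * Mailbox command descriptor table: each entry maps a CDRP command code
 * to the number of 32-bit request and response arguments it carries,
 * in the form {cmd, in_args, out_args}.
 */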
static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
	{QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
	{QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
	{QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
	{QLCNIC_CMD_DESTROY_TX_CTX, 3, 1},
	{QLCNIC_CMD_INTRPT_TEST, 4, 1},
	{QLCNIC_CMD_SET_MTU, 4, 1},
	{QLCNIC_CMD_READ_PHY, 4, 2},
	{QLCNIC_CMD_WRITE_PHY, 5, 1},
	{QLCNIC_CMD_READ_HW_REG, 4, 1},
	{QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
	{QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
	{QLCNIC_CMD_READ_MAX_MTU, 4, 2},
	{QLCNIC_CMD_READ_MAX_LRO, 4, 2},
	{QLCNIC_CMD_MAC_ADDRESS, 4, 3},
	{QLCNIC_CMD_GET_PCI_INFO, 4, 1},
	{QLCNIC_CMD_GET_NIC_INFO, 4, 1},
	{QLCNIC_CMD_SET_NIC_INFO, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
	{QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
	{QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
	{QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
	{QLCNIC_CMD_GET_MAC_STATS, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
	{QLCNIC_CMD_GET_ESWITCH_STATS, 4, 1},
	{QLCNIC_CMD_CONFIG_PORT, 4, 1},
	{QLCNIC_CMD_TEMP_SIZE, 4, 4},
	{QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
	{QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
	{QLCNIC_CMD_GET_LED_STATUS, 4, 2},
	{QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
	{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
	{QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1},
};

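/*
 * Build the signature written to QLCNIC_SIGN_CRB_OFFSET before a mailbox
 * command is issued: the PCI function in bits 0-7, the firmware HAL
 * version in bits 8-15 and a 0xcafe marker in the upper 16 bits.
 */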
static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
{
	return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
	       (0xcafe << 16);
}

/* Allocate mailbox command and response argument buffers */
int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
			       struct qlcnic_adapter *adapter, u32 type)
{
	int i, size;
	const struct qlcnic_mailbox_metadata *mbx_tbl;

	mbx_tbl = qlcnic_mbx_tbl;
	size = ARRAY_SIZE(qlcnic_mbx_tbl);
	for (i = 0; i < size; i++) {
		if (type == mbx_tbl[i].cmd) {
			mbx->req.num = mbx_tbl[i].in_args;
			mbx->rsp.num = mbx_tbl[i].out_args;
			mbx->req.arg = kcalloc(mbx->req.num,
					       sizeof(u32), GFP_ATOMIC);
			if (!mbx->req.arg)
				return -ENOMEM;
			mbx->rsp.arg = kcalloc(mbx->rsp.num,
					       sizeof(u32), GFP_ATOMIC);
			if (!mbx->rsp.arg) {
				kfree(mbx->req.arg);
				mbx->req.arg = NULL;
				return -ENOMEM;
			}
			/* kcalloc() already zeroes both argument arrays */
			mbx->req.arg[0] = type;
			break;
		}
	}
	return 0;
}

/* Free up mailbox command and response argument buffers */
void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
{
	kfree(cmd->req.arg);
	cmd->req.arg = NULL;
	kfree(cmd->rsp.arg);
	cmd->rsp.arg = NULL;
}

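/*
 * Poll the CDRP register until firmware posts a response code, giving up
 * with QLCNIC_CDRP_RSP_TIMEOUT once the retry count is exhausted.
 */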
static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
	u32 rsp;
	int timeout = 0, err = 0;

	do {
		/* give firmware at least 1ms to respond */
		mdelay(1);

		if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
			return QLCNIC_CDRP_RSP_TIMEOUT;

		rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err);
	} while (!QLCNIC_CDRP_IS_RSP(rsp));

	return rsp;
}

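/*
 * Issue a mailbox command over the CDRP interface: grab the CRB
 * semaphore, write the signature and request arguments, form the command,
 * poll for completion and read back the response arguments before
 * releasing the semaphore.
 */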
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
			  struct qlcnic_cmd_args *cmd)
{
	int i, err = 0;
	u32 rsp;
	u32 signature;
	struct pci_dev *pdev = adapter->pdev;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	const char *fmt;

	signature = qlcnic_get_cmd_signature(ahw);

	/* Acquire semaphore before accessing CRB */
	if (qlcnic_api_lock(adapter)) {
		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
		return cmd->rsp.arg[0];
	}

	QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
	for (i = 1; i < cmd->req.num; i++)
		QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
	QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
		QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
	rsp = qlcnic_poll_rsp(adapter);

	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
		dev_err(&pdev->dev, "command timeout, response = 0x%x\n", rsp);
		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
		cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
		switch (cmd->rsp.arg[0]) {
		case QLCNIC_RCODE_INVALID_ARGS:
			fmt = "CDRP invalid args: [%d]\n";
			break;
		case QLCNIC_RCODE_NOT_SUPPORTED:
		case QLCNIC_RCODE_NOT_IMPL:
			fmt = "CDRP command not supported: [%d]\n";
			break;
		case QLCNIC_RCODE_NOT_PERMITTED:
			fmt = "CDRP requested action not permitted: [%d]\n";
			break;
		case QLCNIC_RCODE_INVALID:
			fmt = "CDRP invalid or unknown cmd received: [%d]\n";
			break;
		case QLCNIC_RCODE_TIMEOUT:
			fmt = "CDRP command timeout: [%d]\n";
			break;
		default:
			fmt = "CDRP command failed: [%d]\n";
			break;
		}
		dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
		qlcnic_dump_mbx(adapter, cmd);
	} else if (rsp == QLCNIC_CDRP_RSP_OK) {
		cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
	}

	for (i = 1; i < cmd->rsp.num; i++)
		cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err);

	/* Release semaphore */
	qlcnic_api_unlock(adapter);
	return cmd->rsp.arg[0];
}

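/*
 * Report the driver version to firmware; the "major.minor.sub" string is
 * packed into three 32-bit mailbox arguments.
 */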
int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
{
	struct qlcnic_cmd_args cmd;
	u32 arg1, arg2, arg3;
	char drv_string[12];
	int err = 0;

	memset(drv_string, 0, sizeof(drv_string));
	snprintf(drv_string, sizeof(drv_string), "%d.%d.%d",
		 _QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR,
		 _QLCNIC_LINUX_SUBVERSION);

	err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd);
	if (err)
		return err;

	memcpy(&arg1, drv_string, sizeof(u32));
	memcpy(&arg2, drv_string + 4, sizeof(u32));
	memcpy(&arg3, drv_string + 8, sizeof(u32));

	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = arg2;
	cmd.req.arg[3] = arg3;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_info(&adapter->pdev->dev,
			 "Failed to set driver version in firmware\n");
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}

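/*
 * Program a new MTU for the receive context; a no-op unless the context
 * is in the ACTIVE state.
 */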
int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
	int err = 0;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
		return err;
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
	if (err)
		return err;

	cmd.req.arg[1] = recv_ctx->context_id;
	cmd.req.arg[2] = mtu;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}

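/*
 * Create the receive context in firmware: build a host request block in
 * DMA memory describing every RDS and SDS ring, issue CREATE_RX_CTX and
 * use the card response to map each ring's producer/consumer and
 * interrupt-mask CRB registers.
 */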
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	struct net_device *netdev = adapter->netdev;
	u32 temp_intr_crb_mode, temp_rds_crb_mode;
	struct qlcnic_cardrsp_rds_ring *prsp_rds;
	struct qlcnic_cardrsp_sds_ring *prsp_sds;
	struct qlcnic_hostrq_rds_ring *prq_rds;
	struct qlcnic_hostrq_sds_ring *prq_sds;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_cardrsp_rx_ctx *prsp;
	struct qlcnic_hostrq_rx_ctx *prq;
	u8 i, nrds_rings, nsds_rings;
	struct qlcnic_cmd_args cmd;
	size_t rq_size, rsp_size;
	u32 cap, reg, val, reg2;
	u64 phys_addr;
	u16 temp_u16;
	void *addr;
	int err;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->drv_sds_rings;

	rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
				   nsds_rings);
	rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
				     nsds_rings);

	addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
				  &hostrq_phys_addr, GFP_KERNEL);
	if (addr == NULL)
		return -ENOMEM;
	prq = addr;

	addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
				  &cardrsp_phys_addr, GFP_KERNEL);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
	       QLCNIC_CAP0_VALIDOFF);
	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);

	if (qlcnic_check_multi_tx(adapter) &&
	    !adapter->ahw->diag_test) {
		cap |= QLCNIC_CAP0_TX_MULTI;
	} else {
		temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
		prq->valid_field_offset = cpu_to_le16(temp_u16);
		prq->txrx_sds_binding = nsds_rings - 1;
		temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
		prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
		temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
		prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
	}

	prq->capabilities[0] = cpu_to_le32(cap);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = 0;

	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {
		rds_ring = &recv_ctx->rds_rings[i];
		rds_ring->producer = 0;
		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {
		sds_ring = &recv_ctx->sds_rings[i];
		sds_ring->consumer = 0;
		memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		if (qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test)
			prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
		else
			prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
	if (err)
		goto out_free_rsp;

	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to create rx ctx in firmware %d\n", err);
		qlcnic_free_mbx_args(&cmd);
		goto out_free_rsp;
	}

	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
			 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];
		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
	}

	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];
		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
			reg2 = ahw->intr_tbl[i].src;
		else
			reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);

		sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
		sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

	netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
		    recv_ctx->context_id, recv_ctx->state);
	qlcnic_free_mbx_args(&cmd);

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
			  cardrsp_phys_addr);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);

	return err;
}

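/* Destroy the firmware receive context and mark it freed on the host */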
void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
{
	int err;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
	if (err)
		return;

	cmd.req.arg[1] = recv_ctx->context_id;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err)
		dev_err(&adapter->pdev->dev,
			"Failed to destroy rx ctx in firmware\n");

	recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
	qlcnic_free_mbx_args(&cmd);
}

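/*
 * Create one transmit context in firmware: reset the host ring state,
 * describe the CDS ring and consumer-index buffer in a DMA request block,
 * issue CREATE_TX_CTX and map the returned producer (and, for multi-TX
 * MSI-X, interrupt-mask) CRB registers.
 */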
int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
				     struct qlcnic_host_tx_ring *tx_ring,
				     int ring)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_hostrq_tx_ctx	*prq;
	struct qlcnic_hostrq_cds_ring	*prq_cds;
	struct qlcnic_cardrsp_tx_ctx	*prsp;
	struct qlcnic_cmd_args cmd;
	u32 temp, intr_mask, temp_int_crb_mode;
	dma_addr_t rq_phys_addr, rsp_phys_addr;
	int temp_nsds_rings, index, err;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u64 phys_addr;
	u16 msix_id;

	/* reset host resources */
	tx_ring->producer = 0;
	tx_ring->sw_consumer = 0;
	*(tx_ring->hw_consumer) = 0;

	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
	rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
				      &rq_phys_addr, GFP_KERNEL);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
	rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
				       &rsp_phys_addr, GFP_KERNEL);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	prq = rq_addr;
	prsp = rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
		QLCNIC_CAP0_LSO);
	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
		temp |= QLCNIC_CAP0_TX_MULTI;

	prq->capabilities[0] = cpu_to_le32(temp);

	if (qlcnic_check_multi_tx(adapter) &&
	    !adapter->ahw->diag_test) {
		temp_nsds_rings = adapter->drv_sds_rings;
		index = temp_nsds_rings + ring;
		msix_id = ahw->intr_tbl[index].id;
		prq->msi_index = cpu_to_le16(msix_id);
	} else {
		temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
		prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
		prq->msi_index = 0;
	}

	prq->interrupt_ctl = 0;
	prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
	if (err)
		goto out_free_rsp;

	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err == QLCNIC_RCODE_SUCCESS) {
		tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
		tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
		if (qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test &&
		    (adapter->flags & QLCNIC_MSIX_ENABLED)) {
			index = adapter->drv_sds_rings + ring;
			intr_mask = ahw->intr_tbl[index].src;
			tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
		}

		netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
			    tx_ring->ctx_id, tx_ring->state);
	} else {
		netdev_err(netdev, "Failed to create tx ctx in firmware %d\n",
			   err);
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
			  rsp_phys_addr);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);

	return err;
}

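/* Destroy a firmware transmit context identified by its context id */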
void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
				   struct qlcnic_host_tx_ring *tx_ring)
{
	struct qlcnic_cmd_args cmd;
	int ret;

	ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
	if (ret)
		return;

	cmd.req.arg[1] = tx_ring->ctx_id;
	if (qlcnic_issue_cmd(adapter, &cmd))
		dev_err(&adapter->pdev->dev,
			"Failed to destroy tx ctx in firmware\n");
	qlcnic_free_mbx_args(&cmd);
}

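/* Push a port configuration word to firmware via CONFIG_PORT */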
int
qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
	int err;
	struct qlcnic_cmd_args cmd;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
	if (err)
		return err;

	cmd.req.arg[1] = config;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);
	return err;
}

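/*
 * Allocate the DMA-coherent descriptor rings and the TX hardware-consumer
 * words for all TX, RDS and SDS rings; everything is unwound on failure.
 */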
int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
{
	void *addr;
	int err, ring;
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	__le32 *ptr;

	struct pci_dev *pdev = adapter->pdev;

	recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
		tx_ring = &adapter->tx_ring[ring];
		ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
						   &tx_ring->hw_cons_phys_addr,
						   GFP_KERNEL);
		if (ptr == NULL)
			return -ENOMEM;

		tx_ring->hw_consumer = ptr;
		/* cmd desc ring */
		addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
					  &tx_ring->phys_addr,
					  GFP_KERNEL);
		if (addr == NULL) {
			err = -ENOMEM;
			goto err_out_free;
		}

		tx_ring->desc_head = addr;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = dma_alloc_coherent(&adapter->pdev->dev,
					  RCV_DESC_RINGSIZE(rds_ring),
					  &rds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = addr;
	}

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = dma_alloc_coherent(&adapter->pdev->dev,
					  STATUS_DESC_RINGSIZE(sds_ring),
					  &sds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = addr;
	}

	return 0;

err_out_free:
	qlcnic_free_hw_resources(adapter);
	return err;
}

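/*
 * Bring up all firmware contexts: perform a pending FLR if required,
 * configure interrupts for 83xx or multi-TX capable 82xx adapters, then
 * create the RX context and one TX context per TX ring, unwinding on
 * failure.
 */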
int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
{
	int i, err, ring;

	if (dev->flags & QLCNIC_NEED_FLR) {
		pci_reset_function(dev->pdev);
		dev->flags &= ~QLCNIC_NEED_FLR;
	}

	if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
		if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
			err = qlcnic_83xx_config_intrpt(dev, 1);
			if (err)
				return err;
		}
	}

	if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
	    qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
		err = qlcnic_82xx_mq_intrpt(dev, 1);
		if (err)
			return err;
	}

	err = qlcnic_fw_cmd_create_rx_ctx(dev);
	if (err)
		goto err_out;

	for (ring = 0; ring < dev->drv_tx_rings; ring++) {
		err = qlcnic_fw_cmd_create_tx_ctx(dev,
						  &dev->tx_ring[ring],
						  ring);
		if (err) {
			qlcnic_fw_cmd_del_rx_ctx(dev);
			if (ring == 0)
				goto err_out;

			for (i = 0; i < ring; i++)
				qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]);

			goto err_out;
		}
	}

	set_bit(__QLCNIC_FW_ATTACHED, &dev->state);

	return 0;

err_out:
	if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
	    qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
		qlcnic_82xx_config_intrpt(dev, 0);

	if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
		if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
			qlcnic_83xx_config_intrpt(dev, 0);
	}

	return err;
}

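/*
 * Tear down every context created by qlcnic_fw_create_ctx() and
 * deconfigure the interrupts that were set up for them.
 */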
void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
{
	int ring;

	if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
		qlcnic_fw_cmd_del_rx_ctx(adapter);
		for (ring = 0; ring < adapter->drv_tx_rings; ring++)
			qlcnic_fw_cmd_del_tx_ctx(adapter,
						 &adapter->tx_ring[ring]);

		if (qlcnic_82xx_check(adapter) &&
		    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
		    qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test)
			qlcnic_82xx_config_intrpt(adapter, 0);

		if (qlcnic_83xx_check(adapter) &&
		    (adapter->flags & QLCNIC_MSIX_ENABLED)) {
			if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
				qlcnic_83xx_config_intrpt(adapter, 0);
		}
		/* Allow dma queues to drain after context reset */
		mdelay(20);
	}
}

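/* Free the descriptor rings and consumer words allocated for all rings */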
void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	int ring;

	recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
		tx_ring = &adapter->tx_ring[ring];
		if (tx_ring->hw_consumer != NULL) {
			dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
					  tx_ring->hw_consumer,
					  tx_ring->hw_cons_phys_addr);

			tx_ring->hw_consumer = NULL;
		}

		if (tx_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  TX_DESC_RINGSIZE(tx_ring),
					  tx_ring->desc_head,
					  tx_ring->phys_addr);
			tx_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  RCV_DESC_RINGSIZE(rds_ring),
					  rds_ring->desc_head,
					  rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
					  STATUS_DESC_RINGSIZE(sds_ring),
					  sds_ring->desc_head,
					  sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}

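/*
 * Add (op_type != 0) or delete the MSI-X interrupts used for multi-TX,
 * one MQ_TX_CONFIG_INTR mailbox command per vector, updating the
 * interrupt table with the id and source returned by firmware.
 */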
int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_cmd_args cmd;
	u32 type, val;
	int i, err = 0;

	for (i = 0; i < ahw->num_msix; i++) {
		err = qlcnic_alloc_mbx_args(&cmd, adapter,
					    QLCNIC_CMD_MQ_TX_CONFIG_INTR);
		if (err)
			return err;
		type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
		val = type | (ahw->intr_tbl[i].type << 4);
		if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
			val |= (ahw->intr_tbl[i].id << 16);
		cmd.req.arg[1] = val;
		err = qlcnic_issue_cmd(adapter, &cmd);
		if (err) {
			netdev_err(netdev, "Failed to %s interrupts %d\n",
				   op_type == QLCNIC_INTRPT_ADD ? "Add" :
				   "Delete", err);
			qlcnic_free_mbx_args(&cmd);
			return err;
		}
		val = cmd.rsp.arg[1];
		if (LSB(val)) {
			netdev_info(netdev,
				    "failed to configure interrupt for %d\n",
				    ahw->intr_tbl[i].id);
			qlcnic_free_mbx_args(&cmd);
			continue;
		}
		if (op_type) {
			ahw->intr_tbl[i].id = MSW(val);
			ahw->intr_tbl[i].enabled = 1;
			ahw->intr_tbl[i].src = cmd.rsp.arg[2];
		} else {
			ahw->intr_tbl[i].id = i;
			ahw->intr_tbl[i].enabled = 0;
			ahw->intr_tbl[i].src = 0;
		}
		qlcnic_free_mbx_args(&cmd);
	}

	return err;
}

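/*
 * Read the MAC address of a PCI function from firmware; the response
 * returns the upper two bytes in one argument and the lower four in
 * another.
 */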
int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
				u8 function)
{
	int err, i;
	struct qlcnic_cmd_args cmd;
	u32 mac_low, mac_high;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
	if (err)
		return err;

	cmd.req.arg[1] = function | BIT_8;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err == QLCNIC_RCODE_SUCCESS) {
		mac_low = cmd.rsp.arg[1];
		mac_high = cmd.rsp.arg[2];

		for (i = 0; i < 2; i++)
			mac[i] = (u8) (mac_high >> ((1 - i) * 8));
		for (i = 2; i < 6; i++)
			mac[i] = (u8) (mac_low >> ((5 - i) * 8));
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get MAC address %d\n", err);
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}

/* Get info of a NIC partition */
int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_info *npar_info, u8 func_id)
{
	int err;
	dma_addr_t nic_dma_t;
	const struct qlcnic_info_le *nic_info;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
					    &nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;

	nic_info = nic_info_addr;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	cmd.req.arg[3] = (func_id << 16 | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to get nic info %d\n", err);
		err = -EIO;
	} else {
		npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
		npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
	}

	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
			  nic_dma_t);

	return err;
}


/* Configure a NIC partition */
int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_info *nic)
{
	int err = -EIO;
	dma_addr_t nic_dma_t;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_info_le *nic_info;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return err;

	nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
					    &nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;

	nic_info = nic_info_addr;

	nic_info->pci_func = cpu_to_le16(nic->pci_func);
	nic_info->op_mode = cpu_to_le16(nic->op_mode);
	nic_info->phys_port = cpu_to_le16(nic->phys_port);
	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
	nic_info->capabilities = cpu_to_le32(nic->capabilities);
	nic_info->max_mac_filters = nic->max_mac_filters;
	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to set nic info %d\n", err);
		err = -EIO;
	}

	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
			  nic_dma_t);

	return err;
}


/* Get PCI Info of a partition */
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
			     struct qlcnic_pci_info *pci_info)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	size_t npar_size = sizeof(struct qlcnic_pci_info_le);
	size_t pci_size = npar_size * ahw->max_vnic_func;
	u16 nic = 0, fcoe = 0, iscsi = 0;
	struct qlcnic_pci_info_le *npar;
	struct qlcnic_cmd_args cmd;
	dma_addr_t pci_info_dma_t;
	void *pci_info_addr;
	int err = 0, i;

	pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
					    &pci_info_dma_t, GFP_KERNEL);
	if (!pci_info_addr)
		return -ENOMEM;

	npar = pci_info_addr;
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = MSD(pci_info_dma_t);
	cmd.req.arg[2] = LSD(pci_info_dma_t);
	cmd.req.arg[3] = pci_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	ahw->total_nic_func = 0;
	if (err == QLCNIC_RCODE_SUCCESS) {
		for (i = 0; i < ahw->max_vnic_func; i++, npar++, pci_info++) {
			pci_info->id = le16_to_cpu(npar->id);
			pci_info->active = le16_to_cpu(npar->active);
			if (!pci_info->active)
				continue;
			pci_info->type = le16_to_cpu(npar->type);
			err = qlcnic_get_pci_func_type(adapter, pci_info->type,
						       &nic, &fcoe, &iscsi);
			pci_info->default_port =
				le16_to_cpu(npar->default_port);
			pci_info->tx_min_bw =
				le16_to_cpu(npar->tx_min_bw);
			pci_info->tx_max_bw =
				le16_to_cpu(npar->tx_max_bw);
			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
		}
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get PCI Info %d\n", err);
		err = -EIO;
	}

	ahw->total_nic_func = nic;
	ahw->total_pci_func = nic + fcoe + iscsi;
	if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
		dev_err(&adapter->pdev->dev,
			"%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
			__func__, ahw->total_nic_func, ahw->total_pci_func);
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
			  pci_info_dma_t);

	return err;
}

/* Configure eSwitch for port mirroring */
int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
				 u8 enable_mirroring, u8 pci_func)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	int err = -EIO;
	u32 arg1;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
	    !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
		dev_err(dev, "%s: Not a management function or eSwitch is not enabled\n",
			__func__);
		return err;
	}

	arg1 = id | (enable_mirroring ? BIT_4 : 0);
	arg1 |= pci_func << 8;

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
				    QLCNIC_CMD_SET_PORTMIRRORING);
	if (err)
		return err;

	cmd.req.arg[1] = arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS)
		dev_err(dev, "Failed to configure port mirroring for vNIC function %d on eSwitch %d\n",
			pci_func, id);
	else
		dev_info(dev, "Configured port mirroring for vNIC function %d on eSwitch %d\n",
			 pci_func, id);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

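/*
 * Fetch eSwitch port statistics for one function into a DMA buffer and
 * copy them to the caller; non-management functions may only query their
 * own statistics.
 */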
int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
			  const u8 rx_tx,
			  struct __qlcnic_esw_statistics *esw_stats)
{
	size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
	struct qlcnic_esw_stats_le *stats;
	dma_addr_t stats_dma_t;
	void *stats_addr;
	u32 arg1;
	struct qlcnic_cmd_args cmd;
	int err;

	if (esw_stats == NULL)
		return -ENOMEM;

	if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
	    (func != adapter->ahw->pci_func)) {
		dev_err(&adapter->pdev->dev,
			"No privilege to query stats for func=%d\n", func);
		return -EIO;
	}

	stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
					 &stats_dma_t, GFP_KERNEL);
	if (!stats_addr)
		return -ENOMEM;

	arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
	arg1 |= rx_tx << 15 | stats_size << 16;

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
				    QLCNIC_CMD_GET_ESWITCH_STATS);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (!err) {
		stats = stats_addr;
		esw_stats->context_id = le16_to_cpu(stats->context_id);
		esw_stats->version = le16_to_cpu(stats->version);
		esw_stats->size = le16_to_cpu(stats->size);
		esw_stats->multicast_frames =
				le64_to_cpu(stats->multicast_frames);
		esw_stats->broadcast_frames =
				le64_to_cpu(stats->broadcast_frames);
		esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
		esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
		esw_stats->local_frames = le64_to_cpu(stats->local_frames);
		esw_stats->errors = le64_to_cpu(stats->errors);
		esw_stats->numbytes = le64_to_cpu(stats->numbytes);
	}

	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
			  stats_dma_t);

	return err;
}


/* This routine will retrieve the MAC statistics from firmware */
int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
		struct qlcnic_mac_statistics *mac_stats)
{
	struct qlcnic_mac_statistics_le *stats;
	struct qlcnic_cmd_args cmd;
	size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
	dma_addr_t stats_dma_t;
	void *stats_addr;
	int err;

	if (mac_stats == NULL)
		return -ENOMEM;

	stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
					 &stats_dma_t, GFP_KERNEL);
	if (!stats_addr)
		return -ENOMEM;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = stats_size << 16;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (!err) {
		stats = stats_addr;
		mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
		mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
		mac_stats->mac_tx_mcast_pkts =
					le64_to_cpu(stats->mac_tx_mcast_pkts);
		mac_stats->mac_tx_bcast_pkts =
					le64_to_cpu(stats->mac_tx_bcast_pkts);
		mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
		mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
		mac_stats->mac_rx_mcast_pkts =
					le64_to_cpu(stats->mac_rx_mcast_pkts);
		mac_stats->mac_rx_length_error =
				le64_to_cpu(stats->mac_rx_length_error);
		mac_stats->mac_rx_length_small =
				le64_to_cpu(stats->mac_rx_length_small);
		mac_stats->mac_rx_length_large =
				le64_to_cpu(stats->mac_rx_length_large);
		mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
		mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
		mac_stats->mac_rx_crc_error =
					le64_to_cpu(stats->mac_rx_crc_error);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s: Get mac stats failed, err=%d.\n", __func__, err);
	}

	qlcnic_free_mbx_args(&cmd);

out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
			  stats_dma_t);

	return err;
}

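/*
 * Aggregate the port statistics of every NIC partition attached to the
 * given eSwitch; counters start out as "not available" and are summed
 * with QLCNIC_ADD_ESW_STATS().
 */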
int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
			     const u8 rx_tx,
			     struct __qlcnic_esw_statistics *esw_stats)
{
	struct __qlcnic_esw_statistics port_stats;
	u8 i;
	int ret = -EIO;

	if (esw_stats == NULL)
		return -ENOMEM;
	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;
	if (adapter->npars == NULL)
		return -EIO;

	memset(esw_stats, 0, sizeof(*esw_stats));
	esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->context_id = eswitch;

	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
		if (adapter->npars[i].phy_port != eswitch)
			continue;

		memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
		if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func,
					  rx_tx, &port_stats))
			continue;

		esw_stats->size = port_stats.size;
		esw_stats->version = port_stats.version;
		QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
				     port_stats.unicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
				     port_stats.multicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
				     port_stats.broadcast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
				     port_stats.dropped_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->errors,
				     port_stats.errors);
		QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
				     port_stats.local_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
				     port_stats.numbytes);
		ret = 0;
	}
	return ret;
}

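/*
 * Clear the statistics of a port or an eSwitch (selected by func_esw)
 * for the given RX/TX counter set, after validating the arguments.
 */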
int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
		const u8 port, const u8 rx_tx)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_cmd_args cmd;
	int err;
	u32 arg1;

	if (ahw->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;

	if (func_esw == QLCNIC_STATS_PORT) {
		if (port >= ahw->max_vnic_func)
			goto err_ret;
	} else if (func_esw == QLCNIC_STATS_ESWITCH) {
		if (port >= QLCNIC_NIU_MAX_XG_PORTS)
			goto err_ret;
	} else {
		goto err_ret;
	}

	if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
		goto err_ret;

	arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
	arg1 |= BIT_14 | rx_tx << 15;

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
				    QLCNIC_CMD_GET_ESWITCH_STATS);
	if (err)
		return err;

	cmd.req.arg[1] = arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);
	return err;

err_ret:
	dev_err(&adapter->pdev->dev,
		"Invalid args func_esw %d port %d rx_tx %d\n",
		func_esw, port, rx_tx);
	return -EIO;
}

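/*
 * Helper that fetches the raw eSwitch port configuration words for the
 * PCI function encoded in bits 8-15 of *arg1; the results are returned
 * through arg1 and arg2.
 */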
static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
					    u32 *arg1, u32 *arg2)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	u8 pci_func = *arg1 >> 8;
	int err;

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
				    QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
	if (err)
		return err;

	cmd.req.arg[1] = *arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	*arg1 = cmd.rsp.arg[1];
	*arg2 = cmd.rsp.arg[2];
	qlcnic_free_mbx_args(&cmd);

	if (err == QLCNIC_RCODE_SUCCESS)
		dev_info(dev, "Get eSwitch port config for vNIC function %d\n",
			 pci_func);
	else
		dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n",
			pci_func);
	return err;
}

/*
 * Configure eSwitch port
 * op_mode = 0 for setting default port behavior
 * op_mode = 1 for setting vlan id
 * op_mode = 2 for deleting vlan id
 * op_type = 0 for vlan_id
 * op_type = 1 for port vlan_id
 */
int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	int err = -EIO, index;
	u32 arg1, arg2 = 0;
	u8 pci_func;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
		dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
			__func__);
		return err;
	}

	pci_func = esw_cfg->pci_func;
	index = qlcnic_is_valid_nic_func(adapter, pci_func);
	if (index < 0)
		return err;
	arg1 = (adapter->npars[index].phy_port & BIT_0);
	arg1 |= (pci_func << 8);

	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return err;
	arg1 &= ~(0x0ff << 8);
	arg1 |= (pci_func << 8);
	arg1 &= ~(BIT_2 | BIT_3);
	switch (esw_cfg->op_mode) {
	case QLCNIC_PORT_DEFAULTS:
		arg1 |= (BIT_4 | BIT_6 | BIT_7);
		arg2 |= (BIT_0 | BIT_1);
		if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
			arg2 |= (BIT_2 | BIT_3);
		if (!(esw_cfg->discard_tagged))
			arg1 &= ~BIT_4;
		if (!(esw_cfg->promisc_mode))
			arg1 &= ~BIT_6;
		if (!(esw_cfg->mac_override))
			arg1 &= ~BIT_7;
		if (!(esw_cfg->mac_anti_spoof))
			arg2 &= ~BIT_0;
		if (!(esw_cfg->offload_flags & BIT_0))
			arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
		if (!(esw_cfg->offload_flags & BIT_1))
			arg2 &= ~BIT_2;
		if (!(esw_cfg->offload_flags & BIT_2))
			arg2 &= ~BIT_3;
		break;
	case QLCNIC_ADD_VLAN:
		arg1 &= ~(0x0ffff << 16);
		arg1 |= (BIT_2 | BIT_5);
		arg1 |= (esw_cfg->vlan_id << 16);
		break;
	case QLCNIC_DEL_VLAN:
		arg1 |= (BIT_3 | BIT_5);
		arg1 &= ~(0x0ffff << 16);
		break;
	default:
		dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
			__func__, esw_cfg->op_mode);
		return err;
	}

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
				    QLCNIC_CMD_CONFIGURE_ESWITCH);
	if (err)
		return err;

	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = arg2;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);

	if (err != QLCNIC_RCODE_SUCCESS)
		dev_err(dev, "Failed to configure eswitch for vNIC function %d\n",
			pci_func);
	else
		dev_info(dev, "Configured eSwitch for vNIC function %d\n",
			 pci_func);

	return err;
}

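/*
 * Read back the eSwitch port configuration of a function and decode the
 * returned bits into the qlcnic_esw_func_cfg fields.
 */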
int
qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
			       struct qlcnic_esw_func_cfg *esw_cfg)
{
	u32 arg1, arg2;
	int index;
	u8 phy_port;

	if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
		index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func);
		if (index < 0)
			return -EIO;
		phy_port = adapter->npars[index].phy_port;
	} else {
		phy_port = adapter->ahw->physical_port;
	}
	arg1 = phy_port;
	arg1 |= (esw_cfg->pci_func << 8);
	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return -EIO;

	esw_cfg->discard_tagged = !!(arg1 & BIT_4);
	esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
	esw_cfg->promisc_mode = !!(arg1 & BIT_6);
	esw_cfg->mac_override = !!(arg1 & BIT_7);
	esw_cfg->vlan_id = LSW(arg1 >> 16);
	esw_cfg->mac_anti_spoof = (arg2 & 0x1);
	esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);

	return 0;
}