This source file includes the following definitions:
- qlcnic_get_cmd_signature
- qlcnic_82xx_alloc_mbx_args
- qlcnic_free_mbx_args
- qlcnic_poll_rsp
- qlcnic_82xx_issue_cmd
- qlcnic_fw_cmd_set_drv_version
- qlcnic_fw_cmd_set_mtu
- qlcnic_82xx_fw_cmd_create_rx_ctx
- qlcnic_82xx_fw_cmd_del_rx_ctx
- qlcnic_82xx_fw_cmd_create_tx_ctx
- qlcnic_82xx_fw_cmd_del_tx_ctx
- qlcnic_fw_cmd_set_port
- qlcnic_alloc_hw_resources
- qlcnic_fw_create_ctx
- qlcnic_fw_destroy_ctx
- qlcnic_free_hw_resources
- qlcnic_82xx_config_intrpt
- qlcnic_82xx_get_mac_address
- qlcnic_82xx_get_nic_info
- qlcnic_82xx_set_nic_info
- qlcnic_82xx_get_pci_info
- qlcnic_config_port_mirroring
- qlcnic_get_port_stats
- qlcnic_get_mac_stats
- qlcnic_get_eswitch_stats
- qlcnic_clear_esw_stats
- __qlcnic_get_eswitch_port_config
- qlcnic_config_switch_port
- qlcnic_get_eswitch_port_config
#include "qlcnic.h"

static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
	{QLCNIC_CMD_CREATE_RX_CTX, 4, 1},
	{QLCNIC_CMD_DESTROY_RX_CTX, 2, 1},
	{QLCNIC_CMD_CREATE_TX_CTX, 4, 1},
	{QLCNIC_CMD_DESTROY_TX_CTX, 3, 1},
	{QLCNIC_CMD_INTRPT_TEST, 4, 1},
	{QLCNIC_CMD_SET_MTU, 4, 1},
	{QLCNIC_CMD_READ_PHY, 4, 2},
	{QLCNIC_CMD_WRITE_PHY, 5, 1},
	{QLCNIC_CMD_READ_HW_REG, 4, 1},
	{QLCNIC_CMD_GET_FLOW_CTL, 4, 2},
	{QLCNIC_CMD_SET_FLOW_CTL, 4, 1},
	{QLCNIC_CMD_READ_MAX_MTU, 4, 2},
	{QLCNIC_CMD_READ_MAX_LRO, 4, 2},
	{QLCNIC_CMD_MAC_ADDRESS, 4, 3},
	{QLCNIC_CMD_GET_PCI_INFO, 4, 1},
	{QLCNIC_CMD_GET_NIC_INFO, 4, 1},
	{QLCNIC_CMD_SET_NIC_INFO, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_CAPABILITY, 4, 3},
	{QLCNIC_CMD_TOGGLE_ESWITCH, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_STATUS, 4, 3},
	{QLCNIC_CMD_SET_PORTMIRRORING, 4, 1},
	{QLCNIC_CMD_CONFIGURE_ESWITCH, 4, 1},
	{QLCNIC_CMD_GET_MAC_STATS, 4, 1},
	{QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG, 4, 3},
	{QLCNIC_CMD_GET_ESWITCH_STATS, 4, 1},
	{QLCNIC_CMD_CONFIG_PORT, 4, 1},
	{QLCNIC_CMD_TEMP_SIZE, 4, 4},
	{QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
	{QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
	{QLCNIC_CMD_GET_LED_STATUS, 4, 2},
	{QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
	{QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
	{QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1},
};

static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
{
	return (ahw->pci_func & 0xff) | ((ahw->fw_hal_version & 0xff) << 8) |
		(0xcafe << 16);
}
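/*
 * Layout of the signature word built above, shown with made-up example
 * values: pci_func = 0x02 and fw_hal_version = 0x01 give 0xcafe0102.
 * Bits 0-7 carry the PCI function, bits 8-15 the HAL version, and
 * bits 16-31 the fixed 0xcafe tag.
 */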

int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
			struct qlcnic_adapter *adapter, u32 type)
{
	int i, size;
	const struct qlcnic_mailbox_metadata *mbx_tbl;

	mbx_tbl = qlcnic_mbx_tbl;
	size = ARRAY_SIZE(qlcnic_mbx_tbl);
	for (i = 0; i < size; i++) {
		if (type == mbx_tbl[i].cmd) {
			mbx->req.num = mbx_tbl[i].in_args;
			mbx->rsp.num = mbx_tbl[i].out_args;
			mbx->req.arg = kcalloc(mbx->req.num,
					sizeof(u32), GFP_ATOMIC);
			if (!mbx->req.arg)
				return -ENOMEM;
			mbx->rsp.arg = kcalloc(mbx->rsp.num,
					sizeof(u32), GFP_ATOMIC);
			if (!mbx->rsp.arg) {
				kfree(mbx->req.arg);
				mbx->req.arg = NULL;
				return -ENOMEM;
			}
			mbx->req.arg[0] = type;
			break;
		}
	}
	return 0;
}

void qlcnic_free_mbx_args(struct qlcnic_cmd_args *cmd)
{
	kfree(cmd->req.arg);
	cmd->req.arg = NULL;
	kfree(cmd->rsp.arg);
	cmd->rsp.arg = NULL;
}
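/*
 * Typical mailbox call sequence, sketched here for reference; this mirrors
 * qlcnic_fw_cmd_set_mtu() below and is illustrative only, not part of the
 * driver:
 *
 *	struct qlcnic_cmd_args cmd;
 *	int err;
 *
 *	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
 *	if (err)
 *		return err;
 *	cmd.req.arg[1] = recv_ctx->context_id;
 *	cmd.req.arg[2] = mtu;
 *	err = qlcnic_issue_cmd(adapter, &cmd);
 *	qlcnic_free_mbx_args(&cmd);
 *
 * arg[0] is filled with the command code at allocation time; callers only
 * populate arg[1..n] and read rsp.arg[] after the command completes.
 */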

/* Poll the CDRP CRB until the firmware posts a response or we time out. */
static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
	u32 rsp;
	int timeout = 0, err = 0;

	do {
		/* give the firmware at least 1 ms to respond */
		mdelay(1);

		if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
			return QLCNIC_CDRP_RSP_TIMEOUT;

		rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err);
	} while (!QLCNIC_CDRP_IS_RSP(rsp));

	return rsp;
}

int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
			struct qlcnic_cmd_args *cmd)
{
	int i, err = 0;
	u32 rsp;
	u32 signature;
	struct pci_dev *pdev = adapter->pdev;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	const char *fmt;

	signature = qlcnic_get_cmd_signature(ahw);

	/* Acquire the API lock before touching the CDRP registers. */
	if (qlcnic_api_lock(adapter)) {
		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
		return cmd->rsp.arg[0];
	}

	QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
	for (i = 1; i < cmd->req.num; i++)
		QLCWR32(adapter, QLCNIC_CDRP_ARG(i), cmd->req.arg[i]);
	QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET,
		QLCNIC_CDRP_FORM_CMD(cmd->req.arg[0]));
	rsp = qlcnic_poll_rsp(adapter);

	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
		dev_err(&pdev->dev, "command timeout, response = 0x%x\n", rsp);
		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
		cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
		switch (cmd->rsp.arg[0]) {
		case QLCNIC_RCODE_INVALID_ARGS:
			fmt = "CDRP invalid args: [%d]\n";
			break;
		case QLCNIC_RCODE_NOT_SUPPORTED:
		case QLCNIC_RCODE_NOT_IMPL:
			fmt = "CDRP command not supported: [%d]\n";
			break;
		case QLCNIC_RCODE_NOT_PERMITTED:
			fmt = "CDRP requested action not permitted: [%d]\n";
			break;
		case QLCNIC_RCODE_INVALID:
			fmt = "CDRP invalid or unknown cmd received: [%d]\n";
			break;
		case QLCNIC_RCODE_TIMEOUT:
			fmt = "CDRP command timeout: [%d]\n";
			break;
		default:
			fmt = "CDRP command failed: [%d]\n";
			break;
		}
		dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
		qlcnic_dump_mbx(adapter, cmd);
	} else if (rsp == QLCNIC_CDRP_RSP_OK)
		cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;

	for (i = 1; i < cmd->rsp.num; i++)
		cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err);

	/* Release the API lock. */
	qlcnic_api_unlock(adapter);
	return cmd->rsp.arg[0];
}

int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
{
	struct qlcnic_cmd_args cmd;
	u32 arg1, arg2, arg3;
	char drv_string[12];
	int err = 0;

	memset(drv_string, 0, sizeof(drv_string));
	snprintf(drv_string, sizeof(drv_string), "%d.%d.%d",
		_QLCNIC_LINUX_MAJOR, _QLCNIC_LINUX_MINOR,
		_QLCNIC_LINUX_SUBVERSION);

	err = qlcnic_alloc_mbx_args(&cmd, adapter, fw_cmd);
	if (err)
		return err;

	memcpy(&arg1, drv_string, sizeof(u32));
	memcpy(&arg2, drv_string + 4, sizeof(u32));
	memcpy(&arg3, drv_string + 8, sizeof(u32));

	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = arg2;
	cmd.req.arg[3] = arg3;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_info(&adapter->pdev->dev,
			"Failed to set driver version in firmware\n");
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}
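/*
 * Example of the packing above (the version string is chosen purely for
 * illustration): for "5.3.66" the 12-byte buffer holds "5.3.66\0\0\0\0\0\0",
 * so arg1 carries bytes 0-3 ("5.3."), arg2 bytes 4-7 ("66\0\0") and arg3
 * bytes 8-11 (all zero), each copied verbatim into one mailbox argument
 * word.
 */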

int
qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
{
	int err = 0;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	if (recv_ctx->state != QLCNIC_HOST_CTX_STATE_ACTIVE)
		return err;
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_MTU);
	if (err)
		return err;

	cmd.req.arg[1] = recv_ctx->context_id;
	cmd.req.arg[2] = mtu;

	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}

int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	struct net_device *netdev = adapter->netdev;
	u32 temp_intr_crb_mode, temp_rds_crb_mode;
	struct qlcnic_cardrsp_rds_ring *prsp_rds;
	struct qlcnic_cardrsp_sds_ring *prsp_sds;
	struct qlcnic_hostrq_rds_ring *prq_rds;
	struct qlcnic_hostrq_sds_ring *prq_sds;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_cardrsp_rx_ctx *prsp;
	struct qlcnic_hostrq_rx_ctx *prq;
	u8 i, nrds_rings, nsds_rings;
	struct qlcnic_cmd_args cmd;
	size_t rq_size, rsp_size;
	u32 cap, reg, val, reg2;
	u64 phys_addr;
	u16 temp_u16;
	void *addr;
	int err;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->drv_sds_rings;

	rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
			nsds_rings);
	rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
			nsds_rings);

	addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
			&hostrq_phys_addr, GFP_KERNEL);
	if (addr == NULL)
		return -ENOMEM;
	prq = addr;

	addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
			&cardrsp_phys_addr, GFP_KERNEL);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN
			| QLCNIC_CAP0_VALIDOFF);
	cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);

	if (qlcnic_check_multi_tx(adapter) &&
	    !adapter->ahw->diag_test) {
		cap |= QLCNIC_CAP0_TX_MULTI;
	} else {
		temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
		prq->valid_field_offset = cpu_to_le16(temp_u16);
		prq->txrx_sds_binding = nsds_rings - 1;
		temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
		prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
		temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
		prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
	}

	prq->capabilities[0] = cpu_to_le32(cap);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = 0;

	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {
		rds_ring = &recv_ctx->rds_rings[i];
		rds_ring->producer = 0;
		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {
		sds_ring = &recv_ctx->sds_rings[i];
		sds_ring->consumer = 0;
		memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		if (qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test)
			prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
		else
			prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_RX_CTX);
	if (err)
		goto out_free_rsp;

	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Failed to create rx ctx in firmware %d\n", err);
		goto out_free_rsp;
	}

	prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
			&prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];
		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
	}

	prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];
		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
			reg2 = ahw->intr_tbl[i].src;
		else
			reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);

		sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
		sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

	netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
		recv_ctx->context_id, recv_ctx->state);
	qlcnic_free_mbx_args(&cmd);

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
		cardrsp_phys_addr);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);

	return err;
}
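/*
 * Shape of the host request buffer built above, as a sketch rather than an
 * exact byte map: a struct qlcnic_hostrq_rx_ctx header is followed in the
 * same DMA buffer by nrds_rings qlcnic_hostrq_rds_ring entries (at
 * rds_ring_offset, which is 0) and then nsds_rings qlcnic_hostrq_sds_ring
 * entries (at sds_ring_offset):
 *
 *	prq->data: [ rds_ring[0..nrds-1] ][ sds_ring[0..nsds-1] ]
 *
 * The firmware reply mirrors this layout and returns per-ring CRB offsets,
 * which are turned into producer/consumer register addresses relative to
 * ahw->pci_base0.
 */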

void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
{
	int err;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX);
	if (err)
		return;

	cmd.req.arg[1] = recv_ctx->context_id;
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err)
		dev_err(&adapter->pdev->dev,
			"Failed to destroy rx ctx in firmware\n");

	recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
	qlcnic_free_mbx_args(&cmd);
}

int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
			struct qlcnic_host_tx_ring *tx_ring,
			int ring)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_hostrq_tx_ctx *prq;
	struct qlcnic_hostrq_cds_ring *prq_cds;
	struct qlcnic_cardrsp_tx_ctx *prsp;
	struct qlcnic_cmd_args cmd;
	u32 temp, intr_mask, temp_int_crb_mode;
	dma_addr_t rq_phys_addr, rsp_phys_addr;
	int temp_nsds_rings, index, err;
	void *rq_addr, *rsp_addr;
	size_t rq_size, rsp_size;
	u64 phys_addr;
	u16 msix_id;

	/* reset host and firmware view of the consumer/producer indices */
	tx_ring->producer = 0;
	tx_ring->sw_consumer = 0;
	*(tx_ring->hw_consumer) = 0;

	rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
	rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
			&rq_phys_addr, GFP_KERNEL);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
	rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
			&rsp_phys_addr, GFP_KERNEL);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	prq = rq_addr;
	prsp = rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
		QLCNIC_CAP0_LSO);
	if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
		temp |= QLCNIC_CAP0_TX_MULTI;

	prq->capabilities[0] = cpu_to_le32(temp);

	if (qlcnic_check_multi_tx(adapter) &&
	    !adapter->ahw->diag_test) {
		temp_nsds_rings = adapter->drv_sds_rings;
		index = temp_nsds_rings + ring;
		msix_id = ahw->intr_tbl[index].id;
		prq->msi_index = cpu_to_le16(msix_id);
	} else {
		temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
		prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
		prq->msi_index = 0;
	}

	prq->interrupt_ctl = 0;
	prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
	if (err)
		goto out_free_rsp;

	cmd.req.arg[1] = MSD(phys_addr);
	cmd.req.arg[2] = LSD(phys_addr);
	cmd.req.arg[3] = rq_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err == QLCNIC_RCODE_SUCCESS) {
		tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
		tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
		if (qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test &&
		    (adapter->flags & QLCNIC_MSIX_ENABLED)) {
			index = adapter->drv_sds_rings + ring;
			intr_mask = ahw->intr_tbl[index].src;
			tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
		}

		netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
			tx_ring->ctx_id, tx_ring->state);
	} else {
		netdev_err(netdev, "Failed to create tx ctx in firmware %d\n",
			err);
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);

out_free_rsp:
	dma_free_coherent(&adapter->pdev->dev, rsp_size, rsp_addr,
		rsp_phys_addr);
out_free_rq:
	dma_free_coherent(&adapter->pdev->dev, rq_size, rq_addr, rq_phys_addr);

	return err;
}
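/*
 * Interrupt vector selection in the multi-Tx case above, illustrated with
 * made-up numbers: with drv_sds_rings = 4, Tx ring 1 uses
 * ahw->intr_tbl[4 + 1], i.e. the Tx vectors sit directly after the Rx (SDS)
 * vectors in the interrupt table, and the same index is reused in the
 * success path to pick up the interrupt mask CRB (src).
 */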

void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
			struct qlcnic_host_tx_ring *tx_ring)
{
	struct qlcnic_cmd_args cmd;
	int ret;

	ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
	if (ret)
		return;

	cmd.req.arg[1] = tx_ring->ctx_id;
	if (qlcnic_issue_cmd(adapter, &cmd))
		dev_err(&adapter->pdev->dev,
			"Failed to destroy tx ctx in firmware\n");
	qlcnic_free_mbx_args(&cmd);
}

int
qlcnic_fw_cmd_set_port(struct qlcnic_adapter *adapter, u32 config)
{
	int err;
	struct qlcnic_cmd_args cmd;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_PORT);
	if (err)
		return err;

	cmd.req.arg[1] = config;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);
	return err;
}

int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
{
	void *addr;
	int err, ring;
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	__le32 *ptr;

	struct pci_dev *pdev = adapter->pdev;

	recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
		tx_ring = &adapter->tx_ring[ring];
		ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
				&tx_ring->hw_cons_phys_addr,
				GFP_KERNEL);
		if (ptr == NULL) {
			err = -ENOMEM;
			goto err_out_free;
		}

		tx_ring->hw_consumer = ptr;

		addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
				&tx_ring->phys_addr,
				GFP_KERNEL);
		if (addr == NULL) {
			err = -ENOMEM;
			goto err_out_free;
		}

		tx_ring->desc_head = addr;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = dma_alloc_coherent(&adapter->pdev->dev,
				RCV_DESC_RINGSIZE(rds_ring),
				&rds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = addr;
	}

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = dma_alloc_coherent(&adapter->pdev->dev,
				STATUS_DESC_RINGSIZE(sds_ring),
				&sds_ring->phys_addr, GFP_KERNEL);
		if (addr == NULL) {
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = addr;
	}

	return 0;

err_out_free:
	qlcnic_free_hw_resources(adapter);
	return err;
}

int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
{
	int i, err, ring;

	if (dev->flags & QLCNIC_NEED_FLR) {
		pci_reset_function(dev->pdev);
		dev->flags &= ~QLCNIC_NEED_FLR;
	}

	if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
		if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST) {
			err = qlcnic_83xx_config_intrpt(dev, 1);
			if (err)
				return err;
		}
	}

	if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
	    qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
		err = qlcnic_82xx_mq_intrpt(dev, 1);
		if (err)
			return err;
	}

	err = qlcnic_fw_cmd_create_rx_ctx(dev);
	if (err)
		goto err_out;

	for (ring = 0; ring < dev->drv_tx_rings; ring++) {
		err = qlcnic_fw_cmd_create_tx_ctx(dev,
				&dev->tx_ring[ring],
				ring);
		if (err) {
			qlcnic_fw_cmd_del_rx_ctx(dev);
			if (ring == 0)
				goto err_out;

			for (i = 0; i < ring; i++)
				qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]);

			goto err_out;
		}
	}

	set_bit(__QLCNIC_FW_ATTACHED, &dev->state);

	return 0;

err_out:
	if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
	    qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
		qlcnic_82xx_config_intrpt(dev, 0);

	if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
		if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
			qlcnic_83xx_config_intrpt(dev, 0);
	}

	return err;
}
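/*
 * Bring-up order implemented above, summarised for reference: an optional
 * FLR, then firmware interrupt registration (83xx, or 82xx multi-Tx MSI-X),
 * then one Rx context, then one Tx context per drv_tx_rings. A Tx failure
 * tears down the Rx context and any Tx contexts created so far, and the
 * err_out path unwinds the interrupt registration again.
 */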

void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
{
	int ring;

	if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
		qlcnic_fw_cmd_del_rx_ctx(adapter);
		for (ring = 0; ring < adapter->drv_tx_rings; ring++)
			qlcnic_fw_cmd_del_tx_ctx(adapter,
				&adapter->tx_ring[ring]);

		if (qlcnic_82xx_check(adapter) &&
		    (adapter->flags & QLCNIC_MSIX_ENABLED) &&
		    qlcnic_check_multi_tx(adapter) &&
		    !adapter->ahw->diag_test)
			qlcnic_82xx_config_intrpt(adapter, 0);

		if (qlcnic_83xx_check(adapter) &&
		    (adapter->flags & QLCNIC_MSIX_ENABLED)) {
			if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
				qlcnic_83xx_config_intrpt(adapter, 0);
		}

		mdelay(20);
	}
}

void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
{
	struct qlcnic_recv_context *recv_ctx;
	struct qlcnic_host_rds_ring *rds_ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_tx_ring *tx_ring;
	int ring;

	recv_ctx = adapter->recv_ctx;

	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
		tx_ring = &adapter->tx_ring[ring];
		if (tx_ring->hw_consumer != NULL) {
			dma_free_coherent(&adapter->pdev->dev, sizeof(u32),
				tx_ring->hw_consumer,
				tx_ring->hw_cons_phys_addr);

			tx_ring->hw_consumer = NULL;
		}

		if (tx_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
				TX_DESC_RINGSIZE(tx_ring),
				tx_ring->desc_head,
				tx_ring->phys_addr);
			tx_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
				RCV_DESC_RINGSIZE(rds_ring),
				rds_ring->desc_head,
				rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			dma_free_coherent(&adapter->pdev->dev,
				STATUS_DESC_RINGSIZE(sds_ring),
				sds_ring->desc_head,
				sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}

int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_cmd_args cmd;
	u32 type, val;
	int i, err = 0;

	for (i = 0; i < ahw->num_msix; i++) {
		err = qlcnic_alloc_mbx_args(&cmd, adapter,
				QLCNIC_CMD_MQ_TX_CONFIG_INTR);
		if (err)
			return err;
		type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
		val = type | (ahw->intr_tbl[i].type << 4);
		if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
			val |= (ahw->intr_tbl[i].id << 16);
		cmd.req.arg[1] = val;
		err = qlcnic_issue_cmd(adapter, &cmd);
		if (err) {
			netdev_err(netdev, "Failed to %s interrupts %d\n",
				op_type == QLCNIC_INTRPT_ADD ? "Add" :
				"Delete", err);
			qlcnic_free_mbx_args(&cmd);
			return err;
		}
		val = cmd.rsp.arg[1];
		if (LSB(val)) {
			netdev_info(netdev,
				"failed to configure interrupt for %d\n",
				ahw->intr_tbl[i].id);
			continue;
		}
		if (op_type) {
			ahw->intr_tbl[i].id = MSW(val);
			ahw->intr_tbl[i].enabled = 1;
			ahw->intr_tbl[i].src = cmd.rsp.arg[2];
		} else {
			ahw->intr_tbl[i].id = i;
			ahw->intr_tbl[i].enabled = 0;
			ahw->intr_tbl[i].src = 0;
		}
		qlcnic_free_mbx_args(&cmd);
	}

	return err;
}
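/*
 * Request/response word layout used above, shown with an invented id: an
 * add operation for an MSI-X entry with id 5 sends
 * val = QLCNIC_INTRPT_ADD | (QLCNIC_INTRPT_MSIX << 4) | (5 << 16).
 * In the reply, LSB(rsp.arg[1]) != 0 flags a per-vector failure,
 * MSW(rsp.arg[1]) carries the firmware-assigned interrupt id and
 * rsp.arg[2] the interrupt source CRB offset.
 */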

int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
			u8 function)
{
	int err, i;
	struct qlcnic_cmd_args cmd;
	u32 mac_low, mac_high;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
	if (err)
		return err;

	cmd.req.arg[1] = function | BIT_8;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err == QLCNIC_RCODE_SUCCESS) {
		mac_low = cmd.rsp.arg[1];
		mac_high = cmd.rsp.arg[2];

		for (i = 0; i < 2; i++)
			mac[i] = (u8) (mac_high >> ((1 - i) * 8));
		for (i = 2; i < 6; i++)
			mac[i] = (u8) (mac_low >> ((5 - i) * 8));
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get mac address %d\n", err);
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
	return err;
}
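/*
 * Byte order of the unpacking above, with invented register values:
 * mac_high = 0x00004c8f and mac_low = 0x12345678 yield the station address
 * 4c:8f:12:34:56:78 -- the two low-order bytes of rsp.arg[2] become MAC
 * bytes 0-1 and the four bytes of rsp.arg[1] become bytes 2-5, most
 * significant byte first.
 */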

int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
			struct qlcnic_info *npar_info, u8 func_id)
{
	int err;
	dma_addr_t nic_dma_t;
	const struct qlcnic_info_le *nic_info;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
			&nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;

	nic_info = nic_info_addr;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	cmd.req.arg[3] = (func_id << 16 | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to get nic info %d\n", err);
		err = -EIO;
	} else {
		npar_info->pci_func = le16_to_cpu(nic_info->pci_func);
		npar_info->op_mode = le16_to_cpu(nic_info->op_mode);
		npar_info->min_tx_bw = le16_to_cpu(nic_info->min_tx_bw);
		npar_info->max_tx_bw = le16_to_cpu(nic_info->max_tx_bw);
		npar_info->phys_port = le16_to_cpu(nic_info->phys_port);
		npar_info->switch_mode = le16_to_cpu(nic_info->switch_mode);
		npar_info->max_tx_ques = le16_to_cpu(nic_info->max_tx_ques);
		npar_info->max_rx_ques = le16_to_cpu(nic_info->max_rx_ques);
		npar_info->capabilities = le32_to_cpu(nic_info->capabilities);
		npar_info->max_mtu = le16_to_cpu(nic_info->max_mtu);
	}

	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
		nic_dma_t);

	return err;
}
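/*
 * The third mailbox argument above packs the target function and the
 * expected buffer length into one word, e.g. (func_id chosen only for
 * illustration) func_id 3 gives
 * cmd.req.arg[3] = (3 << 16) | sizeof(struct qlcnic_info_le).
 * qlcnic_82xx_set_nic_info() below uses the same convention.
 */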

int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
			struct qlcnic_info *nic)
{
	int err = -EIO;
	dma_addr_t nic_dma_t;
	void *nic_info_addr;
	struct qlcnic_cmd_args cmd;
	struct qlcnic_info_le *nic_info;
	size_t nic_size = sizeof(struct qlcnic_info_le);

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return err;

	nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
			&nic_dma_t, GFP_KERNEL);
	if (!nic_info_addr)
		return -ENOMEM;

	nic_info = nic_info_addr;

	nic_info->pci_func = cpu_to_le16(nic->pci_func);
	nic_info->op_mode = cpu_to_le16(nic->op_mode);
	nic_info->phys_port = cpu_to_le16(nic->phys_port);
	nic_info->switch_mode = cpu_to_le16(nic->switch_mode);
	nic_info->capabilities = cpu_to_le32(nic->capabilities);
	nic_info->max_mac_filters = nic->max_mac_filters;
	nic_info->max_tx_ques = cpu_to_le16(nic->max_tx_ques);
	nic_info->max_rx_ques = cpu_to_le16(nic->max_rx_ques);
	nic_info->min_tx_bw = cpu_to_le16(nic->min_tx_bw);
	nic_info->max_tx_bw = cpu_to_le16(nic->max_tx_bw);

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = MSD(nic_dma_t);
	cmd.req.arg[2] = LSD(nic_dma_t);
	cmd.req.arg[3] = ((nic->pci_func << 16) | nic_size);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS) {
		dev_err(&adapter->pdev->dev,
			"Failed to set nic info %d\n", err);
		err = -EIO;
	}

	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, nic_size, nic_info_addr,
		nic_dma_t);

	return err;
}

int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
			struct qlcnic_pci_info *pci_info)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	size_t npar_size = sizeof(struct qlcnic_pci_info_le);
	size_t pci_size = npar_size * ahw->max_vnic_func;
	u16 nic = 0, fcoe = 0, iscsi = 0;
	struct qlcnic_pci_info_le *npar;
	struct qlcnic_cmd_args cmd;
	dma_addr_t pci_info_dma_t;
	void *pci_info_addr;
	int err = 0, i;

	pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
			&pci_info_dma_t, GFP_KERNEL);
	if (!pci_info_addr)
		return -ENOMEM;

	npar = pci_info_addr;
	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = MSD(pci_info_dma_t);
	cmd.req.arg[2] = LSD(pci_info_dma_t);
	cmd.req.arg[3] = pci_size;
	err = qlcnic_issue_cmd(adapter, &cmd);

	ahw->total_nic_func = 0;
	if (err == QLCNIC_RCODE_SUCCESS) {
		for (i = 0; i < ahw->max_vnic_func; i++, npar++, pci_info++) {
			pci_info->id = le16_to_cpu(npar->id);
			pci_info->active = le16_to_cpu(npar->active);
			if (!pci_info->active)
				continue;
			pci_info->type = le16_to_cpu(npar->type);
			err = qlcnic_get_pci_func_type(adapter, pci_info->type,
					&nic, &fcoe, &iscsi);
			pci_info->default_port =
				le16_to_cpu(npar->default_port);
			pci_info->tx_min_bw =
				le16_to_cpu(npar->tx_min_bw);
			pci_info->tx_max_bw =
				le16_to_cpu(npar->tx_max_bw);
			memcpy(pci_info->mac, npar->mac, ETH_ALEN);
		}
	} else {
		dev_err(&adapter->pdev->dev,
			"Failed to get PCI Info %d\n", err);
		err = -EIO;
	}

	ahw->total_nic_func = nic;
	ahw->total_pci_func = nic + fcoe + iscsi;
	if (ahw->total_nic_func == 0 || ahw->total_pci_func == 0) {
		dev_err(&adapter->pdev->dev,
			"%s: Invalid function count: total nic func[%x], total pci func[%x]\n",
			__func__, ahw->total_nic_func, ahw->total_pci_func);
		err = -EIO;
	}
	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, pci_size, pci_info_addr,
		pci_info_dma_t);

	return err;
}

int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
			u8 enable_mirroring, u8 pci_func)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	int err = -EIO;
	u32 arg1;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
	    !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
		dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
			__func__);
		return err;
	}

	arg1 = id | (enable_mirroring ? BIT_4 : 0);
	arg1 |= pci_func << 8;

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
			QLCNIC_CMD_SET_PORTMIRRORING);
	if (err)
		return err;

	cmd.req.arg[1] = arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (err != QLCNIC_RCODE_SUCCESS)
		dev_err(dev, "Failed to configure port mirroring for vNIC function %d on eSwitch %d\n",
			pci_func, id);
	else
		dev_info(dev, "Configured port mirroring for vNIC function %d on eSwitch %d\n",
			pci_func, id);
	qlcnic_free_mbx_args(&cmd);

	return err;
}

int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
			const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
{
	size_t stats_size = sizeof(struct qlcnic_esw_stats_le);
	struct qlcnic_esw_stats_le *stats;
	dma_addr_t stats_dma_t;
	void *stats_addr;
	u32 arg1;
	struct qlcnic_cmd_args cmd;
	int err;

	if (esw_stats == NULL)
		return -ENOMEM;

	if ((adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) &&
	    (func != adapter->ahw->pci_func)) {
		dev_err(&adapter->pdev->dev,
			"Not privileged to query stats for func=%d", func);
		return -EIO;
	}

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
			&stats_dma_t, GFP_KERNEL);
	if (!stats_addr)
		return -ENOMEM;

	arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
	arg1 |= rx_tx << 15 | stats_size << 16;

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
			QLCNIC_CMD_GET_ESWITCH_STATS);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
	err = qlcnic_issue_cmd(adapter, &cmd);

	if (!err) {
		stats = stats_addr;
		esw_stats->context_id = le16_to_cpu(stats->context_id);
		esw_stats->version = le16_to_cpu(stats->version);
		esw_stats->size = le16_to_cpu(stats->size);
		esw_stats->multicast_frames =
			le64_to_cpu(stats->multicast_frames);
		esw_stats->broadcast_frames =
			le64_to_cpu(stats->broadcast_frames);
		esw_stats->unicast_frames = le64_to_cpu(stats->unicast_frames);
		esw_stats->dropped_frames = le64_to_cpu(stats->dropped_frames);
		esw_stats->local_frames = le64_to_cpu(stats->local_frames);
		esw_stats->errors = le64_to_cpu(stats->errors);
		esw_stats->numbytes = le64_to_cpu(stats->numbytes);
	}

	qlcnic_free_mbx_args(&cmd);
out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
		stats_dma_t);

	return err;
}
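/*
 * Packing of arg1 above, shown with example values: querying the Rx
 * counters (rx_tx = 0) for function 2 gives
 * arg1 = 2 | (QLCNIC_STATS_VERSION << 8) | (QLCNIC_STATS_PORT << 12) |
 *	(0 << 15) | (sizeof(struct qlcnic_esw_stats_le) << 16),
 * i.e. function in bits 0-7, stats version in bits 8-11, scope (port vs
 * eSwitch) starting at bit 12, direction in bit 15 and the buffer size in
 * the upper half of the word.
 */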

int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
			struct qlcnic_mac_statistics *mac_stats)
{
	struct qlcnic_mac_statistics_le *stats;
	struct qlcnic_cmd_args cmd;
	size_t stats_size = sizeof(struct qlcnic_mac_statistics_le);
	dma_addr_t stats_dma_t;
	void *stats_addr;
	int err;

	if (mac_stats == NULL)
		return -ENOMEM;

	stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
			&stats_dma_t, GFP_KERNEL);
	if (!stats_addr)
		return -ENOMEM;

	err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
	if (err)
		goto out_free_dma;

	cmd.req.arg[1] = stats_size << 16;
	cmd.req.arg[2] = MSD(stats_dma_t);
	cmd.req.arg[3] = LSD(stats_dma_t);
	err = qlcnic_issue_cmd(adapter, &cmd);
	if (!err) {
		stats = stats_addr;
		mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
		mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
		mac_stats->mac_tx_mcast_pkts =
			le64_to_cpu(stats->mac_tx_mcast_pkts);
		mac_stats->mac_tx_bcast_pkts =
			le64_to_cpu(stats->mac_tx_bcast_pkts);
		mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
		mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
		mac_stats->mac_rx_mcast_pkts =
			le64_to_cpu(stats->mac_rx_mcast_pkts);
		mac_stats->mac_rx_length_error =
			le64_to_cpu(stats->mac_rx_length_error);
		mac_stats->mac_rx_length_small =
			le64_to_cpu(stats->mac_rx_length_small);
		mac_stats->mac_rx_length_large =
			le64_to_cpu(stats->mac_rx_length_large);
		mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
		mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
		mac_stats->mac_rx_crc_error =
			le64_to_cpu(stats->mac_rx_crc_error);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s: Get mac stats failed, err=%d.\n", __func__, err);
	}

	qlcnic_free_mbx_args(&cmd);

out_free_dma:
	dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
		stats_dma_t);

	return err;
}

int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
			const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats)
{
	struct __qlcnic_esw_statistics port_stats;
	u8 i;
	int ret = -EIO;

	if (esw_stats == NULL)
		return -ENOMEM;
	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;
	if (adapter->npars == NULL)
		return -EIO;

	memset(esw_stats, 0, sizeof(struct __qlcnic_esw_statistics));
	esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
	esw_stats->context_id = eswitch;

	for (i = 0; i < adapter->ahw->total_nic_func; i++) {
		if (adapter->npars[i].phy_port != eswitch)
			continue;

		memset(&port_stats, 0, sizeof(struct __qlcnic_esw_statistics));
		if (qlcnic_get_port_stats(adapter, adapter->npars[i].pci_func,
				rx_tx, &port_stats))
			continue;

		esw_stats->size = port_stats.size;
		esw_stats->version = port_stats.version;
		QLCNIC_ADD_ESW_STATS(esw_stats->unicast_frames,
				port_stats.unicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->multicast_frames,
				port_stats.multicast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->broadcast_frames,
				port_stats.broadcast_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->dropped_frames,
				port_stats.dropped_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->errors,
				port_stats.errors);
		QLCNIC_ADD_ESW_STATS(esw_stats->local_frames,
				port_stats.local_frames);
		QLCNIC_ADD_ESW_STATS(esw_stats->numbytes,
				port_stats.numbytes);
		ret = 0;
	}
	return ret;
}

int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, const u8 func_esw,
			const u8 port, const u8 rx_tx)
{
	struct qlcnic_hardware_context *ahw = adapter->ahw;
	struct qlcnic_cmd_args cmd;
	int err;
	u32 arg1;

	if (ahw->op_mode != QLCNIC_MGMT_FUNC)
		return -EIO;

	if (func_esw == QLCNIC_STATS_PORT) {
		if (port >= ahw->max_vnic_func)
			goto err_ret;
	} else if (func_esw == QLCNIC_STATS_ESWITCH) {
		if (port >= QLCNIC_NIU_MAX_XG_PORTS)
			goto err_ret;
	} else {
		goto err_ret;
	}

	if (rx_tx > QLCNIC_QUERY_TX_COUNTER)
		goto err_ret;

	arg1 = port | QLCNIC_STATS_VERSION << 8 | func_esw << 12;
	arg1 |= BIT_14 | rx_tx << 15;

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
			QLCNIC_CMD_GET_ESWITCH_STATS);
	if (err)
		return err;

	cmd.req.arg[1] = arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);
	return err;

err_ret:
	dev_err(&adapter->pdev->dev,
		"Invalid args func_esw %d port %d rx_ctx %d\n",
		func_esw, port, rx_tx);
	return -EIO;
}

static int __qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
			u32 *arg1, u32 *arg2)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	u8 pci_func = *arg1 >> 8;
	int err;

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
			QLCNIC_CMD_GET_ESWITCH_PORT_CONFIG);
	if (err)
		return err;

	cmd.req.arg[1] = *arg1;
	err = qlcnic_issue_cmd(adapter, &cmd);
	*arg1 = cmd.rsp.arg[1];
	*arg2 = cmd.rsp.arg[2];
	qlcnic_free_mbx_args(&cmd);

	if (err == QLCNIC_RCODE_SUCCESS)
		dev_info(dev, "Get eSwitch port config for vNIC function %d\n",
			pci_func);
	else
		dev_err(dev, "Failed to get eswitch port config for vNIC function %d\n",
			pci_func);
	return err;
}

/* Configure an eSwitch port: op_mode selects default settings, adding a
 * VLAN id, or deleting a VLAN id (see the switch statement below).
 */
int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
			struct qlcnic_esw_func_cfg *esw_cfg)
{
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_cmd_args cmd;
	int err = -EIO, index;
	u32 arg1, arg2 = 0;
	u8 pci_func;

	if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
		dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
			__func__);
		return err;
	}

	pci_func = esw_cfg->pci_func;
	index = qlcnic_is_valid_nic_func(adapter, pci_func);
	if (index < 0)
		return err;
	arg1 = (adapter->npars[index].phy_port & BIT_0);
	arg1 |= (pci_func << 8);

	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return err;
	arg1 &= ~(0x0ff << 8);
	arg1 |= (pci_func << 8);
	arg1 &= ~(BIT_2 | BIT_3);
	switch (esw_cfg->op_mode) {
	case QLCNIC_PORT_DEFAULTS:
		arg1 |= (BIT_4 | BIT_6 | BIT_7);
		arg2 |= (BIT_0 | BIT_1);
		if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
			arg2 |= (BIT_2 | BIT_3);
		if (!(esw_cfg->discard_tagged))
			arg1 &= ~BIT_4;
		if (!(esw_cfg->promisc_mode))
			arg1 &= ~BIT_6;
		if (!(esw_cfg->mac_override))
			arg1 &= ~BIT_7;
		if (!(esw_cfg->mac_anti_spoof))
			arg2 &= ~BIT_0;
		if (!(esw_cfg->offload_flags & BIT_0))
			arg2 &= ~(BIT_1 | BIT_2 | BIT_3);
		if (!(esw_cfg->offload_flags & BIT_1))
			arg2 &= ~BIT_2;
		if (!(esw_cfg->offload_flags & BIT_2))
			arg2 &= ~BIT_3;
		break;
	case QLCNIC_ADD_VLAN:
		arg1 &= ~(0x0ffff << 16);
		arg1 |= (BIT_2 | BIT_5);
		arg1 |= (esw_cfg->vlan_id << 16);
		break;
	case QLCNIC_DEL_VLAN:
		arg1 |= (BIT_3 | BIT_5);
		arg1 &= ~(0x0ffff << 16);
		break;
	default:
		dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
			__func__, esw_cfg->op_mode);
		return err;
	}

	err = qlcnic_alloc_mbx_args(&cmd, adapter,
			QLCNIC_CMD_CONFIGURE_ESWITCH);
	if (err)
		return err;

	cmd.req.arg[1] = arg1;
	cmd.req.arg[2] = arg2;
	err = qlcnic_issue_cmd(adapter, &cmd);
	qlcnic_free_mbx_args(&cmd);

	if (err != QLCNIC_RCODE_SUCCESS)
		dev_err(dev, "Failed to configure eswitch for vNIC function %d\n",
			pci_func);
	else
		dev_info(dev, "Configured eSwitch for vNIC function %d\n",
			pci_func);

	return err;
}

int
qlcnic_get_eswitch_port_config(struct qlcnic_adapter *adapter,
			struct qlcnic_esw_func_cfg *esw_cfg)
{
	u32 arg1, arg2;
	int index;
	u8 phy_port;

	if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC) {
		index = qlcnic_is_valid_nic_func(adapter, esw_cfg->pci_func);
		if (index < 0)
			return -EIO;
		phy_port = adapter->npars[index].phy_port;
	} else {
		phy_port = adapter->ahw->physical_port;
	}
	arg1 = phy_port;
	arg1 |= (esw_cfg->pci_func << 8);
	if (__qlcnic_get_eswitch_port_config(adapter, &arg1, &arg2))
		return -EIO;

	esw_cfg->discard_tagged = !!(arg1 & BIT_4);
	esw_cfg->host_vlan_tag = !!(arg1 & BIT_5);
	esw_cfg->promisc_mode = !!(arg1 & BIT_6);
	esw_cfg->mac_override = !!(arg1 & BIT_7);
	esw_cfg->vlan_id = LSW(arg1 >> 16);
	esw_cfg->mac_anti_spoof = (arg2 & 0x1);
	esw_cfg->offload_flags = ((arg2 >> 1) & 0x7);

	return 0;
}
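/*
 * Bit decode used in qlcnic_get_eswitch_port_config() above, collected in
 * one place for reference: in arg1, BIT_4 = discard tagged frames,
 * BIT_5 = host VLAN tagging, BIT_6 = promiscuous mode, BIT_7 = MAC
 * override and bits 16-31 = VLAN id; in arg2, bit 0 = MAC anti-spoof and
 * bits 1-3 = offload flags. qlcnic_config_switch_port() sets and clears the
 * same bits when it writes the configuration back.
 */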