This source file includes following definitions.
- ql_read_other_func_reg
- ql_write_other_func_reg
- ql_wait_other_func_reg_rdy
- ql_read_other_func_serdes_reg
- ql_read_serdes_reg
- ql_get_both_serdes
- ql_get_serdes_regs
- ql_read_other_func_xgmac_reg
- ql_get_xgmac_regs
- ql_get_ets_regs
- ql_get_intr_states
- ql_get_cam_entries
- ql_get_routing_entries
- ql_get_mpi_shadow_regs
- ql_get_mpi_regs
- ql_get_probe
- ql_get_probe_dump
- ql_get_routing_index_registers
- ql_get_mac_protocol_registers
- ql_get_sem_registers
- ql_build_coredump_seg_header
- ql_core_dump
- ql_get_core_dump
- ql_gen_reg_dump
- ql_get_dump
- ql_mpi_core_to_log
- ql_dump_intr_states
- ql_dump_xgmac_control_regs
- ql_dump_ets_regs
- ql_dump_cam_entries
- ql_dump_routing_entries
- ql_dump_regs
- ql_dump_stat
- ql_dump_qdev
- ql_dump_wqicb
- ql_dump_tx_ring
- ql_dump_ricb
- ql_dump_cqicb
- ql_dump_rx_ring
- ql_dump_hw_cb
- ql_dump_tx_desc
- ql_dump_ob_mac_iocb
- ql_dump_ob_mac_rsp
- ql_dump_ib_mac_rsp
- ql_dump_all
1
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
3
4 #include <linux/slab.h>
5
6 #include "qlge.h"
7
8
9 static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
10 u32 reg)
11 {
12 u32 register_to_read;
13 u32 reg_val;
14 unsigned int status = 0;
15
16 register_to_read = MPI_NIC_REG_BLOCK
17 | MPI_NIC_READ
18 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
19 | reg;
20 status = ql_read_mpi_reg(qdev, register_to_read, ®_val);
21 if (status != 0)
22 return 0xffffffff;
23
24 return reg_val;
25 }
26
27
28 static int ql_write_other_func_reg(struct ql_adapter *qdev,
29 u32 reg, u32 reg_val)
30 {
31 u32 register_to_read;
32 int status = 0;
33
34 register_to_read = MPI_NIC_REG_BLOCK
35 | MPI_NIC_READ
36 | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
37 | reg;
38 status = ql_write_mpi_reg(qdev, register_to_read, reg_val);
39
40 return status;
41 }
42
43 static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
44 u32 bit, u32 err_bit)
45 {
46 u32 temp;
47 int count = 10;
48
49 while (count) {
50 temp = ql_read_other_func_reg(qdev, reg);
51
52
53 if (temp & err_bit)
54 return -1;
55 else if (temp & bit)
56 return 0;
57 mdelay(10);
58 count--;
59 }
60 return -1;
61 }
62
63 static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
64 u32 *data)
65 {
66 int status;
67
68
69 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
70 XG_SERDES_ADDR_RDY, 0);
71 if (status)
72 goto exit;
73
74
75 ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
76
77
78 status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
79 XG_SERDES_ADDR_RDY, 0);
80 if (status)
81 goto exit;
82
83
84 *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
85 exit:
86 return status;
87 }
88
89
90 static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
91 {
92 int status;
93
94
95 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
96 if (status)
97 goto exit;
98
99
100 ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
101
102
103 status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
104 if (status)
105 goto exit;
106
107
108 *data = ql_read32(qdev, XG_SERDES_DATA);
109 exit:
110 return status;
111 }
112
113 static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
114 u32 *direct_ptr, u32 *indirect_ptr,
115 unsigned int direct_valid, unsigned int indirect_valid)
116 {
117 unsigned int status;
118
119 status = 1;
120 if (direct_valid)
121 status = ql_read_serdes_reg(qdev, addr, direct_ptr);
122
123 if (status)
124 *direct_ptr = 0xDEADBEEF;
125
126 status = 1;
127 if (indirect_valid)
128 status = ql_read_other_func_serdes_reg(
129 qdev, addr, indirect_ptr);
130
131 if (status)
132 *indirect_ptr = 0xDEADBEEF;
133 }
134
/* Capture all serdes register groups (XAUI and XFI, AN/HSS PCS/TRAIN/
 * TX/RX/PLL) into the coredump, for both this function and the other
 * function.  Which coredump arrays receive the "direct" vs "indirect"
 * samples is swapped depending on whether this is an odd or even
 * function, so each array always ends up holding the same function's
 * data regardless of who runs the dump.  Always returns 0; registers
 * that cannot be read are filled with 0xDEADBEEF by ql_get_both_serdes().
 */
static int ql_get_serdes_regs(struct ql_adapter *qdev,
				struct ql_mpi_coredump *mpi_coredump)
{
	int status;
	unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
	unsigned int xaui_indirect_valid, i;
	u32 *direct_ptr, temp;
	u32 *indirect_ptr;

	/* Assume XFI lanes are powered down and XAUI is up until the
	 * power-state registers below say otherwise.
	 */
	xfi_direct_valid = xfi_indirect_valid = 0;
	xaui_direct_valid = xaui_indirect_valid = 1;

	/* Check XAUI power state on the other function's serdes. */
	status = ql_read_other_func_serdes_reg(qdev,
			XG_SERDES_XAUI_HSS_PCS_START, &temp);
	if (status)
		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;	/* treat as powered down */

	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
				XG_SERDES_ADDR_XAUI_PWR_DOWN)
		xaui_indirect_valid = 0;

	/* Check XAUI power state on this function's serdes. */
	status = ql_read_serdes_reg(qdev, XG_SERDES_XAUI_HSS_PCS_START, &temp);

	if (status)
		temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;

	if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
				XG_SERDES_ADDR_XAUI_PWR_DOWN)
		xaui_direct_valid = 0;

	/* XFI power-up bits: which XFI lane maps to direct vs indirect
	 * depends on whether this is an odd or even function.
	 */
	status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
	if (status)
		temp = 0;

	if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
				XG_SERDES_ADDR_XFI1_PWR_UP) {
		/* XFI1 belongs to the even function. */
		if (qdev->func & 1)
			xfi_indirect_valid = 1;
		else
			xfi_direct_valid = 1;
	}
	if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
				XG_SERDES_ADDR_XFI2_PWR_UP) {
		/* XFI2 belongs to the odd function. */
		if (qdev->func & 1)
			xfi_direct_valid = 1;
		else
			xfi_indirect_valid = 1;
	}

	/* XAUI AN registers, 0x000..0x034. */
	if (qdev->func & 1) {
		/* Odd function: our direct reads fill the "2" arrays. */
		direct_ptr = mpi_coredump->serdes2_xaui_an;
		indirect_ptr = mpi_coredump->serdes_xaui_an;
	} else {
		direct_ptr = mpi_coredump->serdes_xaui_an;
		indirect_ptr = mpi_coredump->serdes2_xaui_an;
	}

	for (i = 0; i <= 0x000000034; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xaui_direct_valid, xaui_indirect_valid);

	/* XAUI HSS PCS registers, 0x800..0x880. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xaui_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes_xaui_hss_pcs;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xaui_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes2_xaui_hss_pcs;
	}

	for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xaui_direct_valid, xaui_indirect_valid);

	/* XFI AN registers, 0x1000..0x1034. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_an;
		indirect_ptr = mpi_coredump->serdes_xfi_an;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_an;
		indirect_ptr = mpi_coredump->serdes2_xfi_an;
	}

	for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xfi_direct_valid, xfi_indirect_valid);

	/* XFI TRAIN registers, 0x1050..0x107c. */
	if (qdev->func & 1) {
		direct_ptr = mpi_coredump->serdes2_xfi_train;
		indirect_ptr =
			mpi_coredump->serdes_xfi_train;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_train;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_train;
	}

	for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xfi_direct_valid, xfi_indirect_valid);

	/* XFI HSS PCS registers, 0x1800..0x1838. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_pcs;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xfi_hss_pcs;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_pcs;
	}

	for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xfi_direct_valid, xfi_indirect_valid);

	/* XFI HSS TX registers, 0x1c00..0x1c1f (note: stepping by 1 here,
	 * unlike the earlier word-stepped (+= 4) ranges).
	 */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_tx;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_tx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_tx;
	}
	for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xfi_direct_valid, xfi_indirect_valid);

	/* XFI HSS RX registers, 0x1c40..0x1c5f. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_rx;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_rx;
	} else {
		direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_rx;
	}

	for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xfi_direct_valid, xfi_indirect_valid);

	/* XFI HSS PLL registers, 0x1e00..0x1e1f. */
	if (qdev->func & 1) {
		direct_ptr =
			mpi_coredump->serdes2_xfi_hss_pll;
		indirect_ptr =
			mpi_coredump->serdes_xfi_hss_pll;
	} else {
		direct_ptr =
			mpi_coredump->serdes_xfi_hss_pll;
		indirect_ptr =
			mpi_coredump->serdes2_xfi_hss_pll;
	}
	for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
		ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
					xfi_direct_valid, xfi_indirect_valid);
	return 0;
}
319
320 static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
321 u32 *data)
322 {
323 int status = 0;
324
325
326 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
327 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
328 if (status)
329 goto exit;
330
331
332 ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
333
334
335 status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
336 XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
337 if (status)
338 goto exit;
339
340
341 *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
342 exit:
343 return status;
344 }
345
346
347
348
/* Dump XGMAC registers into @buf, either for this function
 * (@other_function == 0, direct reads) or for the alternate function
 * (@other_function != 0, indirect reads via the MPI path).
 *
 * The big condition enumerates the address ranges of interest inside
 * [PAUSE_SRC_LO, XGMAC_REGISTER_END).  A failed read stores the
 * 0xdeadbeef sentinel in that slot.
 *
 * NOTE(review): the `break` after the first matching read exits the
 * loop, so at most ONE register is captured per call and the rest of
 * @buf is left untouched.  If the intent was to capture every listed
 * register, this should likely be a fall-through (no break) — confirm
 * against the expected coredump layout before changing.
 */
static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
					unsigned int other_function)
{
	int status = 0;
	int i;

	for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
		/* Only addresses in these ranges are read. */
		if ((i == 0x00000114) ||
			(i == 0x00000118) ||
			(i == 0x0000013c) ||
			(i == 0x00000140) ||
			(i > 0x00000150 && i < 0x000001fc) ||
			(i > 0x00000278 && i < 0x000002a0) ||
			(i > 0x000002c0 && i < 0x000002cf) ||
			(i > 0x000002dc && i < 0x000002f0) ||
			(i > 0x000003c8 && i < 0x00000400) ||
			(i > 0x00000400 && i < 0x00000410) ||
			(i > 0x00000410 && i < 0x00000420) ||
			(i > 0x00000420 && i < 0x00000430) ||
			(i > 0x00000430 && i < 0x00000440) ||
			(i > 0x00000440 && i < 0x00000450) ||
			(i > 0x00000450 && i < 0x00000500) ||
			(i > 0x0000054c && i < 0x00000568) ||
			(i > 0x000005c8 && i < 0x00000600)) {
			if (other_function)
				status =
				ql_read_other_func_xgmac_reg(qdev, i, buf);
			else
				status = ql_read_xgmac_reg(qdev, i, buf);

			if (status)
				*buf = 0xdeadbeef;	/* mark failed read */
			break;
		}
	}
	return status;
}
389
390 static int ql_get_ets_regs(struct ql_adapter *qdev, u32 *buf)
391 {
392 int status = 0;
393 int i;
394
395 for (i = 0; i < 8; i++, buf++) {
396 ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
397 *buf = ql_read32(qdev, NIC_ETS);
398 }
399
400 for (i = 0; i < 2; i++, buf++) {
401 ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
402 *buf = ql_read32(qdev, CNA_ETS);
403 }
404
405 return status;
406 }
407
408 static void ql_get_intr_states(struct ql_adapter *qdev, u32 *buf)
409 {
410 int i;
411
412 for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
413 ql_write32(qdev, INTR_EN,
414 qdev->intr_context[i].intr_read_mask);
415 *buf = ql_read32(qdev, INTR_EN);
416 }
417 }
418
419 static int ql_get_cam_entries(struct ql_adapter *qdev, u32 *buf)
420 {
421 int i, status;
422 u32 value[3];
423
424 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
425 if (status)
426 return status;
427
428 for (i = 0; i < 16; i++) {
429 status = ql_get_mac_addr_reg(qdev,
430 MAC_ADDR_TYPE_CAM_MAC, i, value);
431 if (status) {
432 netif_err(qdev, drv, qdev->ndev,
433 "Failed read of mac index register\n");
434 goto err;
435 }
436 *buf++ = value[0];
437 *buf++ = value[1];
438 *buf++ = value[2];
439 }
440 for (i = 0; i < 32; i++) {
441 status = ql_get_mac_addr_reg(qdev,
442 MAC_ADDR_TYPE_MULTI_MAC, i, value);
443 if (status) {
444 netif_err(qdev, drv, qdev->ndev,
445 "Failed read of mac index register\n");
446 goto err;
447 }
448 *buf++ = value[0];
449 *buf++ = value[1];
450 }
451 err:
452 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
453 return status;
454 }
455
456 static int ql_get_routing_entries(struct ql_adapter *qdev, u32 *buf)
457 {
458 int status;
459 u32 value, i;
460
461 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
462 if (status)
463 return status;
464
465 for (i = 0; i < 16; i++) {
466 status = ql_get_routing_reg(qdev, i, &value);
467 if (status) {
468 netif_err(qdev, drv, qdev->ndev,
469 "Failed read of routing index register\n");
470 goto err;
471 } else {
472 *buf++ = value;
473 }
474 }
475 err:
476 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
477 return status;
478 }
479
480
481 static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
482 {
483 u32 i;
484 int status;
485
486 for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
487 status = ql_write_mpi_reg(qdev, RISC_124,
488 (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
489 if (status)
490 goto end;
491 status = ql_read_mpi_reg(qdev, RISC_127, buf);
492 if (status)
493 goto end;
494 }
495 end:
496 return status;
497 }
498
499
500 static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
501 u32 offset, u32 count)
502 {
503 int i, status = 0;
504 for (i = 0; i < count; i++, buf++) {
505 status = ql_read_mpi_reg(qdev, offset + i, buf);
506 if (status)
507 return status;
508 }
509 return status;
510 }
511
512
513 static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
514 u32 valid, u32 *buf)
515 {
516 u32 module, mux_sel, probe, lo_val, hi_val;
517
518 for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
519 if (!((valid >> module) & 1))
520 continue;
521 for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
522 probe = clock
523 | PRB_MX_ADDR_ARE
524 | mux_sel
525 | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
526 ql_write32(qdev, PRB_MX_ADDR, probe);
527 lo_val = ql_read32(qdev, PRB_MX_DATA);
528 if (mux_sel == 0) {
529 *buf = probe;
530 buf++;
531 }
532 probe |= PRB_MX_ADDR_UP;
533 ql_write32(qdev, PRB_MX_ADDR, probe);
534 hi_val = ql_read32(qdev, PRB_MX_DATA);
535 *buf = lo_val;
536 buf++;
537 *buf = hi_val;
538 buf++;
539 }
540 }
541 return buf;
542 }
543
544 static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
545 {
546
547 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
548 buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
549 PRB_MX_ADDR_VALID_SYS_MOD, buf);
550 buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
551 PRB_MX_ADDR_VALID_PCI_MOD, buf);
552 buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
553 PRB_MX_ADDR_VALID_XGM_MOD, buf);
554 buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
555 PRB_MX_ADDR_VALID_FC_MOD, buf);
556 return 0;
557
558 }
559
560
561 static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
562 {
563 int status;
564 u32 type, index, index_max;
565 u32 result_index;
566 u32 result_data;
567 u32 val;
568
569 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
570 if (status)
571 return status;
572
573 for (type = 0; type < 4; type++) {
574 if (type < 2)
575 index_max = 8;
576 else
577 index_max = 16;
578 for (index = 0; index < index_max; index++) {
579 val = RT_IDX_RS
580 | (type << RT_IDX_TYPE_SHIFT)
581 | (index << RT_IDX_IDX_SHIFT);
582 ql_write32(qdev, RT_IDX, val);
583 result_index = 0;
584 while ((result_index & RT_IDX_MR) == 0)
585 result_index = ql_read32(qdev, RT_IDX);
586 result_data = ql_read32(qdev, RT_DATA);
587 *buf = type;
588 buf++;
589 *buf = index;
590 buf++;
591 *buf = result_index;
592 buf++;
593 *buf = result_data;
594 buf++;
595 }
596 }
597 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
598 return status;
599 }
600
601
602 static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
603 {
604 u32 result_index, result_data;
605 u32 type;
606 u32 index;
607 u32 offset;
608 u32 val;
609 u32 initial_val = MAC_ADDR_RS;
610 u32 max_index;
611 u32 max_offset;
612
613 for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
614 switch (type) {
615
616 case 0:
617 initial_val |= MAC_ADDR_ADR;
618 max_index = MAC_ADDR_MAX_CAM_ENTRIES;
619 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
620 break;
621 case 1:
622 max_index = MAC_ADDR_MAX_CAM_WCOUNT;
623 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
624 break;
625 case 2:
626 case 3:
627 max_index = MAC_ADDR_MAX_CAM_WCOUNT;
628 max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
629 break;
630 case 4:
631 max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
632 max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
633 break;
634 case 5:
635 max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
636 max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
637 break;
638 case 6:
639 max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
640 max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
641 break;
642 case 7:
643 max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
644 max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
645 break;
646 case 8:
647 max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
648 max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
649 break;
650 case 9:
651 max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
652 max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
653 break;
654 default:
655 pr_err("Bad type!!! 0x%08x\n", type);
656 max_index = 0;
657 max_offset = 0;
658 break;
659 }
660 for (index = 0; index < max_index; index++) {
661 for (offset = 0; offset < max_offset; offset++) {
662 val = initial_val
663 | (type << MAC_ADDR_TYPE_SHIFT)
664 | (index << MAC_ADDR_IDX_SHIFT)
665 | (offset);
666 ql_write32(qdev, MAC_ADDR_IDX, val);
667 result_index = 0;
668 while ((result_index & MAC_ADDR_MR) == 0) {
669 result_index = ql_read32(qdev,
670 MAC_ADDR_IDX);
671 }
672 result_data = ql_read32(qdev, MAC_ADDR_DATA);
673 *buf = result_index;
674 buf++;
675 *buf = result_data;
676 buf++;
677 }
678 }
679 }
680 }
681
682 static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
683 {
684 u32 func_num, reg, reg_val;
685 int status;
686
687 for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS ; func_num++) {
688 reg = MPI_NIC_REG_BLOCK
689 | (func_num << MPI_NIC_FUNCTION_SHIFT)
690 | (SEM / 4);
691 status = ql_read_mpi_reg(qdev, reg, ®_val);
692 *buf = reg_val;
693
694 if (!status)
695 *buf = 0xdeadbeef;
696 buf++;
697 }
698 }
699
700
701 static void ql_build_coredump_seg_header(
702 struct mpi_coredump_segment_header *seg_hdr,
703 u32 seg_number, u32 seg_size, u8 *desc)
704 {
705 memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
706 seg_hdr->cookie = MPI_COREDUMP_COOKIE;
707 seg_hdr->segNum = seg_number;
708 seg_hdr->segSize = seg_size;
709 strncpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
710 }
711
712
713
714
715
716
717
718
719 int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
720 {
721 int status;
722 int i;
723
724 if (!mpi_coredump) {
725 netif_err(qdev, drv, qdev->ndev, "No memory allocated\n");
726 return -EINVAL;
727 }
728
729
730
731
732
733 ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
734
735 status = ql_pause_mpi_risc(qdev);
736 if (status) {
737 netif_err(qdev, drv, qdev->ndev,
738 "Failed RISC pause. Status = 0x%.08x\n", status);
739 goto err;
740 }
741
742
743 memset(&(mpi_coredump->mpi_global_header), 0,
744 sizeof(struct mpi_coredump_global_header));
745 mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
746 mpi_coredump->mpi_global_header.headerSize =
747 sizeof(struct mpi_coredump_global_header);
748 mpi_coredump->mpi_global_header.imageSize =
749 sizeof(struct ql_mpi_coredump);
750 strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
751 sizeof(mpi_coredump->mpi_global_header.idString));
752
753
754 ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
755 NIC1_CONTROL_SEG_NUM,
756 sizeof(struct mpi_coredump_segment_header) +
757 sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
758
759 ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
760 NIC2_CONTROL_SEG_NUM,
761 sizeof(struct mpi_coredump_segment_header) +
762 sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
763
764
765 ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
766 NIC1_XGMAC_SEG_NUM,
767 sizeof(struct mpi_coredump_segment_header) +
768 sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
769
770 ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
771 NIC2_XGMAC_SEG_NUM,
772 sizeof(struct mpi_coredump_segment_header) +
773 sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
774
775 if (qdev->func & 1) {
776
777 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
778 mpi_coredump->nic2_regs[i] =
779 ql_read32(qdev, i * sizeof(u32));
780
781 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
782 mpi_coredump->nic_regs[i] =
783 ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
784
785 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
786 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
787 } else {
788
789 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
790 mpi_coredump->nic_regs[i] =
791 ql_read32(qdev, i * sizeof(u32));
792 for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
793 mpi_coredump->nic2_regs[i] =
794 ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
795
796 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
797 ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
798 }
799
800
801 ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
802 XAUI_AN_SEG_NUM,
803 sizeof(struct mpi_coredump_segment_header) +
804 sizeof(mpi_coredump->serdes_xaui_an),
805 "XAUI AN Registers");
806
807
808 ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
809 XAUI_HSS_PCS_SEG_NUM,
810 sizeof(struct mpi_coredump_segment_header) +
811 sizeof(mpi_coredump->serdes_xaui_hss_pcs),
812 "XAUI HSS PCS Registers");
813
814 ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
815 sizeof(struct mpi_coredump_segment_header) +
816 sizeof(mpi_coredump->serdes_xfi_an),
817 "XFI AN Registers");
818
819 ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
820 XFI_TRAIN_SEG_NUM,
821 sizeof(struct mpi_coredump_segment_header) +
822 sizeof(mpi_coredump->serdes_xfi_train),
823 "XFI TRAIN Registers");
824
825 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
826 XFI_HSS_PCS_SEG_NUM,
827 sizeof(struct mpi_coredump_segment_header) +
828 sizeof(mpi_coredump->serdes_xfi_hss_pcs),
829 "XFI HSS PCS Registers");
830
831 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
832 XFI_HSS_TX_SEG_NUM,
833 sizeof(struct mpi_coredump_segment_header) +
834 sizeof(mpi_coredump->serdes_xfi_hss_tx),
835 "XFI HSS TX Registers");
836
837 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
838 XFI_HSS_RX_SEG_NUM,
839 sizeof(struct mpi_coredump_segment_header) +
840 sizeof(mpi_coredump->serdes_xfi_hss_rx),
841 "XFI HSS RX Registers");
842
843 ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
844 XFI_HSS_PLL_SEG_NUM,
845 sizeof(struct mpi_coredump_segment_header) +
846 sizeof(mpi_coredump->serdes_xfi_hss_pll),
847 "XFI HSS PLL Registers");
848
849 ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
850 XAUI2_AN_SEG_NUM,
851 sizeof(struct mpi_coredump_segment_header) +
852 sizeof(mpi_coredump->serdes2_xaui_an),
853 "XAUI2 AN Registers");
854
855 ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
856 XAUI2_HSS_PCS_SEG_NUM,
857 sizeof(struct mpi_coredump_segment_header) +
858 sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
859 "XAUI2 HSS PCS Registers");
860
861 ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
862 XFI2_AN_SEG_NUM,
863 sizeof(struct mpi_coredump_segment_header) +
864 sizeof(mpi_coredump->serdes2_xfi_an),
865 "XFI2 AN Registers");
866
867 ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
868 XFI2_TRAIN_SEG_NUM,
869 sizeof(struct mpi_coredump_segment_header) +
870 sizeof(mpi_coredump->serdes2_xfi_train),
871 "XFI2 TRAIN Registers");
872
873 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
874 XFI2_HSS_PCS_SEG_NUM,
875 sizeof(struct mpi_coredump_segment_header) +
876 sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
877 "XFI2 HSS PCS Registers");
878
879 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
880 XFI2_HSS_TX_SEG_NUM,
881 sizeof(struct mpi_coredump_segment_header) +
882 sizeof(mpi_coredump->serdes2_xfi_hss_tx),
883 "XFI2 HSS TX Registers");
884
885 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
886 XFI2_HSS_RX_SEG_NUM,
887 sizeof(struct mpi_coredump_segment_header) +
888 sizeof(mpi_coredump->serdes2_xfi_hss_rx),
889 "XFI2 HSS RX Registers");
890
891 ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
892 XFI2_HSS_PLL_SEG_NUM,
893 sizeof(struct mpi_coredump_segment_header) +
894 sizeof(mpi_coredump->serdes2_xfi_hss_pll),
895 "XFI2 HSS PLL Registers");
896
897 status = ql_get_serdes_regs(qdev, mpi_coredump);
898 if (status) {
899 netif_err(qdev, drv, qdev->ndev,
900 "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
901 status);
902 goto err;
903 }
904
905 ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
906 CORE_SEG_NUM,
907 sizeof(mpi_coredump->core_regs_seg_hdr) +
908 sizeof(mpi_coredump->mpi_core_regs) +
909 sizeof(mpi_coredump->mpi_core_sh_regs),
910 "Core Registers");
911
912
913 status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
914 MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
915 if (status)
916 goto err;
917
918 status = ql_get_mpi_shadow_regs(qdev,
919 &mpi_coredump->mpi_core_sh_regs[0]);
920 if (status)
921 goto err;
922
923
924 ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
925 TEST_LOGIC_SEG_NUM,
926 sizeof(struct mpi_coredump_segment_header)
927 + sizeof(mpi_coredump->test_logic_regs),
928 "Test Logic Regs");
929 status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
930 TEST_REGS_ADDR, TEST_REGS_CNT);
931 if (status)
932 goto err;
933
934
935 ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
936 RMII_SEG_NUM,
937 sizeof(struct mpi_coredump_segment_header)
938 + sizeof(mpi_coredump->rmii_regs),
939 "RMII Registers");
940 status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
941 RMII_REGS_ADDR, RMII_REGS_CNT);
942 if (status)
943 goto err;
944
945
946 ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
947 FCMAC1_SEG_NUM,
948 sizeof(struct mpi_coredump_segment_header)
949 + sizeof(mpi_coredump->fcmac1_regs),
950 "FCMAC1 Registers");
951 status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
952 FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
953 if (status)
954 goto err;
955
956
957
958 ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
959 FCMAC2_SEG_NUM,
960 sizeof(struct mpi_coredump_segment_header)
961 + sizeof(mpi_coredump->fcmac2_regs),
962 "FCMAC2 Registers");
963
964 status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
965 FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
966 if (status)
967 goto err;
968
969
970 ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
971 FC1_MBOX_SEG_NUM,
972 sizeof(struct mpi_coredump_segment_header)
973 + sizeof(mpi_coredump->fc1_mbx_regs),
974 "FC1 MBox Regs");
975 status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
976 FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
977 if (status)
978 goto err;
979
980
981 ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
982 IDE_SEG_NUM,
983 sizeof(struct mpi_coredump_segment_header)
984 + sizeof(mpi_coredump->ide_regs),
985 "IDE Registers");
986 status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
987 IDE_REGS_ADDR, IDE_REGS_CNT);
988 if (status)
989 goto err;
990
991
992 ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
993 NIC1_MBOX_SEG_NUM,
994 sizeof(struct mpi_coredump_segment_header)
995 + sizeof(mpi_coredump->nic1_mbx_regs),
996 "NIC1 MBox Regs");
997 status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
998 NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
999 if (status)
1000 goto err;
1001
1002
1003 ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
1004 SMBUS_SEG_NUM,
1005 sizeof(struct mpi_coredump_segment_header)
1006 + sizeof(mpi_coredump->smbus_regs),
1007 "SMBus Registers");
1008 status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
1009 SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
1010 if (status)
1011 goto err;
1012
1013
1014 ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
1015 FC2_MBOX_SEG_NUM,
1016 sizeof(struct mpi_coredump_segment_header)
1017 + sizeof(mpi_coredump->fc2_mbx_regs),
1018 "FC2 MBox Regs");
1019 status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
1020 FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
1021 if (status)
1022 goto err;
1023
1024
1025 ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
1026 NIC2_MBOX_SEG_NUM,
1027 sizeof(struct mpi_coredump_segment_header)
1028 + sizeof(mpi_coredump->nic2_mbx_regs),
1029 "NIC2 MBox Regs");
1030 status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
1031 NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
1032 if (status)
1033 goto err;
1034
1035
1036 ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
1037 I2C_SEG_NUM,
1038 sizeof(struct mpi_coredump_segment_header)
1039 + sizeof(mpi_coredump->i2c_regs),
1040 "I2C Registers");
1041 status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
1042 I2C_REGS_ADDR, I2C_REGS_CNT);
1043 if (status)
1044 goto err;
1045
1046
1047 ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
1048 MEMC_SEG_NUM,
1049 sizeof(struct mpi_coredump_segment_header)
1050 + sizeof(mpi_coredump->memc_regs),
1051 "MEMC Registers");
1052 status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
1053 MEMC_REGS_ADDR, MEMC_REGS_CNT);
1054 if (status)
1055 goto err;
1056
1057
1058 ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
1059 PBUS_SEG_NUM,
1060 sizeof(struct mpi_coredump_segment_header)
1061 + sizeof(mpi_coredump->pbus_regs),
1062 "PBUS Registers");
1063 status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
1064 PBUS_REGS_ADDR, PBUS_REGS_CNT);
1065 if (status)
1066 goto err;
1067
1068
1069 ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
1070 MDE_SEG_NUM,
1071 sizeof(struct mpi_coredump_segment_header)
1072 + sizeof(mpi_coredump->mde_regs),
1073 "MDE Registers");
1074 status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
1075 MDE_REGS_ADDR, MDE_REGS_CNT);
1076 if (status)
1077 goto err;
1078
1079 ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
1080 MISC_NIC_INFO_SEG_NUM,
1081 sizeof(struct mpi_coredump_segment_header)
1082 + sizeof(mpi_coredump->misc_nic_info),
1083 "MISC NIC INFO");
1084 mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
1085 mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
1086 mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
1087 mpi_coredump->misc_nic_info.function = qdev->func;
1088
1089
1090
1091 ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
1092 INTR_STATES_SEG_NUM,
1093 sizeof(struct mpi_coredump_segment_header)
1094 + sizeof(mpi_coredump->intr_states),
1095 "INTR States");
1096 ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
1097
1098 ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
1099 CAM_ENTRIES_SEG_NUM,
1100 sizeof(struct mpi_coredump_segment_header)
1101 + sizeof(mpi_coredump->cam_entries),
1102 "CAM Entries");
1103 status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
1104 if (status)
1105 goto err;
1106
1107 ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
1108 ROUTING_WORDS_SEG_NUM,
1109 sizeof(struct mpi_coredump_segment_header)
1110 + sizeof(mpi_coredump->nic_routing_words),
1111 "Routing Words");
1112 status = ql_get_routing_entries(qdev,
1113 &mpi_coredump->nic_routing_words[0]);
1114 if (status)
1115 goto err;
1116
1117
1118 ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
1119 ETS_SEG_NUM,
1120 sizeof(struct mpi_coredump_segment_header)
1121 + sizeof(mpi_coredump->ets),
1122 "ETS Registers");
1123 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1124 if (status)
1125 goto err;
1126
1127 ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
1128 PROBE_DUMP_SEG_NUM,
1129 sizeof(struct mpi_coredump_segment_header)
1130 + sizeof(mpi_coredump->probe_dump),
1131 "Probe Dump");
1132 ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
1133
1134 ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
1135 ROUTING_INDEX_SEG_NUM,
1136 sizeof(struct mpi_coredump_segment_header)
1137 + sizeof(mpi_coredump->routing_regs),
1138 "Routing Regs");
1139 status = ql_get_routing_index_registers(qdev,
1140 &mpi_coredump->routing_regs[0]);
1141 if (status)
1142 goto err;
1143
1144 ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
1145 MAC_PROTOCOL_SEG_NUM,
1146 sizeof(struct mpi_coredump_segment_header)
1147 + sizeof(mpi_coredump->mac_prot_regs),
1148 "MAC Prot Regs");
1149 ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
1150
1151
1152 ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
1153 SEM_REGS_SEG_NUM,
1154 sizeof(struct mpi_coredump_segment_header) +
1155 sizeof(mpi_coredump->sem_regs), "Sem Registers");
1156
1157 ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
1158
1159
1160 ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
1161
1162
1163 status = ql_unpause_mpi_risc(qdev);
1164 if (status) {
1165 netif_err(qdev, drv, qdev->ndev,
1166 "Failed RISC unpause. Status = 0x%.08x\n", status);
1167 goto err;
1168 }
1169
1170
1171 status = ql_hard_reset_mpi_risc(qdev);
1172 if (status) {
1173 netif_err(qdev, drv, qdev->ndev,
1174 "Failed RISC reset. Status = 0x%.08x\n", status);
1175 goto err;
1176 }
1177
1178 ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
1179 WCS_RAM_SEG_NUM,
1180 sizeof(struct mpi_coredump_segment_header)
1181 + sizeof(mpi_coredump->code_ram),
1182 "WCS RAM");
1183 status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
1184 CODE_RAM_ADDR, CODE_RAM_CNT);
1185 if (status) {
1186 netif_err(qdev, drv, qdev->ndev,
1187 "Failed Dump of CODE RAM. Status = 0x%.08x\n",
1188 status);
1189 goto err;
1190 }
1191
1192
1193 ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
1194 MEMC_RAM_SEG_NUM,
1195 sizeof(struct mpi_coredump_segment_header)
1196 + sizeof(mpi_coredump->memc_ram),
1197 "MEMC RAM");
1198 status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
1199 MEMC_RAM_ADDR, MEMC_RAM_CNT);
1200 if (status) {
1201 netif_err(qdev, drv, qdev->ndev,
1202 "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
1203 status);
1204 goto err;
1205 }
1206 err:
1207 ql_sem_unlock(qdev, SEM_PROC_REG_MASK);
1208 return status;
1209
1210 }
1211
1212 static void ql_get_core_dump(struct ql_adapter *qdev)
1213 {
1214 if (!ql_own_firmware(qdev)) {
1215 netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
1216 return;
1217 }
1218
1219 if (!netif_running(qdev->ndev)) {
1220 netif_err(qdev, ifup, qdev->ndev,
1221 "Force Coredump can only be done from interface that is up\n");
1222 return;
1223 }
1224 ql_queue_fw_error(qdev);
1225 }
1226
/*
 * ql_gen_reg_dump - fill @mpi_coredump with a short-form register dump.
 *
 * Captures, in order: the global dump header, misc NIC info, the first
 * 64 NIC control registers, interrupt states, CAM entries, routing
 * words and ETS registers.  On a segment-read failure it returns
 * early, leaving the remaining segments of @mpi_coredump unfilled;
 * headers built up to that point are kept.
 */
static void ql_gen_reg_dump(struct ql_adapter *qdev,
			    struct ql_reg_dump *mpi_coredump)
{
	int i, status;

	/* Build the global header that identifies this dump image. */
	memset(&(mpi_coredump->mpi_global_header), 0,
	       sizeof(struct mpi_coredump_global_header));
	mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
	mpi_coredump->mpi_global_header.headerSize =
		sizeof(struct mpi_coredump_global_header);
	mpi_coredump->mpi_global_header.imageSize =
		sizeof(struct ql_reg_dump);
	/* NOTE(review): strncpy zero-pads but will not NUL-terminate if
	 * idString is exactly 12 bytes -- presumably the dump format
	 * treats it as a fixed-width field; confirm against the struct
	 * definition.
	 */
	strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
		sizeof(mpi_coredump->mpi_global_header.idString));

	/* Misc NIC info: ring/vector counts and PCI function number. */
	ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
				     MISC_NIC_INFO_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->misc_nic_info),
				     "MISC NIC INFO");
	mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
	mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
	mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
	mpi_coredump->misc_nic_info.function = qdev->func;

	/* First 64 NIC control registers, read through this
	 * function's own register window.
	 */
	ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
				     NIC1_CONTROL_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->nic_regs),
				     "NIC Registers");

	for (i = 0; i < 64; i++)
		mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));

	/* Per-vector interrupt enable states. */
	ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
				     INTR_STATES_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->intr_states),
				     "INTR States");
	ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);

	ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
				     CAM_ENTRIES_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->cam_entries),
				     "CAM Entries");
	status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
	if (status)
		return;

	ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
				     ROUTING_WORDS_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->nic_routing_words),
				     "Routing Words");
	status = ql_get_routing_entries(qdev,
					&mpi_coredump->nic_routing_words[0]);
	if (status)
		return;

	/* ETS (traffic-class) registers. */
	ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
				     ETS_SEG_NUM,
				     sizeof(struct mpi_coredump_segment_header)
				     + sizeof(mpi_coredump->ets),
				     "ETS Registers");
	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
	if (status)
		return;
}
1303
/*
 * ql_get_dump - fill the caller-supplied buffer with diagnostic data.
 * @qdev: adapter instance
 * @buff: destination buffer; interpreted as a struct ql_mpi_coredump
 *        or a struct ql_reg_dump depending on QL_FRC_COREDUMP -- the
 *        caller is responsible for sizing it accordingly (not checked
 *        here).
 *
 * When forced coredumps are disabled, capture a full MPI core dump
 * into @buff and soft-reset the RISC on success.  When QL_FRC_COREDUMP
 * is set, write the short register dump into @buff and ask the
 * firmware to produce a core dump asynchronously.
 */
void ql_get_dump(struct ql_adapter *qdev, void *buff)
{
	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
		/* ql_core_dump() returns 0 on success. */
		if (!ql_core_dump(qdev, buff))
			ql_soft_reset_mpi_risc(qdev);
		else
			netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
	} else {
		/* Short register snapshot for the user, plus an
		 * asynchronous firmware-driven core dump.
		 */
		ql_gen_reg_dump(qdev, buff);
		ql_get_core_dump(qdev);
	}
}
1325
1326
1327 void ql_mpi_core_to_log(struct work_struct *work)
1328 {
1329 struct ql_adapter *qdev =
1330 container_of(work, struct ql_adapter, mpi_core_to_log.work);
1331 u32 *tmp, count;
1332 int i;
1333
1334 count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
1335 tmp = (u32 *)qdev->mpi_coredump;
1336 netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
1337 "Core is dumping to log file!\n");
1338
1339 for (i = 0; i < count; i += 8) {
1340 pr_err("%.08x: %.08x %.08x %.08x %.08x %.08x "
1341 "%.08x %.08x %.08x\n", i,
1342 tmp[i + 0],
1343 tmp[i + 1],
1344 tmp[i + 2],
1345 tmp[i + 3],
1346 tmp[i + 4],
1347 tmp[i + 5],
1348 tmp[i + 6],
1349 tmp[i + 7]);
1350 msleep(5);
1351 }
1352 }
1353
1354 #ifdef QL_REG_DUMP
1355 static void ql_dump_intr_states(struct ql_adapter *qdev)
1356 {
1357 int i;
1358 u32 value;
1359 for (i = 0; i < qdev->intr_count; i++) {
1360 ql_write32(qdev, INTR_EN, qdev->intr_context[i].intr_read_mask);
1361 value = ql_read32(qdev, INTR_EN);
1362 pr_err("%s: Interrupt %d is %s\n",
1363 qdev->ndev->name, i,
1364 (value & INTR_EN_EN ? "enabled" : "disabled"));
1365 }
1366 }
1367
/*
 * Read one XGMAC register via ql_read_xgmac_reg() and log its name and
 * value.  Wrapped in do/while (0) so the macro is statement-safe.
 * NOTE(review): the return value of ql_read_xgmac_reg() is ignored, so
 * a failed read logs whatever is left in 'data'.
 */
#define DUMP_XGMAC(qdev, reg)						\
do {									\
	u32 data;							\
	ql_read_xgmac_reg(qdev, reg, &data);				\
	pr_err("%s: %s = 0x%.08x\n", qdev->ndev->name, #reg, data);	\
} while (0)

/* Dump the XGMAC pause/config/interrupt registers for this port. */
void ql_dump_xgmac_control_regs(struct ql_adapter *qdev)
{
	/* Serialize XGMAC access via the port's hardware semaphore. */
	if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
		pr_err("%s: Couldn't get xgmac sem\n", __func__);
		return;
	}
	DUMP_XGMAC(qdev, PAUSE_SRC_LO);
	DUMP_XGMAC(qdev, PAUSE_SRC_HI);
	DUMP_XGMAC(qdev, GLOBAL_CFG);
	DUMP_XGMAC(qdev, TX_CFG);
	DUMP_XGMAC(qdev, RX_CFG);
	DUMP_XGMAC(qdev, FLOW_CTL);
	DUMP_XGMAC(qdev, PAUSE_OPCODE);
	DUMP_XGMAC(qdev, PAUSE_TIMER);
	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_LO);
	DUMP_XGMAC(qdev, PAUSE_FRM_DEST_HI);
	DUMP_XGMAC(qdev, MAC_TX_PARAMS);
	DUMP_XGMAC(qdev, MAC_RX_PARAMS);
	DUMP_XGMAC(qdev, MAC_SYS_INT);
	DUMP_XGMAC(qdev, MAC_SYS_INT_MASK);
	DUMP_XGMAC(qdev, MAC_MGMT_INT);
	DUMP_XGMAC(qdev, MAC_MGMT_IN_MASK);
	DUMP_XGMAC(qdev, EXT_ARB_MODE);
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
}
1400
/* Placeholder: ETS register dumping is not implemented. */
static void ql_dump_ets_regs(struct ql_adapter *qdev)
{
}
1404
1405 static void ql_dump_cam_entries(struct ql_adapter *qdev)
1406 {
1407 int i;
1408 u32 value[3];
1409
1410 i = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
1411 if (i)
1412 return;
1413 for (i = 0; i < 4; i++) {
1414 if (ql_get_mac_addr_reg(qdev, MAC_ADDR_TYPE_CAM_MAC, i, value)) {
1415 pr_err("%s: Failed read of mac index register\n",
1416 __func__);
1417 return;
1418 } else {
1419 if (value[0])
1420 pr_err("%s: CAM index %d CAM Lookup Lower = 0x%.08x:%.08x, Output = 0x%.08x\n",
1421 qdev->ndev->name, i, value[1], value[0],
1422 value[2]);
1423 }
1424 }
1425 for (i = 0; i < 32; i++) {
1426 if (ql_get_mac_addr_reg
1427 (qdev, MAC_ADDR_TYPE_MULTI_MAC, i, value)) {
1428 pr_err("%s: Failed read of mac index register\n",
1429 __func__);
1430 return;
1431 } else {
1432 if (value[0])
1433 pr_err("%s: MCAST index %d CAM Lookup Lower = 0x%.08x:%.08x\n",
1434 qdev->ndev->name, i, value[1], value[0]);
1435 }
1436 }
1437 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1438 }
1439
1440 void ql_dump_routing_entries(struct ql_adapter *qdev)
1441 {
1442 int i;
1443 u32 value;
1444 i = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
1445 if (i)
1446 return;
1447 for (i = 0; i < 16; i++) {
1448 value = 0;
1449 if (ql_get_routing_reg(qdev, i, &value)) {
1450 pr_err("%s: Failed read of routing index register\n",
1451 __func__);
1452 return;
1453 } else {
1454 if (value)
1455 pr_err("%s: Routing Mask %d = 0x%.08x\n",
1456 qdev->ndev->name, i, value);
1457 }
1458 }
1459 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
1460 }
1461
/* Log one named NIC control register and its value. */
#define DUMP_REG(qdev, reg) \
	pr_err("%-32s= 0x%x\n", #reg, ql_read32(qdev, reg))

/*
 * ql_dump_regs - dump the NIC control register block plus the
 * interrupt, XGMAC, ETS, CAM and routing state for this function.
 */
void ql_dump_regs(struct ql_adapter *qdev)
{
	pr_err("reg dump for function #%d\n", qdev->func);
	DUMP_REG(qdev, SYS);
	DUMP_REG(qdev, RST_FO);
	DUMP_REG(qdev, FSC);
	DUMP_REG(qdev, CSR);
	DUMP_REG(qdev, ICB_RID);
	DUMP_REG(qdev, ICB_L);
	DUMP_REG(qdev, ICB_H);
	DUMP_REG(qdev, CFG);
	DUMP_REG(qdev, BIOS_ADDR);
	DUMP_REG(qdev, STS);
	DUMP_REG(qdev, INTR_EN);
	DUMP_REG(qdev, INTR_MASK);
	DUMP_REG(qdev, ISR1);
	DUMP_REG(qdev, ISR2);
	DUMP_REG(qdev, ISR3);
	DUMP_REG(qdev, ISR4);
	DUMP_REG(qdev, REV_ID);
	DUMP_REG(qdev, FRC_ECC_ERR);
	DUMP_REG(qdev, ERR_STS);
	DUMP_REG(qdev, RAM_DBG_ADDR);
	DUMP_REG(qdev, RAM_DBG_DATA);
	DUMP_REG(qdev, ECC_ERR_CNT);
	DUMP_REG(qdev, SEM);
	DUMP_REG(qdev, GPIO_1);
	DUMP_REG(qdev, GPIO_2);
	DUMP_REG(qdev, GPIO_3);
	DUMP_REG(qdev, XGMAC_ADDR);
	DUMP_REG(qdev, XGMAC_DATA);
	DUMP_REG(qdev, NIC_ETS);
	DUMP_REG(qdev, CNA_ETS);
	DUMP_REG(qdev, FLASH_ADDR);
	DUMP_REG(qdev, FLASH_DATA);
	DUMP_REG(qdev, CQ_STOP);
	DUMP_REG(qdev, PAGE_TBL_RID);
	DUMP_REG(qdev, WQ_PAGE_TBL_LO);
	DUMP_REG(qdev, WQ_PAGE_TBL_HI);
	DUMP_REG(qdev, CQ_PAGE_TBL_LO);
	DUMP_REG(qdev, CQ_PAGE_TBL_HI);
	DUMP_REG(qdev, COS_DFLT_CQ1);
	DUMP_REG(qdev, COS_DFLT_CQ2);
	DUMP_REG(qdev, SPLT_HDR);
	DUMP_REG(qdev, FC_PAUSE_THRES);
	DUMP_REG(qdev, NIC_PAUSE_THRES);
	DUMP_REG(qdev, FC_ETHERTYPE);
	DUMP_REG(qdev, FC_RCV_CFG);
	DUMP_REG(qdev, NIC_RCV_CFG);
	DUMP_REG(qdev, FC_COS_TAGS);
	DUMP_REG(qdev, NIC_COS_TAGS);
	DUMP_REG(qdev, MGMT_RCV_CFG);
	DUMP_REG(qdev, XG_SERDES_ADDR);
	DUMP_REG(qdev, XG_SERDES_DATA);
	DUMP_REG(qdev, PRB_MX_ADDR);
	DUMP_REG(qdev, PRB_MX_DATA);
	/* Follow up with the structured per-subsystem dumps. */
	ql_dump_intr_states(qdev);
	ql_dump_xgmac_control_regs(qdev);
	ql_dump_ets_regs(qdev);
	ql_dump_cam_entries(qdev);
	ql_dump_routing_entries(qdev);
}
1527 #endif
1528
1529 #ifdef QL_STAT_DUMP
1530
1531 #define DUMP_STAT(qdev, stat) \
1532 pr_err("%s = %ld\n", #stat, (unsigned long)qdev->nic_stats.stat)
1533
1534 void ql_dump_stat(struct ql_adapter *qdev)
1535 {
1536 pr_err("%s: Enter\n", __func__);
1537 DUMP_STAT(qdev, tx_pkts);
1538 DUMP_STAT(qdev, tx_bytes);
1539 DUMP_STAT(qdev, tx_mcast_pkts);
1540 DUMP_STAT(qdev, tx_bcast_pkts);
1541 DUMP_STAT(qdev, tx_ucast_pkts);
1542 DUMP_STAT(qdev, tx_ctl_pkts);
1543 DUMP_STAT(qdev, tx_pause_pkts);
1544 DUMP_STAT(qdev, tx_64_pkt);
1545 DUMP_STAT(qdev, tx_65_to_127_pkt);
1546 DUMP_STAT(qdev, tx_128_to_255_pkt);
1547 DUMP_STAT(qdev, tx_256_511_pkt);
1548 DUMP_STAT(qdev, tx_512_to_1023_pkt);
1549 DUMP_STAT(qdev, tx_1024_to_1518_pkt);
1550 DUMP_STAT(qdev, tx_1519_to_max_pkt);
1551 DUMP_STAT(qdev, tx_undersize_pkt);
1552 DUMP_STAT(qdev, tx_oversize_pkt);
1553 DUMP_STAT(qdev, rx_bytes);
1554 DUMP_STAT(qdev, rx_bytes_ok);
1555 DUMP_STAT(qdev, rx_pkts);
1556 DUMP_STAT(qdev, rx_pkts_ok);
1557 DUMP_STAT(qdev, rx_bcast_pkts);
1558 DUMP_STAT(qdev, rx_mcast_pkts);
1559 DUMP_STAT(qdev, rx_ucast_pkts);
1560 DUMP_STAT(qdev, rx_undersize_pkts);
1561 DUMP_STAT(qdev, rx_oversize_pkts);
1562 DUMP_STAT(qdev, rx_jabber_pkts);
1563 DUMP_STAT(qdev, rx_undersize_fcerr_pkts);
1564 DUMP_STAT(qdev, rx_drop_events);
1565 DUMP_STAT(qdev, rx_fcerr_pkts);
1566 DUMP_STAT(qdev, rx_align_err);
1567 DUMP_STAT(qdev, rx_symbol_err);
1568 DUMP_STAT(qdev, rx_mac_err);
1569 DUMP_STAT(qdev, rx_ctl_pkts);
1570 DUMP_STAT(qdev, rx_pause_pkts);
1571 DUMP_STAT(qdev, rx_64_pkts);
1572 DUMP_STAT(qdev, rx_65_to_127_pkts);
1573 DUMP_STAT(qdev, rx_128_255_pkts);
1574 DUMP_STAT(qdev, rx_256_511_pkts);
1575 DUMP_STAT(qdev, rx_512_to_1023_pkts);
1576 DUMP_STAT(qdev, rx_1024_to_1518_pkts);
1577 DUMP_STAT(qdev, rx_1519_to_max_pkts);
1578 DUMP_STAT(qdev, rx_len_err_pkts);
1579 };
1580 #endif
1581
1582 #ifdef QL_DEV_DUMP
1583
1584 #define DUMP_QDEV_FIELD(qdev, type, field) \
1585 pr_err("qdev->%-24s = " type "\n", #field, qdev->field)
1586 #define DUMP_QDEV_DMA_FIELD(qdev, field) \
1587 pr_err("qdev->%-24s = %llx\n", #field, (unsigned long long)qdev->field)
1588 #define DUMP_QDEV_ARRAY(qdev, type, array, index, field) \
1589 pr_err("%s[%d].%s = " type "\n", \
1590 #array, index, #field, qdev->array[index].field);
1591 void ql_dump_qdev(struct ql_adapter *qdev)
1592 {
1593 int i;
1594 DUMP_QDEV_FIELD(qdev, "%lx", flags);
1595 DUMP_QDEV_FIELD(qdev, "%p", vlgrp);
1596 DUMP_QDEV_FIELD(qdev, "%p", pdev);
1597 DUMP_QDEV_FIELD(qdev, "%p", ndev);
1598 DUMP_QDEV_FIELD(qdev, "%d", chip_rev_id);
1599 DUMP_QDEV_FIELD(qdev, "%p", reg_base);
1600 DUMP_QDEV_FIELD(qdev, "%p", doorbell_area);
1601 DUMP_QDEV_FIELD(qdev, "%d", doorbell_area_size);
1602 DUMP_QDEV_FIELD(qdev, "%x", msg_enable);
1603 DUMP_QDEV_FIELD(qdev, "%p", rx_ring_shadow_reg_area);
1604 DUMP_QDEV_DMA_FIELD(qdev, rx_ring_shadow_reg_dma);
1605 DUMP_QDEV_FIELD(qdev, "%p", tx_ring_shadow_reg_area);
1606 DUMP_QDEV_DMA_FIELD(qdev, tx_ring_shadow_reg_dma);
1607 DUMP_QDEV_FIELD(qdev, "%d", intr_count);
1608 if (qdev->msi_x_entry)
1609 for (i = 0; i < qdev->intr_count; i++) {
1610 DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, vector);
1611 DUMP_QDEV_ARRAY(qdev, "%d", msi_x_entry, i, entry);
1612 }
1613 for (i = 0; i < qdev->intr_count; i++) {
1614 DUMP_QDEV_ARRAY(qdev, "%p", intr_context, i, qdev);
1615 DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, intr);
1616 DUMP_QDEV_ARRAY(qdev, "%d", intr_context, i, hooked);
1617 DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_en_mask);
1618 DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_dis_mask);
1619 DUMP_QDEV_ARRAY(qdev, "0x%08x", intr_context, i, intr_read_mask);
1620 }
1621 DUMP_QDEV_FIELD(qdev, "%d", tx_ring_count);
1622 DUMP_QDEV_FIELD(qdev, "%d", rx_ring_count);
1623 DUMP_QDEV_FIELD(qdev, "%d", ring_mem_size);
1624 DUMP_QDEV_FIELD(qdev, "%p", ring_mem);
1625 DUMP_QDEV_FIELD(qdev, "%d", intr_count);
1626 DUMP_QDEV_FIELD(qdev, "%p", tx_ring);
1627 DUMP_QDEV_FIELD(qdev, "%d", rss_ring_count);
1628 DUMP_QDEV_FIELD(qdev, "%p", rx_ring);
1629 DUMP_QDEV_FIELD(qdev, "%d", default_rx_queue);
1630 DUMP_QDEV_FIELD(qdev, "0x%08x", xg_sem_mask);
1631 DUMP_QDEV_FIELD(qdev, "0x%08x", port_link_up);
1632 DUMP_QDEV_FIELD(qdev, "0x%08x", port_init);
1633 }
1634 #endif
1635
1636 #ifdef QL_CB_DUMP
/* Dump a work-queue init control block, converting the little-endian
 * fields to host order for display.
 */
void ql_dump_wqicb(struct wqicb *wqicb)
{
	pr_err("Dumping wqicb stuff...\n");
	pr_err("wqicb->len = 0x%x\n", le16_to_cpu(wqicb->len));
	pr_err("wqicb->flags = %x\n", le16_to_cpu(wqicb->flags));
	pr_err("wqicb->cq_id_rss = %d\n",
	       le16_to_cpu(wqicb->cq_id_rss));
	pr_err("wqicb->rid = 0x%x\n", le16_to_cpu(wqicb->rid));
	pr_err("wqicb->wq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(wqicb->addr));
	pr_err("wqicb->wq_cnsmr_idx_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(wqicb->cnsmr_idx_addr));
}
1650
1651 void ql_dump_tx_ring(struct tx_ring *tx_ring)
1652 {
1653 if (tx_ring == NULL)
1654 return;
1655 pr_err("===================== Dumping tx_ring %d ===============\n",
1656 tx_ring->wq_id);
1657 pr_err("tx_ring->base = %p\n", tx_ring->wq_base);
1658 pr_err("tx_ring->base_dma = 0x%llx\n",
1659 (unsigned long long) tx_ring->wq_base_dma);
1660 pr_err("tx_ring->cnsmr_idx_sh_reg, addr = 0x%p, value = %d\n",
1661 tx_ring->cnsmr_idx_sh_reg,
1662 tx_ring->cnsmr_idx_sh_reg
1663 ? ql_read_sh_reg(tx_ring->cnsmr_idx_sh_reg) : 0);
1664 pr_err("tx_ring->size = %d\n", tx_ring->wq_size);
1665 pr_err("tx_ring->len = %d\n", tx_ring->wq_len);
1666 pr_err("tx_ring->prod_idx_db_reg = %p\n", tx_ring->prod_idx_db_reg);
1667 pr_err("tx_ring->valid_db_reg = %p\n", tx_ring->valid_db_reg);
1668 pr_err("tx_ring->prod_idx = %d\n", tx_ring->prod_idx);
1669 pr_err("tx_ring->cq_id = %d\n", tx_ring->cq_id);
1670 pr_err("tx_ring->wq_id = %d\n", tx_ring->wq_id);
1671 pr_err("tx_ring->q = %p\n", tx_ring->q);
1672 pr_err("tx_ring->tx_count = %d\n", atomic_read(&tx_ring->tx_count));
1673 }
1674
/* Dump an RSS init control block: flags, mask, CQ map and hash keys. */
void ql_dump_ricb(struct ricb *ricb)
{
	int i;
	pr_err("===================== Dumping ricb ===============\n");
	pr_err("Dumping ricb stuff...\n");

	pr_err("ricb->base_cq = %d\n", ricb->base_cq & 0x1f);
	/* Note: RSS_L4K lives in base_cq, not flags -- the first test
	 * below deliberately reads base_cq.
	 */
	pr_err("ricb->flags = %s%s%s%s%s%s%s%s%s\n",
	       ricb->base_cq & RSS_L4K ? "RSS_L4K " : "",
	       ricb->flags & RSS_L6K ? "RSS_L6K " : "",
	       ricb->flags & RSS_LI ? "RSS_LI " : "",
	       ricb->flags & RSS_LB ? "RSS_LB " : "",
	       ricb->flags & RSS_LM ? "RSS_LM " : "",
	       ricb->flags & RSS_RI4 ? "RSS_RI4 " : "",
	       ricb->flags & RSS_RT4 ? "RSS_RT4 " : "",
	       ricb->flags & RSS_RI6 ? "RSS_RI6 " : "",
	       ricb->flags & RSS_RT6 ? "RSS_RT6 " : "");
	pr_err("ricb->mask = 0x%.04x\n", le16_to_cpu(ricb->mask));
	for (i = 0; i < 16; i++)
		pr_err("ricb->hash_cq_id[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->hash_cq_id[i]));
	for (i = 0; i < 10; i++)
		pr_err("ricb->ipv6_hash_key[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->ipv6_hash_key[i]));
	for (i = 0; i < 4; i++)
		pr_err("ricb->ipv4_hash_key[%d] = 0x%.08x\n", i,
		       le32_to_cpu(ricb->ipv4_hash_key[i]));
}
1703
/* Dump a completion-queue init control block, converting the
 * little-endian fields to host order for display.
 */
void ql_dump_cqicb(struct cqicb *cqicb)
{
	pr_err("Dumping cqicb stuff...\n");

	pr_err("cqicb->msix_vect = %d\n", cqicb->msix_vect);
	pr_err("cqicb->flags = %x\n", cqicb->flags);
	pr_err("cqicb->len = %d\n", le16_to_cpu(cqicb->len));
	pr_err("cqicb->addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->addr));
	pr_err("cqicb->prod_idx_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->prod_idx_addr));
	pr_err("cqicb->pkt_delay = 0x%.04x\n",
	       le16_to_cpu(cqicb->pkt_delay));
	pr_err("cqicb->irq_delay = 0x%.04x\n",
	       le16_to_cpu(cqicb->irq_delay));
	/* Large buffer queue parameters. */
	pr_err("cqicb->lbq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->lbq_addr));
	pr_err("cqicb->lbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->lbq_buf_size));
	pr_err("cqicb->lbq_len = 0x%.04x\n",
	       le16_to_cpu(cqicb->lbq_len));
	/* Small buffer queue parameters. */
	pr_err("cqicb->sbq_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(cqicb->sbq_addr));
	pr_err("cqicb->sbq_buf_size = 0x%.04x\n",
	       le16_to_cpu(cqicb->sbq_buf_size));
	pr_err("cqicb->sbq_len = 0x%.04x\n",
	       le16_to_cpu(cqicb->sbq_len));
}
1732
/* Dump the software state of one RX (completion-queue) ring,
 * including its large- and small-buffer queues.
 */
void ql_dump_rx_ring(struct rx_ring *rx_ring)
{
	if (rx_ring == NULL)
		return;
	pr_err("===================== Dumping rx_ring %d ===============\n",
	       rx_ring->cq_id);
	pr_err("Dumping rx_ring %d, type = %s%s%s\n",
	       rx_ring->cq_id, rx_ring->type == DEFAULT_Q ? "DEFAULT" : "",
	       rx_ring->type == TX_Q ? "OUTBOUND COMPLETIONS" : "",
	       rx_ring->type == RX_Q ? "INBOUND_COMPLETIONS" : "");
	/* Completion queue state. */
	pr_err("rx_ring->cqicb = %p\n", &rx_ring->cqicb);
	pr_err("rx_ring->cq_base = %p\n", rx_ring->cq_base);
	pr_err("rx_ring->cq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->cq_base_dma);
	pr_err("rx_ring->cq_size = %d\n", rx_ring->cq_size);
	pr_err("rx_ring->cq_len = %d\n", rx_ring->cq_len);
	pr_err("rx_ring->prod_idx_sh_reg, addr = 0x%p, value = %d\n",
	       rx_ring->prod_idx_sh_reg,
	       rx_ring->prod_idx_sh_reg
	       ? ql_read_sh_reg(rx_ring->prod_idx_sh_reg) : 0);
	pr_err("rx_ring->prod_idx_sh_reg_dma = %llx\n",
	       (unsigned long long) rx_ring->prod_idx_sh_reg_dma);
	pr_err("rx_ring->cnsmr_idx_db_reg = %p\n",
	       rx_ring->cnsmr_idx_db_reg);
	pr_err("rx_ring->cnsmr_idx = %d\n", rx_ring->cnsmr_idx);
	pr_err("rx_ring->curr_entry = %p\n", rx_ring->curr_entry);
	pr_err("rx_ring->valid_db_reg = %p\n", rx_ring->valid_db_reg);

	/* Large buffer queue state. */
	pr_err("rx_ring->lbq_base = %p\n", rx_ring->lbq_base);
	pr_err("rx_ring->lbq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->lbq_base_dma);
	pr_err("rx_ring->lbq_base_indirect = %p\n",
	       rx_ring->lbq_base_indirect);
	pr_err("rx_ring->lbq_base_indirect_dma = %llx\n",
	       (unsigned long long) rx_ring->lbq_base_indirect_dma);
	pr_err("rx_ring->lbq = %p\n", rx_ring->lbq);
	pr_err("rx_ring->lbq_len = %d\n", rx_ring->lbq_len);
	pr_err("rx_ring->lbq_size = %d\n", rx_ring->lbq_size);
	pr_err("rx_ring->lbq_prod_idx_db_reg = %p\n",
	       rx_ring->lbq_prod_idx_db_reg);
	pr_err("rx_ring->lbq_prod_idx = %d\n", rx_ring->lbq_prod_idx);
	pr_err("rx_ring->lbq_curr_idx = %d\n", rx_ring->lbq_curr_idx);
	pr_err("rx_ring->lbq_clean_idx = %d\n", rx_ring->lbq_clean_idx);
	pr_err("rx_ring->lbq_free_cnt = %d\n", rx_ring->lbq_free_cnt);
	pr_err("rx_ring->lbq_buf_size = %d\n", rx_ring->lbq_buf_size);

	/* Small buffer queue state. */
	pr_err("rx_ring->sbq_base = %p\n", rx_ring->sbq_base);
	pr_err("rx_ring->sbq_base_dma = %llx\n",
	       (unsigned long long) rx_ring->sbq_base_dma);
	pr_err("rx_ring->sbq_base_indirect = %p\n",
	       rx_ring->sbq_base_indirect);
	pr_err("rx_ring->sbq_base_indirect_dma = %llx\n",
	       (unsigned long long) rx_ring->sbq_base_indirect_dma);
	pr_err("rx_ring->sbq = %p\n", rx_ring->sbq);
	pr_err("rx_ring->sbq_len = %d\n", rx_ring->sbq_len);
	pr_err("rx_ring->sbq_size = %d\n", rx_ring->sbq_size);
	pr_err("rx_ring->sbq_prod_idx_db_reg addr = %p\n",
	       rx_ring->sbq_prod_idx_db_reg);
	pr_err("rx_ring->sbq_prod_idx = %d\n", rx_ring->sbq_prod_idx);
	pr_err("rx_ring->sbq_curr_idx = %d\n", rx_ring->sbq_curr_idx);
	pr_err("rx_ring->sbq_clean_idx = %d\n", rx_ring->sbq_clean_idx);
	pr_err("rx_ring->sbq_free_cnt = %d\n", rx_ring->sbq_free_cnt);
	pr_err("rx_ring->sbq_buf_size = %d\n", rx_ring->sbq_buf_size);
	pr_err("rx_ring->cq_id = %d\n", rx_ring->cq_id);
	pr_err("rx_ring->irq = %d\n", rx_ring->irq);
	pr_err("rx_ring->cpu = %d\n", rx_ring->cpu);
	pr_err("rx_ring->qdev = %p\n", rx_ring->qdev);
}
1801
1802 void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id)
1803 {
1804 void *ptr;
1805
1806 pr_err("%s: Enter\n", __func__);
1807
1808 ptr = kmalloc(size, GFP_ATOMIC);
1809 if (ptr == NULL)
1810 return;
1811
1812 if (ql_write_cfg(qdev, ptr, size, bit, q_id)) {
1813 pr_err("%s: Failed to upload control block!\n", __func__);
1814 goto fail_it;
1815 }
1816 switch (bit) {
1817 case CFG_DRQ:
1818 ql_dump_wqicb((struct wqicb *)ptr);
1819 break;
1820 case CFG_DCQ:
1821 ql_dump_cqicb((struct cqicb *)ptr);
1822 break;
1823 case CFG_DR:
1824 ql_dump_ricb((struct ricb *)ptr);
1825 break;
1826 default:
1827 pr_err("%s: Invalid bit value = %x\n", __func__, bit);
1828 break;
1829 }
1830 fail_it:
1831 kfree(ptr);
1832 }
1833 #endif
1834
1835 #ifdef QL_OB_DUMP
1836 void ql_dump_tx_desc(struct tx_buf_desc *tbd)
1837 {
1838 pr_err("tbd->addr = 0x%llx\n",
1839 le64_to_cpu((u64) tbd->addr));
1840 pr_err("tbd->len = %d\n",
1841 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1842 pr_err("tbd->flags = %s %s\n",
1843 tbd->len & TX_DESC_C ? "C" : ".",
1844 tbd->len & TX_DESC_E ? "E" : ".");
1845 tbd++;
1846 pr_err("tbd->addr = 0x%llx\n",
1847 le64_to_cpu((u64) tbd->addr));
1848 pr_err("tbd->len = %d\n",
1849 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1850 pr_err("tbd->flags = %s %s\n",
1851 tbd->len & TX_DESC_C ? "C" : ".",
1852 tbd->len & TX_DESC_E ? "E" : ".");
1853 tbd++;
1854 pr_err("tbd->addr = 0x%llx\n",
1855 le64_to_cpu((u64) tbd->addr));
1856 pr_err("tbd->len = %d\n",
1857 le32_to_cpu(tbd->len & TX_DESC_LEN_MASK));
1858 pr_err("tbd->flags = %s %s\n",
1859 tbd->len & TX_DESC_C ? "C" : ".",
1860 tbd->len & TX_DESC_E ? "E" : ".");
1861
1862 }
1863
/*
 * Dump an outbound MAC/TSO IOCB request.  The same memory is viewed
 * both as a plain MAC IOCB and as a TSO IOCB; the opcode decides
 * which fields (and field widths) are valid.
 */
void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
{
	struct ob_mac_tso_iocb_req *ob_mac_tso_iocb =
	    (struct ob_mac_tso_iocb_req *)ob_mac_iocb;
	struct tx_buf_desc *tbd;
	u16 frame_len;

	pr_err("%s\n", __func__);
	pr_err("opcode = %s\n",
	       (ob_mac_iocb->opcode == OPCODE_OB_MAC_IOCB) ? "MAC" : "TSO");
	pr_err("flags1 = %s %s %s %s %s\n",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_OI ? "OI" : "",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_I ? "I" : "",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_D ? "D" : "",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP4 ? "IP4" : "",
	       ob_mac_tso_iocb->flags1 & OB_MAC_TSO_IOCB_IP6 ? "IP6" : "");
	pr_err("flags2 = %s %s %s\n",
	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
	       ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
	pr_err("flags3 = %s %s %s\n",
	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
	       ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
	pr_err("tid = %x\n", ob_mac_iocb->tid);
	pr_err("txq_idx = %d\n", ob_mac_iocb->txq_idx);
	pr_err("vlan_tci = %x\n", ob_mac_tso_iocb->vlan_tci);
	if (ob_mac_iocb->opcode == OPCODE_OB_MAC_TSO_IOCB) {
		/* TSO variant: 32-bit frame length plus MSS/header info. */
		pr_err("frame_len = %d\n",
		       le32_to_cpu(ob_mac_tso_iocb->frame_len));
		pr_err("mss = %d\n",
		       le16_to_cpu(ob_mac_tso_iocb->mss));
		pr_err("prot_hdr_len = %d\n",
		       le16_to_cpu(ob_mac_tso_iocb->total_hdrs_len));
		pr_err("hdr_offset = 0x%.04x\n",
		       le16_to_cpu(ob_mac_tso_iocb->net_trans_offset));
		/* NOTE(review): frame_len is u16, so a 32-bit TSO frame
		 * length is truncated here -- only used for the local
		 * variable, which is currently not read afterwards.
		 */
		frame_len = le32_to_cpu(ob_mac_tso_iocb->frame_len);
	} else {
		/* Plain MAC variant: 16-bit frame length. */
		pr_err("frame_len = %d\n",
		       le16_to_cpu(ob_mac_iocb->frame_len));
		frame_len = le16_to_cpu(ob_mac_iocb->frame_len);
	}
	tbd = &ob_mac_iocb->tbd[0];
	ql_dump_tx_desc(tbd);
}
1909
1910 void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp)
1911 {
1912 pr_err("%s\n", __func__);
1913 pr_err("opcode = %d\n", ob_mac_rsp->opcode);
1914 pr_err("flags = %s %s %s %s %s %s %s\n",
1915 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_OI ? "OI" : ".",
1916 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_I ? "I" : ".",
1917 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_E ? "E" : ".",
1918 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_S ? "S" : ".",
1919 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_L ? "L" : ".",
1920 ob_mac_rsp->flags1 & OB_MAC_IOCB_RSP_P ? "P" : ".",
1921 ob_mac_rsp->flags2 & OB_MAC_IOCB_RSP_B ? "B" : ".");
1922 pr_err("tid = %x\n", ob_mac_rsp->tid);
1923 }
1924 #endif
1925
1926 #ifdef QL_IB_DUMP
/*
 * Dump an inbound MAC IOCB response: status flags, RSS, VLAN and
 * header-split information.  Multi-bit fields (M, ERR, RSS) are
 * compared against each possible value of their mask.
 */
void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp)
{
	pr_err("%s\n", __func__);
	pr_err("opcode = 0x%x\n", ib_mac_rsp->opcode);
	pr_err("flags1 = %s%s%s%s%s%s\n",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_OI ? "OI " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_I ? "I " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_TE ? "TE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_NU ? "NU " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_IE ? "IE " : "",
	       ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_B ? "B " : "");

	/* Multicast match type, if any. */
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK)
		pr_err("%s%s%s Multicast\n",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
		       (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
		       IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");

	pr_err("flags2 = %s%s%s%s%s\n",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) ? "P " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? "V " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) ? "U " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) ? "T " : "",
	       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_FO) ? "FO " : "");

	/* Receive error type, if any. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK)
		pr_err("%s%s%s%s%s error\n",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_OVERSIZE ? "oversize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_UNDERSIZE ? "undersize" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_PREAMBLE ? "preamble" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_FRAME_LEN ? "frame length" : "",
		       (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) ==
		       IB_MAC_IOCB_RSP_ERR_CRC ? "CRC" : "");

	pr_err("flags3 = %s%s\n",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS ? "DS " : "",
	       ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL ? "DL " : "");

	/* RSS hash type, if any. */
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("RSS flags = %s%s%s%s\n",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV4) ? "IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_IPV6) ? "IPv6 RSS " : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V4) ? "TCP/IPv4 RSS" : "",
		       ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK) ==
			IB_MAC_IOCB_RSP_M_TCP_V6) ? "TCP/IPv6 RSS" : "");

	pr_err("data_len = %d\n",
	       le32_to_cpu(ib_mac_rsp->data_len));
	pr_err("data_addr = 0x%llx\n",
	       (unsigned long long) le64_to_cpu(ib_mac_rsp->data_addr));
	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_RSS_MASK)
		pr_err("rss = %x\n",
		       le32_to_cpu(ib_mac_rsp->rss));
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)
		pr_err("vlan_id = %x\n",
		       le16_to_cpu(ib_mac_rsp->vlan_id));

	pr_err("flags4 = %s%s%s\n",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV ? "HV " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS ? "HS " : "",
	       ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HL ? "HL " : "");

	/* Header-split buffer, only valid when HV is set. */
	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
		pr_err("hdr length = %d\n",
		       le32_to_cpu(ib_mac_rsp->hdr_len));
		pr_err("hdr addr = 0x%llx\n",
		       (unsigned long long) le64_to_cpu(ib_mac_rsp->hdr_addr));
	}
}
2006 #endif
2007
2008 #ifdef QL_ALL_DUMP
2009 void ql_dump_all(struct ql_adapter *qdev)
2010 {
2011 int i;
2012
2013 QL_DUMP_REGS(qdev);
2014 QL_DUMP_QDEV(qdev);
2015 for (i = 0; i < qdev->tx_ring_count; i++) {
2016 QL_DUMP_TX_RING(&qdev->tx_ring[i]);
2017 QL_DUMP_WQICB((struct wqicb *)&qdev->tx_ring[i]);
2018 }
2019 for (i = 0; i < qdev->rx_ring_count; i++) {
2020 QL_DUMP_RX_RING(&qdev->rx_ring[i]);
2021 QL_DUMP_CQICB((struct cqicb *)&qdev->rx_ring[i]);
2022 }
2023 }
2024 #endif