This source file includes following definitions.
- qlcnic_82xx_get_saved_state
- qlcnic_82xx_set_saved_state
- qlcnic_82xx_cache_tmpl_hdr_values
- qlcnic_82xx_get_cap_size
- qlcnic_82xx_set_sys_info
- qlcnic_82xx_store_cap_mask
- qlcnic_83xx_get_saved_state
- qlcnic_83xx_set_saved_state
- qlcnic_83xx_cache_tmpl_hdr_values
- qlcnic_83xx_get_cap_size
- qlcnic_83xx_set_sys_info
- qlcnic_83xx_store_cap_mask
- qlcnic_dump_crb
- qlcnic_dump_ctrl
- qlcnic_dump_mux
- qlcnic_dump_que
- qlcnic_dump_ocm
- qlcnic_read_rom
- qlcnic_dump_l1_cache
- qlcnic_dump_l2_cache
- qlcnic_read_memory_test_agent
- qlcnic_start_pex_dma
- qlcnic_read_memory_pexdma
- qlcnic_read_memory
- qlcnic_dump_nop
- qlcnic_valid_dump_entry
- qlcnic_read_pollrdmwr
- qlcnic_read_pollrd
- qlcnic_read_mux2
- qlcnic_83xx_dump_rom
- qlcnic_temp_checksum
- qlcnic_fw_flash_get_minidump_temp
- qlcnic_fw_flash_get_minidump_temp_size
- qlcnic_fw_get_minidump_temp_size
- __qlcnic_fw_cmd_get_minidump_temp
- qlcnic_fw_cmd_get_minidump_temp
- qlcnic_dump_fw
- qlcnic_83xx_md_check_extended_dump_capability
- qlcnic_83xx_get_minidump_template
1
2
3
4
5
6
7
8 #include <net/ip.h>
9
10 #include "qlcnic.h"
11 #include "qlcnic_hdr.h"
12 #include "qlcnic_83xx_hw.h"
13 #include "qlcnic_hw.h"
14
/* Flash offset of the 83xx minidump template. */
#define QLC_83XX_MINIDUMP_FLASH 0x520000
/* Indices into the template header's saved_state[] array (DMA engine is
 * read via qlcnic_get_saved_state(); OCM/PCI presumably used elsewhere).
 */
#define QLC_83XX_OCM_INDEX 3
#define QLC_83XX_PCI_INDEX 0
#define QLC_83XX_DMA_ENGINE_INDEX 8
19
/* Data registers of the memory test agent; one agent read returns four
 * 32-bit words through these addresses (see
 * qlcnic_read_memory_test_agent()).
 */
static const u32 qlcnic_ms_read_data[] = {
        0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC
};
23
/* Opcode bits of a control entry (struct __ctrl), applied one at a time
 * by qlcnic_dump_ctrl().  QLCNIC_DUMP_SKIP shares BIT_7 but is a flag in
 * the common entry header (set by qlcnic_dump_nop()), not a ctrl opcode.
 */
#define QLCNIC_DUMP_WCRB BIT_0
#define QLCNIC_DUMP_RWCRB BIT_1
#define QLCNIC_DUMP_ANDCRB BIT_2
#define QLCNIC_DUMP_ORCRB BIT_3
#define QLCNIC_DUMP_POLLCRB BIT_4
#define QLCNIC_DUMP_RD_SAVE BIT_5
#define QLCNIC_DUMP_WRT_SAVED BIT_6
#define QLCNIC_DUMP_MOD_SAVE_ST BIT_7
#define QLCNIC_DUMP_SKIP BIT_7

/* Presumably the upper bound for a user-configured capture mask —
 * not referenced in this part of the file; confirm against callers.
 */
#define QLCNIC_DUMP_MASK_MAX 0xff
35
/* PEX DMA command descriptor.  Written into MS memory in 16-byte units
 * (see qlcnic_read_memory_pexdma()) to describe one DMA transfer from
 * adapter memory to the host bounce buffer.
 */
struct qlcnic_pex_dma_descriptor {
        u32 read_data_size;
        u32 dma_desc_cmd;
        u32 src_addr_low;
        u32 src_addr_high;
        u32 dma_bus_addr_low;
        u32 dma_bus_addr_high;
        u32 rsvd[6];
} __packed;
45
/* Header shared by all minidump template entries.  The endian-
 * conditional member order keeps the byte positions of mask/flags the
 * same on big- and little-endian hosts.
 */
struct qlcnic_common_entry_hdr {
        u32 type;               /* opcode, see enum qlcnic_minidump_opcode */
        u32 offset;
        u32 cap_size;           /* expected capture size in bytes */
#if defined(__LITTLE_ENDIAN)
        u8 mask;
        u8 rsvd[2];
        u8 flags;               /* e.g. QLCNIC_DUMP_SKIP */
#else
        u8 flags;
        u8 rsvd[2];
        u8 mask;
#endif
} __packed;
60
/* Template layout of a CRB register-range entry (qlcnic_dump_crb()). */
struct __crb {
        u32 addr;
#if defined(__LITTLE_ENDIAN)
        u8 stride;
        u8 rsvd1[3];
#else
        u8 rsvd1[3];
        u8 stride;
#endif
        u32 data_size;
        u32 no_ops;
        u32 rsvd2[4];
} __packed;
74
/* Template layout of a control entry executed by qlcnic_dump_ctrl();
 * opcode is a bitmask of QLCNIC_DUMP_* operations.
 */
struct __ctrl {
        u32 addr;
#if defined(__LITTLE_ENDIAN)
        u8 stride;
        u8 index_a;
        u16 timeout;
#else
        u16 timeout;
        u8 index_a;
        u8 stride;
#endif
        u32 data_size;
        u32 no_ops;
#if defined(__LITTLE_ENDIAN)
        u8 opcode;
        u8 index_v;
        u8 shl_val;
        u8 shr_val;
#else
        u8 shr_val;
        u8 shl_val;
        u8 index_v;
        u8 opcode;
#endif
        u32 val1;
        u32 val2;
        u32 val3;
} __packed;
103
/* Template layout of an L1/L2 cache dump entry, shared by
 * qlcnic_dump_l1_cache() and qlcnic_dump_l2_cache().
 */
struct __cache {
        u32 addr;
#if defined(__LITTLE_ENDIAN)
        u16 stride;
        u16 init_tag_val;
#else
        u16 init_tag_val;
        u16 stride;
#endif
        u32 size;
        u32 no_ops;
        u32 ctrl_addr;
        u32 ctrl_val;
        u32 read_addr;
#if defined(__LITTLE_ENDIAN)
        u8 read_addr_stride;
        u8 read_addr_num;
        u8 rsvd1[2];
#else
        u8 rsvd1[2];
        u8 read_addr_num;
        u8 read_addr_stride;
#endif
} __packed;
128
/* Template layout of an on-chip-memory entry (qlcnic_dump_ocm()). */
struct __ocm {
        u8 rsvd[8];
        u32 size;
        u32 no_ops;
        u8 rsvd1[8];
        u32 read_addr;          /* offset added to pci_base0 */
        u32 read_addr_stride;
} __packed;
137
/* Template layout of a memory/ROM entry (qlcnic_read_memory(),
 * qlcnic_read_rom(), qlcnic_83xx_dump_rom()); the first three fields
 * drive the PEX DMA path.
 */
struct __mem {
        u32 desc_card_addr;     /* MS-memory address of the DMA descriptor */
        u32 dma_desc_cmd;
        u32 start_dma_cmd;
        u32 rsvd[3];
        u32 addr;
        u32 size;
} __packed;
146
/* Template layout of a mux-select entry (qlcnic_dump_mux()). */
struct __mux {
        u32 addr;               /* mux select register */
        u8 rsvd[4];
        u32 size;
        u32 no_ops;
        u32 val;                /* initial select value */
        u32 val_stride;
        u32 read_addr;
        u8 rsvd2[4];
} __packed;
157
/* Template layout of a per-queue register entry (qlcnic_dump_que()). */
struct __queue {
        u32 sel_addr;           /* queue select register */
#if defined(__LITTLE_ENDIAN)
        u16 stride;
        u8 rsvd[2];
#else
        u8 rsvd[2];
        u16 stride;
#endif
        u32 size;
        u32 no_ops;
        u8 rsvd2[8];
        u32 read_addr;
#if defined(__LITTLE_ENDIAN)
        u8 read_addr_stride;
        u8 read_addr_cnt;
        u8 rsvd3[2];
#else
        u8 rsvd3[2];
        u8 read_addr_cnt;
        u8 read_addr_stride;
#endif
} __packed;
181
/* Template layout of a poll-then-read entry (qlcnic_read_pollrd()). */
struct __pollrd {
        u32 sel_addr;
        u32 read_addr;
        u32 sel_val;
#if defined(__LITTLE_ENDIAN)
        u16 sel_val_stride;
        u16 no_ops;
#else
        u16 no_ops;
        u16 sel_val_stride;
#endif
        u32 poll_wait;          /* max poll iterations */
        u32 poll_mask;
        u32 data_size;
        u8 rsvd[4];
} __packed;
198
/* Template layout of a dual mux-select entry (qlcnic_read_mux2()). */
struct __mux2 {
        u32 sel_addr1;
        u32 sel_addr2;
        u32 sel_val1;
        u32 sel_val2;
        u32 no_ops;
        u32 sel_val_mask;
        u32 read_addr;
#if defined(__LITTLE_ENDIAN)
        u8 sel_val_stride;
        u8 data_size;
        u8 rsvd[2];
#else
        u8 rsvd[2];
        u8 data_size;
        u8 sel_val_stride;
#endif
} __packed;
217
/* Template layout of a poll/read-modify-write entry
 * (qlcnic_read_pollrdmwr()).
 */
struct __pollrdmwr {
        u32 addr1;              /* trigger/poll register */
        u32 addr2;              /* data register */
        u32 val1;
        u32 val2;
        u32 poll_wait;
        u32 poll_mask;
        u32 mod_mask;
        u32 data_size;
} __packed;
228
/* One minidump template entry: common header followed by the
 * opcode-specific region description.
 */
struct qlcnic_dump_entry {
        struct qlcnic_common_entry_hdr hdr;
        union {
                struct __crb crb;
                struct __cache cache;
                struct __ocm ocm;
                struct __mem mem;
                struct __mux mux;
                struct __queue que;
                struct __ctrl ctrl;
                struct __pollrdmwr pollrdmwr;
                struct __mux2 mux2;
                struct __pollrd pollrd;
        } region;
} __packed;
244
/* Entry opcodes found in firmware minidump templates; dispatched via
 * the qlcnic_fw_dump_ops / qlcnic_83xx_fw_dump_ops tables below.
 */
enum qlcnic_minidump_opcode {
        QLCNIC_DUMP_NOP = 0,
        QLCNIC_DUMP_READ_CRB = 1,
        QLCNIC_DUMP_READ_MUX = 2,
        QLCNIC_DUMP_QUEUE = 3,
        QLCNIC_DUMP_BRD_CONFIG = 4,
        QLCNIC_DUMP_READ_OCM = 6,
        QLCNIC_DUMP_PEG_REG = 7,
        QLCNIC_DUMP_L1_DTAG = 8,
        QLCNIC_DUMP_L1_ITAG = 9,
        QLCNIC_DUMP_L1_DATA = 11,
        QLCNIC_DUMP_L1_INST = 12,
        QLCNIC_DUMP_L2_DTAG = 21,
        QLCNIC_DUMP_L2_ITAG = 22,
        QLCNIC_DUMP_L2_DATA = 23,
        QLCNIC_DUMP_L2_INST = 24,
        QLCNIC_DUMP_POLL_RD = 35,
        QLCNIC_READ_MUX2 = 36,
        QLCNIC_READ_POLLRDMWR = 37,
        QLCNIC_DUMP_READ_ROM = 71,
        QLCNIC_DUMP_READ_MEM = 72,
        QLCNIC_DUMP_READ_CTRL = 98,
        QLCNIC_DUMP_TLHDR = 99,
        QLCNIC_DUMP_RDEND = 255
};
270
271 inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
272 {
273 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
274
275 return hdr->saved_state[index];
276 }
277
278 inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
279 u32 value)
280 {
281 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
282
283 hdr->saved_state[index] = value;
284 }
285
/* Cache frequently used 82xx template header fields in @fw_dump and
 * seed the driver capture mask from the firmware default.
 */
void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
{
        struct qlcnic_82xx_dump_template_hdr *hdr;

        hdr = fw_dump->tmpl_hdr;
        fw_dump->tmpl_hdr_size = hdr->size;
        fw_dump->version = hdr->version;
        fw_dump->num_entries = hdr->num_entries;
        fw_dump->offset = hdr->offset;

        /* Start from the firmware-provided default capture mask. */
        hdr->drv_cap_mask = hdr->cap_mask;
        fw_dump->cap_mask = hdr->cap_mask;

        /* 82xx advertises PEX DMA capture support via capability BIT_0. */
        fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
}
301
302 inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
303 {
304 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
305
306 return hdr->cap_sizes[index];
307 }
308
309 void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
310 {
311 struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
312
313 hdr->sys_info[idx] = value;
314 }
315
316 void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
317 {
318 struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
319
320 hdr->drv_cap_mask = mask;
321 }
322
323 inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
324 {
325 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
326
327 return hdr->saved_state[index];
328 }
329
330 inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
331 u32 value)
332 {
333 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
334
335 hdr->saved_state[index] = value;
336 }
337
/* Minimum 83xx template version (low 20 bits) that enables PEX DMA
 * capture — see qlcnic_83xx_cache_tmpl_hdr_values().
 */
#define QLCNIC_TEMPLATE_VERSION (0x20001)
339
/* Cache frequently used 83xx template header fields in @fw_dump and
 * seed the driver capture mask from the firmware default.
 */
void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
{
        struct qlcnic_83xx_dump_template_hdr *hdr;

        hdr = fw_dump->tmpl_hdr;
        fw_dump->tmpl_hdr_size = hdr->size;
        fw_dump->version = hdr->version;
        fw_dump->num_entries = hdr->num_entries;
        fw_dump->offset = hdr->offset;

        /* Start from the firmware-provided default capture mask. */
        hdr->drv_cap_mask = hdr->cap_mask;
        fw_dump->cap_mask = hdr->cap_mask;

        /* PEX DMA capture is available from QLCNIC_TEMPLATE_VERSION on;
         * only the low 20 bits of the version take part in the compare.
         */
        fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
                               QLCNIC_TEMPLATE_VERSION;
}
356
357 inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
358 {
359 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
360
361 return hdr->cap_sizes[index];
362 }
363
364 void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
365 {
366 struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
367
368 hdr->sys_info[idx] = value;
369 }
370
371 void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
372 {
373 struct qlcnic_83xx_dump_template_hdr *hdr;
374
375 hdr = tmpl_hdr;
376 hdr->drv_cap_mask = mask;
377 }
378
/* Maps a template entry opcode to its capture handler.  Handlers write
 * into the dump buffer and return the number of bytes captured.
 */
struct qlcnic_dump_operations {
        enum qlcnic_minidump_opcode opcode;
        u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
                       __le32 *);
};
384
385 static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
386 struct qlcnic_dump_entry *entry, __le32 *buffer)
387 {
388 int i;
389 u32 addr, data;
390 struct __crb *crb = &entry->region.crb;
391
392 addr = crb->addr;
393
394 for (i = 0; i < crb->no_ops; i++) {
395 data = qlcnic_ind_rd(adapter, addr);
396 *buffer++ = cpu_to_le32(addr);
397 *buffer++ = cpu_to_le32(data);
398 addr += crb->stride;
399 }
400 return crb->no_ops * 2 * sizeof(u32);
401 }
402
403 static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
404 struct qlcnic_dump_entry *entry, __le32 *buffer)
405 {
406 void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
407 struct __ctrl *ctr = &entry->region.ctrl;
408 int i, k, timeout = 0;
409 u32 addr, data, temp;
410 u8 no_ops;
411
412 addr = ctr->addr;
413 no_ops = ctr->no_ops;
414
415 for (i = 0; i < no_ops; i++) {
416 k = 0;
417 for (k = 0; k < 8; k++) {
418 if (!(ctr->opcode & (1 << k)))
419 continue;
420 switch (1 << k) {
421 case QLCNIC_DUMP_WCRB:
422 qlcnic_ind_wr(adapter, addr, ctr->val1);
423 break;
424 case QLCNIC_DUMP_RWCRB:
425 data = qlcnic_ind_rd(adapter, addr);
426 qlcnic_ind_wr(adapter, addr, data);
427 break;
428 case QLCNIC_DUMP_ANDCRB:
429 data = qlcnic_ind_rd(adapter, addr);
430 qlcnic_ind_wr(adapter, addr,
431 (data & ctr->val2));
432 break;
433 case QLCNIC_DUMP_ORCRB:
434 data = qlcnic_ind_rd(adapter, addr);
435 qlcnic_ind_wr(adapter, addr,
436 (data | ctr->val3));
437 break;
438 case QLCNIC_DUMP_POLLCRB:
439 while (timeout <= ctr->timeout) {
440 data = qlcnic_ind_rd(adapter, addr);
441 if ((data & ctr->val2) == ctr->val1)
442 break;
443 usleep_range(1000, 2000);
444 timeout++;
445 }
446 if (timeout > ctr->timeout) {
447 dev_info(&adapter->pdev->dev,
448 "Timed out, aborting poll CRB\n");
449 return -EINVAL;
450 }
451 break;
452 case QLCNIC_DUMP_RD_SAVE:
453 temp = ctr->index_a;
454 if (temp)
455 addr = qlcnic_get_saved_state(adapter,
456 hdr,
457 temp);
458 data = qlcnic_ind_rd(adapter, addr);
459 qlcnic_set_saved_state(adapter, hdr,
460 ctr->index_v, data);
461 break;
462 case QLCNIC_DUMP_WRT_SAVED:
463 temp = ctr->index_v;
464 if (temp)
465 data = qlcnic_get_saved_state(adapter,
466 hdr,
467 temp);
468 else
469 data = ctr->val1;
470
471 temp = ctr->index_a;
472 if (temp)
473 addr = qlcnic_get_saved_state(adapter,
474 hdr,
475 temp);
476 qlcnic_ind_wr(adapter, addr, data);
477 break;
478 case QLCNIC_DUMP_MOD_SAVE_ST:
479 data = qlcnic_get_saved_state(adapter, hdr,
480 ctr->index_v);
481 data <<= ctr->shl_val;
482 data >>= ctr->shr_val;
483 if (ctr->val2)
484 data &= ctr->val2;
485 data |= ctr->val3;
486 data += ctr->val1;
487 qlcnic_set_saved_state(adapter, hdr,
488 ctr->index_v, data);
489 break;
490 default:
491 dev_info(&adapter->pdev->dev,
492 "Unknown opcode\n");
493 break;
494 }
495 }
496 addr += ctr->stride;
497 }
498 return 0;
499 }
500
501 static u32 qlcnic_dump_mux(struct qlcnic_adapter *adapter,
502 struct qlcnic_dump_entry *entry, __le32 *buffer)
503 {
504 int loop;
505 u32 val, data = 0;
506 struct __mux *mux = &entry->region.mux;
507
508 val = mux->val;
509 for (loop = 0; loop < mux->no_ops; loop++) {
510 qlcnic_ind_wr(adapter, mux->addr, val);
511 data = qlcnic_ind_rd(adapter, mux->read_addr);
512 *buffer++ = cpu_to_le32(val);
513 *buffer++ = cpu_to_le32(data);
514 val += mux->val_stride;
515 }
516 return 2 * mux->no_ops * sizeof(u32);
517 }
518
519 static u32 qlcnic_dump_que(struct qlcnic_adapter *adapter,
520 struct qlcnic_dump_entry *entry, __le32 *buffer)
521 {
522 int i, loop;
523 u32 cnt, addr, data, que_id = 0;
524 struct __queue *que = &entry->region.que;
525
526 addr = que->read_addr;
527 cnt = que->read_addr_cnt;
528
529 for (loop = 0; loop < que->no_ops; loop++) {
530 qlcnic_ind_wr(adapter, que->sel_addr, que_id);
531 addr = que->read_addr;
532 for (i = 0; i < cnt; i++) {
533 data = qlcnic_ind_rd(adapter, addr);
534 *buffer++ = cpu_to_le32(data);
535 addr += que->read_addr_stride;
536 }
537 que_id += que->stride;
538 }
539 return que->no_ops * cnt * sizeof(u32);
540 }
541
542 static u32 qlcnic_dump_ocm(struct qlcnic_adapter *adapter,
543 struct qlcnic_dump_entry *entry, __le32 *buffer)
544 {
545 int i;
546 u32 data;
547 void __iomem *addr;
548 struct __ocm *ocm = &entry->region.ocm;
549
550 addr = adapter->ahw->pci_base0 + ocm->read_addr;
551 for (i = 0; i < ocm->no_ops; i++) {
552 data = readl(addr);
553 *buffer++ = cpu_to_le32(data);
554 addr += ocm->read_addr_stride;
555 }
556 return ocm->no_ops * sizeof(u32);
557 }
558
/* Capture flash contents one 32-bit word at a time through the ROM
 * window registers, under the shared flash lock.
 *
 * NOTE(review): if the lock is never granted (lck_val still 0 after
 * MAX_CTL_CHECK tries) the read proceeds anyway — confirm this
 * best-effort behaviour is intended.
 */
static u32 qlcnic_read_rom(struct qlcnic_adapter *adapter,
                           struct qlcnic_dump_entry *entry, __le32 *buffer)
{
        int i, count = 0;
        u32 fl_addr, size, val, lck_val, addr;
        struct __mem *rom = &entry->region.mem;

        fl_addr = rom->addr;
        size = rom->size / 4;   /* word count */
lock_try:
        lck_val = QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_LOCK);
        if (!lck_val && count < MAX_CTL_CHECK) {
                usleep_range(10000, 11000);
                count++;
                goto lock_try;
        }
        QLC_SHARED_REG_WR32(adapter, QLCNIC_FLASH_LOCK_OWNER,
                            adapter->ahw->pci_func);
        for (i = 0; i < size; i++) {
                /* Select the 64K flash window, then read inside it. */
                addr = fl_addr & 0xFFFF0000;
                qlcnic_ind_wr(adapter, FLASH_ROM_WINDOW, addr);
                addr = LSW(fl_addr) + FLASH_ROM_DATA;
                val = qlcnic_ind_rd(adapter, addr);
                fl_addr += 4;
                *buffer++ = cpu_to_le32(val);
        }
        /* Reading the UNLOCK register releases the flash lock. */
        QLC_SHARED_REG_RD32(adapter, QLCNIC_FLASH_UNLOCK);
        return rom->size;
}
588
589 static u32 qlcnic_dump_l1_cache(struct qlcnic_adapter *adapter,
590 struct qlcnic_dump_entry *entry, __le32 *buffer)
591 {
592 int i;
593 u32 cnt, val, data, addr;
594 struct __cache *l1 = &entry->region.cache;
595
596 val = l1->init_tag_val;
597
598 for (i = 0; i < l1->no_ops; i++) {
599 qlcnic_ind_wr(adapter, l1->addr, val);
600 qlcnic_ind_wr(adapter, l1->ctrl_addr, LSW(l1->ctrl_val));
601 addr = l1->read_addr;
602 cnt = l1->read_addr_num;
603 while (cnt) {
604 data = qlcnic_ind_rd(adapter, addr);
605 *buffer++ = cpu_to_le32(data);
606 addr += l1->read_addr_stride;
607 cnt--;
608 }
609 val += l1->stride;
610 }
611 return l1->no_ops * l1->read_addr_num * sizeof(u32);
612 }
613
/* Capture an L2 cache region.  Like the L1 variant, but the control
 * write is optional and completion is polled using poll_mask/poll_to
 * packed into the upper 16 bits of ctrl_val.
 *
 * Returns the captured size in bytes, or -EINVAL on poll timeout (cast
 * to u32 by the handler signature, as elsewhere in this file).
 */
static u32 qlcnic_dump_l2_cache(struct qlcnic_adapter *adapter,
                                struct qlcnic_dump_entry *entry, __le32 *buffer)
{
        int i;
        u32 cnt, val, data, addr;
        u8 poll_mask, poll_to, time_out = 0;
        struct __cache *l2 = &entry->region.cache;

        val = l2->init_tag_val;
        /* poll mask and budget live in the upper half of ctrl_val */
        poll_mask = LSB(MSW(l2->ctrl_val));
        poll_to = MSB(MSW(l2->ctrl_val));

        for (i = 0; i < l2->no_ops; i++) {
                qlcnic_ind_wr(adapter, l2->addr, val);
                if (LSW(l2->ctrl_val))
                        qlcnic_ind_wr(adapter, l2->ctrl_addr,
                                      LSW(l2->ctrl_val));
                if (!poll_mask)
                        goto skip_poll;
                /* NOTE(review): time_out is never reset, so the poll
                 * budget is shared across all no_ops iterations —
                 * confirm this is intended.
                 */
                do {
                        data = qlcnic_ind_rd(adapter, l2->ctrl_addr);
                        if (!(data & poll_mask))
                                break;
                        usleep_range(1000, 2000);
                        time_out++;
                } while (time_out <= poll_to);

                if (time_out > poll_to) {
                        dev_err(&adapter->pdev->dev,
                                "Timeout exceeded in %s, aborting dump\n",
                                __func__);
                        return -EINVAL;
                }
skip_poll:
                addr = l2->read_addr;
                cnt = l2->read_addr_num;
                while (cnt) {
                        data = qlcnic_ind_rd(adapter, addr);
                        *buffer++ = cpu_to_le32(data);
                        addr += l2->read_addr_stride;
                        cnt--;
                }
                val += l2->stride;
        }
        return l2->no_ops * l2->read_addr_num * sizeof(u32);
}
660
661 static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter,
662 struct __mem *mem, __le32 *buffer,
663 int *ret)
664 {
665 u32 addr, data, test;
666 int i, reg_read;
667
668 reg_read = mem->size;
669 addr = mem->addr;
670
671 if ((addr & 0xf) || (reg_read%16)) {
672 dev_info(&adapter->pdev->dev,
673 "Unaligned memory addr:0x%x size:0x%x\n",
674 addr, reg_read);
675 *ret = -EINVAL;
676 return 0;
677 }
678
679 mutex_lock(&adapter->ahw->mem_lock);
680
681 while (reg_read != 0) {
682 qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
683 qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
684 qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_START_ENABLE);
685
686 for (i = 0; i < MAX_CTL_CHECK; i++) {
687 test = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
688 if (!(test & TA_CTL_BUSY))
689 break;
690 }
691 if (i == MAX_CTL_CHECK) {
692 if (printk_ratelimit()) {
693 dev_err(&adapter->pdev->dev,
694 "failed to read through agent\n");
695 *ret = -EIO;
696 goto out;
697 }
698 }
699 for (i = 0; i < 4; i++) {
700 data = qlcnic_ind_rd(adapter, qlcnic_ms_read_data[i]);
701 *buffer++ = cpu_to_le32(data);
702 }
703 addr += 16;
704 reg_read -= 16;
705 ret += 16;
706 cond_resched();
707 }
708 out:
709 mutex_unlock(&adapter->ahw->mem_lock);
710 return mem->size;
711 }
712
713
/* Register block of PEX DMA engine @dma_no and the command/status
 * register offsets within it.
 */
#define QLC_DMA_REG_BASE_ADDR(dma_no) (0x77320000 + (dma_no * 0x10000))

#define QLC_DMA_CMD_BUFF_ADDR_LOW 0
#define QLC_DMA_CMD_BUFF_ADDR_HI 4
#define QLC_DMA_CMD_STATUS_CTRL 8
720
/* Point the selected PEX DMA engine at the descriptor already written to
 * MS memory, start it, and poll (up to 400 * 250-500us) for completion.
 *
 * Returns 0 on success, a qlcnic_ind_wr() error code, or -EIO on
 * completion timeout.
 */
static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
                                struct __mem *mem)
{
        struct device *dev = &adapter->pdev->dev;
        u32 dma_no, dma_base_addr, temp_addr;
        int i, ret, dma_sts;
        void *tmpl_hdr;

        tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
        /* Engine number was stashed in the template's saved state. */
        dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
                                        QLC_83XX_DMA_ENGINE_INDEX);
        dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);

        temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
        ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
        if (ret)
                return ret;

        temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
        ret = qlcnic_ind_wr(adapter, temp_addr, 0);
        if (ret)
                return ret;

        /* Writing the start command kicks off the transfer. */
        temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
        ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
        if (ret)
                return ret;

        /* Poll status BIT_1; treated as busy while it stays set. */
        temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
        for (i = 0; i < 400; i++) {
                dma_sts = qlcnic_ind_rd(adapter, temp_addr);

                if (dma_sts & BIT_1)
                        usleep_range(250, 500);
                else
                        break;
        }

        if (i >= 400) {
                dev_info(dev, "PEX DMA operation timed out");
                ret = -EIO;
        }

        return ret;
}
767
/* Read adapter memory via the PEX DMA engine in QLC_PEX_DMA_READ_SIZE
 * chunks: build a descriptor, write it to MS memory, start the engine,
 * then copy the result out of the coherent bounce buffer.
 *
 * Returns the number of bytes copied into @buffer; *ret carries the
 * error code when non-zero.  Assumes chunk sizes are dword multiples
 * (buffer advances by size / 4) — TODO confirm against template sizes.
 */
static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
                                     struct __mem *mem,
                                     __le32 *buffer, int *ret)
{
        struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
        u32 temp, dma_base_addr, size = 0, read_size = 0;
        struct qlcnic_pex_dma_descriptor *dma_descr;
        struct device *dev = &adapter->pdev->dev;
        dma_addr_t dma_phys_addr;
        void *dma_buffer;
        void *tmpl_hdr;

        tmpl_hdr = fw_dump->tmpl_hdr;

        /* Verify the selected engine is available (status BIT_31 set). */
        temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
                                      QLC_83XX_DMA_ENGINE_INDEX);
        dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
        temp = qlcnic_ind_rd(adapter,
                             dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);

        if (!(temp & BIT_31)) {
                dev_info(dev, "%s: DMA engine is not available\n", __func__);
                *ret = -EIO;
                return 0;
        }

        dma_descr = kzalloc(sizeof(struct qlcnic_pex_dma_descriptor),
                            GFP_KERNEL);
        if (!dma_descr) {
                *ret = -ENOMEM;
                return 0;
        }

        /* Fill the invariant parts of the descriptor: command word (with
         * the PCI function folded into bits 4-7), bounce-buffer bus
         * address, and zero high source address.
         */
        dma_phys_addr = fw_dump->phys_addr;
        dma_buffer = fw_dump->dma_buffer;
        temp = 0;
        temp = mem->dma_desc_cmd & 0xff0f;
        temp |= (adapter->ahw->pci_func & 0xf) << 4;
        dma_descr->dma_desc_cmd = (temp << 16) & 0xffff0000;
        dma_descr->dma_bus_addr_low = LSD(dma_phys_addr);
        dma_descr->dma_bus_addr_high = MSD(dma_phys_addr);
        dma_descr->src_addr_high = 0;

        /* Transfer chunk by chunk until mem->size bytes are read. */
        while (read_size < mem->size) {
                if (mem->size - read_size >= QLC_PEX_DMA_READ_SIZE)
                        size = QLC_PEX_DMA_READ_SIZE;
                else
                        size = mem->size - read_size;

                dma_descr->src_addr_low = mem->addr + read_size;
                dma_descr->read_data_size = size;

                /* Descriptor is written to MS memory in 16-byte units. */
                temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
                *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
                                              (u32 *)dma_descr, temp);
                if (*ret) {
                        dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
                                 mem->desc_card_addr);
                        goto free_dma_descr;
                }

                *ret = qlcnic_start_pex_dma(adapter, mem);
                if (*ret) {
                        dev_info(dev, "Failed to start PEX DMA operation\n");
                        goto free_dma_descr;
                }

                memcpy(buffer, dma_buffer, size);
                buffer += size / 4;
                read_size += size;
        }

free_dma_descr:
        kfree(dma_descr);

        return read_size;
}
854
855 static u32 qlcnic_read_memory(struct qlcnic_adapter *adapter,
856 struct qlcnic_dump_entry *entry, __le32 *buffer)
857 {
858 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
859 struct device *dev = &adapter->pdev->dev;
860 struct __mem *mem = &entry->region.mem;
861 u32 data_size;
862 int ret = 0;
863
864 if (fw_dump->use_pex_dma) {
865 data_size = qlcnic_read_memory_pexdma(adapter, mem, buffer,
866 &ret);
867 if (ret)
868 dev_info(dev,
869 "Failed to read memory dump using PEX DMA: mask[0x%x]\n",
870 entry->hdr.mask);
871 else
872 return data_size;
873 }
874
875 data_size = qlcnic_read_memory_test_agent(adapter, mem, buffer, &ret);
876 if (ret) {
877 dev_info(dev,
878 "Failed to read memory dump using test agent method: mask[0x%x]\n",
879 entry->hdr.mask);
880 return 0;
881 } else {
882 return data_size;
883 }
884 }
885
/* Handler for NOP/TLHDR/RDEND entries: capture nothing and mark the
 * entry as skipped in its header flags.
 */
static u32 qlcnic_dump_nop(struct qlcnic_adapter *adapter,
                           struct qlcnic_dump_entry *entry, __le32 *buffer)
{
        entry->hdr.flags |= QLCNIC_DUMP_SKIP;
        return 0;
}
892
893 static int qlcnic_valid_dump_entry(struct device *dev,
894 struct qlcnic_dump_entry *entry, u32 size)
895 {
896 int ret = 1;
897 if (size != entry->hdr.cap_size) {
898 dev_err(dev,
899 "Invalid entry, Type:%d\tMask:%d\tSize:%dCap_size:%d\n",
900 entry->hdr.type, entry->hdr.mask, size,
901 entry->hdr.cap_size);
902 ret = 0;
903 }
904 return ret;
905 }
906
/* Poll/read-modify-write entry: trigger via addr1=val1 and poll, read
 * addr2 masked with mod_mask and write it back, re-trigger with
 * addr1=val2 and poll again, then emit (addr2, masked value).
 *
 * Returns 8 bytes on success, 0 if the first poll times out.
 * NOTE(review): the second poll's timeout is not checked — the capture
 * proceeds regardless; confirm this matches the template contract.
 */
static u32 qlcnic_read_pollrdmwr(struct qlcnic_adapter *adapter,
                                 struct qlcnic_dump_entry *entry,
                                 __le32 *buffer)
{
        struct __pollrdmwr *poll = &entry->region.pollrdmwr;
        u32 data, wait_count, poll_wait, temp;

        poll_wait = poll->poll_wait;

        qlcnic_ind_wr(adapter, poll->addr1, poll->val1);
        wait_count = 0;

        while (wait_count < poll_wait) {
                data = qlcnic_ind_rd(adapter, poll->addr1);
                if ((data & poll->poll_mask) != 0)
                        break;
                wait_count++;
        }

        if (wait_count == poll_wait) {
                dev_err(&adapter->pdev->dev,
                        "Timeout exceeded in %s, aborting dump\n",
                        __func__);
                return 0;
        }

        /* Read-modify-write the data register, then re-trigger. */
        data = qlcnic_ind_rd(adapter, poll->addr2) & poll->mod_mask;
        qlcnic_ind_wr(adapter, poll->addr2, data);
        qlcnic_ind_wr(adapter, poll->addr1, poll->val2);
        wait_count = 0;

        while (wait_count < poll_wait) {
                temp = qlcnic_ind_rd(adapter, poll->addr1);
                if ((temp & poll->poll_mask) != 0)
                        break;
                wait_count++;
        }

        *buffer++ = cpu_to_le32(poll->addr2);
        *buffer++ = cpu_to_le32(data);

        return 2 * sizeof(u32);

}
951
952 static u32 qlcnic_read_pollrd(struct qlcnic_adapter *adapter,
953 struct qlcnic_dump_entry *entry, __le32 *buffer)
954 {
955 struct __pollrd *pollrd = &entry->region.pollrd;
956 u32 data, wait_count, poll_wait, sel_val;
957 int i;
958
959 poll_wait = pollrd->poll_wait;
960 sel_val = pollrd->sel_val;
961
962 for (i = 0; i < pollrd->no_ops; i++) {
963 qlcnic_ind_wr(adapter, pollrd->sel_addr, sel_val);
964 wait_count = 0;
965 while (wait_count < poll_wait) {
966 data = qlcnic_ind_rd(adapter, pollrd->sel_addr);
967 if ((data & pollrd->poll_mask) != 0)
968 break;
969 wait_count++;
970 }
971
972 if (wait_count == poll_wait) {
973 dev_err(&adapter->pdev->dev,
974 "Timeout exceeded in %s, aborting dump\n",
975 __func__);
976 return 0;
977 }
978
979 data = qlcnic_ind_rd(adapter, pollrd->read_addr);
980 *buffer++ = cpu_to_le32(sel_val);
981 *buffer++ = cpu_to_le32(data);
982 sel_val += pollrd->sel_val_stride;
983 }
984 return pollrd->no_ops * (2 * sizeof(u32));
985 }
986
987 static u32 qlcnic_read_mux2(struct qlcnic_adapter *adapter,
988 struct qlcnic_dump_entry *entry, __le32 *buffer)
989 {
990 struct __mux2 *mux2 = &entry->region.mux2;
991 u32 data;
992 u32 t_sel_val, sel_val1, sel_val2;
993 int i;
994
995 sel_val1 = mux2->sel_val1;
996 sel_val2 = mux2->sel_val2;
997
998 for (i = 0; i < mux2->no_ops; i++) {
999 qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val1);
1000 t_sel_val = sel_val1 & mux2->sel_val_mask;
1001 qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
1002 data = qlcnic_ind_rd(adapter, mux2->read_addr);
1003 *buffer++ = cpu_to_le32(t_sel_val);
1004 *buffer++ = cpu_to_le32(data);
1005 qlcnic_ind_wr(adapter, mux2->sel_addr1, sel_val2);
1006 t_sel_val = sel_val2 & mux2->sel_val_mask;
1007 qlcnic_ind_wr(adapter, mux2->sel_addr2, t_sel_val);
1008 data = qlcnic_ind_rd(adapter, mux2->read_addr);
1009 *buffer++ = cpu_to_le32(t_sel_val);
1010 *buffer++ = cpu_to_le32(data);
1011 sel_val1 += mux2->sel_val_stride;
1012 sel_val2 += mux2->sel_val_stride;
1013 }
1014
1015 return mux2->no_ops * (4 * sizeof(u32));
1016 }
1017
1018 static u32 qlcnic_83xx_dump_rom(struct qlcnic_adapter *adapter,
1019 struct qlcnic_dump_entry *entry, __le32 *buffer)
1020 {
1021 u32 fl_addr, size;
1022 struct __mem *rom = &entry->region.mem;
1023
1024 fl_addr = rom->addr;
1025 size = rom->size / 4;
1026
1027 if (!qlcnic_83xx_lockless_flash_read32(adapter, fl_addr,
1028 (u8 *)buffer, size))
1029 return rom->size;
1030
1031 return 0;
1032 }
1033
/* 82xx: template entry opcode -> capture handler dispatch table. */
static const struct qlcnic_dump_operations qlcnic_fw_dump_ops[] = {
        {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
        {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
        {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
        {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
        {QLCNIC_DUMP_BRD_CONFIG, qlcnic_read_rom},
        {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
        {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
        {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
        {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
        {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
        {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
        {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
        {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
        {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
        {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
        {QLCNIC_DUMP_READ_ROM, qlcnic_read_rom},
        {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
        {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
        {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
        {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
1056
/* 83xx dispatch table: adds POLL_RD/MUX2/POLLRDMWR entries and uses the
 * lockless flash reader for ROM/board-config entries.
 */
static const struct qlcnic_dump_operations qlcnic_83xx_fw_dump_ops[] = {
        {QLCNIC_DUMP_NOP, qlcnic_dump_nop},
        {QLCNIC_DUMP_READ_CRB, qlcnic_dump_crb},
        {QLCNIC_DUMP_READ_MUX, qlcnic_dump_mux},
        {QLCNIC_DUMP_QUEUE, qlcnic_dump_que},
        {QLCNIC_DUMP_BRD_CONFIG, qlcnic_83xx_dump_rom},
        {QLCNIC_DUMP_READ_OCM, qlcnic_dump_ocm},
        {QLCNIC_DUMP_PEG_REG, qlcnic_dump_ctrl},
        {QLCNIC_DUMP_L1_DTAG, qlcnic_dump_l1_cache},
        {QLCNIC_DUMP_L1_ITAG, qlcnic_dump_l1_cache},
        {QLCNIC_DUMP_L1_DATA, qlcnic_dump_l1_cache},
        {QLCNIC_DUMP_L1_INST, qlcnic_dump_l1_cache},
        {QLCNIC_DUMP_L2_DTAG, qlcnic_dump_l2_cache},
        {QLCNIC_DUMP_L2_ITAG, qlcnic_dump_l2_cache},
        {QLCNIC_DUMP_L2_DATA, qlcnic_dump_l2_cache},
        {QLCNIC_DUMP_L2_INST, qlcnic_dump_l2_cache},
        {QLCNIC_DUMP_POLL_RD, qlcnic_read_pollrd},
        {QLCNIC_READ_MUX2, qlcnic_read_mux2},
        {QLCNIC_READ_POLLRDMWR, qlcnic_read_pollrdmwr},
        {QLCNIC_DUMP_READ_ROM, qlcnic_83xx_dump_rom},
        {QLCNIC_DUMP_READ_MEM, qlcnic_read_memory},
        {QLCNIC_DUMP_READ_CTRL, qlcnic_dump_ctrl},
        {QLCNIC_DUMP_TLHDR, qlcnic_dump_nop},
        {QLCNIC_DUMP_RDEND, qlcnic_dump_nop},
};
1082
1083 static uint32_t qlcnic_temp_checksum(uint32_t *temp_buffer, u32 temp_size)
1084 {
1085 uint64_t sum = 0;
1086 int count = temp_size / sizeof(uint32_t);
1087 while (count-- > 0)
1088 sum += *temp_buffer++;
1089 while (sum >> 32)
1090 sum = (sum & 0xFFFFFFFF) + (sum >> 32);
1091 return ~sum;
1092 }
1093
1094 static int qlcnic_fw_flash_get_minidump_temp(struct qlcnic_adapter *adapter,
1095 u8 *buffer, u32 size)
1096 {
1097 int ret = 0;
1098
1099 if (qlcnic_82xx_check(adapter))
1100 return -EIO;
1101
1102 if (qlcnic_83xx_lock_flash(adapter))
1103 return -EIO;
1104
1105 ret = qlcnic_83xx_lockless_flash_read32(adapter,
1106 QLC_83XX_MINIDUMP_FLASH,
1107 buffer, size / sizeof(u32));
1108
1109 qlcnic_83xx_unlock_flash(adapter);
1110
1111 return ret;
1112 }
1113
/* Read just the template header from flash to learn its size/version,
 * reporting them through cmd->rsp.arg[2]/arg[3] so callers can treat it
 * like a mailbox response.
 *
 * NOTE(review): the rsp args are filled even when the flash read fails;
 * callers must check the return value before trusting them.
 */
static int
qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
                                       struct qlcnic_cmd_args *cmd)
{
        struct qlcnic_83xx_dump_template_hdr tmp_hdr;
        u32 size = sizeof(tmp_hdr) / sizeof(u32);
        int ret = 0;

        if (qlcnic_82xx_check(adapter))
                return -EIO;

        if (qlcnic_83xx_lock_flash(adapter))
                return -EIO;

        ret = qlcnic_83xx_lockless_flash_read32(adapter,
                                                QLC_83XX_MINIDUMP_FLASH,
                                                (u8 *)&tmp_hdr, size);

        qlcnic_83xx_unlock_flash(adapter);

        cmd->rsp.arg[2] = tmp_hdr.size;
        cmd->rsp.arg[3] = tmp_hdr.version;

        return ret;
}
1139
/* Query the minidump template size/version via mailbox; if the mailbox
 * command fails, fall back to reading the header from flash and flag
 * (*use_flash_temp) that the template itself must come from flash too.
 *
 * Returns 0 on success with *temp_size non-zero, -ENOMEM/-EIO on error.
 */
static int qlcnic_fw_get_minidump_temp_size(struct qlcnic_adapter *adapter,
                                            u32 *version, u32 *temp_size,
                                            u8 *use_flash_temp)
{
        int err = 0;
        struct qlcnic_cmd_args cmd;

        if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TEMP_SIZE))
                return -ENOMEM;

        err = qlcnic_issue_cmd(adapter, &cmd);
        if (err != QLCNIC_RCODE_SUCCESS) {
                /* Flash fallback fills cmd.rsp.arg[2]/arg[3] itself. */
                if (qlcnic_fw_flash_get_minidump_temp_size(adapter, &cmd)) {
                        qlcnic_free_mbx_args(&cmd);
                        return -EIO;
                }
                *use_flash_temp = 1;
        }

        *temp_size = cmd.rsp.arg[2];
        *version = cmd.rsp.arg[3];
        qlcnic_free_mbx_args(&cmd);

        /* A zero-sized template is unusable. */
        if (!(*temp_size))
                return -EIO;

        return 0;
}
1168
/* Fetch the minidump template from firmware: firmware DMAs it into a
 * coherent buffer whose bus address is passed via the mailbox, then the
 * words are copied into @buffer with le32 -> cpu conversion.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the status
 * from qlcnic_issue_cmd() otherwise.
 */
static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
                                             u32 *buffer, u32 temp_size)
{
        int err = 0, i;
        void *tmp_addr;
        __le32 *tmp_buf;
        struct qlcnic_cmd_args cmd;
        dma_addr_t tmp_addr_t = 0;

        tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
                                      &tmp_addr_t, GFP_KERNEL);
        if (!tmp_addr)
                return -ENOMEM;

        if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
                err = -ENOMEM;
                goto free_mem;
        }

        /* Hand the DMA buffer (bus address split low/high) to firmware. */
        cmd.req.arg[1] = LSD(tmp_addr_t);
        cmd.req.arg[2] = MSD(tmp_addr_t);
        cmd.req.arg[3] = temp_size;
        err = qlcnic_issue_cmd(adapter, &cmd);

        tmp_buf = tmp_addr;
        if (err == QLCNIC_RCODE_SUCCESS) {
                for (i = 0; i < temp_size / sizeof(u32); i++)
                        *buffer++ = __le32_to_cpu(*tmp_buf++);
        }

        qlcnic_free_mbx_args(&cmd);

free_mem:
        dma_free_coherent(&adapter->pdev->dev, temp_size, tmp_addr, tmp_addr_t);

        return err;
}
1206
/* Top-level template setup: size the template, allocate it, fetch it
 * from firmware (or from flash when required or as fallback), verify
 * the checksum, cache the header fields, and optionally allocate the
 * PEX DMA bounce buffer.
 *
 * Returns 0 on success, -EIO or -ENOMEM on failure.
 */
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
        struct qlcnic_hardware_context *ahw;
        struct qlcnic_fw_dump *fw_dump;
        u32 version, csum, *tmp_buf;
        u8 use_flash_temp = 0;
        u32 temp_size = 0;
        void *temp_buffer;
        int err;

        ahw = adapter->ahw;
        fw_dump = &ahw->fw_dump;
        err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
                                               &use_flash_temp);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "Can't get template size %d\n", err);
                return -EIO;
        }

        fw_dump->tmpl_hdr = vzalloc(temp_size);
        if (!fw_dump->tmpl_hdr)
                return -ENOMEM;

        tmp_buf = (u32 *)fw_dump->tmpl_hdr;
        /* When the size came from flash, fetch the template from flash
         * directly (jumps into the error-fallback branch below).
         */
        if (use_flash_temp)
                goto flash_temp;

        err = __qlcnic_fw_cmd_get_minidump_temp(adapter, tmp_buf, temp_size);

        if (err) {
flash_temp:
                err = qlcnic_fw_flash_get_minidump_temp(adapter, (u8 *)tmp_buf,
                                                        temp_size);

                if (err) {
                        dev_err(&adapter->pdev->dev,
                                "Failed to get minidump template header %d\n",
                                err);
                        vfree(fw_dump->tmpl_hdr);
                        fw_dump->tmpl_hdr = NULL;
                        return -EIO;
                }
        }

        /* Non-zero folded checksum means the template is corrupt. */
        csum = qlcnic_temp_checksum((uint32_t *)tmp_buf, temp_size);

        if (csum) {
                dev_err(&adapter->pdev->dev,
                        "Template header checksum validation failed\n");
                vfree(fw_dump->tmpl_hdr);
                fw_dump->tmpl_hdr = NULL;
                return -EIO;
        }

        qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);

        if (fw_dump->use_pex_dma) {
                /* Bounce buffer for PEX DMA reads; silently fall back to
                 * the test-agent path if it cannot be allocated.
                 */
                fw_dump->dma_buffer = NULL;
                temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
                                                 QLC_PEX_DMA_READ_SIZE,
                                                 &fw_dump->phys_addr,
                                                 GFP_KERNEL);
                if (!temp_buffer)
                        fw_dump->use_pex_dma = false;
                else
                        fw_dump->dma_buffer = temp_buffer;
        }


        dev_info(&adapter->pdev->dev,
                 "Default minidump capture mask 0x%x\n",
                 fw_dump->cap_mask);

        qlcnic_enable_fw_dump_state(adapter);

        return 0;
}
1285
/* Capture a firmware minidump into a freshly-allocated buffer.
 *
 * Walks the template's entry list, dispatching each entry whose capture
 * mask bit is enabled to the matching per-opcode handler (82xx or 83xx
 * ops table). Entries that are masked off, unknown, or fail validation
 * are flagged QLCNIC_DUMP_SKIP and skipped. On completion, fw_dump->data
 * holds the captured dump, fw_dump->clr marks it pending retrieval, and
 * a udev event announces availability.
 * Returns 0 on success; -EIO when no template/dump state/size, or when a
 * previous dump has not been cleared; -ENOMEM on allocation failure.
 */
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
	struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
	const struct qlcnic_dump_operations *fw_dump_ops;
	struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
	u32 entry_offset, dump, no_entries, buf_offset = 0;
	int i, k, ops_cnt, ops_index, dump_size = 0;
	struct device *dev = &adapter->pdev->dev;
	struct qlcnic_hardware_context *ahw;
	struct qlcnic_dump_entry *entry;
	void *tmpl_hdr;
	u32 ocm_window;
	__le32 *buffer;
	char mesg[64];
	char *msg[] = {mesg, NULL};

	ahw = adapter->ahw;
	tmpl_hdr = fw_dump->tmpl_hdr;

	/* No template means the dump facility was never initialized */
	if (!tmpl_hdr)
		return -EIO;

	if (!qlcnic_check_fw_dump_state(adapter)) {
		dev_info(&adapter->pdev->dev, "Dump not enabled\n");
		return -EIO;
	}

	if (fw_dump->clr) {
		dev_info(&adapter->pdev->dev,
			 "Previous dump not cleared, not capturing dump\n");
		return -EIO;
	}

	netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");

	/* Sum the capture sizes of all enabled mask bits (bit 1..7) to get
	 * the total dump buffer size.
	 */
	for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
		if (i & fw_dump->cap_mask)
			dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);

	if (!dump_size)
		return -EIO;

	fw_dump->data = vzalloc(dump_size);
	if (!fw_dump->data)
		return -ENOMEM;

	buffer = fw_dump->data;
	fw_dump->size = dump_size;
	no_entries = fw_dump->num_entries;
	entry_offset = fw_dump->offset;
	/* Record driver and firmware versions in the template sys-info */
	qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
	qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);

	if (qlcnic_82xx_check(adapter)) {
		ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
		fw_dump_ops = qlcnic_fw_dump_ops;
	} else {
		hdr_83xx = tmpl_hdr;
		ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
		fw_dump_ops = qlcnic_83xx_fw_dump_ops;
		/* Seed 83xx saved state with this function's OCM window
		 * register and PCI function number for the entry handlers.
		 */
		ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
		hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
		hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
	}

	for (i = 0; i < no_entries; i++) {
		entry = tmpl_hdr + entry_offset;
		/* Entry not selected by the capture mask: mark and skip */
		if (!(entry->hdr.mask & fw_dump->cap_mask)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Locate the handler matching this entry's opcode */
		ops_index = 0;
		while (ops_index < ops_cnt) {
			if (entry->hdr.type == fw_dump_ops[ops_index].opcode)
				break;
			ops_index++;
		}

		if (ops_index == ops_cnt) {
			dev_info(dev, "Skipping unknown entry opcode %d\n",
				 entry->hdr.type);
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Capture this entry; discard it if the handler's byte
		 * count does not validate against the entry header.
		 */
		dump = fw_dump_ops[ops_index].handler(adapter, entry, buffer);
		if (!qlcnic_valid_dump_entry(dev, entry, dump)) {
			entry->hdr.flags |= QLCNIC_DUMP_SKIP;
			entry_offset += entry->hdr.offset;
			continue;
		}

		/* Advance the output cursor past the captured data */
		buf_offset += entry->hdr.cap_size;
		entry_offset += entry->hdr.offset;
		buffer = fw_dump->data + buf_offset;
		cond_resched();
	}

	fw_dump->clr = 1;
	snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
	netdev_info(adapter->netdev,
		    "Dump data %d bytes captured, dump data address = %p, template header size %d bytes, template address = %p\n",
		    fw_dump->size, fw_dump->data, fw_dump->tmpl_hdr_size,
		    fw_dump->tmpl_hdr);
	/* Notify user space (udev) that a new dump is available */
	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);

	return 0;
}
1401
1402 static inline bool
1403 qlcnic_83xx_md_check_extended_dump_capability(struct qlcnic_adapter *adapter)
1404 {
1405
1406
1407
1408
1409
1410 return ((adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE8830) &&
1411 (adapter->ahw->extra_capability[0] &
1412 QLCNIC_FW_CAPABILITY_2_EXT_ISCSI_DUMP));
1413 }
1414
1415 void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
1416 {
1417 u32 prev_version, current_version;
1418 struct qlcnic_hardware_context *ahw = adapter->ahw;
1419 struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
1420 struct pci_dev *pdev = adapter->pdev;
1421 bool extended = false;
1422 int ret;
1423
1424 prev_version = adapter->fw_version;
1425 current_version = qlcnic_83xx_get_fw_version(adapter);
1426
1427 if (fw_dump->tmpl_hdr == NULL || current_version > prev_version) {
1428 vfree(fw_dump->tmpl_hdr);
1429
1430 if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
1431 extended = !qlcnic_83xx_extend_md_capab(adapter);
1432
1433 ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
1434 if (ret)
1435 return;
1436
1437 dev_info(&pdev->dev, "Supports FW dump capability\n");
1438
1439
1440
1441
1442
1443 if (extended) {
1444 struct qlcnic_83xx_dump_template_hdr *hdr;
1445
1446 hdr = fw_dump->tmpl_hdr;
1447 hdr->drv_cap_mask = 0x1f;
1448 fw_dump->cap_mask = 0x1f;
1449 dev_info(&pdev->dev,
1450 "Extended iSCSI dump capability and updated capture mask to 0x1f\n");
1451 }
1452 }
1453 }