root/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes following definitions.
  1. cudbg_do_compression
  2. cudbg_write_and_release_buff
  3. is_fw_attached
  4. cudbg_align_debug_buffer
  5. cudbg_get_entity_hdr
  6. cudbg_read_vpd_reg
  7. cudbg_mem_desc_cmp
  8. cudbg_fill_meminfo
  9. cudbg_collect_reg_dump
  10. cudbg_collect_fw_devlog
  11. cudbg_collect_cim_la
  12. cudbg_collect_cim_ma_la
  13. cudbg_collect_cim_qcfg
  14. cudbg_read_cim_ibq
  15. cudbg_collect_cim_ibq_tp0
  16. cudbg_collect_cim_ibq_tp1
  17. cudbg_collect_cim_ibq_ulp
  18. cudbg_collect_cim_ibq_sge0
  19. cudbg_collect_cim_ibq_sge1
  20. cudbg_collect_cim_ibq_ncsi
  21. cudbg_cim_obq_size
  22. cudbg_read_cim_obq
  23. cudbg_collect_cim_obq_ulp0
  24. cudbg_collect_cim_obq_ulp1
  25. cudbg_collect_cim_obq_ulp2
  26. cudbg_collect_cim_obq_ulp3
  27. cudbg_collect_cim_obq_sge
  28. cudbg_collect_cim_obq_ncsi
  29. cudbg_collect_obq_sge_rx_q0
  30. cudbg_collect_obq_sge_rx_q1
  31. cudbg_meminfo_get_mem_index
  32. cudbg_get_mem_region
  33. cudbg_get_mem_relative
  34. cudbg_get_payload_range
  35. cudbg_memory_read
  36. cudbg_read_fw_mem
  37. cudbg_t4_fwcache
  38. cudbg_mem_region_size
  39. cudbg_collect_mem_region
  40. cudbg_collect_edc0_meminfo
  41. cudbg_collect_edc1_meminfo
  42. cudbg_collect_mc0_meminfo
  43. cudbg_collect_mc1_meminfo
  44. cudbg_collect_hma_meminfo
  45. cudbg_collect_rss
  46. cudbg_collect_rss_vf_config
  47. cudbg_collect_path_mtu
  48. cudbg_collect_pm_stats
  49. cudbg_collect_hw_sched
  50. cudbg_collect_tp_indirect
  51. cudbg_read_sge_qbase_indirect_reg
  52. cudbg_collect_sge_indirect
  53. cudbg_collect_ulprx_la
  54. cudbg_collect_tp_la
  55. cudbg_collect_meminfo
  56. cudbg_collect_cim_pif_la
  57. cudbg_collect_clk_info
  58. cudbg_collect_pcie_indirect
  59. cudbg_collect_pm_indirect
  60. cudbg_collect_tid
  61. cudbg_collect_pcie_config
  62. cudbg_sge_ctxt_check_valid
  63. cudbg_get_ctxt_region_info
  64. cudbg_dump_context_size
  65. cudbg_read_sge_ctxt
  66. cudbg_get_sge_ctxt_fw
  67. cudbg_collect_dump_context
  68. cudbg_tcamxy2valmask
  69. cudbg_mps_rpl_backdoor
  70. cudbg_collect_tcam_index
  71. cudbg_collect_mps_tcam
  72. cudbg_collect_vpd_data
  73. cudbg_read_tid
  74. cudbg_get_le_type
  75. cudbg_is_ipv6_entry
  76. cudbg_fill_le_tcam_info
  77. cudbg_collect_le_tcam
  78. cudbg_collect_cctrl
  79. cudbg_collect_ma_indirect
  80. cudbg_collect_ulptx_la
  81. cudbg_collect_up_cim_indirect
  82. cudbg_collect_pbt_tables
  83. cudbg_collect_mbox_log
  84. cudbg_collect_hma_indirect
  85. cudbg_fill_qdesc_num_and_size
  86. cudbg_collect_qdesc

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  *  Copyright (C) 2017 Chelsio Communications.  All rights reserved.
   4  */
   5 
   6 #include <linux/sort.h>
   7 
   8 #include "t4_regs.h"
   9 #include "cxgb4.h"
  10 #include "cxgb4_cudbg.h"
  11 #include "cudbg_if.h"
  12 #include "cudbg_lib_common.h"
  13 #include "cudbg_entity.h"
  14 #include "cudbg_lib.h"
  15 #include "cudbg_zlib.h"
  16 
  17 static int cudbg_do_compression(struct cudbg_init *pdbg_init,
  18                                 struct cudbg_buffer *pin_buff,
  19                                 struct cudbg_buffer *dbg_buff)
  20 {
  21         struct cudbg_buffer temp_in_buff = { 0 };
  22         int bytes_left, bytes_read, bytes;
  23         u32 offset = dbg_buff->offset;
  24         int rc;
  25 
  26         temp_in_buff.offset = pin_buff->offset;
  27         temp_in_buff.data = pin_buff->data;
  28         temp_in_buff.size = pin_buff->size;
  29 
  30         bytes_left = pin_buff->size;
  31         bytes_read = 0;
  32         while (bytes_left > 0) {
  33                 /* Do compression in smaller chunks */
  34                 bytes = min_t(unsigned long, bytes_left,
  35                               (unsigned long)CUDBG_CHUNK_SIZE);
  36                 temp_in_buff.data = (char *)pin_buff->data + bytes_read;
  37                 temp_in_buff.size = bytes;
  38                 rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
  39                 if (rc)
  40                         return rc;
  41                 bytes_left -= bytes;
  42                 bytes_read += bytes;
  43         }
  44 
  45         pin_buff->size = dbg_buff->offset - offset;
  46         return 0;
  47 }
  48 
  49 static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
  50                                         struct cudbg_buffer *pin_buff,
  51                                         struct cudbg_buffer *dbg_buff)
  52 {
  53         int rc = 0;
  54 
  55         if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
  56                 cudbg_update_buff(pin_buff, dbg_buff);
  57         } else {
  58                 rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
  59                 if (rc)
  60                         goto out;
  61         }
  62 
  63 out:
  64         cudbg_put_buff(pdbg_init, pin_buff);
  65         return rc;
  66 }
  67 
  68 static int is_fw_attached(struct cudbg_init *pdbg_init)
  69 {
  70         struct adapter *padap = pdbg_init->adap;
  71 
  72         if (!(padap->flags & CXGB4_FW_OK) || padap->use_bd)
  73                 return 0;
  74 
  75         return 1;
  76 }
  77 
  78 /* This function will add additional padding bytes into debug_buffer to make it
  79  * 4 byte aligned.
  80  */
  81 void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
  82                               struct cudbg_entity_hdr *entity_hdr)
  83 {
  84         u8 zero_buf[4] = {0};
  85         u8 padding, remain;
  86 
  87         remain = (dbg_buff->offset - entity_hdr->start_offset) % 4;
  88         padding = 4 - remain;
  89         if (remain) {
  90                 memcpy(((u8 *)dbg_buff->data) + dbg_buff->offset, &zero_buf,
  91                        padding);
  92                 dbg_buff->offset += padding;
  93                 entity_hdr->num_pad = padding;
  94         }
  95         entity_hdr->size = dbg_buff->offset - entity_hdr->start_offset;
  96 }
  97 
  98 struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i)
  99 {
 100         struct cudbg_hdr *cudbg_hdr = (struct cudbg_hdr *)outbuf;
 101 
 102         return (struct cudbg_entity_hdr *)
 103                ((char *)outbuf + cudbg_hdr->hdr_len +
 104                 (sizeof(struct cudbg_entity_hdr) * (i - 1)));
 105 }
 106 
 107 static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
 108                               void *dest)
 109 {
 110         int vaddr, rc;
 111 
 112         vaddr = t4_eeprom_ptov(addr, padap->pf, EEPROMPFSIZE);
 113         if (vaddr < 0)
 114                 return vaddr;
 115 
 116         rc = pci_read_vpd(padap->pdev, vaddr, len, dest);
 117         if (rc < 0)
 118                 return rc;
 119 
 120         return 0;
 121 }
 122 
 123 static int cudbg_mem_desc_cmp(const void *a, const void *b)
 124 {
 125         return ((const struct cudbg_mem_desc *)a)->base -
 126                ((const struct cudbg_mem_desc *)b)->base;
 127 }
 128 
 129 int cudbg_fill_meminfo(struct adapter *padap,
 130                        struct cudbg_meminfo *meminfo_buff)
 131 {
 132         struct cudbg_mem_desc *md;
 133         u32 lo, hi, used, alloc;
 134         int n, i;
 135 
 136         memset(meminfo_buff->avail, 0,
 137                ARRAY_SIZE(meminfo_buff->avail) *
 138                sizeof(struct cudbg_mem_desc));
 139         memset(meminfo_buff->mem, 0,
 140                (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
 141         md  = meminfo_buff->mem;
 142 
 143         for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
 144                 meminfo_buff->mem[i].limit = 0;
 145                 meminfo_buff->mem[i].idx = i;
 146         }
 147 
 148         /* Find and sort the populated memory ranges */
 149         i = 0;
 150         lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
 151         if (lo & EDRAM0_ENABLE_F) {
 152                 hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
 153                 meminfo_buff->avail[i].base =
 154                         cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
 155                 meminfo_buff->avail[i].limit =
 156                         meminfo_buff->avail[i].base +
 157                         cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
 158                 meminfo_buff->avail[i].idx = 0;
 159                 i++;
 160         }
 161 
 162         if (lo & EDRAM1_ENABLE_F) {
 163                 hi =  t4_read_reg(padap, MA_EDRAM1_BAR_A);
 164                 meminfo_buff->avail[i].base =
 165                         cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
 166                 meminfo_buff->avail[i].limit =
 167                         meminfo_buff->avail[i].base +
 168                         cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
 169                 meminfo_buff->avail[i].idx = 1;
 170                 i++;
 171         }
 172 
 173         if (is_t5(padap->params.chip)) {
 174                 if (lo & EXT_MEM0_ENABLE_F) {
 175                         hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
 176                         meminfo_buff->avail[i].base =
 177                                 cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
 178                         meminfo_buff->avail[i].limit =
 179                                 meminfo_buff->avail[i].base +
 180                                 cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
 181                         meminfo_buff->avail[i].idx = 3;
 182                         i++;
 183                 }
 184 
 185                 if (lo & EXT_MEM1_ENABLE_F) {
 186                         hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
 187                         meminfo_buff->avail[i].base =
 188                                 cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
 189                         meminfo_buff->avail[i].limit =
 190                                 meminfo_buff->avail[i].base +
 191                                 cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
 192                         meminfo_buff->avail[i].idx = 4;
 193                         i++;
 194                 }
 195         } else {
 196                 if (lo & EXT_MEM_ENABLE_F) {
 197                         hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
 198                         meminfo_buff->avail[i].base =
 199                                 cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
 200                         meminfo_buff->avail[i].limit =
 201                                 meminfo_buff->avail[i].base +
 202                                 cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
 203                         meminfo_buff->avail[i].idx = 2;
 204                         i++;
 205                 }
 206 
 207                 if (lo & HMA_MUX_F) {
 208                         hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
 209                         meminfo_buff->avail[i].base =
 210                                 cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
 211                         meminfo_buff->avail[i].limit =
 212                                 meminfo_buff->avail[i].base +
 213                                 cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
 214                         meminfo_buff->avail[i].idx = 5;
 215                         i++;
 216                 }
 217         }
 218 
 219         if (!i) /* no memory available */
 220                 return CUDBG_STATUS_ENTITY_NOT_FOUND;
 221 
 222         meminfo_buff->avail_c = i;
 223         sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
 224              cudbg_mem_desc_cmp, NULL);
 225         (md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
 226         (md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
 227         (md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
 228         (md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
 229         (md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
 230         (md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
 231         (md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
 232         (md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
 233         (md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);
 234 
 235         /* the next few have explicit upper bounds */
 236         md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
 237         md->limit = md->base - 1 +
 238                     t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
 239                     PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
 240         md++;
 241 
 242         md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
 243         md->limit = md->base - 1 +
 244                     t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
 245                     PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
 246         md++;
 247 
 248         if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
 249                 if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
 250                         hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
 251                         md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
 252                 } else {
 253                         hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
 254                         md->base = t4_read_reg(padap,
 255                                                LE_DB_HASH_TBL_BASE_ADDR_A);
 256                 }
 257                 md->limit = 0;
 258         } else {
 259                 md->base = 0;
 260                 md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
 261         }
 262         md++;
 263 
 264 #define ulp_region(reg) do { \
 265         md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
 266         (md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
 267 } while (0)
 268 
 269         ulp_region(RX_ISCSI);
 270         ulp_region(RX_TDDP);
 271         ulp_region(TX_TPT);
 272         ulp_region(RX_STAG);
 273         ulp_region(RX_RQ);
 274         ulp_region(RX_RQUDP);
 275         ulp_region(RX_PBL);
 276         ulp_region(TX_PBL);
 277 #undef ulp_region
 278         md->base = 0;
 279         md->idx = ARRAY_SIZE(cudbg_region);
 280         if (!is_t4(padap->params.chip)) {
 281                 u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
 282                 u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
 283                 u32 size = 0;
 284 
 285                 if (is_t5(padap->params.chip)) {
 286                         if (sge_ctrl & VFIFO_ENABLE_F)
 287                                 size = DBVFIFO_SIZE_G(fifo_size);
 288                 } else {
 289                         size = T6_DBVFIFO_SIZE_G(fifo_size);
 290                 }
 291 
 292                 if (size) {
 293                         md->base = BASEADDR_G(t4_read_reg(padap,
 294                                                           SGE_DBVFIFO_BADDR_A));
 295                         md->limit = md->base + (size << 2) - 1;
 296                 }
 297         }
 298 
 299         md++;
 300 
 301         md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
 302         md->limit = 0;
 303         md++;
 304         md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
 305         md->limit = 0;
 306         md++;
 307 
 308         md->base = padap->vres.ocq.start;
 309         if (padap->vres.ocq.size)
 310                 md->limit = md->base + padap->vres.ocq.size - 1;
 311         else
 312                 md->idx = ARRAY_SIZE(cudbg_region);  /* hide it */
 313         md++;
 314 
 315         /* add any address-space holes, there can be up to 3 */
 316         for (n = 0; n < i - 1; n++)
 317                 if (meminfo_buff->avail[n].limit <
 318                     meminfo_buff->avail[n + 1].base)
 319                         (md++)->base = meminfo_buff->avail[n].limit;
 320 
 321         if (meminfo_buff->avail[n].limit)
 322                 (md++)->base = meminfo_buff->avail[n].limit;
 323 
 324         n = md - meminfo_buff->mem;
 325         meminfo_buff->mem_c = n;
 326 
 327         sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
 328              cudbg_mem_desc_cmp, NULL);
 329 
 330         lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
 331         hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
 332         meminfo_buff->up_ram_lo = lo;
 333         meminfo_buff->up_ram_hi = hi;
 334 
 335         lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
 336         hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
 337         meminfo_buff->up_extmem2_lo = lo;
 338         meminfo_buff->up_extmem2_hi = hi;
 339 
 340         lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
 341         for (i = 0, meminfo_buff->free_rx_cnt = 0; i < 2; i++)
 342                 meminfo_buff->free_rx_cnt +=
 343                         FREERXPAGECOUNT_G(t4_read_reg(padap,
 344                                                       TP_FLM_FREE_RX_CNT_A));
 345 
 346         meminfo_buff->rx_pages_data[0] =  PMRXMAXPAGE_G(lo);
 347         meminfo_buff->rx_pages_data[1] =
 348                 t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
 349         meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;
 350 
 351         lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
 352         hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
 353         for (i = 0, meminfo_buff->free_tx_cnt = 0; i < 4; i++)
 354                 meminfo_buff->free_tx_cnt +=
 355                         FREETXPAGECOUNT_G(t4_read_reg(padap,
 356                                                       TP_FLM_FREE_TX_CNT_A));
 357 
 358         meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
 359         meminfo_buff->tx_pages_data[1] =
 360                 hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
 361         meminfo_buff->tx_pages_data[2] =
 362                 hi >= (1 << 20) ? 'M' : 'K';
 363         meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);
 364 
 365         meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
 366         meminfo_buff->p_structs_free_cnt =
 367                 FREEPSTRUCTCOUNT_G(t4_read_reg(padap, TP_FLM_FREE_PS_CNT_A));
 368 
 369         for (i = 0; i < 4; i++) {
 370                 if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
 371                         lo = t4_read_reg(padap,
 372                                          MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
 373                 else
 374                         lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
 375                 if (is_t5(padap->params.chip)) {
 376                         used = T5_USED_G(lo);
 377                         alloc = T5_ALLOC_G(lo);
 378                 } else {
 379                         used = USED_G(lo);
 380                         alloc = ALLOC_G(lo);
 381                 }
 382                 meminfo_buff->port_used[i] = used;
 383                 meminfo_buff->port_alloc[i] = alloc;
 384         }
 385 
 386         for (i = 0; i < padap->params.arch.nchan; i++) {
 387                 if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
 388                         lo = t4_read_reg(padap,
 389                                          MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
 390                 else
 391                         lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
 392                 if (is_t5(padap->params.chip)) {
 393                         used = T5_USED_G(lo);
 394                         alloc = T5_ALLOC_G(lo);
 395                 } else {
 396                         used = USED_G(lo);
 397                         alloc = ALLOC_G(lo);
 398                 }
 399                 meminfo_buff->loopback_used[i] = used;
 400                 meminfo_buff->loopback_alloc[i] = alloc;
 401         }
 402 
 403         return 0;
 404 }
 405 
 406 int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
 407                            struct cudbg_buffer *dbg_buff,
 408                            struct cudbg_error *cudbg_err)
 409 {
 410         struct adapter *padap = pdbg_init->adap;
 411         struct cudbg_buffer temp_buff = { 0 };
 412         u32 buf_size = 0;
 413         int rc = 0;
 414 
 415         if (is_t4(padap->params.chip))
 416                 buf_size = T4_REGMAP_SIZE;
 417         else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
 418                 buf_size = T5_REGMAP_SIZE;
 419 
 420         rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
 421         if (rc)
 422                 return rc;
 423         t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
 424         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 425 }
 426 
 427 int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
 428                             struct cudbg_buffer *dbg_buff,
 429                             struct cudbg_error *cudbg_err)
 430 {
 431         struct adapter *padap = pdbg_init->adap;
 432         struct cudbg_buffer temp_buff = { 0 };
 433         struct devlog_params *dparams;
 434         int rc = 0;
 435 
 436         rc = t4_init_devlog_params(padap);
 437         if (rc < 0) {
 438                 cudbg_err->sys_err = rc;
 439                 return rc;
 440         }
 441 
 442         dparams = &padap->params.devlog;
 443         rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
 444         if (rc)
 445                 return rc;
 446 
 447         /* Collect FW devlog */
 448         if (dparams->start != 0) {
 449                 spin_lock(&padap->win0_lock);
 450                 rc = t4_memory_rw(padap, padap->params.drv_memwin,
 451                                   dparams->memtype, dparams->start,
 452                                   dparams->size,
 453                                   (__be32 *)(char *)temp_buff.data,
 454                                   1);
 455                 spin_unlock(&padap->win0_lock);
 456                 if (rc) {
 457                         cudbg_err->sys_err = rc;
 458                         cudbg_put_buff(pdbg_init, &temp_buff);
 459                         return rc;
 460                 }
 461         }
 462         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 463 }
 464 
 465 int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
 466                          struct cudbg_buffer *dbg_buff,
 467                          struct cudbg_error *cudbg_err)
 468 {
 469         struct adapter *padap = pdbg_init->adap;
 470         struct cudbg_buffer temp_buff = { 0 };
 471         int size, rc;
 472         u32 cfg = 0;
 473 
 474         if (is_t6(padap->params.chip)) {
 475                 size = padap->params.cim_la_size / 10 + 1;
 476                 size *= 10 * sizeof(u32);
 477         } else {
 478                 size = padap->params.cim_la_size / 8;
 479                 size *= 8 * sizeof(u32);
 480         }
 481 
 482         size += sizeof(cfg);
 483         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 484         if (rc)
 485                 return rc;
 486 
 487         rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
 488         if (rc) {
 489                 cudbg_err->sys_err = rc;
 490                 cudbg_put_buff(pdbg_init, &temp_buff);
 491                 return rc;
 492         }
 493 
 494         memcpy((char *)temp_buff.data, &cfg, sizeof(cfg));
 495         rc = t4_cim_read_la(padap,
 496                             (u32 *)((char *)temp_buff.data + sizeof(cfg)),
 497                             NULL);
 498         if (rc < 0) {
 499                 cudbg_err->sys_err = rc;
 500                 cudbg_put_buff(pdbg_init, &temp_buff);
 501                 return rc;
 502         }
 503         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 504 }
 505 
 506 int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
 507                             struct cudbg_buffer *dbg_buff,
 508                             struct cudbg_error *cudbg_err)
 509 {
 510         struct adapter *padap = pdbg_init->adap;
 511         struct cudbg_buffer temp_buff = { 0 };
 512         int size, rc;
 513 
 514         size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
 515         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
 516         if (rc)
 517                 return rc;
 518 
 519         t4_cim_read_ma_la(padap,
 520                           (u32 *)temp_buff.data,
 521                           (u32 *)((char *)temp_buff.data +
 522                                   5 * CIM_MALA_SIZE));
 523         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 524 }
 525 
 526 int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
 527                            struct cudbg_buffer *dbg_buff,
 528                            struct cudbg_error *cudbg_err)
 529 {
 530         struct adapter *padap = pdbg_init->adap;
 531         struct cudbg_buffer temp_buff = { 0 };
 532         struct cudbg_cim_qcfg *cim_qcfg_data;
 533         int rc;
 534 
 535         rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
 536                             &temp_buff);
 537         if (rc)
 538                 return rc;
 539 
 540         cim_qcfg_data = (struct cudbg_cim_qcfg *)temp_buff.data;
 541         cim_qcfg_data->chip = padap->params.chip;
 542         rc = t4_cim_read(padap, UP_IBQ_0_RDADDR_A,
 543                          ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
 544         if (rc) {
 545                 cudbg_err->sys_err = rc;
 546                 cudbg_put_buff(pdbg_init, &temp_buff);
 547                 return rc;
 548         }
 549 
 550         rc = t4_cim_read(padap, UP_OBQ_0_REALADDR_A,
 551                          ARRAY_SIZE(cim_qcfg_data->obq_wr),
 552                          cim_qcfg_data->obq_wr);
 553         if (rc) {
 554                 cudbg_err->sys_err = rc;
 555                 cudbg_put_buff(pdbg_init, &temp_buff);
 556                 return rc;
 557         }
 558 
 559         t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
 560                          cim_qcfg_data->thres);
 561         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 562 }
 563 
 564 static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
 565                               struct cudbg_buffer *dbg_buff,
 566                               struct cudbg_error *cudbg_err, int qid)
 567 {
 568         struct adapter *padap = pdbg_init->adap;
 569         struct cudbg_buffer temp_buff = { 0 };
 570         int no_of_read_words, rc = 0;
 571         u32 qsize;
 572 
 573         /* collect CIM IBQ */
 574         qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
 575         rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
 576         if (rc)
 577                 return rc;
 578 
 579         /* t4_read_cim_ibq will return no. of read words or error */
 580         no_of_read_words = t4_read_cim_ibq(padap, qid,
 581                                            (u32 *)temp_buff.data, qsize);
 582         /* no_of_read_words is less than or equal to 0 means error */
 583         if (no_of_read_words <= 0) {
 584                 if (!no_of_read_words)
 585                         rc = CUDBG_SYSTEM_ERROR;
 586                 else
 587                         rc = no_of_read_words;
 588                 cudbg_err->sys_err = rc;
 589                 cudbg_put_buff(pdbg_init, &temp_buff);
 590                 return rc;
 591         }
 592         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 593 }
 594 
 595 int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
 596                               struct cudbg_buffer *dbg_buff,
 597                               struct cudbg_error *cudbg_err)
 598 {
 599         return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 0);
 600 }
 601 
 602 int cudbg_collect_cim_ibq_tp1(struct cudbg_init *pdbg_init,
 603                               struct cudbg_buffer *dbg_buff,
 604                               struct cudbg_error *cudbg_err)
 605 {
 606         return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 1);
 607 }
 608 
 609 int cudbg_collect_cim_ibq_ulp(struct cudbg_init *pdbg_init,
 610                               struct cudbg_buffer *dbg_buff,
 611                               struct cudbg_error *cudbg_err)
 612 {
 613         return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 2);
 614 }
 615 
 616 int cudbg_collect_cim_ibq_sge0(struct cudbg_init *pdbg_init,
 617                                struct cudbg_buffer *dbg_buff,
 618                                struct cudbg_error *cudbg_err)
 619 {
 620         return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 3);
 621 }
 622 
 623 int cudbg_collect_cim_ibq_sge1(struct cudbg_init *pdbg_init,
 624                                struct cudbg_buffer *dbg_buff,
 625                                struct cudbg_error *cudbg_err)
 626 {
 627         return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 4);
 628 }
 629 
 630 int cudbg_collect_cim_ibq_ncsi(struct cudbg_init *pdbg_init,
 631                                struct cudbg_buffer *dbg_buff,
 632                                struct cudbg_error *cudbg_err)
 633 {
 634         return cudbg_read_cim_ibq(pdbg_init, dbg_buff, cudbg_err, 5);
 635 }
 636 
 637 u32 cudbg_cim_obq_size(struct adapter *padap, int qid)
 638 {
 639         u32 value;
 640 
 641         t4_write_reg(padap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
 642                      QUENUMSELECT_V(qid));
 643         value = t4_read_reg(padap, CIM_QUEUE_CONFIG_CTRL_A);
 644         value = CIMQSIZE_G(value) * 64; /* size in number of words */
 645         return value * sizeof(u32);
 646 }
 647 
 648 static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
 649                               struct cudbg_buffer *dbg_buff,
 650                               struct cudbg_error *cudbg_err, int qid)
 651 {
 652         struct adapter *padap = pdbg_init->adap;
 653         struct cudbg_buffer temp_buff = { 0 };
 654         int no_of_read_words, rc = 0;
 655         u32 qsize;
 656 
 657         /* collect CIM OBQ */
 658         qsize =  cudbg_cim_obq_size(padap, qid);
 659         rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
 660         if (rc)
 661                 return rc;
 662 
 663         /* t4_read_cim_obq will return no. of read words or error */
 664         no_of_read_words = t4_read_cim_obq(padap, qid,
 665                                            (u32 *)temp_buff.data, qsize);
 666         /* no_of_read_words is less than or equal to 0 means error */
 667         if (no_of_read_words <= 0) {
 668                 if (!no_of_read_words)
 669                         rc = CUDBG_SYSTEM_ERROR;
 670                 else
 671                         rc = no_of_read_words;
 672                 cudbg_err->sys_err = rc;
 673                 cudbg_put_buff(pdbg_init, &temp_buff);
 674                 return rc;
 675         }
 676         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
 677 }
 678 
 679 int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
 680                                struct cudbg_buffer *dbg_buff,
 681                                struct cudbg_error *cudbg_err)
 682 {
 683         return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 0);
 684 }
 685 
 686 int cudbg_collect_cim_obq_ulp1(struct cudbg_init *pdbg_init,
 687                                struct cudbg_buffer *dbg_buff,
 688                                struct cudbg_error *cudbg_err)
 689 {
 690         return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 1);
 691 }
 692 
 693 int cudbg_collect_cim_obq_ulp2(struct cudbg_init *pdbg_init,
 694                                struct cudbg_buffer *dbg_buff,
 695                                struct cudbg_error *cudbg_err)
 696 {
 697         return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 2);
 698 }
 699 
 700 int cudbg_collect_cim_obq_ulp3(struct cudbg_init *pdbg_init,
 701                                struct cudbg_buffer *dbg_buff,
 702                                struct cudbg_error *cudbg_err)
 703 {
 704         return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 3);
 705 }
 706 
 707 int cudbg_collect_cim_obq_sge(struct cudbg_init *pdbg_init,
 708                               struct cudbg_buffer *dbg_buff,
 709                               struct cudbg_error *cudbg_err)
 710 {
 711         return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 4);
 712 }
 713 
 714 int cudbg_collect_cim_obq_ncsi(struct cudbg_init *pdbg_init,
 715                                struct cudbg_buffer *dbg_buff,
 716                                struct cudbg_error *cudbg_err)
 717 {
 718         return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 5);
 719 }
 720 
 721 int cudbg_collect_obq_sge_rx_q0(struct cudbg_init *pdbg_init,
 722                                 struct cudbg_buffer *dbg_buff,
 723                                 struct cudbg_error *cudbg_err)
 724 {
 725         return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 6);
 726 }
 727 
 728 int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
 729                                 struct cudbg_buffer *dbg_buff,
 730                                 struct cudbg_error *cudbg_err)
 731 {
 732         return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
 733 }
 734 
 735 static int cudbg_meminfo_get_mem_index(struct adapter *padap,
 736                                        struct cudbg_meminfo *mem_info,
 737                                        u8 mem_type, u8 *idx)
 738 {
 739         u8 i, flag;
 740 
 741         switch (mem_type) {
 742         case MEM_EDC0:
 743                 flag = EDC0_FLAG;
 744                 break;
 745         case MEM_EDC1:
 746                 flag = EDC1_FLAG;
 747                 break;
 748         case MEM_MC0:
 749                 /* Some T5 cards have both MC0 and MC1. */
 750                 flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
 751                 break;
 752         case MEM_MC1:
 753                 flag = MC1_FLAG;
 754                 break;
 755         case MEM_HMA:
 756                 flag = HMA_FLAG;
 757                 break;
 758         default:
 759                 return CUDBG_STATUS_ENTITY_NOT_FOUND;
 760         }
 761 
 762         for (i = 0; i < mem_info->avail_c; i++) {
 763                 if (mem_info->avail[i].idx == flag) {
 764                         *idx = i;
 765                         return 0;
 766                 }
 767         }
 768 
 769         return CUDBG_STATUS_ENTITY_NOT_FOUND;
 770 }
 771 
 772 /* Fetch the @region_name's start and end from @meminfo. */
 773 static int cudbg_get_mem_region(struct adapter *padap,
 774                                 struct cudbg_meminfo *meminfo,
 775                                 u8 mem_type, const char *region_name,
 776                                 struct cudbg_mem_desc *mem_desc)
 777 {
 778         u8 mc, found = 0;
 779         u32 i, idx = 0;
 780         int rc;
 781 
 782         rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
 783         if (rc)
 784                 return rc;
 785 
 786         for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
 787                 if (!strcmp(cudbg_region[i], region_name)) {
 788                         found = 1;
 789                         idx = i;
 790                         break;
 791                 }
 792         }
 793         if (!found)
 794                 return -EINVAL;
 795 
 796         found = 0;
 797         for (i = 0; i < meminfo->mem_c; i++) {
 798                 if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
 799                         continue; /* Skip holes */
 800 
 801                 if (!(meminfo->mem[i].limit))
 802                         meminfo->mem[i].limit =
 803                                 i < meminfo->mem_c - 1 ?
 804                                 meminfo->mem[i + 1].base - 1 : ~0;
 805 
 806                 if (meminfo->mem[i].idx == idx) {
 807                         /* Check if the region exists in @mem_type memory */
 808                         if (meminfo->mem[i].base < meminfo->avail[mc].base &&
 809                             meminfo->mem[i].limit < meminfo->avail[mc].base)
 810                                 return -EINVAL;
 811 
 812                         if (meminfo->mem[i].base > meminfo->avail[mc].limit)
 813                                 return -EINVAL;
 814 
 815                         memcpy(mem_desc, &meminfo->mem[i],
 816                                sizeof(struct cudbg_mem_desc));
 817                         found = 1;
 818                         break;
 819                 }
 820         }
 821         if (!found)
 822                 return -EINVAL;
 823 
 824         return 0;
 825 }
 826 
 827 /* Fetch and update the start and end of the requested memory region w.r.t 0
 828  * in the corresponding EDC/MC/HMA.
 829  */
 830 static int cudbg_get_mem_relative(struct adapter *padap,
 831                                   struct cudbg_meminfo *meminfo,
 832                                   u8 mem_type, u32 *out_base, u32 *out_end)
 833 {
 834         u8 mc_idx;
 835         int rc;
 836 
 837         rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
 838         if (rc)
 839                 return rc;
 840 
 841         if (*out_base < meminfo->avail[mc_idx].base)
 842                 *out_base = 0;
 843         else
 844                 *out_base -= meminfo->avail[mc_idx].base;
 845 
 846         if (*out_end > meminfo->avail[mc_idx].limit)
 847                 *out_end = meminfo->avail[mc_idx].limit;
 848         else
 849                 *out_end -= meminfo->avail[mc_idx].base;
 850 
 851         return 0;
 852 }
 853 
 854 /* Get TX and RX Payload region */
 855 static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
 856                                    const char *region_name,
 857                                    struct cudbg_region_info *payload)
 858 {
 859         struct cudbg_mem_desc mem_desc = { 0 };
 860         struct cudbg_meminfo meminfo;
 861         int rc;
 862 
 863         rc = cudbg_fill_meminfo(padap, &meminfo);
 864         if (rc)
 865                 return rc;
 866 
 867         rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
 868                                   &mem_desc);
 869         if (rc) {
 870                 payload->exist = false;
 871                 return 0;
 872         }
 873 
 874         payload->exist = true;
 875         payload->start = mem_desc.base;
 876         payload->end = mem_desc.limit;
 877 
 878         return cudbg_get_mem_relative(padap, &meminfo, mem_type,
 879                                       &payload->start, &payload->end);
 880 }
 881 
 882 static int cudbg_memory_read(struct cudbg_init *pdbg_init, int win,
 883                              int mtype, u32 addr, u32 len, void *hbuf)
 884 {
 885         u32 win_pf, memoffset, mem_aperture, mem_base;
 886         struct adapter *adap = pdbg_init->adap;
 887         u32 pos, offset, resid;
 888         u32 *res_buf;
 889         u64 *buf;
 890         int ret;
 891 
 892         /* Argument sanity checks ...
 893          */
 894         if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
 895                 return -EINVAL;
 896 
 897         buf = (u64 *)hbuf;
 898 
 899         /* Try to do 64-bit reads.  Residual will be handled later. */
 900         resid = len & 0x7;
 901         len -= resid;
 902 
 903         ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
 904                                 &mem_aperture);
 905         if (ret)
 906                 return ret;
 907 
 908         addr = addr + memoffset;
 909         win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
 910 
 911         pos = addr & ~(mem_aperture - 1);
 912         offset = addr - pos;
 913 
 914         /* Set up initial PCI-E Memory Window to cover the start of our
 915          * transfer.
 916          */
 917         t4_memory_update_win(adap, win, pos | win_pf);
 918 
 919         /* Transfer data from the adapter */
 920         while (len > 0) {
 921                 *buf++ = le64_to_cpu((__force __le64)
 922                                      t4_read_reg64(adap, mem_base + offset));
 923                 offset += sizeof(u64);
 924                 len -= sizeof(u64);
 925 
 926                 /* If we've reached the end of our current window aperture,
 927                  * move the PCI-E Memory Window on to the next.
 928                  */
 929                 if (offset == mem_aperture) {
 930                         pos += mem_aperture;
 931                         offset = 0;
 932                         t4_memory_update_win(adap, win, pos | win_pf);
 933                 }
 934         }
 935 
 936         res_buf = (u32 *)buf;
 937         /* Read residual in 32-bit multiples */
 938         while (resid > sizeof(u32)) {
 939                 *res_buf++ = le32_to_cpu((__force __le32)
 940                                          t4_read_reg(adap, mem_base + offset));
 941                 offset += sizeof(u32);
 942                 resid -= sizeof(u32);
 943 
 944                 /* If we've reached the end of our current window aperture,
 945                  * move the PCI-E Memory Window on to the next.
 946                  */
 947                 if (offset == mem_aperture) {
 948                         pos += mem_aperture;
 949                         offset = 0;
 950                         t4_memory_update_win(adap, win, pos | win_pf);
 951                 }
 952         }
 953 
 954         /* Transfer residual < 32-bits */
 955         if (resid)
 956                 t4_memory_rw_residual(adap, resid, mem_base + offset,
 957                                       (u8 *)res_buf, T4_MEMORY_READ);
 958 
 959         return 0;
 960 }
 961 
 962 #define CUDBG_YIELD_ITERATION 256
 963 
 964 static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
 965                              struct cudbg_buffer *dbg_buff, u8 mem_type,
 966                              unsigned long tot_len,
 967                              struct cudbg_error *cudbg_err)
 968 {
 969         static const char * const region_name[] = { "Tx payload:",
 970                                                     "Rx payload:" };
 971         unsigned long bytes, bytes_left, bytes_read = 0;
 972         struct adapter *padap = pdbg_init->adap;
 973         struct cudbg_buffer temp_buff = { 0 };
 974         struct cudbg_region_info payload[2];
 975         u32 yield_count = 0;
 976         int rc = 0;
 977         u8 i;
 978 
 979         /* Get TX/RX Payload region range if they exist */
 980         memset(payload, 0, sizeof(payload));
 981         for (i = 0; i < ARRAY_SIZE(region_name); i++) {
 982                 rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
 983                                              &payload[i]);
 984                 if (rc)
 985                         return rc;
 986 
 987                 if (payload[i].exist) {
 988                         /* Align start and end to avoid wrap around */
 989                         payload[i].start = roundup(payload[i].start,
 990                                                    CUDBG_CHUNK_SIZE);
 991                         payload[i].end = rounddown(payload[i].end,
 992                                                    CUDBG_CHUNK_SIZE);
 993                 }
 994         }
 995 
 996         bytes_left = tot_len;
 997         while (bytes_left > 0) {
 998                 /* As MC size is huge and read through PIO access, this
 999                  * loop will hold cpu for a longer time. OS may think that
1000                  * the process is hanged and will generate CPU stall traces.
1001                  * So yield the cpu regularly.
1002                  */
1003                 yield_count++;
1004                 if (!(yield_count % CUDBG_YIELD_ITERATION))
1005                         schedule();
1006 
1007                 bytes = min_t(unsigned long, bytes_left,
1008                               (unsigned long)CUDBG_CHUNK_SIZE);
1009                 rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
1010                 if (rc)
1011                         return rc;
1012 
1013                 for (i = 0; i < ARRAY_SIZE(payload); i++)
1014                         if (payload[i].exist &&
1015                             bytes_read >= payload[i].start &&
1016                             bytes_read + bytes <= payload[i].end)
1017                                 /* TX and RX Payload regions can't overlap */
1018                                 goto skip_read;
1019 
1020                 spin_lock(&padap->win0_lock);
1021                 rc = cudbg_memory_read(pdbg_init, MEMWIN_NIC, mem_type,
1022                                        bytes_read, bytes, temp_buff.data);
1023                 spin_unlock(&padap->win0_lock);
1024                 if (rc) {
1025                         cudbg_err->sys_err = rc;
1026                         cudbg_put_buff(pdbg_init, &temp_buff);
1027                         return rc;
1028                 }
1029 
1030 skip_read:
1031                 bytes_left -= bytes;
1032                 bytes_read += bytes;
1033                 rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
1034                                                   dbg_buff);
1035                 if (rc) {
1036                         cudbg_put_buff(pdbg_init, &temp_buff);
1037                         return rc;
1038                 }
1039         }
1040         return rc;
1041 }
1042 
1043 static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
1044                              struct cudbg_error *cudbg_err)
1045 {
1046         struct adapter *padap = pdbg_init->adap;
1047         int rc;
1048 
1049         if (is_fw_attached(pdbg_init)) {
1050                 /* Flush uP dcache before reading edcX/mcX  */
1051                 rc = t4_fwcache(padap, FW_PARAM_DEV_FWCACHE_FLUSH);
1052                 if (rc)
1053                         cudbg_err->sys_warn = rc;
1054         }
1055 }
1056 
1057 static int cudbg_mem_region_size(struct cudbg_init *pdbg_init,
1058                                  struct cudbg_error *cudbg_err,
1059                                  u8 mem_type, unsigned long *region_size)
1060 {
1061         struct adapter *padap = pdbg_init->adap;
1062         struct cudbg_meminfo mem_info;
1063         u8 mc_idx;
1064         int rc;
1065 
1066         memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
1067         rc = cudbg_fill_meminfo(padap, &mem_info);
1068         if (rc) {
1069                 cudbg_err->sys_err = rc;
1070                 return rc;
1071         }
1072 
1073         cudbg_t4_fwcache(pdbg_init, cudbg_err);
1074         rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
1075         if (rc) {
1076                 cudbg_err->sys_err = rc;
1077                 return rc;
1078         }
1079 
1080         if (region_size)
1081                 *region_size = mem_info.avail[mc_idx].limit -
1082                                mem_info.avail[mc_idx].base;
1083 
1084         return 0;
1085 }
1086 
1087 static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
1088                                     struct cudbg_buffer *dbg_buff,
1089                                     struct cudbg_error *cudbg_err,
1090                                     u8 mem_type)
1091 {
1092         unsigned long size = 0;
1093         int rc;
1094 
1095         rc = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type, &size);
1096         if (rc)
1097                 return rc;
1098 
1099         return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
1100                                  cudbg_err);
1101 }
1102 
1103 int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
1104                                struct cudbg_buffer *dbg_buff,
1105                                struct cudbg_error *cudbg_err)
1106 {
1107         return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1108                                         MEM_EDC0);
1109 }
1110 
1111 int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
1112                                struct cudbg_buffer *dbg_buff,
1113                                struct cudbg_error *cudbg_err)
1114 {
1115         return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1116                                         MEM_EDC1);
1117 }
1118 
1119 int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
1120                               struct cudbg_buffer *dbg_buff,
1121                               struct cudbg_error *cudbg_err)
1122 {
1123         return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1124                                         MEM_MC0);
1125 }
1126 
1127 int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
1128                               struct cudbg_buffer *dbg_buff,
1129                               struct cudbg_error *cudbg_err)
1130 {
1131         return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1132                                         MEM_MC1);
1133 }
1134 
1135 int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
1136                               struct cudbg_buffer *dbg_buff,
1137                               struct cudbg_error *cudbg_err)
1138 {
1139         return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
1140                                         MEM_HMA);
1141 }
1142 
1143 int cudbg_collect_rss(struct cudbg_init *pdbg_init,
1144                       struct cudbg_buffer *dbg_buff,
1145                       struct cudbg_error *cudbg_err)
1146 {
1147         struct adapter *padap = pdbg_init->adap;
1148         struct cudbg_buffer temp_buff = { 0 };
1149         int rc, nentries;
1150 
1151         nentries = t4_chip_rss_size(padap);
1152         rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
1153                             &temp_buff);
1154         if (rc)
1155                 return rc;
1156 
1157         rc = t4_read_rss(padap, (u16 *)temp_buff.data);
1158         if (rc) {
1159                 cudbg_err->sys_err = rc;
1160                 cudbg_put_buff(pdbg_init, &temp_buff);
1161                 return rc;
1162         }
1163         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1164 }
1165 
1166 int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
1167                                 struct cudbg_buffer *dbg_buff,
1168                                 struct cudbg_error *cudbg_err)
1169 {
1170         struct adapter *padap = pdbg_init->adap;
1171         struct cudbg_buffer temp_buff = { 0 };
1172         struct cudbg_rss_vf_conf *vfconf;
1173         int vf, rc, vf_count;
1174 
1175         vf_count = padap->params.arch.vfcount;
1176         rc = cudbg_get_buff(pdbg_init, dbg_buff,
1177                             vf_count * sizeof(struct cudbg_rss_vf_conf),
1178                             &temp_buff);
1179         if (rc)
1180                 return rc;
1181 
1182         vfconf = (struct cudbg_rss_vf_conf *)temp_buff.data;
1183         for (vf = 0; vf < vf_count; vf++)
1184                 t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
1185                                       &vfconf[vf].rss_vf_vfh, true);
1186         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1187 }
1188 
1189 int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
1190                            struct cudbg_buffer *dbg_buff,
1191                            struct cudbg_error *cudbg_err)
1192 {
1193         struct adapter *padap = pdbg_init->adap;
1194         struct cudbg_buffer temp_buff = { 0 };
1195         int rc;
1196 
1197         rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
1198                             &temp_buff);
1199         if (rc)
1200                 return rc;
1201 
1202         t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
1203         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1204 }
1205 
1206 int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
1207                            struct cudbg_buffer *dbg_buff,
1208                            struct cudbg_error *cudbg_err)
1209 {
1210         struct adapter *padap = pdbg_init->adap;
1211         struct cudbg_buffer temp_buff = { 0 };
1212         struct cudbg_pm_stats *pm_stats_buff;
1213         int rc;
1214 
1215         rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
1216                             &temp_buff);
1217         if (rc)
1218                 return rc;
1219 
1220         pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
1221         t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
1222         t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
1223         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1224 }
1225 
1226 int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
1227                            struct cudbg_buffer *dbg_buff,
1228                            struct cudbg_error *cudbg_err)
1229 {
1230         struct adapter *padap = pdbg_init->adap;
1231         struct cudbg_buffer temp_buff = { 0 };
1232         struct cudbg_hw_sched *hw_sched_buff;
1233         int i, rc = 0;
1234 
1235         if (!padap->params.vpd.cclk)
1236                 return CUDBG_STATUS_CCLK_NOT_DEFINED;
1237 
1238         rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
1239                             &temp_buff);
1240 
1241         if (rc)
1242                 return rc;
1243 
1244         hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
1245         hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
1246         hw_sched_buff->mode = TIMERMODE_G(t4_read_reg(padap, TP_MOD_CONFIG_A));
1247         t4_read_pace_tbl(padap, hw_sched_buff->pace_tab);
1248         for (i = 0; i < NTX_SCHED; ++i)
1249                 t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
1250                                 &hw_sched_buff->ipg[i], true);
1251         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1252 }
1253 
1254 int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
1255                               struct cudbg_buffer *dbg_buff,
1256                               struct cudbg_error *cudbg_err)
1257 {
1258         struct adapter *padap = pdbg_init->adap;
1259         struct cudbg_buffer temp_buff = { 0 };
1260         struct ireg_buf *ch_tp_pio;
1261         int i, rc, n = 0;
1262         u32 size;
1263 
1264         if (is_t5(padap->params.chip))
1265                 n = sizeof(t5_tp_pio_array) +
1266                     sizeof(t5_tp_tm_pio_array) +
1267                     sizeof(t5_tp_mib_index_array);
1268         else
1269                 n = sizeof(t6_tp_pio_array) +
1270                     sizeof(t6_tp_tm_pio_array) +
1271                     sizeof(t6_tp_mib_index_array);
1272 
1273         n = n / (IREG_NUM_ELEM * sizeof(u32));
1274         size = sizeof(struct ireg_buf) * n;
1275         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1276         if (rc)
1277                 return rc;
1278 
1279         ch_tp_pio = (struct ireg_buf *)temp_buff.data;
1280 
1281         /* TP_PIO */
1282         if (is_t5(padap->params.chip))
1283                 n = sizeof(t5_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1284         else if (is_t6(padap->params.chip))
1285                 n = sizeof(t6_tp_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1286 
1287         for (i = 0; i < n; i++) {
1288                 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
1289                 u32 *buff = ch_tp_pio->outbuf;
1290 
1291                 if (is_t5(padap->params.chip)) {
1292                         tp_pio->ireg_addr = t5_tp_pio_array[i][0];
1293                         tp_pio->ireg_data = t5_tp_pio_array[i][1];
1294                         tp_pio->ireg_local_offset = t5_tp_pio_array[i][2];
1295                         tp_pio->ireg_offset_range = t5_tp_pio_array[i][3];
1296                 } else if (is_t6(padap->params.chip)) {
1297                         tp_pio->ireg_addr = t6_tp_pio_array[i][0];
1298                         tp_pio->ireg_data = t6_tp_pio_array[i][1];
1299                         tp_pio->ireg_local_offset = t6_tp_pio_array[i][2];
1300                         tp_pio->ireg_offset_range = t6_tp_pio_array[i][3];
1301                 }
1302                 t4_tp_pio_read(padap, buff, tp_pio->ireg_offset_range,
1303                                tp_pio->ireg_local_offset, true);
1304                 ch_tp_pio++;
1305         }
1306 
1307         /* TP_TM_PIO */
1308         if (is_t5(padap->params.chip))
1309                 n = sizeof(t5_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1310         else if (is_t6(padap->params.chip))
1311                 n = sizeof(t6_tp_tm_pio_array) / (IREG_NUM_ELEM * sizeof(u32));
1312 
1313         for (i = 0; i < n; i++) {
1314                 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
1315                 u32 *buff = ch_tp_pio->outbuf;
1316 
1317                 if (is_t5(padap->params.chip)) {
1318                         tp_pio->ireg_addr = t5_tp_tm_pio_array[i][0];
1319                         tp_pio->ireg_data = t5_tp_tm_pio_array[i][1];
1320                         tp_pio->ireg_local_offset = t5_tp_tm_pio_array[i][2];
1321                         tp_pio->ireg_offset_range = t5_tp_tm_pio_array[i][3];
1322                 } else if (is_t6(padap->params.chip)) {
1323                         tp_pio->ireg_addr = t6_tp_tm_pio_array[i][0];
1324                         tp_pio->ireg_data = t6_tp_tm_pio_array[i][1];
1325                         tp_pio->ireg_local_offset = t6_tp_tm_pio_array[i][2];
1326                         tp_pio->ireg_offset_range = t6_tp_tm_pio_array[i][3];
1327                 }
1328                 t4_tp_tm_pio_read(padap, buff, tp_pio->ireg_offset_range,
1329                                   tp_pio->ireg_local_offset, true);
1330                 ch_tp_pio++;
1331         }
1332 
1333         /* TP_MIB_INDEX */
1334         if (is_t5(padap->params.chip))
1335                 n = sizeof(t5_tp_mib_index_array) /
1336                     (IREG_NUM_ELEM * sizeof(u32));
1337         else if (is_t6(padap->params.chip))
1338                 n = sizeof(t6_tp_mib_index_array) /
1339                     (IREG_NUM_ELEM * sizeof(u32));
1340 
1341         for (i = 0; i < n ; i++) {
1342                 struct ireg_field *tp_pio = &ch_tp_pio->tp_pio;
1343                 u32 *buff = ch_tp_pio->outbuf;
1344 
1345                 if (is_t5(padap->params.chip)) {
1346                         tp_pio->ireg_addr = t5_tp_mib_index_array[i][0];
1347                         tp_pio->ireg_data = t5_tp_mib_index_array[i][1];
1348                         tp_pio->ireg_local_offset =
1349                                 t5_tp_mib_index_array[i][2];
1350                         tp_pio->ireg_offset_range =
1351                                 t5_tp_mib_index_array[i][3];
1352                 } else if (is_t6(padap->params.chip)) {
1353                         tp_pio->ireg_addr = t6_tp_mib_index_array[i][0];
1354                         tp_pio->ireg_data = t6_tp_mib_index_array[i][1];
1355                         tp_pio->ireg_local_offset =
1356                                 t6_tp_mib_index_array[i][2];
1357                         tp_pio->ireg_offset_range =
1358                                 t6_tp_mib_index_array[i][3];
1359                 }
1360                 t4_tp_mib_read(padap, buff, tp_pio->ireg_offset_range,
1361                                tp_pio->ireg_local_offset, true);
1362                 ch_tp_pio++;
1363         }
1364         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1365 }
1366 
1367 static void cudbg_read_sge_qbase_indirect_reg(struct adapter *padap,
1368                                               struct sge_qbase_reg_field *qbase,
1369                                               u32 func, bool is_pf)
1370 {
1371         u32 *buff, i;
1372 
1373         if (is_pf) {
1374                 buff = qbase->pf_data_value[func];
1375         } else {
1376                 buff = qbase->vf_data_value[func];
1377                 /* In SGE_QBASE_INDEX,
1378                  * Entries 0->7 are PF0->7, Entries 8->263 are VFID0->256.
1379                  */
1380                 func += 8;
1381         }
1382 
1383         t4_write_reg(padap, qbase->reg_addr, func);
1384         for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++, buff++)
1385                 *buff = t4_read_reg(padap, qbase->reg_data[i]);
1386 }
1387 
1388 int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
1389                                struct cudbg_buffer *dbg_buff,
1390                                struct cudbg_error *cudbg_err)
1391 {
1392         struct adapter *padap = pdbg_init->adap;
1393         struct cudbg_buffer temp_buff = { 0 };
1394         struct sge_qbase_reg_field *sge_qbase;
1395         struct ireg_buf *ch_sge_dbg;
1396         int i, rc;
1397 
1398         rc = cudbg_get_buff(pdbg_init, dbg_buff,
1399                             sizeof(*ch_sge_dbg) * 2 + sizeof(*sge_qbase),
1400                             &temp_buff);
1401         if (rc)
1402                 return rc;
1403 
1404         ch_sge_dbg = (struct ireg_buf *)temp_buff.data;
1405         for (i = 0; i < 2; i++) {
1406                 struct ireg_field *sge_pio = &ch_sge_dbg->tp_pio;
1407                 u32 *buff = ch_sge_dbg->outbuf;
1408 
1409                 sge_pio->ireg_addr = t5_sge_dbg_index_array[i][0];
1410                 sge_pio->ireg_data = t5_sge_dbg_index_array[i][1];
1411                 sge_pio->ireg_local_offset = t5_sge_dbg_index_array[i][2];
1412                 sge_pio->ireg_offset_range = t5_sge_dbg_index_array[i][3];
1413                 t4_read_indirect(padap,
1414                                  sge_pio->ireg_addr,
1415                                  sge_pio->ireg_data,
1416                                  buff,
1417                                  sge_pio->ireg_offset_range,
1418                                  sge_pio->ireg_local_offset);
1419                 ch_sge_dbg++;
1420         }
1421 
1422         if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
1423                 sge_qbase = (struct sge_qbase_reg_field *)ch_sge_dbg;
1424                 /* 1 addr reg SGE_QBASE_INDEX and 4 data reg
1425                  * SGE_QBASE_MAP[0-3]
1426                  */
1427                 sge_qbase->reg_addr = t6_sge_qbase_index_array[0];
1428                 for (i = 0; i < SGE_QBASE_DATA_REG_NUM; i++)
1429                         sge_qbase->reg_data[i] =
1430                                 t6_sge_qbase_index_array[i + 1];
1431 
1432                 for (i = 0; i <= PCIE_FW_MASTER_M; i++)
1433                         cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
1434                                                           i, true);
1435 
1436                 for (i = 0; i < padap->params.arch.vfcount; i++)
1437                         cudbg_read_sge_qbase_indirect_reg(padap, sge_qbase,
1438                                                           i, false);
1439 
1440                 sge_qbase->vfcount = padap->params.arch.vfcount;
1441         }
1442 
1443         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1444 }
1445 
1446 int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
1447                            struct cudbg_buffer *dbg_buff,
1448                            struct cudbg_error *cudbg_err)
1449 {
1450         struct adapter *padap = pdbg_init->adap;
1451         struct cudbg_buffer temp_buff = { 0 };
1452         struct cudbg_ulprx_la *ulprx_la_buff;
1453         int rc;
1454 
1455         rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
1456                             &temp_buff);
1457         if (rc)
1458                 return rc;
1459 
1460         ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
1461         t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
1462         ulprx_la_buff->size = ULPRX_LA_SIZE;
1463         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1464 }
1465 
1466 int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
1467                         struct cudbg_buffer *dbg_buff,
1468                         struct cudbg_error *cudbg_err)
1469 {
1470         struct adapter *padap = pdbg_init->adap;
1471         struct cudbg_buffer temp_buff = { 0 };
1472         struct cudbg_tp_la *tp_la_buff;
1473         int size, rc;
1474 
1475         size = sizeof(struct cudbg_tp_la) + TPLA_SIZE *  sizeof(u64);
1476         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1477         if (rc)
1478                 return rc;
1479 
1480         tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
1481         tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
1482         t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
1483         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1484 }
1485 
1486 int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
1487                           struct cudbg_buffer *dbg_buff,
1488                           struct cudbg_error *cudbg_err)
1489 {
1490         struct adapter *padap = pdbg_init->adap;
1491         struct cudbg_buffer temp_buff = { 0 };
1492         struct cudbg_meminfo *meminfo_buff;
1493         struct cudbg_ver_hdr *ver_hdr;
1494         int rc;
1495 
1496         rc = cudbg_get_buff(pdbg_init, dbg_buff,
1497                             sizeof(struct cudbg_ver_hdr) +
1498                             sizeof(struct cudbg_meminfo),
1499                             &temp_buff);
1500         if (rc)
1501                 return rc;
1502 
1503         ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
1504         ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
1505         ver_hdr->revision = CUDBG_MEMINFO_REV;
1506         ver_hdr->size = sizeof(struct cudbg_meminfo);
1507 
1508         meminfo_buff = (struct cudbg_meminfo *)(temp_buff.data +
1509                                                 sizeof(*ver_hdr));
1510         rc = cudbg_fill_meminfo(padap, meminfo_buff);
1511         if (rc) {
1512                 cudbg_err->sys_err = rc;
1513                 cudbg_put_buff(pdbg_init, &temp_buff);
1514                 return rc;
1515         }
1516 
1517         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1518 }
1519 
1520 int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
1521                              struct cudbg_buffer *dbg_buff,
1522                              struct cudbg_error *cudbg_err)
1523 {
1524         struct cudbg_cim_pif_la *cim_pif_la_buff;
1525         struct adapter *padap = pdbg_init->adap;
1526         struct cudbg_buffer temp_buff = { 0 };
1527         int size, rc;
1528 
1529         size = sizeof(struct cudbg_cim_pif_la) +
1530                2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
1531         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1532         if (rc)
1533                 return rc;
1534 
1535         cim_pif_la_buff = (struct cudbg_cim_pif_la *)temp_buff.data;
1536         cim_pif_la_buff->size = CIM_PIFLA_SIZE;
1537         t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
1538                            (u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
1539                            NULL, NULL);
1540         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1541 }
1542 
1543 int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
1544                            struct cudbg_buffer *dbg_buff,
1545                            struct cudbg_error *cudbg_err)
1546 {
1547         struct adapter *padap = pdbg_init->adap;
1548         struct cudbg_buffer temp_buff = { 0 };
1549         struct cudbg_clk_info *clk_info_buff;
1550         u64 tp_tick_us;
1551         int rc;
1552 
1553         if (!padap->params.vpd.cclk)
1554                 return CUDBG_STATUS_CCLK_NOT_DEFINED;
1555 
1556         rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
1557                             &temp_buff);
1558         if (rc)
1559                 return rc;
1560 
1561         clk_info_buff = (struct cudbg_clk_info *)temp_buff.data;
1562         clk_info_buff->cclk_ps = 1000000000 / padap->params.vpd.cclk; /* psec */
1563         clk_info_buff->res = t4_read_reg(padap, TP_TIMER_RESOLUTION_A);
1564         clk_info_buff->tre = TIMERRESOLUTION_G(clk_info_buff->res);
1565         clk_info_buff->dack_re = DELAYEDACKRESOLUTION_G(clk_info_buff->res);
1566         tp_tick_us = (clk_info_buff->cclk_ps << clk_info_buff->tre) / 1000000;
1567 
1568         clk_info_buff->dack_timer =
1569                 (clk_info_buff->cclk_ps << clk_info_buff->dack_re) / 1000000 *
1570                 t4_read_reg(padap, TP_DACK_TIMER_A);
1571         clk_info_buff->retransmit_min =
1572                 tp_tick_us * t4_read_reg(padap, TP_RXT_MIN_A);
1573         clk_info_buff->retransmit_max =
1574                 tp_tick_us * t4_read_reg(padap, TP_RXT_MAX_A);
1575         clk_info_buff->persist_timer_min =
1576                 tp_tick_us * t4_read_reg(padap, TP_PERS_MIN_A);
1577         clk_info_buff->persist_timer_max =
1578                 tp_tick_us * t4_read_reg(padap, TP_PERS_MAX_A);
1579         clk_info_buff->keepalive_idle_timer =
1580                 tp_tick_us * t4_read_reg(padap, TP_KEEP_IDLE_A);
1581         clk_info_buff->keepalive_interval =
1582                 tp_tick_us * t4_read_reg(padap, TP_KEEP_INTVL_A);
1583         clk_info_buff->initial_srtt =
1584                 tp_tick_us * INITSRTT_G(t4_read_reg(padap, TP_INIT_SRTT_A));
1585         clk_info_buff->finwait2_timer =
1586                 tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);
1587 
1588         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1589 }
1590 
1591 int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
1592                                 struct cudbg_buffer *dbg_buff,
1593                                 struct cudbg_error *cudbg_err)
1594 {
1595         struct adapter *padap = pdbg_init->adap;
1596         struct cudbg_buffer temp_buff = { 0 };
1597         struct ireg_buf *ch_pcie;
1598         int i, rc, n;
1599         u32 size;
1600 
1601         n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
1602         size = sizeof(struct ireg_buf) * n * 2;
1603         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1604         if (rc)
1605                 return rc;
1606 
1607         ch_pcie = (struct ireg_buf *)temp_buff.data;
1608         /* PCIE_PDBG */
1609         for (i = 0; i < n; i++) {
1610                 struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
1611                 u32 *buff = ch_pcie->outbuf;
1612 
1613                 pcie_pio->ireg_addr = t5_pcie_pdbg_array[i][0];
1614                 pcie_pio->ireg_data = t5_pcie_pdbg_array[i][1];
1615                 pcie_pio->ireg_local_offset = t5_pcie_pdbg_array[i][2];
1616                 pcie_pio->ireg_offset_range = t5_pcie_pdbg_array[i][3];
1617                 t4_read_indirect(padap,
1618                                  pcie_pio->ireg_addr,
1619                                  pcie_pio->ireg_data,
1620                                  buff,
1621                                  pcie_pio->ireg_offset_range,
1622                                  pcie_pio->ireg_local_offset);
1623                 ch_pcie++;
1624         }
1625 
1626         /* PCIE_CDBG */
1627         n = sizeof(t5_pcie_cdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
1628         for (i = 0; i < n; i++) {
1629                 struct ireg_field *pcie_pio = &ch_pcie->tp_pio;
1630                 u32 *buff = ch_pcie->outbuf;
1631 
1632                 pcie_pio->ireg_addr = t5_pcie_cdbg_array[i][0];
1633                 pcie_pio->ireg_data = t5_pcie_cdbg_array[i][1];
1634                 pcie_pio->ireg_local_offset = t5_pcie_cdbg_array[i][2];
1635                 pcie_pio->ireg_offset_range = t5_pcie_cdbg_array[i][3];
1636                 t4_read_indirect(padap,
1637                                  pcie_pio->ireg_addr,
1638                                  pcie_pio->ireg_data,
1639                                  buff,
1640                                  pcie_pio->ireg_offset_range,
1641                                  pcie_pio->ireg_local_offset);
1642                 ch_pcie++;
1643         }
1644         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1645 }
1646 
1647 int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
1648                               struct cudbg_buffer *dbg_buff,
1649                               struct cudbg_error *cudbg_err)
1650 {
1651         struct adapter *padap = pdbg_init->adap;
1652         struct cudbg_buffer temp_buff = { 0 };
1653         struct ireg_buf *ch_pm;
1654         int i, rc, n;
1655         u32 size;
1656 
1657         n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
1658         size = sizeof(struct ireg_buf) * n * 2;
1659         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1660         if (rc)
1661                 return rc;
1662 
1663         ch_pm = (struct ireg_buf *)temp_buff.data;
1664         /* PM_RX */
1665         for (i = 0; i < n; i++) {
1666                 struct ireg_field *pm_pio = &ch_pm->tp_pio;
1667                 u32 *buff = ch_pm->outbuf;
1668 
1669                 pm_pio->ireg_addr = t5_pm_rx_array[i][0];
1670                 pm_pio->ireg_data = t5_pm_rx_array[i][1];
1671                 pm_pio->ireg_local_offset = t5_pm_rx_array[i][2];
1672                 pm_pio->ireg_offset_range = t5_pm_rx_array[i][3];
1673                 t4_read_indirect(padap,
1674                                  pm_pio->ireg_addr,
1675                                  pm_pio->ireg_data,
1676                                  buff,
1677                                  pm_pio->ireg_offset_range,
1678                                  pm_pio->ireg_local_offset);
1679                 ch_pm++;
1680         }
1681 
1682         /* PM_TX */
1683         n = sizeof(t5_pm_tx_array) / (IREG_NUM_ELEM * sizeof(u32));
1684         for (i = 0; i < n; i++) {
1685                 struct ireg_field *pm_pio = &ch_pm->tp_pio;
1686                 u32 *buff = ch_pm->outbuf;
1687 
1688                 pm_pio->ireg_addr = t5_pm_tx_array[i][0];
1689                 pm_pio->ireg_data = t5_pm_tx_array[i][1];
1690                 pm_pio->ireg_local_offset = t5_pm_tx_array[i][2];
1691                 pm_pio->ireg_offset_range = t5_pm_tx_array[i][3];
1692                 t4_read_indirect(padap,
1693                                  pm_pio->ireg_addr,
1694                                  pm_pio->ireg_data,
1695                                  buff,
1696                                  pm_pio->ireg_offset_range,
1697                                  pm_pio->ireg_local_offset);
1698                 ch_pm++;
1699         }
1700         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1701 }
1702 
1703 int cudbg_collect_tid(struct cudbg_init *pdbg_init,
1704                       struct cudbg_buffer *dbg_buff,
1705                       struct cudbg_error *cudbg_err)
1706 {
1707         struct adapter *padap = pdbg_init->adap;
1708         struct cudbg_tid_info_region_rev1 *tid1;
1709         struct cudbg_buffer temp_buff = { 0 };
1710         struct cudbg_tid_info_region *tid;
1711         u32 para[2], val[2];
1712         int rc;
1713 
1714         rc = cudbg_get_buff(pdbg_init, dbg_buff,
1715                             sizeof(struct cudbg_tid_info_region_rev1),
1716                             &temp_buff);
1717         if (rc)
1718                 return rc;
1719 
1720         tid1 = (struct cudbg_tid_info_region_rev1 *)temp_buff.data;
1721         tid = &tid1->tid;
1722         tid1->ver_hdr.signature = CUDBG_ENTITY_SIGNATURE;
1723         tid1->ver_hdr.revision = CUDBG_TID_INFO_REV;
1724         tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
1725                              sizeof(struct cudbg_ver_hdr);
1726 
1727         /* If firmware is not attached/alive, use backdoor register
1728          * access to collect dump.
1729          */
1730         if (!is_fw_attached(pdbg_init))
1731                 goto fill_tid;
1732 
1733 #define FW_PARAM_PFVF_A(param) \
1734         (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
1735          FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
1736          FW_PARAMS_PARAM_Y_V(0) | \
1737          FW_PARAMS_PARAM_Z_V(0))
1738 
1739         para[0] = FW_PARAM_PFVF_A(ETHOFLD_START);
1740         para[1] = FW_PARAM_PFVF_A(ETHOFLD_END);
1741         rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
1742         if (rc <  0) {
1743                 cudbg_err->sys_err = rc;
1744                 cudbg_put_buff(pdbg_init, &temp_buff);
1745                 return rc;
1746         }
1747         tid->uotid_base = val[0];
1748         tid->nuotids = val[1] - val[0] + 1;
1749 
1750         if (is_t5(padap->params.chip)) {
1751                 tid->sb = t4_read_reg(padap, LE_DB_SERVER_INDEX_A) / 4;
1752         } else if (is_t6(padap->params.chip)) {
1753                 tid1->tid_start =
1754                         t4_read_reg(padap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
1755                 tid->sb = t4_read_reg(padap, LE_DB_SRVR_START_INDEX_A);
1756 
1757                 para[0] = FW_PARAM_PFVF_A(HPFILTER_START);
1758                 para[1] = FW_PARAM_PFVF_A(HPFILTER_END);
1759                 rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2,
1760                                      para, val);
1761                 if (rc < 0) {
1762                         cudbg_err->sys_err = rc;
1763                         cudbg_put_buff(pdbg_init, &temp_buff);
1764                         return rc;
1765                 }
1766                 tid->hpftid_base = val[0];
1767                 tid->nhpftids = val[1] - val[0] + 1;
1768         }
1769 
1770 #undef FW_PARAM_PFVF_A
1771 
1772 fill_tid:
1773         tid->ntids = padap->tids.ntids;
1774         tid->nstids = padap->tids.nstids;
1775         tid->stid_base = padap->tids.stid_base;
1776         tid->hash_base = padap->tids.hash_base;
1777 
1778         tid->natids = padap->tids.natids;
1779         tid->nftids = padap->tids.nftids;
1780         tid->ftid_base = padap->tids.ftid_base;
1781         tid->aftid_base = padap->tids.aftid_base;
1782         tid->aftid_end = padap->tids.aftid_end;
1783 
1784         tid->sftid_base = padap->tids.sftid_base;
1785         tid->nsftids = padap->tids.nsftids;
1786 
1787         tid->flags = padap->flags;
1788         tid->le_db_conf = t4_read_reg(padap, LE_DB_CONFIG_A);
1789         tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
1790         tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);
1791 
1792         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1793 }
1794 
1795 int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
1796                               struct cudbg_buffer *dbg_buff,
1797                               struct cudbg_error *cudbg_err)
1798 {
1799         struct adapter *padap = pdbg_init->adap;
1800         struct cudbg_buffer temp_buff = { 0 };
1801         u32 size, *value, j;
1802         int i, rc, n;
1803 
1804         size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
1805         n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
1806         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1807         if (rc)
1808                 return rc;
1809 
1810         value = (u32 *)temp_buff.data;
1811         for (i = 0; i < n; i++) {
1812                 for (j = t5_pcie_config_array[i][0];
1813                      j <= t5_pcie_config_array[i][1]; j += 4) {
1814                         t4_hw_pci_read_cfg4(padap, j, value);
1815                         value++;
1816                 }
1817         }
1818         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
1819 }
1820 
1821 static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
1822 {
1823         int index, bit, bit_pos = 0;
1824 
1825         switch (type) {
1826         case CTXT_EGRESS:
1827                 bit_pos = 176;
1828                 break;
1829         case CTXT_INGRESS:
1830                 bit_pos = 141;
1831                 break;
1832         case CTXT_FLM:
1833                 bit_pos = 89;
1834                 break;
1835         }
1836         index = bit_pos / 32;
1837         bit =  bit_pos % 32;
1838         return buf[index] & (1U << bit);
1839 }
1840 
1841 static int cudbg_get_ctxt_region_info(struct adapter *padap,
1842                                       struct cudbg_region_info *ctx_info,
1843                                       u8 *mem_type)
1844 {
1845         struct cudbg_mem_desc mem_desc;
1846         struct cudbg_meminfo meminfo;
1847         u32 i, j, value, found;
1848         u8 flq;
1849         int rc;
1850 
1851         rc = cudbg_fill_meminfo(padap, &meminfo);
1852         if (rc)
1853                 return rc;
1854 
1855         /* Get EGRESS and INGRESS context region size */
1856         for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
1857                 found = 0;
1858                 memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
1859                 for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
1860                         rc = cudbg_get_mem_region(padap, &meminfo, j,
1861                                                   cudbg_region[i],
1862                                                   &mem_desc);
1863                         if (!rc) {
1864                                 found = 1;
1865                                 rc = cudbg_get_mem_relative(padap, &meminfo, j,
1866                                                             &mem_desc.base,
1867                                                             &mem_desc.limit);
1868                                 if (rc) {
1869                                         ctx_info[i].exist = false;
1870                                         break;
1871                                 }
1872                                 ctx_info[i].exist = true;
1873                                 ctx_info[i].start = mem_desc.base;
1874                                 ctx_info[i].end = mem_desc.limit;
1875                                 mem_type[i] = j;
1876                                 break;
1877                         }
1878                 }
1879                 if (!found)
1880                         ctx_info[i].exist = false;
1881         }
1882 
1883         /* Get FLM and CNM max qid. */
1884         value = t4_read_reg(padap, SGE_FLM_CFG_A);
1885 
1886         /* Get number of data freelist queues */
1887         flq = HDRSTARTFLQ_G(value);
1888         ctx_info[CTXT_FLM].exist = true;
1889         ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;
1890 
1891         /* The number of CONM contexts are same as number of freelist
1892          * queues.
1893          */
1894         ctx_info[CTXT_CNM].exist = true;
1895         ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;
1896 
1897         return 0;
1898 }
1899 
1900 int cudbg_dump_context_size(struct adapter *padap)
1901 {
1902         struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
1903         u8 mem_type[CTXT_INGRESS + 1] = { 0 };
1904         u32 i, size = 0;
1905         int rc;
1906 
1907         /* Get max valid qid for each type of queue */
1908         rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
1909         if (rc)
1910                 return rc;
1911 
1912         for (i = 0; i < CTXT_CNM; i++) {
1913                 if (!region_info[i].exist) {
1914                         if (i == CTXT_EGRESS || i == CTXT_INGRESS)
1915                                 size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
1916                                         SGE_CTXT_SIZE;
1917                         continue;
1918                 }
1919 
1920                 size += (region_info[i].end - region_info[i].start + 1) /
1921                         SGE_CTXT_SIZE;
1922         }
1923         return size * sizeof(struct cudbg_ch_cntxt);
1924 }
1925 
1926 static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
1927                                 enum ctxt_type ctype, u32 *data)
1928 {
1929         struct adapter *padap = pdbg_init->adap;
1930         int rc = -1;
1931 
1932         /* Under heavy traffic, the SGE Queue contexts registers will be
1933          * frequently accessed by firmware.
1934          *
1935          * To avoid conflicts with firmware, always ask firmware to fetch
1936          * the SGE Queue contexts via mailbox. On failure, fallback to
1937          * accessing hardware registers directly.
1938          */
1939         if (is_fw_attached(pdbg_init))
1940                 rc = t4_sge_ctxt_rd(padap, padap->mbox, cid, ctype, data);
1941         if (rc)
1942                 t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
1943 }
1944 
1945 static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
1946                                   u8 ctxt_type,
1947                                   struct cudbg_ch_cntxt **out_buff)
1948 {
1949         struct cudbg_ch_cntxt *buff = *out_buff;
1950         int rc;
1951         u32 j;
1952 
1953         for (j = 0; j < max_qid; j++) {
1954                 cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
1955                 rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
1956                 if (!rc)
1957                         continue;
1958 
1959                 buff->cntxt_type = ctxt_type;
1960                 buff->cntxt_id = j;
1961                 buff++;
1962                 if (ctxt_type == CTXT_FLM) {
1963                         cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
1964                         buff->cntxt_type = CTXT_CNM;
1965                         buff->cntxt_id = j;
1966                         buff++;
1967                 }
1968         }
1969 
1970         *out_buff = buff;
1971 }
1972 
1973 int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
1974                                struct cudbg_buffer *dbg_buff,
1975                                struct cudbg_error *cudbg_err)
1976 {
1977         struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
1978         struct adapter *padap = pdbg_init->adap;
1979         u32 j, size, max_ctx_size, max_ctx_qid;
1980         u8 mem_type[CTXT_INGRESS + 1] = { 0 };
1981         struct cudbg_buffer temp_buff = { 0 };
1982         struct cudbg_ch_cntxt *buff;
1983         u64 *dst_off, *src_off;
1984         u8 *ctx_buf;
1985         u8 i, k;
1986         int rc;
1987 
1988         /* Get max valid qid for each type of queue */
1989         rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
1990         if (rc)
1991                 return rc;
1992 
1993         rc = cudbg_dump_context_size(padap);
1994         if (rc <= 0)
1995                 return CUDBG_STATUS_ENTITY_NOT_FOUND;
1996 
1997         size = rc;
1998         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
1999         if (rc)
2000                 return rc;
2001 
2002         /* Get buffer with enough space to read the biggest context
2003          * region in memory.
2004          */
2005         max_ctx_size = max(region_info[CTXT_EGRESS].end -
2006                            region_info[CTXT_EGRESS].start + 1,
2007                            region_info[CTXT_INGRESS].end -
2008                            region_info[CTXT_INGRESS].start + 1);
2009 
2010         ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
2011         if (!ctx_buf) {
2012                 cudbg_put_buff(pdbg_init, &temp_buff);
2013                 return -ENOMEM;
2014         }
2015 
2016         buff = (struct cudbg_ch_cntxt *)temp_buff.data;
2017 
2018         /* Collect EGRESS and INGRESS context data.
2019          * In case of failures, fallback to collecting via FW or
2020          * backdoor access.
2021          */
2022         for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
2023                 if (!region_info[i].exist) {
2024                         max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
2025                         cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
2026                                               &buff);
2027                         continue;
2028                 }
2029 
2030                 max_ctx_size = region_info[i].end - region_info[i].start + 1;
2031                 max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
2032 
2033                 /* If firmware is not attached/alive, use backdoor register
2034                  * access to collect dump.
2035                  */
2036                 if (is_fw_attached(pdbg_init)) {
2037                         t4_sge_ctxt_flush(padap, padap->mbox, i);
2038 
2039                         rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
2040                                           region_info[i].start, max_ctx_size,
2041                                           (__be32 *)ctx_buf, 1);
2042                 }
2043 
2044                 if (rc || !is_fw_attached(pdbg_init)) {
2045                         max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
2046                         cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
2047                                               &buff);
2048                         continue;
2049                 }
2050 
2051                 for (j = 0; j < max_ctx_qid; j++) {
2052                         src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
2053                         dst_off = (u64 *)buff->data;
2054 
2055                         /* The data is stored in 64-bit cpu order.  Convert it
2056                          * to big endian before parsing.
2057                          */
2058                         for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
2059                                 dst_off[k] = cpu_to_be64(src_off[k]);
2060 
2061                         rc = cudbg_sge_ctxt_check_valid(buff->data, i);
2062                         if (!rc)
2063                                 continue;
2064 
2065                         buff->cntxt_type = i;
2066                         buff->cntxt_id = j;
2067                         buff++;
2068                 }
2069         }
2070 
2071         kvfree(ctx_buf);
2072 
2073         /* Collect FREELIST and CONGESTION MANAGER contexts */
2074         max_ctx_size = region_info[CTXT_FLM].end -
2075                        region_info[CTXT_FLM].start + 1;
2076         max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
2077         /* Since FLM and CONM are 1-to-1 mapped, the below function
2078          * will fetch both FLM and CONM contexts.
2079          */
2080         cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);
2081 
2082         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2083 }
2084 
2085 static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
2086 {
2087         *mask = x | y;
2088         y = (__force u64)cpu_to_be64(y);
2089         memcpy(addr, (char *)&y + 2, ETH_ALEN);
2090 }
2091 
2092 static void cudbg_mps_rpl_backdoor(struct adapter *padap,
2093                                    struct fw_ldst_mps_rplc *mps_rplc)
2094 {
2095         if (is_t5(padap->params.chip)) {
2096                 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
2097                                                           MPS_VF_RPLCT_MAP3_A));
2098                 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
2099                                                           MPS_VF_RPLCT_MAP2_A));
2100                 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
2101                                                           MPS_VF_RPLCT_MAP1_A));
2102                 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
2103                                                           MPS_VF_RPLCT_MAP0_A));
2104         } else {
2105                 mps_rplc->rplc255_224 = htonl(t4_read_reg(padap,
2106                                                           MPS_VF_RPLCT_MAP7_A));
2107                 mps_rplc->rplc223_192 = htonl(t4_read_reg(padap,
2108                                                           MPS_VF_RPLCT_MAP6_A));
2109                 mps_rplc->rplc191_160 = htonl(t4_read_reg(padap,
2110                                                           MPS_VF_RPLCT_MAP5_A));
2111                 mps_rplc->rplc159_128 = htonl(t4_read_reg(padap,
2112                                                           MPS_VF_RPLCT_MAP4_A));
2113         }
2114         mps_rplc->rplc127_96 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP3_A));
2115         mps_rplc->rplc95_64 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP2_A));
2116         mps_rplc->rplc63_32 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP1_A));
2117         mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
2118 }
2119 
2120 static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init,
2121                                     struct cudbg_mps_tcam *tcam, u32 idx)
2122 {
2123         struct adapter *padap = pdbg_init->adap;
2124         u64 tcamy, tcamx, val;
2125         u32 ctl, data2;
2126         int rc = 0;
2127 
2128         if (CHELSIO_CHIP_VERSION(padap->params.chip) >= CHELSIO_T6) {
2129                 /* CtlReqID   - 1: use Host Driver Requester ID
2130                  * CtlCmdType - 0: Read, 1: Write
2131                  * CtlTcamSel - 0: TCAM0, 1: TCAM1
2132                  * CtlXYBitSel- 0: Y bit, 1: X bit
2133                  */
2134 
2135                 /* Read tcamy */
2136                 ctl = CTLREQID_V(1) | CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
2137                 if (idx < 256)
2138                         ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
2139                 else
2140                         ctl |= CTLTCAMINDEX_V(idx - 256) | CTLTCAMSEL_V(1);
2141 
2142                 t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
2143                 val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
2144                 tcamy = DMACH_G(val) << 32;
2145                 tcamy |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
2146                 data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
2147                 tcam->lookup_type = DATALKPTYPE_G(data2);
2148 
2149                 /* 0 - Outer header, 1 - Inner header
2150                  * [71:48] bit locations are overloaded for
2151                  * outer vs. inner lookup types.
2152                  */
2153                 if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
2154                         /* Inner header VNI */
2155                         tcam->vniy = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
2156                         tcam->vniy = (tcam->vniy << 16) | VIDL_G(val);
2157                         tcam->dip_hit = data2 & DATADIPHIT_F;
2158                 } else {
2159                         tcam->vlan_vld = data2 & DATAVIDH2_F;
2160                         tcam->ivlan = VIDL_G(val);
2161                 }
2162 
2163                 tcam->port_num = DATAPORTNUM_G(data2);
2164 
2165                 /* Read tcamx. Change the control param */
2166                 ctl |= CTLXYBITSEL_V(1);
2167                 t4_write_reg(padap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
2168                 val = t4_read_reg(padap, MPS_CLS_TCAM_RDATA1_REQ_ID1_A);
2169                 tcamx = DMACH_G(val) << 32;
2170                 tcamx |= t4_read_reg(padap, MPS_CLS_TCAM_RDATA0_REQ_ID1_A);
2171                 data2 = t4_read_reg(padap, MPS_CLS_TCAM_RDATA2_REQ_ID1_A);
2172                 if (tcam->lookup_type && tcam->lookup_type != DATALKPTYPE_M) {
2173                         /* Inner header VNI mask */
2174                         tcam->vnix = (data2 & DATAVIDH2_F) | DATAVIDH1_G(data2);
2175                         tcam->vnix = (tcam->vnix << 16) | VIDL_G(val);
2176                 }
2177         } else {
2178                 tcamy = t4_read_reg64(padap, MPS_CLS_TCAM_Y_L(idx));
2179                 tcamx = t4_read_reg64(padap, MPS_CLS_TCAM_X_L(idx));
2180         }
2181 
2182         /* If no entry, return */
2183         if (tcamx & tcamy)
2184                 return rc;
2185 
2186         tcam->cls_lo = t4_read_reg(padap, MPS_CLS_SRAM_L(idx));
2187         tcam->cls_hi = t4_read_reg(padap, MPS_CLS_SRAM_H(idx));
2188 
2189         if (is_t5(padap->params.chip))
2190                 tcam->repli = (tcam->cls_lo & REPLICATE_F);
2191         else if (is_t6(padap->params.chip))
2192                 tcam->repli = (tcam->cls_lo & T6_REPLICATE_F);
2193 
2194         if (tcam->repli) {
2195                 struct fw_ldst_cmd ldst_cmd;
2196                 struct fw_ldst_mps_rplc mps_rplc;
2197 
2198                 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
2199                 ldst_cmd.op_to_addrspace =
2200                         htonl(FW_CMD_OP_V(FW_LDST_CMD) |
2201                               FW_CMD_REQUEST_F | FW_CMD_READ_F |
2202                               FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS));
2203                 ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
2204                 ldst_cmd.u.mps.rplc.fid_idx =
2205                         htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
2206                               FW_LDST_CMD_IDX_V(idx));
2207 
2208                 /* If firmware is not attached/alive, use backdoor register
2209                  * access to collect dump.
2210                  */
2211                 if (is_fw_attached(pdbg_init))
2212                         rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
2213                                         sizeof(ldst_cmd), &ldst_cmd);
2214 
2215                 if (rc || !is_fw_attached(pdbg_init)) {
2216                         cudbg_mps_rpl_backdoor(padap, &mps_rplc);
2217                         /* Ignore error since we collected directly from
2218                          * reading registers.
2219                          */
2220                         rc = 0;
2221                 } else {
2222                         mps_rplc = ldst_cmd.u.mps.rplc;
2223                 }
2224 
2225                 tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
2226                 tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
2227                 tcam->rplc[2] = ntohl(mps_rplc.rplc95_64);
2228                 tcam->rplc[3] = ntohl(mps_rplc.rplc127_96);
2229                 if (padap->params.arch.mps_rplc_size > CUDBG_MAX_RPLC_SIZE) {
2230                         tcam->rplc[4] = ntohl(mps_rplc.rplc159_128);
2231                         tcam->rplc[5] = ntohl(mps_rplc.rplc191_160);
2232                         tcam->rplc[6] = ntohl(mps_rplc.rplc223_192);
2233                         tcam->rplc[7] = ntohl(mps_rplc.rplc255_224);
2234                 }
2235         }
2236         cudbg_tcamxy2valmask(tcamx, tcamy, tcam->addr, &tcam->mask);
2237         tcam->idx = idx;
2238         tcam->rplc_size = padap->params.arch.mps_rplc_size;
2239         return rc;
2240 }
2241 
2242 int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
2243                            struct cudbg_buffer *dbg_buff,
2244                            struct cudbg_error *cudbg_err)
2245 {
2246         struct adapter *padap = pdbg_init->adap;
2247         struct cudbg_buffer temp_buff = { 0 };
2248         u32 size = 0, i, n, total_size = 0;
2249         struct cudbg_mps_tcam *tcam;
2250         int rc;
2251 
2252         n = padap->params.arch.mps_tcam_size;
2253         size = sizeof(struct cudbg_mps_tcam) * n;
2254         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2255         if (rc)
2256                 return rc;
2257 
2258         tcam = (struct cudbg_mps_tcam *)temp_buff.data;
2259         for (i = 0; i < n; i++) {
2260                 rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
2261                 if (rc) {
2262                         cudbg_err->sys_err = rc;
2263                         cudbg_put_buff(pdbg_init, &temp_buff);
2264                         return rc;
2265                 }
2266                 total_size += sizeof(struct cudbg_mps_tcam);
2267                 tcam++;
2268         }
2269 
2270         if (!total_size) {
2271                 rc = CUDBG_SYSTEM_ERROR;
2272                 cudbg_err->sys_err = rc;
2273                 cudbg_put_buff(pdbg_init, &temp_buff);
2274                 return rc;
2275         }
2276         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2277 }
2278 
2279 int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
2280                            struct cudbg_buffer *dbg_buff,
2281                            struct cudbg_error *cudbg_err)
2282 {
2283         struct adapter *padap = pdbg_init->adap;
2284         struct cudbg_buffer temp_buff = { 0 };
2285         char vpd_str[CUDBG_VPD_VER_LEN + 1];
2286         u32 scfg_vers, vpd_vers, fw_vers;
2287         struct cudbg_vpd_data *vpd_data;
2288         struct vpd_params vpd = { 0 };
2289         int rc, ret;
2290 
2291         rc = t4_get_raw_vpd_params(padap, &vpd);
2292         if (rc)
2293                 return rc;
2294 
2295         rc = t4_get_fw_version(padap, &fw_vers);
2296         if (rc)
2297                 return rc;
2298 
2299         /* Serial Configuration Version is located beyond the PF's vpd size.
2300          * Temporarily give access to entire EEPROM to get it.
2301          */
2302         rc = pci_set_vpd_size(padap->pdev, EEPROMVSIZE);
2303         if (rc < 0)
2304                 return rc;
2305 
2306         ret = cudbg_read_vpd_reg(padap, CUDBG_SCFG_VER_ADDR, CUDBG_SCFG_VER_LEN,
2307                                  &scfg_vers);
2308 
2309         /* Restore back to original PF's vpd size */
2310         rc = pci_set_vpd_size(padap->pdev, CUDBG_VPD_PF_SIZE);
2311         if (rc < 0)
2312                 return rc;
2313 
2314         if (ret)
2315                 return ret;
2316 
2317         rc = cudbg_read_vpd_reg(padap, CUDBG_VPD_VER_ADDR, CUDBG_VPD_VER_LEN,
2318                                 vpd_str);
2319         if (rc)
2320                 return rc;
2321 
2322         vpd_str[CUDBG_VPD_VER_LEN] = '\0';
2323         rc = kstrtouint(vpd_str, 0, &vpd_vers);
2324         if (rc)
2325                 return rc;
2326 
2327         rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
2328                             &temp_buff);
2329         if (rc)
2330                 return rc;
2331 
2332         vpd_data = (struct cudbg_vpd_data *)temp_buff.data;
2333         memcpy(vpd_data->sn, vpd.sn, SERNUM_LEN + 1);
2334         memcpy(vpd_data->bn, vpd.pn, PN_LEN + 1);
2335         memcpy(vpd_data->na, vpd.na, MACADDR_LEN + 1);
2336         memcpy(vpd_data->mn, vpd.id, ID_LEN + 1);
2337         vpd_data->scfg_vers = scfg_vers;
2338         vpd_data->vpd_vers = vpd_vers;
2339         vpd_data->fw_major = FW_HDR_FW_VER_MAJOR_G(fw_vers);
2340         vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
2341         vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
2342         vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
2343         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2344 }
2345 
2346 static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
2347                           struct cudbg_tid_data *tid_data)
2348 {
2349         struct adapter *padap = pdbg_init->adap;
2350         int i, cmd_retry = 8;
2351         u32 val;
2352 
2353         /* Fill REQ_DATA regs with 0's */
2354         for (i = 0; i < NUM_LE_DB_DBGI_REQ_DATA_INSTANCES; i++)
2355                 t4_write_reg(padap, LE_DB_DBGI_REQ_DATA_A + (i << 2), 0);
2356 
2357         /* Write DBIG command */
2358         val = DBGICMD_V(4) | DBGITID_V(tid);
2359         t4_write_reg(padap, LE_DB_DBGI_REQ_TCAM_CMD_A, val);
2360         tid_data->dbig_cmd = val;
2361 
2362         val = DBGICMDSTRT_F | DBGICMDMODE_V(1); /* LE mode */
2363         t4_write_reg(padap, LE_DB_DBGI_CONFIG_A, val);
2364         tid_data->dbig_conf = val;
2365 
2366         /* Poll the DBGICMDBUSY bit */
2367         val = 1;
2368         while (val) {
2369                 val = t4_read_reg(padap, LE_DB_DBGI_CONFIG_A);
2370                 val = val & DBGICMDBUSY_F;
2371                 cmd_retry--;
2372                 if (!cmd_retry)
2373                         return CUDBG_SYSTEM_ERROR;
2374         }
2375 
2376         /* Check RESP status */
2377         val = t4_read_reg(padap, LE_DB_DBGI_RSP_STATUS_A);
2378         tid_data->dbig_rsp_stat = val;
2379         if (!(val & 1))
2380                 return CUDBG_SYSTEM_ERROR;
2381 
2382         /* Read RESP data */
2383         for (i = 0; i < NUM_LE_DB_DBGI_RSP_DATA_INSTANCES; i++)
2384                 tid_data->data[i] = t4_read_reg(padap,
2385                                                 LE_DB_DBGI_RSP_DATA_A +
2386                                                 (i << 2));
2387         tid_data->tid = tid;
2388         return 0;
2389 }
2390 
2391 static int cudbg_get_le_type(u32 tid, struct cudbg_tcam tcam_region)
2392 {
2393         int type = LE_ET_UNKNOWN;
2394 
2395         if (tid < tcam_region.server_start)
2396                 type = LE_ET_TCAM_CON;
2397         else if (tid < tcam_region.filter_start)
2398                 type = LE_ET_TCAM_SERVER;
2399         else if (tid < tcam_region.clip_start)
2400                 type = LE_ET_TCAM_FILTER;
2401         else if (tid < tcam_region.routing_start)
2402                 type = LE_ET_TCAM_CLIP;
2403         else if (tid < tcam_region.tid_hash_base)
2404                 type = LE_ET_TCAM_ROUTING;
2405         else if (tid < tcam_region.max_tid)
2406                 type = LE_ET_HASH_CON;
2407         else
2408                 type = LE_ET_INVALID_TID;
2409 
2410         return type;
2411 }
2412 
2413 static int cudbg_is_ipv6_entry(struct cudbg_tid_data *tid_data,
2414                                struct cudbg_tcam tcam_region)
2415 {
2416         int ipv6 = 0;
2417         int le_type;
2418 
2419         le_type = cudbg_get_le_type(tid_data->tid, tcam_region);
2420         if (tid_data->tid & 1)
2421                 return 0;
2422 
2423         if (le_type == LE_ET_HASH_CON) {
2424                 ipv6 = tid_data->data[16] & 0x8000;
2425         } else if (le_type == LE_ET_TCAM_CON) {
2426                 ipv6 = tid_data->data[16] & 0x8000;
2427                 if (ipv6)
2428                         ipv6 = tid_data->data[9] == 0x00C00000;
2429         } else {
2430                 ipv6 = 0;
2431         }
2432         return ipv6;
2433 }
2434 
2435 void cudbg_fill_le_tcam_info(struct adapter *padap,
2436                              struct cudbg_tcam *tcam_region)
2437 {
2438         u32 value;
2439 
2440         /* Get the LE regions */
2441         value = t4_read_reg(padap, LE_DB_TID_HASHBASE_A); /* hash base index */
2442         tcam_region->tid_hash_base = value;
2443 
2444         /* Get routing table index */
2445         value = t4_read_reg(padap, LE_DB_ROUTING_TABLE_INDEX_A);
2446         tcam_region->routing_start = value;
2447 
2448         /* Get clip table index. For T6 there is separate CLIP TCAM */
2449         if (is_t6(padap->params.chip))
2450                 value = t4_read_reg(padap, LE_DB_CLCAM_TID_BASE_A);
2451         else
2452                 value = t4_read_reg(padap, LE_DB_CLIP_TABLE_INDEX_A);
2453         tcam_region->clip_start = value;
2454 
2455         /* Get filter table index */
2456         value = t4_read_reg(padap, LE_DB_FILTER_TABLE_INDEX_A);
2457         tcam_region->filter_start = value;
2458 
2459         /* Get server table index */
2460         value = t4_read_reg(padap, LE_DB_SERVER_INDEX_A);
2461         tcam_region->server_start = value;
2462 
2463         /* Check whether hash is enabled and calculate the max tids */
2464         value = t4_read_reg(padap, LE_DB_CONFIG_A);
2465         if ((value >> HASHEN_S) & 1) {
2466                 value = t4_read_reg(padap, LE_DB_HASH_CONFIG_A);
2467                 if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5) {
2468                         tcam_region->max_tid = (value & 0xFFFFF) +
2469                                                tcam_region->tid_hash_base;
2470                 } else {
2471                         value = HASHTIDSIZE_G(value);
2472                         value = 1 << value;
2473                         tcam_region->max_tid = value +
2474                                                tcam_region->tid_hash_base;
2475                 }
2476         } else { /* hash not enabled */
2477                 if (is_t6(padap->params.chip))
2478                         tcam_region->max_tid = (value & ASLIPCOMPEN_F) ?
2479                                                CUDBG_MAX_TID_COMP_EN :
2480                                                CUDBG_MAX_TID_COMP_DIS;
2481                 else
2482                         tcam_region->max_tid = CUDBG_MAX_TCAM_TID;
2483         }
2484 
2485         if (is_t6(padap->params.chip))
2486                 tcam_region->max_tid += CUDBG_T6_CLIP;
2487 }
2488 
2489 int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
2490                           struct cudbg_buffer *dbg_buff,
2491                           struct cudbg_error *cudbg_err)
2492 {
2493         struct adapter *padap = pdbg_init->adap;
2494         struct cudbg_buffer temp_buff = { 0 };
2495         struct cudbg_tcam tcam_region = { 0 };
2496         struct cudbg_tid_data *tid_data;
2497         u32 bytes = 0;
2498         int rc, size;
2499         u32 i;
2500 
2501         cudbg_fill_le_tcam_info(padap, &tcam_region);
2502 
2503         size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
2504         size += sizeof(struct cudbg_tcam);
2505         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2506         if (rc)
2507                 return rc;
2508 
2509         memcpy(temp_buff.data, &tcam_region, sizeof(struct cudbg_tcam));
2510         bytes = sizeof(struct cudbg_tcam);
2511         tid_data = (struct cudbg_tid_data *)(temp_buff.data + bytes);
2512         /* read all tid */
2513         for (i = 0; i < tcam_region.max_tid; ) {
2514                 rc = cudbg_read_tid(pdbg_init, i, tid_data);
2515                 if (rc) {
2516                         cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
2517                         /* Update tcam header and exit */
2518                         tcam_region.max_tid = i;
2519                         memcpy(temp_buff.data, &tcam_region,
2520                                sizeof(struct cudbg_tcam));
2521                         goto out;
2522                 }
2523 
2524                 if (cudbg_is_ipv6_entry(tid_data, tcam_region)) {
2525                         /* T6 CLIP TCAM: ipv6 takes 4 entries */
2526                         if (is_t6(padap->params.chip) &&
2527                             i >= tcam_region.clip_start &&
2528                             i < tcam_region.clip_start + CUDBG_T6_CLIP)
2529                                 i += 4;
2530                         else /* Main TCAM: ipv6 takes two tids */
2531                                 i += 2;
2532                 } else {
2533                         i++;
2534                 }
2535 
2536                 tid_data++;
2537                 bytes += sizeof(struct cudbg_tid_data);
2538         }
2539 
2540 out:
2541         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2542 }
2543 
2544 int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
2545                         struct cudbg_buffer *dbg_buff,
2546                         struct cudbg_error *cudbg_err)
2547 {
2548         struct adapter *padap = pdbg_init->adap;
2549         struct cudbg_buffer temp_buff = { 0 };
2550         u32 size;
2551         int rc;
2552 
2553         size = sizeof(u16) * NMTUS * NCCTRL_WIN;
2554         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2555         if (rc)
2556                 return rc;
2557 
2558         t4_read_cong_tbl(padap, (void *)temp_buff.data);
2559         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2560 }
2561 
2562 int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
2563                               struct cudbg_buffer *dbg_buff,
2564                               struct cudbg_error *cudbg_err)
2565 {
2566         struct adapter *padap = pdbg_init->adap;
2567         struct cudbg_buffer temp_buff = { 0 };
2568         struct ireg_buf *ma_indr;
2569         int i, rc, n;
2570         u32 size, j;
2571 
2572         if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
2573                 return CUDBG_STATUS_ENTITY_NOT_FOUND;
2574 
2575         n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
2576         size = sizeof(struct ireg_buf) * n * 2;
2577         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2578         if (rc)
2579                 return rc;
2580 
2581         ma_indr = (struct ireg_buf *)temp_buff.data;
2582         for (i = 0; i < n; i++) {
2583                 struct ireg_field *ma_fli = &ma_indr->tp_pio;
2584                 u32 *buff = ma_indr->outbuf;
2585 
2586                 ma_fli->ireg_addr = t6_ma_ireg_array[i][0];
2587                 ma_fli->ireg_data = t6_ma_ireg_array[i][1];
2588                 ma_fli->ireg_local_offset = t6_ma_ireg_array[i][2];
2589                 ma_fli->ireg_offset_range = t6_ma_ireg_array[i][3];
2590                 t4_read_indirect(padap, ma_fli->ireg_addr, ma_fli->ireg_data,
2591                                  buff, ma_fli->ireg_offset_range,
2592                                  ma_fli->ireg_local_offset);
2593                 ma_indr++;
2594         }
2595 
2596         n = sizeof(t6_ma_ireg_array2) / (IREG_NUM_ELEM * sizeof(u32));
2597         for (i = 0; i < n; i++) {
2598                 struct ireg_field *ma_fli = &ma_indr->tp_pio;
2599                 u32 *buff = ma_indr->outbuf;
2600 
2601                 ma_fli->ireg_addr = t6_ma_ireg_array2[i][0];
2602                 ma_fli->ireg_data = t6_ma_ireg_array2[i][1];
2603                 ma_fli->ireg_local_offset = t6_ma_ireg_array2[i][2];
2604                 for (j = 0; j < t6_ma_ireg_array2[i][3]; j++) {
2605                         t4_read_indirect(padap, ma_fli->ireg_addr,
2606                                          ma_fli->ireg_data, buff, 1,
2607                                          ma_fli->ireg_local_offset);
2608                         buff++;
2609                         ma_fli->ireg_local_offset += 0x20;
2610                 }
2611                 ma_indr++;
2612         }
2613         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2614 }
2615 
2616 int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
2617                            struct cudbg_buffer *dbg_buff,
2618                            struct cudbg_error *cudbg_err)
2619 {
2620         struct adapter *padap = pdbg_init->adap;
2621         struct cudbg_buffer temp_buff = { 0 };
2622         struct cudbg_ulptx_la *ulptx_la_buff;
2623         struct cudbg_ver_hdr *ver_hdr;
2624         u32 i, j;
2625         int rc;
2626 
2627         rc = cudbg_get_buff(pdbg_init, dbg_buff,
2628                             sizeof(struct cudbg_ver_hdr) +
2629                             sizeof(struct cudbg_ulptx_la),
2630                             &temp_buff);
2631         if (rc)
2632                 return rc;
2633 
2634         ver_hdr = (struct cudbg_ver_hdr *)temp_buff.data;
2635         ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
2636         ver_hdr->revision = CUDBG_ULPTX_LA_REV;
2637         ver_hdr->size = sizeof(struct cudbg_ulptx_la);
2638 
2639         ulptx_la_buff = (struct cudbg_ulptx_la *)(temp_buff.data +
2640                                                   sizeof(*ver_hdr));
2641         for (i = 0; i < CUDBG_NUM_ULPTX; i++) {
2642                 ulptx_la_buff->rdptr[i] = t4_read_reg(padap,
2643                                                       ULP_TX_LA_RDPTR_0_A +
2644                                                       0x10 * i);
2645                 ulptx_la_buff->wrptr[i] = t4_read_reg(padap,
2646                                                       ULP_TX_LA_WRPTR_0_A +
2647                                                       0x10 * i);
2648                 ulptx_la_buff->rddata[i] = t4_read_reg(padap,
2649                                                        ULP_TX_LA_RDDATA_0_A +
2650                                                        0x10 * i);
2651                 for (j = 0; j < CUDBG_NUM_ULPTX_READ; j++)
2652                         ulptx_la_buff->rd_data[i][j] =
2653                                 t4_read_reg(padap,
2654                                             ULP_TX_LA_RDDATA_0_A + 0x10 * i);
2655         }
2656 
2657         for (i = 0; i < CUDBG_NUM_ULPTX_ASIC_READ; i++) {
2658                 t4_write_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A, 0x1);
2659                 ulptx_la_buff->rdptr_asic[i] =
2660                                 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_CTRL_A);
2661                 ulptx_la_buff->rddata_asic[i][0] =
2662                                 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_0_A);
2663                 ulptx_la_buff->rddata_asic[i][1] =
2664                                 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_1_A);
2665                 ulptx_la_buff->rddata_asic[i][2] =
2666                                 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_2_A);
2667                 ulptx_la_buff->rddata_asic[i][3] =
2668                                 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_3_A);
2669                 ulptx_la_buff->rddata_asic[i][4] =
2670                                 t4_read_reg(padap, ULP_TX_ASIC_DEBUG_4_A);
2671                 ulptx_la_buff->rddata_asic[i][5] =
2672                                 t4_read_reg(padap, PM_RX_BASE_ADDR);
2673         }
2674 
2675         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2676 }
2677 
2678 int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
2679                                   struct cudbg_buffer *dbg_buff,
2680                                   struct cudbg_error *cudbg_err)
2681 {
2682         struct adapter *padap = pdbg_init->adap;
2683         struct cudbg_buffer temp_buff = { 0 };
2684         u32 local_offset, local_range;
2685         struct ireg_buf *up_cim;
2686         u32 size, j, iter;
2687         u32 instance = 0;
2688         int i, rc, n;
2689 
2690         if (is_t5(padap->params.chip))
2691                 n = sizeof(t5_up_cim_reg_array) /
2692                     ((IREG_NUM_ELEM + 1) * sizeof(u32));
2693         else if (is_t6(padap->params.chip))
2694                 n = sizeof(t6_up_cim_reg_array) /
2695                     ((IREG_NUM_ELEM + 1) * sizeof(u32));
2696         else
2697                 return CUDBG_STATUS_NOT_IMPLEMENTED;
2698 
2699         size = sizeof(struct ireg_buf) * n;
2700         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2701         if (rc)
2702                 return rc;
2703 
2704         up_cim = (struct ireg_buf *)temp_buff.data;
2705         for (i = 0; i < n; i++) {
2706                 struct ireg_field *up_cim_reg = &up_cim->tp_pio;
2707                 u32 *buff = up_cim->outbuf;
2708 
2709                 if (is_t5(padap->params.chip)) {
2710                         up_cim_reg->ireg_addr = t5_up_cim_reg_array[i][0];
2711                         up_cim_reg->ireg_data = t5_up_cim_reg_array[i][1];
2712                         up_cim_reg->ireg_local_offset =
2713                                                 t5_up_cim_reg_array[i][2];
2714                         up_cim_reg->ireg_offset_range =
2715                                                 t5_up_cim_reg_array[i][3];
2716                         instance = t5_up_cim_reg_array[i][4];
2717                 } else if (is_t6(padap->params.chip)) {
2718                         up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
2719                         up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
2720                         up_cim_reg->ireg_local_offset =
2721                                                 t6_up_cim_reg_array[i][2];
2722                         up_cim_reg->ireg_offset_range =
2723                                                 t6_up_cim_reg_array[i][3];
2724                         instance = t6_up_cim_reg_array[i][4];
2725                 }
2726 
2727                 switch (instance) {
2728                 case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
2729                         iter = up_cim_reg->ireg_offset_range;
2730                         local_offset = 0x120;
2731                         local_range = 1;
2732                         break;
2733                 case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
2734                         iter = up_cim_reg->ireg_offset_range;
2735                         local_offset = 0x10;
2736                         local_range = 1;
2737                         break;
2738                 default:
2739                         iter = 1;
2740                         local_offset = 0;
2741                         local_range = up_cim_reg->ireg_offset_range;
2742                         break;
2743                 }
2744 
2745                 for (j = 0; j < iter; j++, buff++) {
2746                         rc = t4_cim_read(padap,
2747                                          up_cim_reg->ireg_local_offset +
2748                                          (j * local_offset), local_range, buff);
2749                         if (rc) {
2750                                 cudbg_put_buff(pdbg_init, &temp_buff);
2751                                 return rc;
2752                         }
2753                 }
2754                 up_cim++;
2755         }
2756         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2757 }
2758 
2759 int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
2760                              struct cudbg_buffer *dbg_buff,
2761                              struct cudbg_error *cudbg_err)
2762 {
2763         struct adapter *padap = pdbg_init->adap;
2764         struct cudbg_buffer temp_buff = { 0 };
2765         struct cudbg_pbt_tables *pbt;
2766         int i, rc;
2767         u32 addr;
2768 
2769         rc = cudbg_get_buff(pdbg_init, dbg_buff,
2770                             sizeof(struct cudbg_pbt_tables),
2771                             &temp_buff);
2772         if (rc)
2773                 return rc;
2774 
2775         pbt = (struct cudbg_pbt_tables *)temp_buff.data;
2776         /* PBT dynamic entries */
2777         addr = CUDBG_CHAC_PBT_ADDR;
2778         for (i = 0; i < CUDBG_PBT_DYNAMIC_ENTRIES; i++) {
2779                 rc = t4_cim_read(padap, addr + (i * 4), 1,
2780                                  &pbt->pbt_dynamic[i]);
2781                 if (rc) {
2782                         cudbg_err->sys_err = rc;
2783                         cudbg_put_buff(pdbg_init, &temp_buff);
2784                         return rc;
2785                 }
2786         }
2787 
2788         /* PBT static entries */
2789         /* static entries start when bit 6 is set */
2790         addr = CUDBG_CHAC_PBT_ADDR + (1 << 6);
2791         for (i = 0; i < CUDBG_PBT_STATIC_ENTRIES; i++) {
2792                 rc = t4_cim_read(padap, addr + (i * 4), 1,
2793                                  &pbt->pbt_static[i]);
2794                 if (rc) {
2795                         cudbg_err->sys_err = rc;
2796                         cudbg_put_buff(pdbg_init, &temp_buff);
2797                         return rc;
2798                 }
2799         }
2800 
2801         /* LRF entries */
2802         addr = CUDBG_CHAC_PBT_LRF;
2803         for (i = 0; i < CUDBG_LRF_ENTRIES; i++) {
2804                 rc = t4_cim_read(padap, addr + (i * 4), 1,
2805                                  &pbt->lrf_table[i]);
2806                 if (rc) {
2807                         cudbg_err->sys_err = rc;
2808                         cudbg_put_buff(pdbg_init, &temp_buff);
2809                         return rc;
2810                 }
2811         }
2812 
2813         /* PBT data entries */
2814         addr = CUDBG_CHAC_PBT_DATA;
2815         for (i = 0; i < CUDBG_PBT_DATA_ENTRIES; i++) {
2816                 rc = t4_cim_read(padap, addr + (i * 4), 1,
2817                                  &pbt->pbt_data[i]);
2818                 if (rc) {
2819                         cudbg_err->sys_err = rc;
2820                         cudbg_put_buff(pdbg_init, &temp_buff);
2821                         return rc;
2822                 }
2823         }
2824         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2825 }
2826 
2827 int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
2828                            struct cudbg_buffer *dbg_buff,
2829                            struct cudbg_error *cudbg_err)
2830 {
2831         struct adapter *padap = pdbg_init->adap;
2832         struct cudbg_mbox_log *mboxlog = NULL;
2833         struct cudbg_buffer temp_buff = { 0 };
2834         struct mbox_cmd_log *log = NULL;
2835         struct mbox_cmd *entry;
2836         unsigned int entry_idx;
2837         u16 mbox_cmds;
2838         int i, k, rc;
2839         u64 flit;
2840         u32 size;
2841 
2842         log = padap->mbox_log;
2843         mbox_cmds = padap->mbox_log->size;
2844         size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
2845         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2846         if (rc)
2847                 return rc;
2848 
2849         mboxlog = (struct cudbg_mbox_log *)temp_buff.data;
2850         for (k = 0; k < mbox_cmds; k++) {
2851                 entry_idx = log->cursor + k;
2852                 if (entry_idx >= log->size)
2853                         entry_idx -= log->size;
2854 
2855                 entry = mbox_cmd_log_entry(log, entry_idx);
2856                 /* skip over unused entries */
2857                 if (entry->timestamp == 0)
2858                         continue;
2859 
2860                 memcpy(&mboxlog->entry, entry, sizeof(struct mbox_cmd));
2861                 for (i = 0; i < MBOX_LEN / 8; i++) {
2862                         flit = entry->cmd[i];
2863                         mboxlog->hi[i] = (u32)(flit >> 32);
2864                         mboxlog->lo[i] = (u32)flit;
2865                 }
2866                 mboxlog++;
2867         }
2868         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2869 }
2870 
2871 int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
2872                                struct cudbg_buffer *dbg_buff,
2873                                struct cudbg_error *cudbg_err)
2874 {
2875         struct adapter *padap = pdbg_init->adap;
2876         struct cudbg_buffer temp_buff = { 0 };
2877         struct ireg_buf *hma_indr;
2878         int i, rc, n;
2879         u32 size;
2880 
2881         if (CHELSIO_CHIP_VERSION(padap->params.chip) < CHELSIO_T6)
2882                 return CUDBG_STATUS_ENTITY_NOT_FOUND;
2883 
2884         n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
2885         size = sizeof(struct ireg_buf) * n;
2886         rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
2887         if (rc)
2888                 return rc;
2889 
2890         hma_indr = (struct ireg_buf *)temp_buff.data;
2891         for (i = 0; i < n; i++) {
2892                 struct ireg_field *hma_fli = &hma_indr->tp_pio;
2893                 u32 *buff = hma_indr->outbuf;
2894 
2895                 hma_fli->ireg_addr = t6_hma_ireg_array[i][0];
2896                 hma_fli->ireg_data = t6_hma_ireg_array[i][1];
2897                 hma_fli->ireg_local_offset = t6_hma_ireg_array[i][2];
2898                 hma_fli->ireg_offset_range = t6_hma_ireg_array[i][3];
2899                 t4_read_indirect(padap, hma_fli->ireg_addr, hma_fli->ireg_data,
2900                                  buff, hma_fli->ireg_offset_range,
2901                                  hma_fli->ireg_local_offset);
2902                 hma_indr++;
2903         }
2904         return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
2905 }
2906 
2907 void cudbg_fill_qdesc_num_and_size(const struct adapter *padap,
2908                                    u32 *num, u32 *size)
2909 {
2910         u32 tot_entries = 0, tot_size = 0;
2911 
2912         /* NIC TXQ, RXQ, FLQ, and CTRLQ */
2913         tot_entries += MAX_ETH_QSETS * 3;
2914         tot_entries += MAX_CTRL_QUEUES;
2915 
2916         tot_size += MAX_ETH_QSETS * MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
2917         tot_size += MAX_ETH_QSETS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
2918         tot_size += MAX_ETH_QSETS * MAX_RX_BUFFERS * MAX_FL_DESC_SIZE;
2919         tot_size += MAX_CTRL_QUEUES * MAX_CTRL_TXQ_ENTRIES *
2920                     MAX_CTRL_TXQ_DESC_SIZE;
2921 
2922         /* FW_EVTQ and INTRQ */
2923         tot_entries += INGQ_EXTRAS;
2924         tot_size += INGQ_EXTRAS * MAX_RSPQ_ENTRIES * MAX_RXQ_DESC_SIZE;
2925 
2926         /* PTP_TXQ */
2927         tot_entries += 1;
2928         tot_size += MAX_TXQ_ENTRIES * MAX_TXQ_DESC_SIZE;
2929 
2930         /* ULD TXQ, RXQ, and FLQ */
2931         tot_entries += CXGB4_TX_MAX * MAX_OFLD_QSETS;
2932         tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS * 2;
2933 
2934         tot_size += CXGB4_TX_MAX * MAX_OFLD_QSETS * MAX_TXQ_ENTRIES *
2935                     MAX_TXQ_DESC_SIZE;
2936         tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RSPQ_ENTRIES *
2937                     MAX_RXQ_DESC_SIZE;
2938         tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * MAX_RX_BUFFERS *
2939                     MAX_FL_DESC_SIZE;
2940 
2941         /* ULD CIQ */
2942         tot_entries += CXGB4_ULD_MAX * MAX_ULD_QSETS;
2943         tot_size += CXGB4_ULD_MAX * MAX_ULD_QSETS * SGE_MAX_IQ_SIZE *
2944                     MAX_RXQ_DESC_SIZE;
2945 
2946         tot_size += sizeof(struct cudbg_ver_hdr) +
2947                     sizeof(struct cudbg_qdesc_info) +
2948                     sizeof(struct cudbg_qdesc_entry) * tot_entries;
2949 
2950         if (num)
2951                 *num = tot_entries;
2952 
2953         if (size)
2954                 *size = tot_size;
2955 }
2956 
2957 int cudbg_collect_qdesc(struct cudbg_init *pdbg_init,
2958                         struct cudbg_buffer *dbg_buff,
2959                         struct cudbg_error *cudbg_err)
2960 {
2961         u32 num_queues = 0, tot_entries = 0, size = 0;
2962         struct adapter *padap = pdbg_init->adap;
2963         struct cudbg_buffer temp_buff = { 0 };
2964         struct cudbg_qdesc_entry *qdesc_entry;
2965         struct cudbg_qdesc_info *qdesc_info;
2966         struct cudbg_ver_hdr *ver_hdr;
2967         struct sge *s = &padap->sge;
2968         u32 i, j, cur_off, tot_len;
2969         u8 *data;
2970         int rc;
2971 
2972         cudbg_fill_qdesc_num_and_size(padap, &tot_entries, &size);
2973         size = min_t(u32, size, CUDBG_DUMP_BUFF_SIZE);
2974         tot_len = size;
2975         data = kvzalloc(size, GFP_KERNEL);
2976         if (!data)
2977                 return -ENOMEM;
2978 
2979         ver_hdr = (struct cudbg_ver_hdr *)data;
2980         ver_hdr->signature = CUDBG_ENTITY_SIGNATURE;
2981         ver_hdr->revision = CUDBG_QDESC_REV;
2982         ver_hdr->size = sizeof(struct cudbg_qdesc_info);
2983         size -= sizeof(*ver_hdr);
2984 
2985         qdesc_info = (struct cudbg_qdesc_info *)(data +
2986                                                  sizeof(*ver_hdr));
2987         size -= sizeof(*qdesc_info);
2988         qdesc_entry = (struct cudbg_qdesc_entry *)qdesc_info->data;
2989 
2990 #define QDESC_GET(q, desc, type, label) do { \
2991         if (size <= 0) { \
2992                 goto label; \
2993         } \
2994         if (desc) { \
2995                 cudbg_fill_qdesc_##q(q, type, qdesc_entry); \
2996                 size -= sizeof(*qdesc_entry) + qdesc_entry->data_size; \
2997                 num_queues++; \
2998                 qdesc_entry = cudbg_next_qdesc(qdesc_entry); \
2999         } \
3000 } while (0)
3001 
3002 #define QDESC_GET_TXQ(q, type, label) do { \
3003         struct sge_txq *txq = (struct sge_txq *)q; \
3004         QDESC_GET(txq, txq->desc, type, label); \
3005 } while (0)
3006 
3007 #define QDESC_GET_RXQ(q, type, label) do { \
3008         struct sge_rspq *rxq = (struct sge_rspq *)q; \
3009         QDESC_GET(rxq, rxq->desc, type, label); \
3010 } while (0)
3011 
3012 #define QDESC_GET_FLQ(q, type, label) do { \
3013         struct sge_fl *flq = (struct sge_fl *)q; \
3014         QDESC_GET(flq, flq->desc, type, label); \
3015 } while (0)
3016 
3017         /* NIC TXQ */
3018         for (i = 0; i < s->ethqsets; i++)
3019                 QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out);
3020 
3021         /* NIC RXQ */
3022         for (i = 0; i < s->ethqsets; i++)
3023                 QDESC_GET_RXQ(&s->ethrxq[i].rspq, CUDBG_QTYPE_NIC_RXQ, out);
3024 
3025         /* NIC FLQ */
3026         for (i = 0; i < s->ethqsets; i++)
3027                 QDESC_GET_FLQ(&s->ethrxq[i].fl, CUDBG_QTYPE_NIC_FLQ, out);
3028 
3029         /* NIC CTRLQ */
3030         for (i = 0; i < padap->params.nports; i++)
3031                 QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out);
3032 
3033         /* FW_EVTQ */
3034         QDESC_GET_RXQ(&s->fw_evtq, CUDBG_QTYPE_FWEVTQ, out);
3035 
3036         /* INTRQ */
3037         QDESC_GET_RXQ(&s->intrq, CUDBG_QTYPE_INTRQ, out);
3038 
3039         /* PTP_TXQ */
3040         QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out);
3041 
3042         /* ULD Queues */
3043         mutex_lock(&uld_mutex);
3044 
3045         if (s->uld_txq_info) {
3046                 struct sge_uld_txq_info *utxq;
3047 
3048                 /* ULD TXQ */
3049                 for (j = 0; j < CXGB4_TX_MAX; j++) {
3050                         if (!s->uld_txq_info[j])
3051                                 continue;
3052 
3053                         utxq = s->uld_txq_info[j];
3054                         for (i = 0; i < utxq->ntxq; i++)
3055                                 QDESC_GET_TXQ(&utxq->uldtxq[i].q,
3056                                               cudbg_uld_txq_to_qtype(j),
3057                                               out_unlock);
3058                 }
3059         }
3060 
3061         if (s->uld_rxq_info) {
3062                 struct sge_uld_rxq_info *urxq;
3063                 u32 base;
3064 
3065                 /* ULD RXQ */
3066                 for (j = 0; j < CXGB4_ULD_MAX; j++) {
3067                         if (!s->uld_rxq_info[j])
3068                                 continue;
3069 
3070                         urxq = s->uld_rxq_info[j];
3071                         for (i = 0; i < urxq->nrxq; i++)
3072                                 QDESC_GET_RXQ(&urxq->uldrxq[i].rspq,
3073                                               cudbg_uld_rxq_to_qtype(j),
3074                                               out_unlock);
3075                 }
3076 
3077                 /* ULD FLQ */
3078                 for (j = 0; j < CXGB4_ULD_MAX; j++) {
3079                         if (!s->uld_rxq_info[j])
3080                                 continue;
3081 
3082                         urxq = s->uld_rxq_info[j];
3083                         for (i = 0; i < urxq->nrxq; i++)
3084                                 QDESC_GET_FLQ(&urxq->uldrxq[i].fl,
3085                                               cudbg_uld_flq_to_qtype(j),
3086                                               out_unlock);
3087                 }
3088 
3089                 /* ULD CIQ */
3090                 for (j = 0; j < CXGB4_ULD_MAX; j++) {
3091                         if (!s->uld_rxq_info[j])
3092                                 continue;
3093 
3094                         urxq = s->uld_rxq_info[j];
3095                         base = urxq->nrxq;
3096                         for (i = 0; i < urxq->nciq; i++)
3097                                 QDESC_GET_RXQ(&urxq->uldrxq[base + i].rspq,
3098                                               cudbg_uld_ciq_to_qtype(j),
3099                                               out_unlock);
3100                 }
3101         }
3102 
3103 out_unlock:
3104         mutex_unlock(&uld_mutex);
3105 
3106 out:
3107         qdesc_info->qdesc_entry_size = sizeof(*qdesc_entry);
3108         qdesc_info->num_queues = num_queues;
3109         cur_off = 0;
3110         while (tot_len) {
3111                 u32 chunk_size = min_t(u32, tot_len, CUDBG_CHUNK_SIZE);
3112 
3113                 rc = cudbg_get_buff(pdbg_init, dbg_buff, chunk_size,
3114                                     &temp_buff);
3115                 if (rc) {
3116                         cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
3117                         goto out_free;
3118                 }
3119 
3120                 memcpy(temp_buff.data, data + cur_off, chunk_size);
3121                 tot_len -= chunk_size;
3122                 cur_off += chunk_size;
3123                 rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
3124                                                   dbg_buff);
3125                 if (rc) {
3126                         cudbg_put_buff(pdbg_init, &temp_buff);
3127                         cudbg_err->sys_warn = CUDBG_STATUS_PARTIAL_DATA;
3128                         goto out_free;
3129                 }
3130         }
3131 
3132 out_free:
3133         if (data)
3134                 kvfree(data);
3135 
3136 #undef QDESC_GET_FLQ
3137 #undef QDESC_GET_RXQ
3138 #undef QDESC_GET_TXQ
3139 #undef QDESC_GET
3140 
3141         return rc;
3142 }

/* [<][>][^][v][top][bottom][index][help] */