This source file includes the following definitions:
- hclge_dbg_get_dfx_bd_num
- hclge_dbg_cmd_send
- hclge_dbg_dump_reg_common
- hclge_dbg_dump_dcb
- hclge_dbg_dump_reg_cmd
- hclge_title_idx_print
- hclge_dbg_dump_tc
- hclge_dbg_dump_tm_pg
- hclge_dbg_dump_tm
- hclge_dbg_dump_tm_map
- hclge_dbg_dump_qos_pause_cfg
- hclge_dbg_dump_qos_pri_map
- hclge_dbg_dump_qos_buf_cfg
- hclge_dbg_dump_mng_table
- hclge_dbg_fd_tcam_read
- hclge_dbg_fd_tcam
- hclge_dbg_dump_rst_info
- hclge_dbg_get_m7_stats_info
- hclge_ncl_config_data_print
- hclge_dbg_dump_ncl_config
- hclge_dbg_dump_mac_tnl_status
- hclge_dbg_run_cmd

#include <linux/device.h>

#include "hclge_debugfs.h"
#include "hclge_main.h"
#include "hclge_tm.h"
#include "hnae3.h"

static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
	{ .reg_type = "bios common",
	  .dfx_msg = &hclge_dbg_bios_common_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
		       .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
		       .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_0[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
		       .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_1[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
		       .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
	{ .reg_type = "ssu",
	  .dfx_msg = &hclge_dbg_ssu_reg_2[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
		       .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
		       .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
	{ .reg_type = "igu egu",
	  .dfx_msg = &hclge_dbg_igu_egu_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
		       .offset = HCLGE_DBG_DFX_IGU_OFFSET,
		       .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
	{ .reg_type = "rpu",
	  .dfx_msg = &hclge_dbg_rpu_reg_0[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
		       .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
	{ .reg_type = "rpu",
	  .dfx_msg = &hclge_dbg_rpu_reg_1[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
		       .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
	{ .reg_type = "ncsi",
	  .dfx_msg = &hclge_dbg_ncsi_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
		       .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
		       .cmd = HCLGE_OPC_DFX_NCSI_REG } },
	{ .reg_type = "rtc",
	  .dfx_msg = &hclge_dbg_rtc_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
		       .offset = HCLGE_DBG_DFX_RTC_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RTC_REG } },
	{ .reg_type = "ppp",
	  .dfx_msg = &hclge_dbg_ppp_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
		       .offset = HCLGE_DBG_DFX_PPP_OFFSET,
		       .cmd = HCLGE_OPC_DFX_PPP_REG } },
	{ .reg_type = "rcb",
	  .dfx_msg = &hclge_dbg_rcb_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
		       .offset = HCLGE_DBG_DFX_RCB_OFFSET,
		       .cmd = HCLGE_OPC_DFX_RCB_REG } },
	{ .reg_type = "tqp",
	  .dfx_msg = &hclge_dbg_tqp_reg[0],
	  .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
		       .offset = HCLGE_DBG_DFX_TQP_OFFSET,
		       .cmd = HCLGE_OPC_DFX_TQP_REG } },
};

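/* Query firmware for the number of command descriptors (BDs) needed by the
 * DFX register dump at @offset; the per-group BD counts are packed into the
 * descriptors returned by hclge_query_bd_num_cmd_send().
 */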
static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
{
#define HCLGE_GET_DFX_REG_TYPE_CNT	4

	struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
	int entries_per_desc;
	int index;
	int ret;

	ret = hclge_query_bd_num_cmd_send(hdev, desc);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get dfx bd num fail, ret = %d\n", ret);
		return ret;
	}

	entries_per_desc = ARRAY_SIZE(desc[0].data);
	index = offset % entries_per_desc;
	return (int)desc[offset / entries_per_desc].data[index];
}

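/* Build a chain of @bd_num read descriptors for opcode @cmd, store @index in
 * the first descriptor's data[0], and send the chain to firmware.
 */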
static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
			      struct hclge_desc *desc_src,
			      int index, int bd_num,
			      enum hclge_opcode_type cmd)
{
	struct hclge_desc *desc = desc_src;
	int ret, i;

	hclge_cmd_setup_basic_desc(desc, cmd, true);
	desc->data[0] = cpu_to_le32(index);

	for (i = 1; i < bd_num; i++) {
		desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		desc++;
		hclge_cmd_setup_basic_desc(desc, cmd, true);
	}

	ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
	if (ret)
		dev_err(&hdev->pdev->dev,
			"cmd(0x%x) send fail, ret = %d\n", cmd, ret);
	return ret;
}

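/* Dump one DFX register group described by @reg_info. An optional index may
 * follow the register type name in @cmd_buf; malformed input falls back to
 * index 0.
 */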
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
				      struct hclge_dbg_reg_type_info *reg_info,
				      const char *cmd_buf)
{
#define IDX_OFFSET	1

	const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
	struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
	struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
	struct hclge_desc *desc_src;
	struct hclge_desc *desc;
	int entries_per_desc;
	int bd_num, buf_len;
	u32 index = 0;
	int min_num;
	int ret, i;

	if (*s) {
		ret = kstrtouint(s, 0, &index);
		index = (ret != 0) ? 0 : index;
	}

	bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset);
	if (bd_num <= 0) {
		dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n",
			reg_msg->offset, bd_num);
		return;
	}

	buf_len = sizeof(struct hclge_desc) * bd_num;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src) {
		dev_err(&hdev->pdev->dev, "call kzalloc failed\n");
		return;
	}

	desc = desc_src;
	ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
	if (ret) {
		kfree(desc_src);
		return;
	}

	entries_per_desc = ARRAY_SIZE(desc->data);
	min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num);

	desc = desc_src;
	for (i = 0; i < min_num; i++) {
		if (i > 0 && (i % entries_per_desc) == 0)
			desc++;
		if (dfx_message->flag)
			dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
				 dfx_message->message,
				 desc->data[i % entries_per_desc]);

		dfx_message++;
	}

	kfree(desc_src);
}

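/* Dump DCB scheduling status. @cmd_buf must supply six ids in order:
 * port, priority, PG, RQ, NQ and qset.
 */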
static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
{
	struct device *dev = &hdev->pdev->dev;
	struct hclge_dbg_bitmap_cmd *bitmap;
	int rq_id, pri_id, qset_id;
	int port_id, nq_id, pg_id;
	struct hclge_desc desc[2];
	int cnt, ret;

	cnt = sscanf(cmd_buf, "%i %i %i %i %i %i",
		     &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id);
	if (cnt != 6) {
		dev_err(&hdev->pdev->dev,
			"dump dcb: bad command parameter, cnt=%d\n", cnt);
		return;
	}

	ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1,
				 HCLGE_OPC_QSET_DFX_STS);
	if (ret)
		return;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1);
	dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
	dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);

	ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS);
	if (ret)
		return;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
	dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);

	ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS);
	if (ret)
		return;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
	dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);

	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
				 HCLGE_OPC_PORT_DFX_STS);
	if (ret)
		return;

	bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
	dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
	dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);

	ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT);
	if (ret)
		return;

	dev_info(dev, "sch_nq_cnt: 0x%x\n", desc[0].data[1]);

	ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT);
	if (ret)
		return;

	dev_info(dev, "sch_rq_cnt: 0x%x\n", desc[0].data[1]);

	ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS);
	if (ret)
		return;

	dev_info(dev, "pri_bp: 0x%x\n", desc[0].data[1]);
	dev_info(dev, "fifo_dfx_info: 0x%x\n", desc[0].data[2]);
	dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", desc[0].data[3]);
	dev_info(dev, "tx_private_waterline: 0x%x\n", desc[0].data[4]);
	dev_info(dev, "tm_bypass_en: 0x%x\n", desc[0].data[5]);
	dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", desc[1].data[0]);
	dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", desc[1].data[1]);

	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
				 HCLGE_OPC_TM_INTERNAL_CNT);
	if (ret)
		return;

	dev_info(dev, "SCH_NIC_NUM: 0x%x\n", desc[0].data[1]);
	dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", desc[0].data[2]);

	ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1,
				 HCLGE_OPC_TM_INTERNAL_STS_1);
	if (ret)
		return;

	dev_info(dev, "TC_MAP_SEL: 0x%x\n", desc[0].data[1]);
	dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", desc[0].data[2]);
	dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", desc[0].data[3]);
	dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[4]);
	dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]);
}

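/* Dispatch "dump reg" sub-commands: match @cmd_buf against the known register
 * group names, or hand "dcb" off to hclge_dbg_dump_dcb().
 */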
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
	struct hclge_dbg_reg_type_info *reg_info;
	bool has_dump = false;
	int i;

	for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
		reg_info = &hclge_dbg_reg_info[i];
		if (!strncmp(cmd_buf, reg_info->reg_type,
			     strlen(reg_info->reg_type))) {
			hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf);
			has_dump = true;
		}
	}

	if (strncmp(cmd_buf, "dcb", 3) == 0) {
		hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
		has_dump = true;
	}

	if (!has_dump)
		dev_info(&hdev->pdev->dev, "unknown command\n");
}

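/* Print "<title>(<index>): <string>", choosing @true_buf or @false_buf by
 * @flag.
 */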
static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
				  char *title_buf, char *true_buf,
				  char *false_buf)
{
	if (flag)
		dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
			 true_buf);
	else
		dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
			 false_buf);
}

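/* Dump the ETS TC weight configuration; a TC with a non-zero weight is
 * reported as DWRR ("no sp mode"), otherwise as strict priority ("sp mode").
 */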
static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
{
	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	int i, ret;

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tc\n");
		return;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret);
		return;
	}

	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	dev_info(&hdev->pdev->dev, "dump tc\n");
	dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
		 ets_weight->weight_offset);

	for (i = 0; i < HNAE3_MAX_TC; i++)
		hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
				      "tc", "no sp mode", "sp mode");
}

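/* Dump priority-group (PG) level TM configuration: PG and port shaping
 * parameters, scheduling modes, and, on DCB-capable devices, the
 * back-pressure to qset mapping.
 */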
static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	int ret;

	cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
	dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
		 pg_shap_cfg_cmd->pg_shapping_para);

	cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
	dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
		 pg_shap_cfg_cmd->pg_shapping_para);

	cmd = HCLGE_OPC_TM_PORT_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
		 port_shap_cfg_cmd->port_shapping_para);

	cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]);

	cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n", desc.data[0]);

	cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n", desc.data[0]);

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tm mapping\n");
		return;
	}

	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_pg_cmd_send;

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
		 bp_to_qs_map_cmd->tc_id);
	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
		 bp_to_qs_map_cmd->qs_group_id);
	dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
		 bp_to_qs_map_cmd->qs_bit_map);
	return;

err_tm_pg_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n",
		cmd, ret);
}

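/* Dump the TM (traffic manager) hierarchy: PG-to-priority, qset-to-priority
 * and NQ-to-qset links, DWRR weights and priority shaping, then the PG level
 * details via hclge_dbg_dump_tm_pg().
 */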
static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
	struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_pg_weight_cmd *pg_weight;
	struct hclge_qs_weight_cmd *qs_weight;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	int ret;

	cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump tm\n");
	dev_info(&hdev->pdev->dev, "PG_TO_PRI pg_id: %u\n",
		 pg_to_pri_map->pg_id);
	dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
		 pg_to_pri_map->pri_bit_map);

	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
		 qs_to_pri_map->qs_id);
	dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
		 qs_to_pri_map->priority);
	dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
		 qs_to_pri_map->link_vld);

	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id);
	dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
		 nq_to_qs_map->qset_id);

	cmd = HCLGE_OPC_TM_PG_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
	dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);

	cmd = HCLGE_OPC_TM_QS_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id);
	dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);

	cmd = HCLGE_OPC_TM_PRI_WEIGHT;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
	dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);

	cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
		 shap_cfg_cmd->pri_shapping_para);

	cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_cmd_send;

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
	dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
		 shap_cfg_cmd->pri_shapping_para);

	hclge_dbg_dump_tm_pg(hdev);

	return;

err_tm_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n",
		cmd, ret);
}

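/* Map a queue id (parsed from @cmd_buf) to its qset, priority and TC, then
 * dump the TM back-pressure qset bitmaps on DCB-capable devices.
 */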
static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
				  const char *cmd_buf)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_tqp_tx_queue_tc_cmd *tc;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc;
	u32 qset_mapping[32];
	int tc_id, qset_id;
	int pri_id, ret;
	u32 queue_id;
	int group_id;
	u32 i;

	ret = kstrtouint(cmd_buf, 0, &queue_id);
	queue_id = (ret != 0) ? 0 : queue_id;

	cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
	nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	nq_to_qs_map->nq_id = cpu_to_le16(queue_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	qset_id = nq_to_qs_map->qset_id & 0x3FF;

	cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	pri_id = map->priority;

	cmd = HCLGE_OPC_TQP_TX_QUEUE_TC;
	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	tc->queue_id = cpu_to_le16(queue_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		goto err_tm_map_cmd_send;
	tc_id = tc->tc_id & 0x7;

	dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
	dev_info(&hdev->pdev->dev, "%04u | %04d | %02d | %02d\n",
		 queue_id, qset_id, pri_id, tc_id);

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports tm mapping\n");
		return;
	}

	cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
	for (group_id = 0; group_id < 32; group_id++) {
		hclge_cmd_setup_basic_desc(&desc, cmd, true);
		bp_to_qs_map_cmd->tc_id = tc_id;
		bp_to_qs_map_cmd->qs_group_id = group_id;
		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret)
			goto err_tm_map_cmd_send;

		qset_mapping[group_id] = bp_to_qs_map_cmd->qs_bit_map;
	}

	dev_info(&hdev->pdev->dev, "index | tm bp qset mapping:\n");

	i = 0;
	for (group_id = 0; group_id < 4; group_id++) {
		dev_info(&hdev->pdev->dev,
			 "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
			 group_id * 256, qset_mapping[(u32)(i + 7)],
			 qset_mapping[(u32)(i + 6)], qset_mapping[(u32)(i + 5)],
			 qset_mapping[(u32)(i + 4)], qset_mapping[(u32)(i + 3)],
			 qset_mapping[(u32)(i + 2)], qset_mapping[(u32)(i + 1)],
			 qset_mapping[i]);
		i += 8;
	}

	return;

err_tm_map_cmd_send:
	dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n",
		cmd, ret);
}

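/* Dump the MAC pause parameters: transmit gap and transmit time. */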
static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"dump qos pause cfg fail, ret = %d\n", ret);
		return;
	}

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
	dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
		 pause_param->pause_trans_gap);
	dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
		 pause_param->pause_trans_time);
}

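/* Dump the VLAN-priority to TC mapping table. */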
static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
{
	struct hclge_qos_pri_map_cmd *pri_map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"dump qos pri map fail, ret = %d\n", ret);
		return;
	}

	pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
	dev_info(&hdev->pdev->dev, "dump qos pri map\n");
	dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
	dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
	dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
	dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
	dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
	dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
	dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
	dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
	dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
}

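/* Dump TX/RX packet buffer allocation, shared buffer and watermark
 * configuration; the per-TC RX private watermarks and common thresholds are
 * only readable on DCB-capable devices.
 */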
static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
{
	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
	struct hclge_rx_priv_wl_buf *rx_priv_wl;
	struct hclge_rx_com_wl *rx_packet_cnt;
	struct hclge_rx_com_thrd *rx_com_thrd;
	struct hclge_rx_com_wl *rx_com_wl;
	enum hclge_opcode_type cmd;
	struct hclge_desc desc[2];
	int i, ret;

	cmd = HCLGE_OPC_TX_BUFF_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");

	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
			 tx_buf_cmd->tx_pkt_buff[i]);

	cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "\n");
	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
			 rx_buf_cmd->buf_num[i]);

	dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
		 rx_buf_cmd->shared_buf);

	cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
	dev_info(&hdev->pdev->dev, "\n");
	dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
		 rx_com_wl->com_wl.high, rx_com_wl->com_wl.low);

	cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
	hclge_cmd_setup_basic_desc(desc, cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 1);
	if (ret)
		goto err_qos_cmd_send;

	rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
	dev_info(&hdev->pdev->dev,
		 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
		 rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low);
	dev_info(&hdev->pdev->dev, "\n");

	if (!hnae3_dev_dcb_supported(hdev)) {
		dev_info(&hdev->pdev->dev,
			 "Only DCB-supported dev supports rx priv wl\n");
		return;
	}

	cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		goto err_qos_cmd_send;

	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);

	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
			 i + HCLGE_TC_NUM_ONE_DESC,
			 rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low);

	cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
	ret = hclge_cmd_send(&hdev->hw, desc, 2);
	if (ret)
		goto err_qos_cmd_send;

	dev_info(&hdev->pdev->dev, "\n");
	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
			 rx_com_thrd->com_thrd[i].high,
			 rx_com_thrd->com_thrd[i].low);

	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
		dev_info(&hdev->pdev->dev,
			 "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
			 i + HCLGE_TC_NUM_ONE_DESC,
			 rx_com_thrd->com_thrd[i].high,
			 rx_com_thrd->com_thrd[i].low);
	return;

err_qos_cmd_send:
	dev_err(&hdev->pdev->dev,
		"dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret);
}

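/* Dump the manager table: one line per entry whose resp_code marks it
 * valid.
 */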
static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
{
	struct hclge_mac_ethertype_idx_rd_cmd *req0;
	char printf_buf[HCLGE_DBG_BUF_LEN];
	struct hclge_desc desc;
	int ret, i;

	dev_info(&hdev->pdev->dev, "mng tab:\n");
	memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
	strncat(printf_buf,
		"entry|mac_addr |mask|ether|mask|vlan|mask",
		HCLGE_DBG_BUF_LEN - 1);
	strncat(printf_buf + strlen(printf_buf),
		"|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n",
		HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1);

	dev_info(&hdev->pdev->dev, "%s", printf_buf);

	for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
					   true);
		req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
		req0->index = cpu_to_le16(i);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"call hclge_cmd_send fail, ret = %d\n", ret);
			return;
		}

		if (!req0->resp_code)
			continue;

		memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
		snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
			 "%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
			 req0->index, req0->mac_addr[0], req0->mac_addr[1],
			 req0->mac_addr[2], req0->mac_addr[3],
			 req0->mac_addr[4], req0->mac_addr[5]);

		snprintf(printf_buf + strlen(printf_buf),
			 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
			 "%x |%04x |%x |%04x|%x |%02x |%02x |",
			 !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
			 req0->ethter_type,
			 !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
			 req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG,
			 !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
			 req0->i_port_bitmap, req0->i_port_direction);

		snprintf(printf_buf + strlen(printf_buf),
			 HCLGE_DBG_BUF_LEN - strlen(printf_buf),
			 "%d |%d |%02d |%04d|%x\n",
			 !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B),
			 req0->egress_port & HCLGE_DBG_MNG_PF_ID,
			 (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
			 req0->egress_queue,
			 !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B));

		dev_info(&hdev->pdev->dev, "%s", printf_buf);
	}
}

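/* Read one flow-director TCAM entry (@loc) for @stage and print its key
 * words; @sel_x selects the x or y half of the key.
 */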
static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
				   bool sel_x, u32 loc)
{
	struct hclge_fd_tcam_config_1_cmd *req1;
	struct hclge_fd_tcam_config_2_cmd *req2;
	struct hclge_fd_tcam_config_3_cmd *req3;
	struct hclge_desc desc[3];
	int ret, i;
	u32 *req;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
	desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);

	req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
	req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
	req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;

	req1->stage = stage;
	req1->xy_sel = sel_x ? 1 : 0;
	req1->index = cpu_to_le32(loc);

	ret = hclge_cmd_send(&hdev->hw, desc, 3);
	if (ret)
		return;

	dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
		 sel_x ? "x" : "y", loc);

	/* tcam key words carried in the first descriptor */
	req = (u32 *)req1->tcam_data;
	for (i = 0; i < 2; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	/* tcam key words carried in the second descriptor */
	req = (u32 *)req2->tcam_data;
	for (i = 0; i < 6; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);

	/* tcam key words carried in the third descriptor */
	req = (u32 *)req3->tcam_data;
	for (i = 0; i < 5; i++)
		dev_info(&hdev->pdev->dev, "%08x\n", *req++);
}

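/* Dump both key halves of every stage-0 flow-director TCAM rule. */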
static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
{
	u32 i;

	for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) {
		hclge_dbg_fd_tcam_read(hdev, 0, true, i);
		hclge_dbg_fd_tcam_read(hdev, 0, false, i);
	}
}

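/* Dump reset statistics and the current reset-related register state. */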
static void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
{
	dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
		 hdev->rst_stats.pf_rst_cnt);
	dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
		 hdev->rst_stats.flr_rst_cnt);
	dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n",
		 hdev->rst_stats.global_rst_cnt);
	dev_info(&hdev->pdev->dev, "IMP reset count: %u\n",
		 hdev->rst_stats.imp_rst_cnt);
	dev_info(&hdev->pdev->dev, "reset done count: %u\n",
		 hdev->rst_stats.reset_done_cnt);
	dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
		 hdev->rst_stats.hw_reset_done_cnt);
	dev_info(&hdev->pdev->dev, "reset count: %u\n",
		 hdev->rst_stats.reset_cnt);
	dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
		 hdev->rst_stats.reset_fail_cnt);
	dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
	dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
	dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
	dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
	dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
	dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
		 hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
}

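/* Query the number of statistics descriptors from the M7 (IMP) firmware,
 * then read and print the raw statistics words, six per descriptor.
 */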
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
{
	struct hclge_desc *desc_src, *desc_tmp;
	struct hclge_get_m7_bd_cmd *req;
	struct hclge_desc desc;
	u32 bd_num, buf_len;
	int ret, i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true);

	req = (struct hclge_get_m7_bd_cmd *)desc.data;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"get firmware statistics bd number failed, ret = %d\n",
			ret);
		return;
	}

	bd_num = le32_to_cpu(req->bd_num);

	buf_len = sizeof(struct hclge_desc) * bd_num;
	desc_src = kzalloc(buf_len, GFP_KERNEL);
	if (!desc_src) {
		dev_err(&hdev->pdev->dev,
			"allocate desc for get_m7_stats failed\n");
		return;
	}

	desc_tmp = desc_src;
	ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
				 HCLGE_OPC_M7_STATS_INFO);
	if (ret) {
		kfree(desc_src);
		dev_err(&hdev->pdev->dev,
			"get firmware statistics failed, ret = %d\n", ret);
		return;
	}

	for (i = 0; i < bd_num; i++) {
		dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
			 le32_to_cpu(desc_tmp->data[0]),
			 le32_to_cpu(desc_tmp->data[1]),
			 le32_to_cpu(desc_tmp->data[2]));
		dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
			 le32_to_cpu(desc_tmp->data[3]),
			 le32_to_cpu(desc_tmp->data[4]),
			 le32_to_cpu(desc_tmp->data[5]));

		desc_tmp++;
	}

	kfree(desc_src);
}

#define HCLGE_CMD_NCL_CONFIG_BD_NUM	5

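/* Print the NCL_CONFIG words carried in @desc, advancing @offset and
 * decrementing @length as each u32 is consumed; data[0] of the first
 * descriptor holds the request header and is skipped.
 */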
static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
					struct hclge_desc *desc, int *offset,
					int *length)
{
#define HCLGE_CMD_DATA_NUM	6

	int i;
	int j;

	for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
		for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
			if (i == 0 && j == 0)
				continue;

			dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
				 *offset,
				 le32_to_cpu(desc[i].data[j]));
			*offset += sizeof(u32);
			*length -= sizeof(u32);
			if (*length <= 0)
				return;
		}
	}
}

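/* Dump a caller-specified range of the NCL_CONFIG file; @cmd_buf holds a hex
 * offset and length, fetched HCLGE_CMD_NCL_CONFIG_BD_NUM descriptors at a
 * time.
 */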
static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
				      const char *cmd_buf)
{
#define HCLGE_MAX_NCL_CONFIG_OFFSET	4096
#define HCLGE_MAX_NCL_CONFIG_LENGTH	(20 + 24 * 4)

	struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
	int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
	int offset;
	int length;
	int data0;
	int ret;

	ret = sscanf(cmd_buf, "%x %x", &offset, &length);
	if (ret != 2 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
	    length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
		dev_err(&hdev->pdev->dev, "Invalid offset or length.\n");
		return;
	}
	if (offset < 0 || length <= 0) {
		dev_err(&hdev->pdev->dev, "Non-positive offset or length.\n");
		return;
	}

	dev_info(&hdev->pdev->dev, "offset | data\n");

	while (length > 0) {
		data0 = offset;
		if (length >= HCLGE_MAX_NCL_CONFIG_LENGTH)
			data0 |= HCLGE_MAX_NCL_CONFIG_LENGTH << 16;
		else
			data0 |= length << 16;
		ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
					 HCLGE_OPC_QUERY_NCL_CONFIG);
		if (ret)
			return;

		hclge_ncl_config_data_print(hdev, desc, &offset, &length);
	}
}

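/* Drain the mac_tnl_log kfifo and print each recorded MAC tunnel interrupt
 * with a seconds.milliseconds timestamp.
 */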
static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
{
#define HCLGE_BILLION_NANO_SECONDS	1000000000

	struct hclge_mac_tnl_stats stats;
	unsigned long rem_nsec;

	dev_info(&hdev->pdev->dev, "Recently generated mac tnl interrupts:\n");

	while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
		rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
		dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n",
			 (unsigned long)stats.time, rem_nsec / 1000,
			 stats.status);
	}
}

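/* Parse a debugfs command string and dispatch to the matching dump helper;
 * returns -EINVAL for unknown commands.
 */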
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
{
#define DUMP_REG	"dump reg"
#define DUMP_TM_MAP	"dump tm map"

	struct hclge_vport *vport = hclge_get_vport(handle);
	struct hclge_dev *hdev = vport->back;

	if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
		hclge_dbg_fd_tcam(hdev);
	} else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
		hclge_dbg_dump_tc(hdev);
	} else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) {
		hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]);
	} else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
		hclge_dbg_dump_tm(hdev);
	} else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
		hclge_dbg_dump_qos_pause_cfg(hdev);
	} else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
		hclge_dbg_dump_qos_pri_map(hdev);
	} else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
		hclge_dbg_dump_qos_buf_cfg(hdev);
	} else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
		hclge_dbg_dump_mng_table(hdev);
	} else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) {
		hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
	} else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
		hclge_dbg_dump_rst_info(hdev);
	} else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
		hclge_dbg_get_m7_stats_info(hdev);
	} else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
		hclge_dbg_dump_ncl_config(hdev,
					  &cmd_buf[sizeof("dump ncl_config")]);
	} else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
		hclge_dbg_dump_mac_tnl_status(hdev);
	} else {
		dev_info(&hdev->pdev->dev, "unknown command\n");
		return -EINVAL;
	}

	return 0;
}