This source file includes the following definitions:
- mlx5_register_debugfs
- mlx5_unregister_debugfs
- mlx5_qp_debugfs_init
- mlx5_qp_debugfs_cleanup
- mlx5_eq_debugfs_init
- mlx5_eq_debugfs_cleanup
- average_read
- average_write
- mlx5_cmdif_debugfs_init
- mlx5_cmdif_debugfs_cleanup
- mlx5_cq_debugfs_init
- mlx5_cq_debugfs_cleanup
- qp_read_field
- mlx5_core_eq_query
- eq_read_field
- cq_read_field
- dbg_read
- add_res_tree
- rem_res_tree
- mlx5_debug_qp_add
- mlx5_debug_qp_remove
- mlx5_debug_eq_add
- mlx5_debug_eq_remove
- mlx5_debug_cq_add
- mlx5_debug_cq_remove
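
Taken together, these helpers build the per-device debugfs tree sketched below (debugfs is typically mounted at /sys/kernel/debug). The per-device directory (dev->priv.dbg_root) is created elsewhere in the driver, so its placement here is an assumption for illustration:

    <debugfs>/mlx5/                              <- mlx5_debugfs_root, created at module init
        <device dir>/                            <- dev->priv.dbg_root (created outside this file)
            QPs/0x<qpn>/pid, state, transport, ...   <- mlx5_debug_qp_add()
            EQs/0x<eqn>/num_eqes, intr, log_page_size
            CQs/0x<cqn>/pid, num_cqes, log_page_size
            commands/<command name>/average, n       <- mlx5_cmdif_debugfs_init()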
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "lib/eq.h"

enum {
	QP_PID,
	QP_STATE,
	QP_XPORT,
	QP_MTU,
	QP_N_RECV,
	QP_RECV_SZ,
	QP_N_SEND,
	QP_LOG_PG_SZ,
	QP_RQPN,
};

static char *qp_fields[] = {
	[QP_PID]	= "pid",
	[QP_STATE]	= "state",
	[QP_XPORT]	= "transport",
	[QP_MTU]	= "mtu",
	[QP_N_RECV]	= "num_recv",
	[QP_RECV_SZ]	= "rcv_wqe_sz",
	[QP_N_SEND]	= "num_send",
	[QP_LOG_PG_SZ]	= "log2_page_sz",
	[QP_RQPN]	= "remote_qpn",
};

enum {
	EQ_NUM_EQES,
	EQ_INTR,
	EQ_LOG_PG_SZ,
};

static char *eq_fields[] = {
	[EQ_NUM_EQES]	= "num_eqes",
	[EQ_INTR]	= "intr",
	[EQ_LOG_PG_SZ]	= "log_page_size",
};

enum {
	CQ_PID,
	CQ_NUM_CQES,
	CQ_LOG_PG_SZ,
};

static char *cq_fields[] = {
	[CQ_PID]	= "pid",
	[CQ_NUM_CQES]	= "num_cqes",
	[CQ_LOG_PG_SZ]	= "log_page_size",
};

struct dentry *mlx5_debugfs_root;
EXPORT_SYMBOL(mlx5_debugfs_root);

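/* Module-wide debugfs root ("mlx5"), created at module init and removed at
 * module exit. The mlx5_debug_*_add() helpers below are no-ops when it was
 * never created.
 */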
void mlx5_register_debugfs(void)
{
	mlx5_debugfs_root = debugfs_create_dir("mlx5", NULL);
}

void mlx5_unregister_debugfs(void)
{
	debugfs_remove(mlx5_debugfs_root);
}

void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
{
	atomic_set(&dev->num_qps, 0);

	dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
}

void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.qp_debugfs);
}

void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.eq_debugfs = debugfs_create_dir("EQs", dev->priv.dbg_root);
}

void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.eq_debugfs);
}

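/* "average" file: reading reports sum/n of the command's accumulated
 * statistics (0 when nothing has been recorded); any write resets both
 * counters to zero.
 */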
static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
			    loff_t *pos)
{
	struct mlx5_cmd_stats *stats;
	u64 field = 0;
	int ret;
	char tbuf[22];

	stats = filp->private_data;
	spin_lock_irq(&stats->lock);
	if (stats->n)
		field = div64_u64(stats->sum, stats->n);
	spin_unlock_irq(&stats->lock);
	ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t average_write(struct file *filp, const char __user *buf,
			     size_t count, loff_t *pos)
{
	struct mlx5_cmd_stats *stats;

	stats = filp->private_data;
	spin_lock_irq(&stats->lock);
	stats->sum = 0;
	stats->n = 0;
	spin_unlock_irq(&stats->lock);

	*pos += count;

	return count;
}

static const struct file_operations stats_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= average_read,
	.write	= average_write,
};

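/* Create a "commands" directory with one subdirectory per recognized command
 * opcode (unknown opcodes are skipped), each exposing the "average" and "n"
 * statistics files.
 */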
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_stats *stats;
	struct dentry **cmd;
	const char *namep;
	int i;

	cmd = &dev->priv.cmdif_debugfs;
	*cmd = debugfs_create_dir("commands", dev->priv.dbg_root);

	for (i = 0; i < ARRAY_SIZE(dev->cmd.stats); i++) {
		stats = &dev->cmd.stats[i];
		namep = mlx5_command_str(i);
		if (strcmp(namep, "unknown command opcode")) {
			stats->root = debugfs_create_dir(namep, *cmd);

			debugfs_create_file("average", 0400, stats->root, stats,
					    &stats_fops);
			debugfs_create_u64("n", 0400, stats->root, &stats->n);
		}
	}
}

void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.cmdif_debugfs);
}

void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev)
{
	dev->priv.cq_debugfs = debugfs_create_dir("CQs", dev->priv.dbg_root);
}

void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev)
{
	debugfs_remove_recursive(dev->priv.cq_debugfs);
}

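/* Query the QP and decode one field from the (big-endian) QP context.
 * For string-valued fields (state, transport) the returned u64 carries a
 * pointer to a constant string and *is_str is set to 1.
 */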
static u64 qp_read_field(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
			 int index, int *is_str)
{
	int outlen = MLX5_ST_SZ_BYTES(query_qp_out);
	struct mlx5_qp_context *ctx;
	u64 param = 0;
	u32 *out;
	int err;
	int no_sq;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	err = mlx5_core_qp_query(dev, qp, out, outlen);
	if (err) {
		mlx5_core_warn(dev, "failed to query qp err=%d\n", err);
		goto out;
	}

	*is_str = 0;

	ctx = (struct mlx5_qp_context *)MLX5_ADDR_OF(query_qp_out, out, qpc);

	switch (index) {
	case QP_PID:
		param = qp->pid;
		break;
	case QP_STATE:
		param = (unsigned long)mlx5_qp_state_str(be32_to_cpu(ctx->flags) >> 28);
		*is_str = 1;
		break;
	case QP_XPORT:
		param = (unsigned long)mlx5_qp_type_str((be32_to_cpu(ctx->flags) >> 16) & 0xff);
		*is_str = 1;
		break;
	case QP_MTU:
		switch (ctx->mtu_msgmax >> 5) {
		case IB_MTU_256:
			param = 256;
			break;
		case IB_MTU_512:
			param = 512;
			break;
		case IB_MTU_1024:
			param = 1024;
			break;
		case IB_MTU_2048:
			param = 2048;
			break;
		case IB_MTU_4096:
			param = 4096;
			break;
		default:
			param = 0;
		}
		break;
	case QP_N_RECV:
		param = 1 << ((ctx->rq_size_stride >> 3) & 0xf);
		break;
	case QP_RECV_SZ:
		param = 1 << ((ctx->rq_size_stride & 7) + 4);
		break;
	case QP_N_SEND:
		no_sq = be16_to_cpu(ctx->sq_crq_size) >> 15;
		if (!no_sq)
			param = 1 << (be16_to_cpu(ctx->sq_crq_size) >> 11);
		else
			param = 0;
		break;
	case QP_LOG_PG_SZ:
		param = (be32_to_cpu(ctx->log_pg_sz_remote_qpn) >> 24) & 0x1f;
		param += 12;
		break;
	case QP_RQPN:
		param = be32_to_cpu(ctx->log_pg_sz_remote_qpn) & 0xffffff;
		break;
	}

out:
	kfree(out);
	return param;
}

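/* Issue a QUERY_EQ command directly for the given EQ number. */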
static int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
			      u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {};

	MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ);
	MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}

static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_eq_out);
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	err = mlx5_core_eq_query(dev, eq, out, outlen);
	if (err) {
		mlx5_core_warn(dev, "failed to query eq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_eq_out, out, eq_context_entry);

	switch (index) {
	case EQ_NUM_EQES:
		param = 1 << MLX5_GET(eqc, ctx, log_eq_size);
		break;
	case EQ_INTR:
		param = MLX5_GET(eqc, ctx, intr);
		break;
	case EQ_LOG_PG_SZ:
		param = MLX5_GET(eqc, ctx, log_page_size) + 12;
		break;
	}

out:
	kfree(out);
	return param;
}

static u64 cq_read_field(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
			 int index)
{
	int outlen = MLX5_ST_SZ_BYTES(query_cq_out);
	u64 param = 0;
	void *ctx;
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return param;

	err = mlx5_core_query_cq(dev, cq, out, outlen);
	if (err) {
		mlx5_core_warn(dev, "failed to query cq\n");
		goto out;
	}
	ctx = MLX5_ADDR_OF(query_cq_out, out, cq_context);

	switch (index) {
	case CQ_PID:
		param = cq->pid;
		break;
	case CQ_NUM_CQES:
		param = 1 << MLX5_GET(cqc, ctx, log_cq_size);
		break;
	case CQ_LOG_PG_SZ:
		param = MLX5_GET(cqc, ctx, log_page_size);
		break;
	}

out:
	kvfree(out);
	return param;
}

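/* Common read handler for all per-resource field files. Each field
 * descriptor records its own index, so the enclosing mlx5_rsc_debug (and
 * with it the device, resource type, and object) is recovered by stepping
 * back to fields[0] and then over the structure header.
 */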
static ssize_t dbg_read(struct file *filp, char __user *buf, size_t count,
			loff_t *pos)
{
	struct mlx5_field_desc *desc;
	struct mlx5_rsc_debug *d;
	char tbuf[18];
	int is_str = 0;
	u64 field;
	int ret;

	desc = filp->private_data;
	d = (void *)(desc - desc->i) - sizeof(*d);
	switch (d->type) {
	case MLX5_DBG_RSC_QP:
		field = qp_read_field(d->dev, d->object, desc->i, &is_str);
		break;

	case MLX5_DBG_RSC_EQ:
		field = eq_read_field(d->dev, d->object, desc->i);
		break;

	case MLX5_DBG_RSC_CQ:
		field = cq_read_field(d->dev, d->object, desc->i);
		break;

	default:
		mlx5_core_warn(d->dev, "invalid resource type %d\n", d->type);
		return -EINVAL;
	}

	if (is_str)
		ret = snprintf(tbuf, sizeof(tbuf), "%s\n", (const char *)(unsigned long)field);
	else
		ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field);

	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.read	= dbg_read,
};

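/* Create the debugfs subtree for a single resource: a directory named after
 * the resource number in hex, containing one file per field, all served by
 * dbg_read() above.
 */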
static int add_res_tree(struct mlx5_core_dev *dev, enum dbg_rsc_type type,
			struct dentry *root, struct mlx5_rsc_debug **dbg,
			int rsn, char **field, int nfile, void *data)
{
	struct mlx5_rsc_debug *d;
	char resn[32];
	int i;

	d = kzalloc(struct_size(d, fields, nfile), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->dev = dev;
	d->object = data;
	d->type = type;
	sprintf(resn, "0x%x", rsn);
	d->root = debugfs_create_dir(resn, root);

	for (i = 0; i < nfile; i++) {
		d->fields[i].i = i;
		debugfs_create_file(field[i], 0400, d->root, &d->fields[i],
				    &fops);
	}
	*dbg = d;

	return 0;
}

static void rem_res_tree(struct mlx5_rsc_debug *d)
{
	debugfs_remove_recursive(d->root);
	kfree(d);
}

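/* Hooks called on QP/EQ/CQ creation and destruction. They are no-ops when
 * the debugfs root was never set up, and a failed add leaves the resource's
 * ->dbg pointer NULL so removal stays safe.
 */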
int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_QP, dev->priv.qp_debugfs,
			   &qp->dbg, qp->qpn, qp_fields,
			   ARRAY_SIZE(qp_fields), qp);
	if (err)
		qp->dbg = NULL;

	return err;
}

void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
{
	if (!mlx5_debugfs_root)
		return;

	if (qp->dbg)
		rem_res_tree(qp->dbg);
}

int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_EQ, dev->priv.eq_debugfs,
			   &eq->dbg, eq->eqn, eq_fields,
			   ARRAY_SIZE(eq_fields), eq);
	if (err)
		eq->dbg = NULL;

	return err;
}

void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
	if (!mlx5_debugfs_root)
		return;

	if (eq->dbg)
		rem_res_tree(eq->dbg);
}

int mlx5_debug_cq_add(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	int err;

	if (!mlx5_debugfs_root)
		return 0;

	err = add_res_tree(dev, MLX5_DBG_RSC_CQ, dev->priv.cq_debugfs,
			   &cq->dbg, cq->cqn, cq_fields,
			   ARRAY_SIZE(cq_fields), cq);
	if (err)
		cq->dbg = NULL;

	return err;
}

void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
	if (!mlx5_debugfs_root)
		return;

	if (cq->dbg)
		rem_res_tree(cq->dbg);
}