This source file includes the following definitions:
- can_do_mad_ifc
- mlx5_MAD_IFC
- process_mad
- pma_cnt_ext_assign
- pma_cnt_assign
- process_pma_cmd
- mlx5_ib_process_mad
- mlx5_query_ext_port_caps
- mlx5_query_mad_ifc_smp_attr_node_info
- mlx5_query_mad_ifc_system_image_guid
- mlx5_query_mad_ifc_max_pkeys
- mlx5_query_mad_ifc_vendor_id
- mlx5_query_mad_ifc_node_desc
- mlx5_query_mad_ifc_node_guid
- mlx5_query_mad_ifc_pkey
- mlx5_query_mad_ifc_gids
- mlx5_query_mad_ifc_port
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_pma.h>
#include "mlx5_ib.h"
#include "cmd.h"

enum {
        MLX5_IB_VENDOR_CLASS1 = 0x9,
        MLX5_IB_VENDOR_CLASS2 = 0xa
};

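/* SMPs (the SUBN management classes) may only be handed to the MAD_IFC
 * firmware command when the port actually exposes an SMI; all other
 * management classes are always allowed through.
 */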
static bool can_do_mad_ifc(struct mlx5_ib_dev *dev, u8 port_num,
                           struct ib_mad *in_mad)
{
        if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED &&
            in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                return true;
        return dev->mdev->port_caps[port_num - 1].has_smi;
}

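/* Thin wrapper around the MAD_IFC firmware command: checks that the MAD may
 * be issued on this port and encodes the M_Key/B_Key "ignore" flags into the
 * command's op_modifier (bit 0 ignores the M_Key check, bit 1 the B_Key check).
 */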
static int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey,
                        int ignore_bkey, u8 port, const struct ib_wc *in_wc,
                        const struct ib_grh *in_grh, const void *in_mad,
                        void *response_mad)
{
        u8 op_modifier = 0;

        if (!can_do_mad_ifc(dev, port, (struct ib_mad *)in_mad))
                return -EPERM;

        /* Key check traps can't be generated unless we have in_wc to
         * tell us where to send the trap.
         */
        if (ignore_mkey || !in_wc)
                op_modifier |= 0x1;
        if (ignore_bkey || !in_wc)
                op_modifier |= 0x2;

        return mlx5_cmd_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier,
                                port);
}

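/* Forward a MAD to firmware via MAD_IFC after filtering by management class
 * and method: only GET/SET (plus TRAP_REPRESS for the SUBN classes) of the
 * SUBN, PERF, congestion-management and Mellanox vendor classes reach the
 * device; anything else is reported back as handled without a reply.
 */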
static int process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                       const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                       const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        u16 slid;
        int err;

        slid = in_wc ? ib_lid_cpu16(in_wc->slid) : be16_to_cpu(IB_LID_PERMISSIVE);

        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET &&
                    in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS)
                        return IB_MAD_RESULT_SUCCESS;

                /* Don't process SMInfo queries -- the SM class
                 * bypasses certain checks
                 */
                if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
                        return IB_MAD_RESULT_SUCCESS;
        } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
                   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1 ||
                   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2 ||
                   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
                if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method != IB_MGMT_METHOD_SET)
                        return IB_MAD_RESULT_SUCCESS;
        } else {
                return IB_MAD_RESULT_SUCCESS;
        }

        err = mlx5_MAD_IFC(to_mdev(ibdev),
                           mad_flags & IB_MAD_IGNORE_MKEY,
                           mad_flags & IB_MAD_IGNORE_BKEY,
                           port_num, in_wc, in_grh, in_mad, out_mad);
        if (err)
                return IB_MAD_RESULT_FAILURE;

        /* set return bit in status of directed route responses */
        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
                /* no response for trap repress */
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

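/* Translate a QUERY_VPORT_COUNTER mailbox into the extended PortCounters
 * attribute.  Octet counts are summed over unicast and multicast traffic and
 * shifted right by two, since the PMA reports data in units of 32-bit words.
 */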
static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
                               void *out)
{
#define MLX5_SUM_CNT(p, cntr1, cntr2)	\
	(MLX5_GET64(query_vport_counter_out, p, cntr1) + \
	MLX5_GET64(query_vport_counter_out, p, cntr2))

        pma_cnt_ext->port_xmit_data =
                cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
                                         transmitted_ib_multicast.octets) >> 2);
        pma_cnt_ext->port_rcv_data =
                cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
                                         received_ib_multicast.octets) >> 2);
        pma_cnt_ext->port_xmit_packets =
                cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.packets,
                                         transmitted_ib_multicast.packets));
        pma_cnt_ext->port_rcv_packets =
                cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.packets,
                                         received_ib_multicast.packets));
        pma_cnt_ext->port_unicast_xmit_packets =
                MLX5_GET64_BE(query_vport_counter_out,
                              out, transmitted_ib_unicast.packets);
        pma_cnt_ext->port_unicast_rcv_packets =
                MLX5_GET64_BE(query_vport_counter_out,
                              out, received_ib_unicast.packets);
        pma_cnt_ext->port_multicast_xmit_packets =
                MLX5_GET64_BE(query_vport_counter_out,
                              out, transmitted_ib_multicast.packets);
        pma_cnt_ext->port_multicast_rcv_packets =
                MLX5_GET64_BE(query_vport_counter_out,
                              out, received_ib_multicast.packets);
}

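/* Translate the IB port counters group of a PPCNT register query into the
 * classic (32-bit) PortCounters attribute, field by field.
 */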
static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
                           void *out)
{
        /* Traffic counters will be reported in
         * their 64bit form via ib_pma_portcounters_ext by default.
         */
        void *out_pma = MLX5_ADDR_OF(ppcnt_reg, out,
                                     counter_set);

#define MLX5_ASSIGN_PMA_CNTR(counter_var, counter_name) {	\
	counter_var = MLX5_GET_BE(typeof(counter_var),		\
				  ib_port_cntrs_grp_data_layout, \
				  out_pma, counter_name);	\
	}

        MLX5_ASSIGN_PMA_CNTR(pma_cnt->symbol_error_counter,
                             symbol_error_counter);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_error_recovery_counter,
                             link_error_recovery_counter);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_downed_counter,
                             link_downed_counter);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_errors,
                             port_rcv_errors);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_remphys_errors,
                             port_rcv_remote_physical_errors);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_switch_relay_errors,
                             port_rcv_switch_relay_errors);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_discards,
                             port_xmit_discards);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_constraint_errors,
                             port_xmit_constraint_errors);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_xmit_wait,
                             port_xmit_wait);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->port_rcv_constraint_errors,
                             port_rcv_constraint_errors);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->link_overrun_errors,
                             link_overrun_errors);
        MLX5_ASSIGN_PMA_CNTR(pma_cnt->vl15_dropped,
                             vl_15_dropped);
}

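/* Serve Performance Management GETs from hardware counters instead of going
 * through MAD_IFC: ClassPortInfo is answered inline, PortCountersExt from
 * QUERY_VPORT_COUNTER, and the remaining counter attributes from the PPCNT
 * register.
 */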
static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
                           const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        struct mlx5_core_dev *mdev;
        bool native_port = true;
        u8 mdev_port_num;
        void *out_cnt;
        int err;

        mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
        if (!mdev) {
                /* Failed to get the native port, likely because the 2nd port
                 * is still unavailable, so send the query to the 1st port.
                 * If the 2nd port comes up by then, the counter value isn't
                 * real.
                 */
                native_port = false;
                mdev = dev->mdev;
                mdev_port_num = 1;
        }

        if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
                struct ib_class_port_info cpi = {};

                cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
                memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
                err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
                goto done;
        }

        if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
                struct ib_pma_portcounters_ext *pma_cnt_ext =
                        (struct ib_pma_portcounters_ext *)(out_mad->data + 40);
                int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);

                out_cnt = kvzalloc(sz, GFP_KERNEL);
                if (!out_cnt) {
                        err = IB_MAD_RESULT_FAILURE;
                        goto done;
                }

                err = mlx5_core_query_vport_counter(mdev, 0, 0,
                                                    mdev_port_num, out_cnt, sz);
                if (!err)
                        pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
        } else {
                struct ib_pma_portcounters *pma_cnt =
                        (struct ib_pma_portcounters *)(out_mad->data + 40);
                int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);

                out_cnt = kvzalloc(sz, GFP_KERNEL);
                if (!out_cnt) {
                        err = IB_MAD_RESULT_FAILURE;
                        goto done;
                }

                err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num,
                                               out_cnt, sz);
                if (!err)
                        pma_cnt_assign(pma_cnt, out_cnt);
        }
        kvfree(out_cnt);
        err = err ? IB_MAD_RESULT_FAILURE :
                    IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
done:
        if (native_port)
                mlx5_ib_put_native_port_mdev(dev, port_num);
        return err;
}

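/* Entry point for this driver's MAD handling.  PMA GET requests are served
 * from hardware counters when the device supports vport counters; everything
 * else goes through process_mad().
 */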
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                        const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                        const struct ib_mad_hdr *in, size_t in_mad_size,
                        struct ib_mad_hdr *out, size_t *out_mad_size,
                        u16 *out_mad_pkey_index)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
        int ret;

        if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
                         *out_mad_size != sizeof(*out_mad)))
                return IB_MAD_RESULT_FAILURE;

        memset(out_mad->data, 0, sizeof(out_mad->data));

        if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
            in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
                ret = process_pma_cmd(dev, port_num, in_mad, out_mad);
        } else {
                ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
                                  in_mad, out_mad);
        }
        return ret;
}

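/* Query the Mellanox extended port info attribute and record in port_caps
 * whether the port supports it; this is used later to detect FDR-10 in
 * mlx5_query_mad_ifc_port().
 */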
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        u16 packet_error;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

        packet_error = be16_to_cpu(out_mad->status);

        dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
                MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

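/* The mlx5_query_mad_ifc_* helpers below read device- and port-level
 * attributes by issuing SubnGet SMPs through MAD_IFC and extracting the
 * relevant fields from the returned attribute data.
 */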
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
                                          struct ib_smp *out_mad)
{
        struct ib_smp *in_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        if (!in_mad)
                return -ENOMEM;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
                           out_mad);

        kfree(in_mad);
        return err;
}

int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
                                         __be64 *sys_image_guid)
{
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!out_mad)
                return -ENOMEM;

        err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
        if (err)
                goto out;

        memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
        kfree(out_mad);

        return err;
}

int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
                                 u16 *max_pkeys)
{
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!out_mad)
                return -ENOMEM;

        err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
        if (err)
                goto out;

        *max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
        kfree(out_mad);

        return err;
}

int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
                                 u32 *vendor_id)
{
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!out_mad)
                return -ENOMEM;

        err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
        if (err)
                goto out;

        *vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
        kfree(out_mad);

        return err;
}

int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

        err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);
out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(node_guid, out_mad->data + 12, 8);
out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

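/* Read a single P_Key: fetch the 32-entry P_Key table block that contains
 * @index and pick the requested entry out of it.
 */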
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
                            u16 *pkey)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
                           out_mad);
        if (err)
                goto out;

        *pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

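/* Build a GID from two SMP queries: the subnet prefix comes from PortInfo
 * and the GUID from the GuidInfo block that contains @index.
 */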
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
                            union ib_gid *gid)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
                           out_mad);
        if (err)
                goto out;

        memcpy(gid->raw, out_mad->data + 8, 8);

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
                           out_mad);
        if (err)
                goto out;

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

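/* Fill struct ib_port_attr from a PortInfo SMP, then fix up the active speed
 * for extended rates (FDR/EDR/HDR) and for FDR-10, which is only visible
 * through the extended port info attribute.
 */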
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
                            struct ib_port_attr *props)
{
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
        struct mlx5_core_dev *mdev = dev->mdev;
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int ext_active_speed;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
        out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        /* props being zeroed by the caller, avoid zeroing it here */

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
        if (err) {
                mlx5_ib_warn(dev, "err %d\n", err);
                goto out;
        }

        props->lid = be16_to_cpup((__be16 *)(out_mad->data + 16));
        props->lmc = out_mad->data[34] & 0x7;
        props->sm_lid = be16_to_cpup((__be16 *)(out_mad->data + 18));
        props->sm_sl = out_mad->data[36] & 0xf;
        props->state = out_mad->data[32] & 0xf;
        props->phys_state = out_mad->data[33] >> 4;
        props->port_cap_flags = be32_to_cpup((__be32 *)(out_mad->data + 20));
        props->gid_tbl_len = out_mad->data[50];
        props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
        props->pkey_tbl_len = mdev->port_caps[port - 1].pkey_table_len;
        props->bad_pkey_cntr = be16_to_cpup((__be16 *)(out_mad->data + 46));
        props->qkey_viol_cntr = be16_to_cpup((__be16 *)(out_mad->data + 48));
        props->active_width = out_mad->data[31] & 0xf;
        props->active_speed = out_mad->data[35] >> 4;
        props->max_mtu = out_mad->data[41] & 0xf;
        props->active_mtu = out_mad->data[36] >> 4;
        props->subnet_timeout = out_mad->data[51] & 0x1f;
        props->max_vl_num = out_mad->data[37] >> 4;
        props->init_type_reply = out_mad->data[41] >> 4;

        if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP) {
                props->port_cap_flags2 =
                        be16_to_cpup((__be16 *)(out_mad->data + 60));

                if (props->port_cap_flags2 & IB_PORT_LINK_WIDTH_2X_SUP)
                        props->active_width = out_mad->data[31] & 0x1f;
        }

        /* Check if extended speeds (EDR/FDR/...) are supported */
        if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
                ext_active_speed = out_mad->data[62] >> 4;

                switch (ext_active_speed) {
                case 1:
                        props->active_speed = 16; /* FDR */
                        break;
                case 2:
                        props->active_speed = 32; /* EDR */
                        break;
                case 4:
                        if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
                            props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP)
                                props->active_speed = IB_SPEED_HDR;
                        break;
                }
        }

        /* If reported active speed is QDR, check if it is FDR-10 */
        if (props->active_speed == 4) {
                if (mdev->port_caps[port - 1].ext_port_cap &
                    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
                        init_query_mad(in_mad);
                        in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
                        in_mad->attr_mod = cpu_to_be32(port);

                        err = mlx5_MAD_IFC(dev, 1, 1, port,
                                           NULL, NULL, in_mad, out_mad);
                        if (err)
                                goto out;

                        /* Checking LinkSpeedActive for FDR-10 */
                        if (out_mad->data[15] & 0x1)
                                props->active_speed = 8;
                }
        }

out:
        kfree(in_mad);
        kfree(out_mad);

        return err;
}