/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/cmd.h>
#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include "mlx5_ib.h"

enum {
	MLX5_IB_VENDOR_CLASS1 = 0x9,
	MLX5_IB_VENDOR_CLASS2 = 0xa
};

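/* Wrapper around the MAD_IFC firmware command: build the operation
 * modifier from the key-check flags and forward the MAD to mlx5_core.
 */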
int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
		 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
		 const void *in_mad, void *response_mad)
{
	u8 op_modifier = 0;

	/* Key check traps can't be generated unless we have in_wc to
	 * tell us where to send the trap.
	 */
	if (ignore_mkey || !in_wc)
		op_modifier |= 0x1;
	if (ignore_bkey || !in_wc)
		op_modifier |= 0x2;

	return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
}

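/* ib_device process_mad callback: validate the incoming MAD, filter out
 * the management classes and methods the device does not handle, and
 * pass the rest to firmware via MAD_IFC.
 */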
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad_hdr *in, size_t in_mad_size,
			struct ib_mad_hdr *out, size_t *out_mad_size,
			u16 *out_mad_pkey_index)
{
	u16 slid;
	int err;
	const struct ib_mad *in_mad = (const struct ib_mad *)in;
	struct ib_mad *out_mad = (struct ib_mad *)out;

	if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
			 *out_mad_size != sizeof(*out_mad)))
		return IB_MAD_RESULT_FAILURE;

	slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0)
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
	    in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
		if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
		    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
			return IB_MAD_RESULT_SUCCESS;

		/* Don't process SMInfo queries -- the SMA can't handle them.
		 */
		if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
			return IB_MAD_RESULT_SUCCESS;
	} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS1   ||
		   in_mad->mad_hdr.mgmt_class == MLX5_IB_VENDOR_CLASS2   ||
		   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
		if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
		    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
			return IB_MAD_RESULT_SUCCESS;
	} else {
		return IB_MAD_RESULT_SUCCESS;
	}

	err = mlx5_MAD_IFC(to_mdev(ibdev),
			   mad_flags & IB_MAD_IGNORE_MKEY,
			   mad_flags & IB_MAD_IGNORE_BKEY,
			   port_num, in_wc, in_grh, in_mad, out_mad);
	if (err)
		return IB_MAD_RESULT_FAILURE;

	/* set return bit in status of directed route responses */
	if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

	if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
		/* no response for trap repress */
		return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

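/* Query ExtendedPortInfo support for @port and cache the result in the
 * core device's port capabilities.
 */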
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;
	u16 packet_error;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);

	packet_error = be16_to_cpu(out_mad->status);

	dev->mdev->port_caps[port - 1].ext_port_cap = (!err && !packet_error) ?
		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

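/* Issue a NodeInfo SMP query and return the raw response in @out_mad. */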
int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
					  struct ib_smp *out_mad)
{
	struct ib_smp *in_mad = NULL;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	if (!in_mad)
		return -ENOMEM;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
			   out_mad);

	kfree(in_mad);
	return err;
}

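/* Extract the system image GUID from a NodeInfo query. */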
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	memcpy(sys_image_guid, out_mad->data + 4, 8);

out:
	kfree(out_mad);

	return err;
}

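/* Extract the maximum number of supported P_Keys from a NodeInfo query. */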
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*max_pkeys = be16_to_cpup((__be16 *)(out_mad->data + 28));

out:
	kfree(out_mad);

	return err;
}

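/* Extract the lower 16 bits of the vendor ID field from a NodeInfo query. */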
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id)
{
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!out_mad)
		return -ENOMEM;

	err = mlx5_query_mad_ifc_smp_attr_node_info(ibdev, out_mad);
	if (err)
		goto out;

	*vendor_id = be32_to_cpup((__be32 *)(out_mad->data + 36)) & 0xffff;

out:
	kfree(out_mad);

	return err;
}

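/* Query the 64-byte NodeDescription SMP attribute into @node_desc. */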
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_desc, out_mad->data, 64);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

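/* Extract the node GUID from a NodeInfo SMP query. */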
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(node_guid, out_mad->data + 12, 8);
out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

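/* Read one P_Key table entry: query the 32-entry block containing
 * @index and pick the requested slot out of the response.
 */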
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u8 port, u16 index,
			    u16 *pkey)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *)out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

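/* Build the GID for @port/@index: the subnet prefix comes from PortInfo
 * and the interface ID from the matching GUIDInfo block.
 */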
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u8 port, int index,
			    union ib_gid *gid)
{
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int err = -ENOMEM;

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad,
			   out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

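/* Query PortInfo via MAD_IFC and translate the response into
 * struct ib_port_attr, including the extended-speed (FDR/EDR) and
 * FDR-10 handling.
 */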
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u8 port,
			    struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct ib_smp *in_mad  = NULL;
	struct ib_smp *out_mad = NULL;
	int ext_active_speed;
	int err = -ENOMEM;

	if (port < 1 || port > MLX5_CAP_GEN(mdev, num_ports)) {
		mlx5_ib_warn(dev, "invalid port number %d\n", port);
		return -EINVAL;
	}

	in_mad  = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	memset(props, 0, sizeof(*props));

	init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	err = mlx5_MAD_IFC(dev, 1, 1, port, NULL, NULL, in_mad, out_mad);
	if (err) {
		mlx5_ib_warn(dev, "err %d\n", err);
		goto out;
	}

	props->lid		= be16_to_cpup((__be16 *)(out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *)(out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *)(out_mad->data + 20));
	props->gid_tbl_len	= out_mad->data[50];
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mdev->port_caps[port - 1].pkey_table_len;
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *)(out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = 16; /* FDR */
			break;
		case 2:
			props->active_speed = 32; /* EDR */
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == 4) {
		if (mdev->port_caps[port - 1].ext_port_cap &
		    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
			init_query_mad(in_mad);
			in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
			in_mad->attr_mod = cpu_to_be32(port);

			err = mlx5_MAD_IFC(dev, 1, 1, port,
					   NULL, NULL, in_mad, out_mad);
			if (err)
				goto out;

			/* Checking LinkSpeedActive for FDR-10 */
			if (out_mad->data[15] & 0x1)
				props->active_speed = 8;
		}
	}

out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}