/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/driver.h>

#include "mlx5_core.h"
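/* Look up the resource registered under @rsn in the per-device QP table and
 * take a reference on it.  Returns NULL (after a warning) if no resource with
 * that number exists.
 */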
static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
						 u32 rsn)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	struct mlx5_core_rsc_common *common;

	spin_lock(&table->lock);

	common = radix_tree_lookup(&table->tree, rsn);
	if (common)
		atomic_inc(&common->refcount);

	spin_unlock(&table->lock);

	if (!common) {
		mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
			       rsn);
		return NULL;
	}
	return common;
}
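/* Drop a reference taken by mlx5_get_rsc().  The final put completes
 * common->free, allowing mlx5_core_destroy_qp() to proceed.
 */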
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
	if (atomic_dec_and_test(&common->refcount))
		complete(&common->free);
}
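/* Deliver an asynchronous event to the resource identified by @rsn.  Only QP
 * resources are currently handled; the event is forwarded to the QP's event
 * callback.
 */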
void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type)
{
	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn);
	struct mlx5_core_qp *qp;

	if (!common)
		return;

	switch (common->res) {
	case MLX5_RES_QP:
		qp = (struct mlx5_core_qp *)common;
		qp->event(qp, event_type);
		break;

	default:
		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
	}

	mlx5_core_put_rsc(common);
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
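/* Decode an ODP page-fault EQE and hand it to the QP's pfault_handler.
 * Both RDMA- and WQE-initiated faults are parsed; unsupported sub-types are
 * still forwarded so the handler can resume the QP.
 */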
void mlx5_eq_pagefault(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
{
	struct mlx5_eqe_page_fault *pf_eqe = &eqe->data.page_fault;
	int qpn = be32_to_cpu(pf_eqe->flags_qpn) & MLX5_QPN_MASK;
	struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, qpn);
	struct mlx5_core_qp *qp =
		container_of(common, struct mlx5_core_qp, common);
	struct mlx5_pagefault pfault;

	if (!qp) {
		mlx5_core_warn(dev, "ODP event for non-existent QP %06x\n",
			       qpn);
		return;
	}

	pfault.event_subtype = eqe->sub_type;
	pfault.flags = (be32_to_cpu(pf_eqe->flags_qpn) >> MLX5_QPN_BITS) &
		(MLX5_PFAULT_REQUESTOR | MLX5_PFAULT_WRITE | MLX5_PFAULT_RDMA);
	pfault.bytes_committed = be32_to_cpu(
		pf_eqe->bytes_committed);

	mlx5_core_dbg(dev,
		      "PAGE_FAULT: subtype: 0x%02x, flags: 0x%02x,\n",
		      eqe->sub_type, pfault.flags);

	switch (eqe->sub_type) {
	case MLX5_PFAULT_SUBTYPE_RDMA:
		/* RDMA based event */
		pfault.rdma.r_key =
			be32_to_cpu(pf_eqe->rdma.r_key);
		pfault.rdma.packet_size =
			be16_to_cpu(pf_eqe->rdma.packet_length);
		pfault.rdma.rdma_op_len =
			be32_to_cpu(pf_eqe->rdma.rdma_op_len);
		pfault.rdma.rdma_va =
			be64_to_cpu(pf_eqe->rdma.rdma_va);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: qpn: 0x%06x, r_key: 0x%08x,\n",
			      qpn, pfault.rdma.r_key);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: rdma_op_len: 0x%08x,\n",
			      pfault.rdma.rdma_op_len);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: rdma_va: 0x%016llx,\n",
			      pfault.rdma.rdma_va);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: bytes_committed: 0x%06x\n",
			      pfault.bytes_committed);
		break;

	case MLX5_PFAULT_SUBTYPE_WQE:
		/* WQE based event */
		pfault.wqe.wqe_index =
			be16_to_cpu(pf_eqe->wqe.wqe_index);
		pfault.wqe.packet_size =
			be16_to_cpu(pf_eqe->wqe.packet_length);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: qpn: 0x%06x, wqe_index: 0x%04x,\n",
			      qpn, pfault.wqe.wqe_index);
		mlx5_core_dbg(dev,
			      "PAGE_FAULT: bytes_committed: 0x%06x\n",
			      pfault.bytes_committed);
		break;

	default:
		mlx5_core_warn(dev,
			       "Unsupported page fault event sub-type: 0x%02hhx, QP %06x\n",
			       eqe->sub_type, qpn);
		/* Unsupported page faults should still be resolved by the
		 * page fault handler
		 */
	}

	if (qp->pfault_handler) {
		qp->pfault_handler(qp, &pfault);
	} else {
		mlx5_core_err(dev,
			      "ODP event for QP %08x, without a fault handler in QP\n",
			      qpn);
		/* Page fault will remain unresolved. QP will hang until it is
		 * destroyed
		 */
	}

	mlx5_core_put_rsc(common);
}
#endif
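/* Create a QP in firmware, publish it in the driver's radix tree (keyed by
 * QPN) and debugfs, and initialize its reference count.  If the radix tree
 * insertion fails, the QP is destroyed again before returning the error.
 */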
int mlx5_core_create_qp(struct mlx5_core_dev *dev,
			struct mlx5_core_qp *qp,
			struct mlx5_create_qp_mbox_in *in,
			int inlen)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	struct mlx5_create_qp_mbox_out out;
	struct mlx5_destroy_qp_mbox_in din;
	struct mlx5_destroy_qp_mbox_out dout;
	int err;

	memset(&out, 0, sizeof(out));
	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_QP);

	err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "ret %d\n", err);
		return err;
	}

	if (out.hdr.status) {
		mlx5_core_warn(dev, "current num of QPs 0x%x\n",
			       atomic_read(&dev->num_qps));
		return mlx5_cmd_status_to_err(&out.hdr);
	}

	qp->qpn = be32_to_cpu(out.qpn) & 0xffffff;
	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);

	qp->common.res = MLX5_RES_QP;
	spin_lock_irq(&table->lock);
	err = radix_tree_insert(&table->tree, qp->qpn, qp);
	spin_unlock_irq(&table->lock);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto err_cmd;
	}

	err = mlx5_debug_qp_add(dev, qp);
	if (err)
		mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
			      qp->qpn);

	qp->pid = current->pid;
	atomic_set(&qp->common.refcount, 1);
	atomic_inc(&dev->num_qps);
	init_completion(&qp->common.free);

	return 0;

err_cmd:
	memset(&din, 0, sizeof(din));
	memset(&dout, 0, sizeof(dout));
	din.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
	din.qpn = cpu_to_be32(qp->qpn);
	mlx5_cmd_exec(dev, &din, sizeof(din), &dout, sizeof(dout));

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
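/* Remove the QP from debugfs and the radix tree, wait for all outstanding
 * references to be released, then destroy it in firmware.
 */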
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
			 struct mlx5_core_qp *qp)
{
	struct mlx5_destroy_qp_mbox_in in;
	struct mlx5_destroy_qp_mbox_out out;
	struct mlx5_qp_table *table = &dev->priv.qp_table;
	unsigned long flags;
	int err;

	mlx5_debug_qp_remove(dev, qp);

	spin_lock_irqsave(&table->lock, flags);
	radix_tree_delete(&table->tree, qp->qpn);
	spin_unlock_irqrestore(&table->lock, flags);

	mlx5_core_put_rsc((struct mlx5_core_rsc_common *)qp);
	wait_for_completion(&qp->common.free);

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_QP);
	in.qpn = cpu_to_be32(qp->qpn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		return mlx5_cmd_status_to_err(&out.hdr);

	atomic_dec(&dev->num_qps);
	return 0;
}
EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
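/* Transition a QP between states.  The optab matrix maps each legal
 * (current state, new state) pair to the corresponding firmware command;
 * transitions without an entry are rejected with -EINVAL.
 */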
int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
			enum mlx5_qp_state new_state,
			struct mlx5_modify_qp_mbox_in *in, int sqd_event,
			struct mlx5_core_qp *qp)
{
	static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
		[MLX5_QP_STATE_RST] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_RST2INIT_QP,
		},
		[MLX5_QP_STATE_INIT]  = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_INIT]	= MLX5_CMD_OP_INIT2INIT_QP,
			[MLX5_QP_STATE_RTR]	= MLX5_CMD_OP_INIT2RTR_QP,
		},
		[MLX5_QP_STATE_RTR]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTR2RTS_QP,
		},
		[MLX5_QP_STATE_RTS]   = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_RTS2RTS_QP,
		},
		[MLX5_QP_STATE_SQD] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		},
		[MLX5_QP_STATE_SQER] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
			[MLX5_QP_STATE_RTS]	= MLX5_CMD_OP_SQERR2RTS_QP,
		},
		[MLX5_QP_STATE_ERR] = {
			[MLX5_QP_STATE_RST]	= MLX5_CMD_OP_2RST_QP,
			[MLX5_QP_STATE_ERR]	= MLX5_CMD_OP_2ERR_QP,
		}
	};

	struct mlx5_modify_qp_mbox_out out;
	int err = 0;
	u16 op;

	if (cur_state >= MLX5_QP_NUM_STATE || new_state >= MLX5_QP_NUM_STATE ||
	    !optab[cur_state][new_state])
		return -EINVAL;

	memset(&out, 0, sizeof(out));
	op = optab[cur_state][new_state];
	in->hdr.opcode = cpu_to_be16(op);
	in->qpn = cpu_to_be32(qp->qpn);
	err = mlx5_cmd_exec(dev, in, sizeof(*in), &out, sizeof(out));
	if (err)
		return err;

	return mlx5_cmd_status_to_err(&out.hdr);
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
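/* Initialize the per-device QP table (lock and radix tree) and its debugfs
 * entries.
 */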
void mlx5_init_qp_table(struct mlx5_core_dev *dev)
{
	struct mlx5_qp_table *table = &dev->priv.qp_table;

	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
	mlx5_qp_debugfs_init(dev);
}
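/* Tear down the debugfs entries created by mlx5_init_qp_table(). */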
void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
{
	mlx5_qp_debugfs_cleanup(dev);
}
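/* Query a QP's context from firmware; the raw QUERY_QP mailbox output is
 * returned to the caller through @out.
 */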
int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
		       struct mlx5_query_qp_mbox_out *out, int outlen)
{
	struct mlx5_query_qp_mbox_in in;
	int err;

	memset(&in, 0, sizeof(in));
	memset(out, 0, outlen);
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_QP);
	in.qpn = cpu_to_be32(qp->qpn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
	if (err)
		return err;

	if (out->hdr.status)
		return mlx5_cmd_status_to_err(&out->hdr);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
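/* Allocate an XRC domain and return its number through @xrcdn. */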
int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
{
	struct mlx5_alloc_xrcd_mbox_in in;
	struct mlx5_alloc_xrcd_mbox_out out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ALLOC_XRCD);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		err = mlx5_cmd_status_to_err(&out.hdr);
	else
		*xrcdn = be32_to_cpu(out.xrcdn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
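/* Free an XRC domain previously allocated with mlx5_core_xrcd_alloc(). */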
int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
{
	struct mlx5_dealloc_xrcd_mbox_in in;
	struct mlx5_dealloc_xrcd_mbox_out out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DEALLOC_XRCD);
	in.xrcdn = cpu_to_be32(xrcdn);
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		err = mlx5_cmd_status_to_err(&out.hdr);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
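/* Ask firmware to resume QP @qpn after an ODP page fault has been handled.
 * @flags is masked to the requestor/write/RDMA resume bits; @error adds the
 * error flag to indicate the fault could not be resolved.
 */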
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
				u8 flags, int error)
{
	struct mlx5_page_fault_resume_mbox_in in;
	struct mlx5_page_fault_resume_mbox_out out;
	int err;

	memset(&in, 0, sizeof(in));
	memset(&out, 0, sizeof(out));
	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_PAGE_FAULT_RESUME);
	in.hdr.opmod = 0;
	flags &= (MLX5_PAGE_FAULT_RESUME_REQUESTOR |
		  MLX5_PAGE_FAULT_RESUME_WRITE	   |
		  MLX5_PAGE_FAULT_RESUME_RDMA);
	flags |= (error ? MLX5_PAGE_FAULT_RESUME_ERROR : 0);
	in.flags_qpn = cpu_to_be32((qpn & MLX5_QPN_MASK) |
				   (flags << MLX5_QPN_BITS));
	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
	if (err)
		return err;

	if (out.hdr.status)
		err = mlx5_cmd_status_to_err(&out.hdr);

	return err;
}
EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume);
#endif