1 /*
2  * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/kernel.h>
34 #include <linux/module.h>
35 #include <linux/mlx5/driver.h>
36 #include <linux/mlx5/cmd.h>
37 #include <linux/mlx5/srq.h>
38 #include <rdma/ib_verbs.h>
39 #include "mlx5_core.h"
40 #include "transobj.h"
41 
/* Dispatch an async hardware event to the SRQ it targets.
 *
 * Looks up @srqn in the device's SRQ radix tree and takes a reference
 * under the table lock so the SRQ cannot be freed while its event
 * handler runs.  Unknown SRQ numbers are logged and ignored.
 */
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *found;

	spin_lock(&table->lock);
	found = radix_tree_lookup(&table->tree, srqn);
	if (found)
		atomic_inc(&found->refcount);
	spin_unlock(&table->lock);

	if (!found) {
		mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
		return;
	}

	found->event(found, event_type);

	/* Drop the reference taken above; wake a destroyer waiting on
	 * srq->free if this was the last one.
	 */
	if (atomic_dec_and_test(&found->refcount))
		complete(&found->free);
}
65 
get_pas_size(void * srqc)66 static int get_pas_size(void *srqc)
67 {
68 	u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
69 	u32 log_srq_size  = MLX5_GET(srqc, srqc, log_srq_size);
70 	u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
71 	u32 page_offset   = MLX5_GET(srqc, srqc, page_offset);
72 	u32 po_quanta	  = 1 << (log_page_size - 6);
73 	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
74 	u32 page_size	  = 1 << log_page_size;
75 	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
76 	u32 rq_num_pas	  = (rq_sz_po + page_size - 1) / page_size;
77 
78 	return rq_num_pas * sizeof(u64);
79 }
80 
/* Translate between the legacy SRQ context (srqc) and the RMP context
 * (rmpc) mailbox layouts.
 *
 * @srqc_to_rmpc: true copies srqc fields into rmpc (create path),
 *		  false copies rmpc fields back into srqc (query path).
 *
 * The two layouts carry the same logical fields under different names;
 * note the +4/-4 adjustment between srqc.log_rq_stride and
 * wq.log_wq_stride.  An unrecognized state value is logged and copied
 * through unchanged.
 */
static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
{
	void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	if (srqc_to_rmpc) {
		switch (MLX5_GET(srqc, srqc, state)) {
		case MLX5_SRQC_STATE_GOOD:
			MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
			break;
		case MLX5_SRQC_STATE_ERROR:
			MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
			break;
		default:
			pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
				__LINE__, MLX5_GET(srqc, srqc, state));
			MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
		}

		MLX5_SET(wq,   wq, wq_signature,  MLX5_GET(srqc,  srqc, wq_signature));
		MLX5_SET(wq,   wq, log_wq_pg_sz,  MLX5_GET(srqc,  srqc, log_page_size));
		/* srqc stride is log2(bytes) - 4; wq stride is log2(bytes) */
		MLX5_SET(wq,   wq, log_wq_stride, MLX5_GET(srqc,  srqc, log_rq_stride) + 4);
		MLX5_SET(wq,   wq, log_wq_sz,     MLX5_GET(srqc,  srqc, log_srq_size));
		MLX5_SET(wq,   wq, page_offset,   MLX5_GET(srqc,  srqc, page_offset));
		MLX5_SET(wq,   wq, lwm,           MLX5_GET(srqc,  srqc, lwm));
		MLX5_SET(wq,   wq, pd,            MLX5_GET(srqc,  srqc, pd));
		MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc,	  srqc, dbr_addr));
	} else {
		switch (MLX5_GET(rmpc, rmpc, state)) {
		case MLX5_RMPC_STATE_RDY:
			MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
			break;
		case MLX5_RMPC_STATE_ERR:
			MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
			break;
		default:
			pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
				__func__, __LINE__,
				MLX5_GET(rmpc, rmpc, state));
			MLX5_SET(srqc, srqc, state,
				 MLX5_GET(rmpc, rmpc, state));
		}

		MLX5_SET(srqc,   srqc, wq_signature,   MLX5_GET(wq,   wq, wq_signature));
		MLX5_SET(srqc,   srqc, log_page_size,  MLX5_GET(wq,   wq, log_wq_pg_sz));
		MLX5_SET(srqc,   srqc, log_rq_stride,  MLX5_GET(wq,   wq, log_wq_stride) - 4);
		MLX5_SET(srqc,   srqc, log_srq_size,   MLX5_GET(wq,   wq, log_wq_sz));
		MLX5_SET(srqc,   srqc, page_offset,    MLX5_GET(wq,   wq, page_offset));
		MLX5_SET(srqc,   srqc, lwm,	       MLX5_GET(wq,   wq, lwm));
		MLX5_SET(srqc,   srqc, pd,	       MLX5_GET(wq,   wq, pd));
		MLX5_SET64(srqc, srqc, dbr_addr,       MLX5_GET64(wq, wq, dbr_addr));
	}
}
133 
/* Return the SRQ registered under @srqn, or NULL if none exists.
 *
 * On success a reference is taken on behalf of the caller, who must
 * release it (the final release completes srq->free).
 */
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *res;

	spin_lock(&table->lock);
	res = radix_tree_lookup(&table->tree, srqn);
	if (res)
		atomic_inc(&res->refcount);
	spin_unlock(&table->lock);

	return res;
}
EXPORT_SYMBOL(mlx5_core_get_srq);
150 
create_srq_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_create_srq_mbox_in * in,int inlen)151 static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
152 			  struct mlx5_create_srq_mbox_in *in, int inlen)
153 {
154 	struct mlx5_create_srq_mbox_out out;
155 	int err;
156 
157 	memset(&out, 0, sizeof(out));
158 
159 	in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
160 
161 	err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
162 					 sizeof(out));
163 
164 	srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
165 
166 	return err;
167 }
168 
destroy_srq_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq)169 static int destroy_srq_cmd(struct mlx5_core_dev *dev,
170 			   struct mlx5_core_srq *srq)
171 {
172 	struct mlx5_destroy_srq_mbox_in in;
173 	struct mlx5_destroy_srq_mbox_out out;
174 
175 	memset(&in, 0, sizeof(in));
176 	memset(&out, 0, sizeof(out));
177 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
178 	in.srqn = cpu_to_be32(srq->srqn);
179 
180 	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
181 					  (u32 *)(&out), sizeof(out));
182 }
183 
/* Arm an (X)RQ via the legacy ARM_RQ command.
 *
 * @lwm:    limit watermark to arm at.
 * @is_srq: nonzero selects SRQ semantics via the opcode modifier.
 */
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm, int is_srq)
{
	struct mlx5_arm_srq_mbox_in	req;
	struct mlx5_arm_srq_mbox_out	resp;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
	req.hdr.opmod = cpu_to_be16(!!is_srq);	/* opmod 1 == SRQ */
	req.srqn = cpu_to_be32(srq->srqn);
	req.lwm = cpu_to_be16(lwm);

	return mlx5_cmd_exec_check_status(dev, (u32 *)(&req),
					  sizeof(req), (u32 *)(&resp),
					  sizeof(resp));
}
202 
query_srq_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_query_srq_mbox_out * out)203 static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
204 			 struct mlx5_query_srq_mbox_out *out)
205 {
206 	struct mlx5_query_srq_mbox_in in;
207 
208 	memset(&in, 0, sizeof(in));
209 
210 	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
211 	in.srqn = cpu_to_be32(srq->srqn);
212 
213 	return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
214 					  (u32 *)out, sizeof(*out));
215 }
216 
create_xrc_srq_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_create_srq_mbox_in * in,int srq_inlen)217 static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
218 			      struct mlx5_core_srq *srq,
219 			      struct mlx5_create_srq_mbox_in *in,
220 			      int srq_inlen)
221 {
222 	u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
223 	void *create_in;
224 	void *srqc;
225 	void *xrc_srqc;
226 	void *pas;
227 	int pas_size;
228 	int inlen;
229 	int err;
230 
231 	srqc	  = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
232 	pas_size  = get_pas_size(srqc);
233 	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
234 	create_in = mlx5_vzalloc(inlen);
235 	if (!create_in)
236 		return -ENOMEM;
237 
238 	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
239 				xrc_srq_context_entry);
240 	pas	 = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
241 
242 	memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
243 	memcpy(pas, in->pas, pas_size);
244 	/* 0xffffff means we ask to work with cqe version 0 */
245 	MLX5_SET(xrc_srqc,	    xrc_srqc,  user_index, 0xffffff);
246 	MLX5_SET(create_xrc_srq_in, create_in, opcode,
247 		 MLX5_CMD_OP_CREATE_XRC_SRQ);
248 
249 	memset(create_out, 0, sizeof(create_out));
250 	err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
251 					 sizeof(create_out));
252 	if (err)
253 		goto out;
254 
255 	srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
256 out:
257 	kvfree(create_in);
258 	return err;
259 }
260 
destroy_xrc_srq_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq)261 static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
262 			       struct mlx5_core_srq *srq)
263 {
264 	u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
265 	u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];
266 
267 	memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
268 	memset(xrcsrq_out, 0, sizeof(xrcsrq_out));
269 
270 	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
271 		 MLX5_CMD_OP_DESTROY_XRC_SRQ);
272 	MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
273 
274 	return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
275 					  xrcsrq_out, sizeof(xrcsrq_out));
276 }
277 
/* Arm an XRC SRQ at limit watermark @lwm via ARM_XRC_SRQ. */
static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq, u16 lwm)
{
	u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
	u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode,
		 MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod,
		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
	MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);

	return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
					  xrcsrq_out, sizeof(xrcsrq_out));
}
295 
query_xrc_srq_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_query_srq_mbox_out * out)296 static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
297 			     struct mlx5_core_srq *srq,
298 			     struct mlx5_query_srq_mbox_out *out)
299 {
300 	u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
301 	u32 *xrcsrq_out;
302 	void *srqc;
303 	void *xrc_srqc;
304 	int err;
305 
306 	xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
307 	if (!xrcsrq_out)
308 		return -ENOMEM;
309 	memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
310 
311 	MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
312 		 MLX5_CMD_OP_QUERY_XRC_SRQ);
313 	MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
314 	err =  mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
315 					  xrcsrq_out,
316 					  MLX5_ST_SZ_BYTES(query_xrc_srq_out));
317 	if (err)
318 		goto out;
319 
320 	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
321 				xrc_srq_context_entry);
322 	srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
323 	memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
324 
325 out:
326 	kvfree(xrcsrq_out);
327 	return err;
328 }
329 
create_rmp_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_create_srq_mbox_in * in,int srq_inlen)330 static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
331 			  struct mlx5_create_srq_mbox_in *in, int srq_inlen)
332 {
333 	void *create_in;
334 	void *rmpc;
335 	void *srqc;
336 	int pas_size;
337 	int inlen;
338 	int err;
339 
340 	srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
341 	pas_size = get_pas_size(srqc);
342 	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
343 	create_in = mlx5_vzalloc(inlen);
344 	if (!create_in)
345 		return -ENOMEM;
346 
347 	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
348 
349 	memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
350 	rmpc_srqc_reformat(srqc, rmpc, true);
351 
352 	err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
353 
354 	kvfree(create_in);
355 	return err;
356 }
357 
/* Destroy the RMP backing this SRQ (ISSI >= 1 non-XRC path). */
static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
			   struct mlx5_core_srq *srq)
{
	return mlx5_core_destroy_rmp(dev, srq->srqn);
}
363 
/* Arm the RMP backing this SRQ at limit watermark @lwm by issuing a
 * MODIFY_RMP that updates only the lwm field (per the bitmask).
 */
static int arm_rmp_cmd(struct mlx5_core_dev *dev,
		       struct mlx5_core_srq *srq,
		       u16 lwm)
{
	void *mbox;
	void *rmpc;
	void *wq;
	void *bitmask;
	int err;

	mbox = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
	if (!mbox)
		return -ENOMEM;

	rmpc = MLX5_ADDR_OF(modify_rmp_in, mbox, ctx);
	bitmask = MLX5_ADDR_OF(modify_rmp_in, mbox, bitmask);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(modify_rmp_in, mbox, rmp_state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, mbox, rmpn, srq->srqn);
	MLX5_SET(wq, wq, lwm, lwm);
	/* Tell firmware which context field is being modified. */
	MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);

	err = mlx5_core_modify_rmp(dev, mbox, MLX5_ST_SZ_BYTES(modify_rmp_in));

	kvfree(mbox);
	return err;
}
393 
query_rmp_cmd(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_query_srq_mbox_out * out)394 static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
395 			 struct mlx5_query_srq_mbox_out *out)
396 {
397 	u32 *rmp_out;
398 	void *rmpc;
399 	void *srqc;
400 	int err;
401 
402 	rmp_out =  mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
403 	if (!rmp_out)
404 		return -ENOMEM;
405 
406 	err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
407 	if (err)
408 		goto out;
409 
410 	srqc = MLX5_ADDR_OF(query_srq_out, out,	    srq_context_entry);
411 	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
412 	rmpc_srqc_reformat(srqc, rmpc, false);
413 
414 out:
415 	kvfree(rmp_out);
416 	return err;
417 }
418 
create_srq_split(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_create_srq_mbox_in * in,int inlen,int is_xrc)419 static int create_srq_split(struct mlx5_core_dev *dev,
420 			    struct mlx5_core_srq *srq,
421 			    struct mlx5_create_srq_mbox_in *in,
422 			    int inlen, int is_xrc)
423 {
424 	if (!dev->issi)
425 		return create_srq_cmd(dev, srq, in, inlen);
426 	else if (srq->common.res == MLX5_RES_XSRQ)
427 		return create_xrc_srq_cmd(dev, srq, in, inlen);
428 	else
429 		return create_rmp_cmd(dev, srq, in, inlen);
430 }
431 
destroy_srq_split(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq)432 static int destroy_srq_split(struct mlx5_core_dev *dev,
433 			     struct mlx5_core_srq *srq)
434 {
435 	if (!dev->issi)
436 		return destroy_srq_cmd(dev, srq);
437 	else if (srq->common.res == MLX5_RES_XSRQ)
438 		return destroy_xrc_srq_cmd(dev, srq);
439 	else
440 		return destroy_rmp_cmd(dev, srq);
441 }
442 
mlx5_core_create_srq(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_create_srq_mbox_in * in,int inlen,int is_xrc)443 int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
444 			 struct mlx5_create_srq_mbox_in *in, int inlen,
445 			 int is_xrc)
446 {
447 	int err;
448 	struct mlx5_srq_table *table = &dev->priv.srq_table;
449 
450 	srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
451 
452 	err = create_srq_split(dev, srq, in, inlen, is_xrc);
453 	if (err)
454 		return err;
455 
456 	atomic_set(&srq->refcount, 1);
457 	init_completion(&srq->free);
458 
459 	spin_lock_irq(&table->lock);
460 	err = radix_tree_insert(&table->tree, srq->srqn, srq);
461 	spin_unlock_irq(&table->lock);
462 	if (err) {
463 		mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
464 		goto err_destroy_srq_split;
465 	}
466 
467 	return 0;
468 
469 err_destroy_srq_split:
470 	destroy_srq_split(dev, srq);
471 
472 	return err;
473 }
474 EXPORT_SYMBOL(mlx5_core_create_srq);
475 
/* Unregister and destroy an SRQ.
 *
 * Removes the SRQ from the radix tree first so no new references can
 * be taken, destroys the hardware object, then drops the creation
 * reference and waits for any outstanding event-handler references
 * (mlx5_srq_event / mlx5_core_get_srq) to complete srq->free.
 *
 * Returns -EINVAL if @srq was not registered, or the destroy command's
 * error code.
 */
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	spin_lock_irq(&table->lock);
	tmp = radix_tree_delete(&table->tree, srq->srqn);
	spin_unlock_irq(&table->lock);
	if (!tmp) {
		mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
		return -EINVAL;
	}
	if (tmp != srq) {
		/* Tree held a different object under our number. */
		mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
		return -EINVAL;
	}

	err = destroy_srq_split(dev, srq);
	if (err)
		return err;

	/* Drop the creation reference, then wait for all others. */
	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	return 0;
}
505 
mlx5_core_query_srq(struct mlx5_core_dev * dev,struct mlx5_core_srq * srq,struct mlx5_query_srq_mbox_out * out)506 int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
507 			struct mlx5_query_srq_mbox_out *out)
508 {
509 	if (!dev->issi)
510 		return query_srq_cmd(dev, srq, out);
511 	else if (srq->common.res == MLX5_RES_XSRQ)
512 		return query_xrc_srq_cmd(dev, srq, out);
513 	else
514 		return query_rmp_cmd(dev, srq, out);
515 }
516 EXPORT_SYMBOL(mlx5_core_query_srq);
517 
/* Arm the SRQ at limit watermark @lwm, dispatching on the device's
 * command set; @is_srq is only meaningful for the legacy path.
 */
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
		      u16 lwm, int is_srq)
{
	if (!dev->issi)
		return arm_srq_cmd(dev, srq, lwm, is_srq);

	if (srq->common.res == MLX5_RES_XSRQ)
		return arm_xrc_srq_cmd(dev, srq, lwm);

	return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);
529 
/* Initialize the per-device SRQ table: empty radix tree plus the lock
 * guarding it.  GFP_ATOMIC because lookups/insertions run under
 * spinlocks (see mlx5_srq_event / mlx5_core_create_srq).
 */
void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
	struct mlx5_srq_table *table = &dev->priv.srq_table;

	memset(table, 0, sizeof(*table));
	spin_lock_init(&table->lock);
	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
538 
/* Teardown counterpart of mlx5_init_srq_table(); by this point every
 * SRQ must already have been destroyed, so there is nothing to free.
 */
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
	/* nothing */
}
543