/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <asm-generic/kmap_types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"

enum {
	CMD_IF_REV = 5,
};

enum {
	CMD_MODE_POLLING,
	CMD_MODE_EVENTS
};

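/*
 * Sizes of the pre-allocated message cache lists.  LONG_LIST_SIZE
 * appears to be chosen so that a "large" cached message can carry the
 * 8-byte page pointers for 2GB worth of PAGE_SIZE pages (as moved by
 * MANAGE_PAGES), plus the 16-byte inline part and one data block.
 */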
enum {
	NUM_LONG_LISTS	  = 2,
	NUM_MED_LISTS	  = 64,
	LONG_LIST_SIZE	  = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
				MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	  = 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};

enum {
	MLX5_CMD_STAT_OK			= 0x0,
	MLX5_CMD_STAT_INT_ERR			= 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR		= 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR		= 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR		= 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR		= 0x5,
	MLX5_CMD_STAT_RES_BUSY			= 0x6,
	MLX5_CMD_STAT_LIM_ERR			= 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR		= 0x9,
	MLX5_CMD_STAT_IX_ERR			= 0xa,
	MLX5_CMD_STAT_NO_RES_ERR		= 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR		= 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR		= 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR		= 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR		= 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR	= 0x40,
};

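/*
 * Allocate and fill a command work entry.  Uses GFP_ATOMIC when a
 * completion callback is supplied, since asynchronous commands may be
 * issued from contexts that cannot sleep.
 */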
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue = page_queue;

	return ent;
}

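/*
 * Tokens are 8-bit tags written into the command descriptor and echoed
 * back by the hardware; the counter skips 0 so a valid entry always
 * carries a non-zero token.
 */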
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

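/*
 * alloc_ent()/free_ent() manage the command queue slots through a
 * simple bitmap: a set bit means the slot is free.  The last slot is
 * reserved for the pages queue and is never handed out here.
 */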
static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

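/*
 * Command integrity is protected by an XOR-8 checksum: each signature
 * byte is set so that the bytes of a valid descriptor or mailbox block
 * XOR to 0xff.  verify_block_sig() checks both the control fields and
 * the whole block against that invariant.
 */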
static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}

static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token, csum);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}

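/*
 * Poll the ownership bit of the command descriptor until firmware
 * hands it back to software, sleeping between samples; allows the
 * device an extra second beyond the nominal command timeout.
 */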
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

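/*
 * Verify the XOR-8 signatures of a completed command: first the
 * descriptor itself, then every mailbox block chained off the output
 * message.
 */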
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

const char *mlx5_command_str(int command)
{
	switch (command) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
		return "QUERY_HCA_CAP";

	case MLX5_CMD_OP_SET_HCA_CAP:
		return "SET_HCA_CAP";

	case MLX5_CMD_OP_QUERY_ADAPTER:
		return "QUERY_ADAPTER";

	case MLX5_CMD_OP_INIT_HCA:
		return "INIT_HCA";

	case MLX5_CMD_OP_TEARDOWN_HCA:
		return "TEARDOWN_HCA";

	case MLX5_CMD_OP_ENABLE_HCA:
		return "ENABLE_HCA";

	case MLX5_CMD_OP_DISABLE_HCA:
		return "DISABLE_HCA";

	case MLX5_CMD_OP_QUERY_PAGES:
		return "QUERY_PAGES";

	case MLX5_CMD_OP_MANAGE_PAGES:
		return "MANAGE_PAGES";

	case MLX5_CMD_OP_CREATE_MKEY:
		return "CREATE_MKEY";

	case MLX5_CMD_OP_QUERY_MKEY:
		return "QUERY_MKEY";

	case MLX5_CMD_OP_DESTROY_MKEY:
		return "DESTROY_MKEY";

	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
		return "QUERY_SPECIAL_CONTEXTS";

	case MLX5_CMD_OP_CREATE_EQ:
		return "CREATE_EQ";

	case MLX5_CMD_OP_DESTROY_EQ:
		return "DESTROY_EQ";

	case MLX5_CMD_OP_QUERY_EQ:
		return "QUERY_EQ";

	case MLX5_CMD_OP_CREATE_CQ:
		return "CREATE_CQ";

	case MLX5_CMD_OP_DESTROY_CQ:
		return "DESTROY_CQ";

	case MLX5_CMD_OP_QUERY_CQ:
		return "QUERY_CQ";

	case MLX5_CMD_OP_MODIFY_CQ:
		return "MODIFY_CQ";

	case MLX5_CMD_OP_CREATE_QP:
		return "CREATE_QP";

	case MLX5_CMD_OP_DESTROY_QP:
		return "DESTROY_QP";

	case MLX5_CMD_OP_RST2INIT_QP:
		return "RST2INIT_QP";

	case MLX5_CMD_OP_INIT2RTR_QP:
		return "INIT2RTR_QP";

	case MLX5_CMD_OP_RTR2RTS_QP:
		return "RTR2RTS_QP";

	case MLX5_CMD_OP_RTS2RTS_QP:
		return "RTS2RTS_QP";

	case MLX5_CMD_OP_SQERR2RTS_QP:
		return "SQERR2RTS_QP";

	case MLX5_CMD_OP_2ERR_QP:
		return "2ERR_QP";

	case MLX5_CMD_OP_2RST_QP:
		return "2RST_QP";

	case MLX5_CMD_OP_QUERY_QP:
		return "QUERY_QP";

	case MLX5_CMD_OP_MAD_IFC:
		return "MAD_IFC";

	case MLX5_CMD_OP_INIT2INIT_QP:
		return "INIT2INIT_QP";

	case MLX5_CMD_OP_CREATE_PSV:
		return "CREATE_PSV";

	case MLX5_CMD_OP_DESTROY_PSV:
		return "DESTROY_PSV";

	case MLX5_CMD_OP_CREATE_SRQ:
		return "CREATE_SRQ";

	case MLX5_CMD_OP_DESTROY_SRQ:
		return "DESTROY_SRQ";

	case MLX5_CMD_OP_QUERY_SRQ:
		return "QUERY_SRQ";

	case MLX5_CMD_OP_ARM_RQ:
		return "ARM_RQ";

	case MLX5_CMD_OP_RESIZE_SRQ:
		return "RESIZE_SRQ";

	case MLX5_CMD_OP_ALLOC_PD:
		return "ALLOC_PD";

	case MLX5_CMD_OP_DEALLOC_PD:
		return "DEALLOC_PD";

	case MLX5_CMD_OP_ALLOC_UAR:
		return "ALLOC_UAR";

	case MLX5_CMD_OP_DEALLOC_UAR:
		return "DEALLOC_UAR";

	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return "ATTACH_TO_MCG";

	case MLX5_CMD_OP_DETACH_FROM_MCG:
		return "DETACH_FROM_MCG";

	case MLX5_CMD_OP_ALLOC_XRCD:
		return "ALLOC_XRCD";

	case MLX5_CMD_OP_DEALLOC_XRCD:
		return "DEALLOC_XRCD";

	case MLX5_CMD_OP_ACCESS_REG:
		return "ACCESS_REG";

	default: return "unknown command opcode";
	}
}

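/*
 * Debug dump of a command's input or output: the inline part of the
 * descriptor first, then each chained mailbox block, either data-only
 * or with the protection-block framing depending on the debug mask.
 */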
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	u32 offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}

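/*
 * Workqueue handler that actually issues a command: grab a queue slot
 * (or the dedicated pages slot), build the hardware descriptor, sign
 * it, hand ownership to firmware and ring the doorbell.  In polling
 * mode the completion is also reaped here.
 */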
static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();
	/* if not in polling don't use ent after this point */
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}

static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}

static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else {
		if (!wait_for_completion_timeout(&ent->done, timeout))
			err = -ETIMEDOUT;
		else
			err = 0;
	}
	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

/*  Notes:
 *    1. Callback functions may not sleep
 *    2. Page queue commands do not support asynchronous completion
 */
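/*
 * Dispatch a command and, for synchronous callers, wait for it and
 * record per-opcode latency statistics.  Page-queue commands bypass
 * the workqueue and run in the caller's context.
 */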
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (!callback) {
		err = wait_func(dev, ent);
		if (err == -ETIMEDOUT)
			goto out;

		ds = ent->ts2 - ent->ts1;
		op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
		if (op < ARRAY_SIZE(cmd->stats)) {
			stats = &cmd->stats[op];
			spin_lock_irq(&stats->lock);
			stats->sum += ds;
			++stats->n;
			spin_unlock_irq(&stats->lock);
		}
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
				   "fw exec time for %s is %lld nsec\n",
				   mlx5_command_str(op), ds);
		*status = ent->status;
		free_cmd(ent);
	}

	return err;

out_free:
	free_cmd(ent);
out:
	return err;
}

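/* debugfs "run" trigger: writing "go" executes the staged command */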
static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

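/*
 * Copy a linear caller buffer into a command message: the first bytes
 * land in the inline part of the descriptor, the remainder is spread
 * across the chained mailbox blocks.  mlx5_copy_from_msg() is the
 * mirror operation for command output.
 */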
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

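/*
 * Build a command message big enough for @size bytes: whatever does
 * not fit in the inline part is split into MLX5_CMD_DATA_BLOCK_SIZE
 * mailboxes, linked both for the driver (tmp->next) and for the
 * device via DMA addresses and descending block numbers.
 */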
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
				  struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;
	int err;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		err = -EFAULT;
		goto out;
	}
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;

out:
	kfree(ptr);
	return err;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8];
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	outlen_str[count] = 0;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}

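/*
 * Switch the command interface between polling and event-driven
 * completion.  Taking every entry of cmd->sem plus pages_sem drains
 * all in-flight commands, so the mode can be flipped safely.
 */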
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_EVENTS;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);
	cmd->mode = CMD_MODE_POLLING;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

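/*
 * Completion handler, called from EQ processing in events mode or
 * directly from cmd_work_handler() in polling mode.  @vector carries
 * one bit per finished command slot; for each bit we copy out the
 * inline output, check signatures, update stats and either complete
 * the waiter or fire the asynchronous callback.
 */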
void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;

	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				ent->status = ent->lay->status_own >> 1;
				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);
			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}

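/*
 * Pick a command message for @in_size bytes, preferring the
 * pre-allocated medium/large cache lists and falling back to a fresh
 * allocation when the matching list is empty.
 */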
static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}

static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

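/*
 * Common entry point for both synchronous and asynchronous execution:
 * wrap the caller's buffers in command messages, invoke the command
 * (MANAGE_PAGES goes through the dedicated pages queue) and, for
 * synchronous calls, copy the output back and release the messages.
 * For asynchronous calls the completion handler frees the messages.
 */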
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}

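/*
 * A sketch of a typical synchronous caller (hypothetical example; the
 * inbox/outbox struct and opcode names are illustrative of the
 * pre-MLX5_SET() mailbox style of this era, not taken from this file):
 *
 *	struct mlx5_enable_hca_mbox_in in;
 *	struct mlx5_enable_hca_mbox_out out;
 *	int err;
 *
 *	memset(&in, 0, sizeof(in));
 *	memset(&out, 0, sizeof(out));
 *	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 *	if (!err)
 *		err = mlx5_cmd_status_to_err(&out.hdr);
 */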
int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}

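/*
 * The command queue must sit in a single 4K page from the device's
 * point of view.  Try a plain coherent allocation first; if it is not
 * MLX5_ADAPTER_PAGE_SIZE aligned, reallocate with enough slack to
 * align both the CPU pointer and the DMA address by hand.
 */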
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}

/* this will be available till all the commands use set/get macros */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	return cmd_status_to_err(hdr->status);
}

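/*
 * Same as mlx5_cmd_status_to_err() but for the newer outbox layout,
 * where the status lives in the top byte of the first dword and the
 * syndrome in the second dword.
 */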
int mlx5_cmd_status_to_err_v2(void *ptr)
{
	u32	syndrome;
	u8	status;

	status = be32_to_cpu(*(__be32 *)ptr) >> 24;
	if (!status)
		return 0;

	syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(status), status, syndrome);

	return cmd_status_to_err(status);
}