1/*
2 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses.  You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 *     Redistribution and use in source and binary forms, with or
13 *     without modification, are permitted provided that the following
14 *     conditions are met:
15 *
16 *      - Redistributions of source code must retain the above
17 *        copyright notice, this list of conditions and the following
18 *        disclaimer.
19 *
20 *      - Redistributions in binary form must reproduce the above
21 *        copyright notice, this list of conditions and the following
22 *        disclaimer in the documentation and/or other materials
23 *        provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/export.h>
38#include <linux/pci.h>
39#include <linux/errno.h>
40
41#include <linux/mlx4/cmd.h>
42#include <linux/mlx4/device.h>
43#include <linux/semaphore.h>
44#include <rdma/ib_smi.h>
45#include <linux/delay.h>
46
47#include <asm/io.h>
48
49#include "mlx4.h"
50#include "fw.h"
51#include "fw_qos.h"
52
53#define CMD_POLL_TOKEN 0xffff
54#define INBOX_MASK	0xffffffffffffff00ULL
55
56#define CMD_CHAN_VER 1
57#define CMD_CHAN_IF_REV 1
58
59enum {
60	/* command completed successfully: */
61	CMD_STAT_OK		= 0x00,
62	/* Internal error (such as a bus error) occurred while processing command: */
63	CMD_STAT_INTERNAL_ERR	= 0x01,
64	/* Operation/command not supported or opcode modifier not supported: */
65	CMD_STAT_BAD_OP		= 0x02,
66	/* Parameter not supported or parameter out of range: */
67	CMD_STAT_BAD_PARAM	= 0x03,
68	/* System not enabled or bad system state: */
69	CMD_STAT_BAD_SYS_STATE	= 0x04,
70	/* Attempt to access reserved or unallocaterd resource: */
71	CMD_STAT_BAD_RESOURCE	= 0x05,
72	/* Requested resource is currently executing a command, or is otherwise busy: */
73	CMD_STAT_RESOURCE_BUSY	= 0x06,
74	/* Required capability exceeds device limits: */
75	CMD_STAT_EXCEED_LIM	= 0x08,
76	/* Resource is not in the appropriate state or ownership: */
77	CMD_STAT_BAD_RES_STATE	= 0x09,
78	/* Index out of range: */
79	CMD_STAT_BAD_INDEX	= 0x0a,
80	/* FW image corrupted: */
81	CMD_STAT_BAD_NVMEM	= 0x0b,
82	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
83	CMD_STAT_ICM_ERROR	= 0x0c,
84	/* Attempt to modify a QP/EE which is not in the presumed state: */
85	CMD_STAT_BAD_QP_STATE   = 0x10,
86	/* Bad segment parameters (Address/Size): */
87	CMD_STAT_BAD_SEG_PARAM	= 0x20,
88	/* Memory Region has Memory Windows bound to: */
89	CMD_STAT_REG_BOUND	= 0x21,
90	/* HCA local attached memory not present: */
91	CMD_STAT_LAM_NOT_PRE	= 0x22,
92	/* Bad management packet (silently discarded): */
93	CMD_STAT_BAD_PKT	= 0x30,
94	/* More outstanding CQEs in CQ than new CQ size: */
95	CMD_STAT_BAD_SIZE	= 0x40,
96	/* Multi Function device support required: */
97	CMD_STAT_MULTI_FUNC_REQ	= 0x50,
98};
99
100enum {
101	HCR_IN_PARAM_OFFSET	= 0x00,
102	HCR_IN_MODIFIER_OFFSET	= 0x08,
103	HCR_OUT_PARAM_OFFSET	= 0x0c,
104	HCR_TOKEN_OFFSET	= 0x14,
105	HCR_STATUS_OFFSET	= 0x18,
106
107	HCR_OPMOD_SHIFT		= 12,
108	HCR_T_BIT		= 21,
109	HCR_E_BIT		= 22,
110	HCR_GO_BIT		= 23
111};
112
113enum {
114	GO_BIT_TIMEOUT_MSECS	= 10000
115};
116
117enum mlx4_vlan_transition {
118	MLX4_VLAN_TRANSITION_VST_VST = 0,
119	MLX4_VLAN_TRANSITION_VST_VGT = 1,
120	MLX4_VLAN_TRANSITION_VGT_VST = 2,
121	MLX4_VLAN_TRANSITION_VGT_VGT = 3,
122};
123
124
125struct mlx4_cmd_context {
126	struct completion	done;
127	int			result;
128	int			next;
129	u64			out_param;
130	u16			token;
131	u8			fw_status;
132};
133
134static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
135				    struct mlx4_vhcr_cmd *in_vhcr);
136
137static int mlx4_status_to_errno(u8 status)
138{
139	static const int trans_table[] = {
140		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
141		[CMD_STAT_BAD_OP]	  = -EPERM,
142		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
143		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
144		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
145		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
146		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
147		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
148		[CMD_STAT_BAD_INDEX]	  = -EBADF,
149		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
150		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
151		[CMD_STAT_BAD_QP_STATE]   = -EINVAL,
152		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
153		[CMD_STAT_REG_BOUND]	  = -EBUSY,
154		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
155		[CMD_STAT_BAD_PKT]	  = -EINVAL,
156		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
157		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
158	};
159
160	if (status >= ARRAY_SIZE(trans_table) ||
161	    (status != CMD_STAT_OK && trans_table[status] == 0))
162		return -EIO;
163
164	return trans_table[status];
165}
166
167static u8 mlx4_errno_to_status(int errno)
168{
169	switch (errno) {
170	case -EPERM:
171		return CMD_STAT_BAD_OP;
172	case -EINVAL:
173		return CMD_STAT_BAD_PARAM;
174	case -ENXIO:
175		return CMD_STAT_BAD_SYS_STATE;
176	case -EBUSY:
177		return CMD_STAT_RESOURCE_BUSY;
178	case -ENOMEM:
179		return CMD_STAT_EXCEED_LIM;
180	case -ENFILE:
181		return CMD_STAT_ICM_ERROR;
182	default:
183		return CMD_STAT_INTERNAL_ERR;
184	}
185}
186
187static int mlx4_internal_err_ret_value(struct mlx4_dev *dev, u16 op,
188				       u8 op_modifier)
189{
190	switch (op) {
191	case MLX4_CMD_UNMAP_ICM:
192	case MLX4_CMD_UNMAP_ICM_AUX:
193	case MLX4_CMD_UNMAP_FA:
194	case MLX4_CMD_2RST_QP:
195	case MLX4_CMD_HW2SW_EQ:
196	case MLX4_CMD_HW2SW_CQ:
197	case MLX4_CMD_HW2SW_SRQ:
198	case MLX4_CMD_HW2SW_MPT:
199	case MLX4_CMD_CLOSE_HCA:
200	case MLX4_QP_FLOW_STEERING_DETACH:
201	case MLX4_CMD_FREE_RES:
202	case MLX4_CMD_CLOSE_PORT:
203		return CMD_STAT_OK;
204
205	case MLX4_CMD_QP_ATTACH:
206		/* On Detach case return success */
207		if (op_modifier == 0)
208			return CMD_STAT_OK;
209		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
210
211	default:
212		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
213	}
214}
215
216static int mlx4_closing_cmd_fatal_error(u16 op, u8 fw_status)
217{
218	/* Any error during the closing commands below is considered fatal */
219	if (op == MLX4_CMD_CLOSE_HCA ||
220	    op == MLX4_CMD_HW2SW_EQ ||
221	    op == MLX4_CMD_HW2SW_CQ ||
222	    op == MLX4_CMD_2RST_QP ||
223	    op == MLX4_CMD_HW2SW_SRQ ||
224	    op == MLX4_CMD_SYNC_TPT ||
225	    op == MLX4_CMD_UNMAP_ICM ||
226	    op == MLX4_CMD_UNMAP_ICM_AUX ||
227	    op == MLX4_CMD_UNMAP_FA)
228		return 1;
229	/* Error on MLX4_CMD_HW2SW_MPT is fatal except when fw status equals
230	  * CMD_STAT_REG_BOUND.
231	  * This status indicates that memory region has memory windows bound to it
232	  * which may result from invalid user space usage and is not fatal.
233	  */
234	if (op == MLX4_CMD_HW2SW_MPT && fw_status != CMD_STAT_REG_BOUND)
235		return 1;
236	return 0;
237}
238
239static int mlx4_cmd_reset_flow(struct mlx4_dev *dev, u16 op, u8 op_modifier,
240			       int err)
241{
242	/* Only if reset flow is really active return code is based on
243	  * command, otherwise current error code is returned.
244	  */
245	if (mlx4_internal_err_reset) {
246		mlx4_enter_error_state(dev->persist);
247		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
248	}
249
250	return err;
251}
252
253static int comm_pending(struct mlx4_dev *dev)
254{
255	struct mlx4_priv *priv = mlx4_priv(dev);
256	u32 status = readl(&priv->mfunc.comm->slave_read);
257
258	return (swab32(status) >> 31) != priv->cmd.comm_toggle;
259}
260
261static int mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
262{
263	struct mlx4_priv *priv = mlx4_priv(dev);
264	u32 val;
265
266	/* To avoid writing to unknown addresses after the device state was
267	 * changed to internal error and the function was rest,
268	 * check the INTERNAL_ERROR flag which is updated under
269	 * device_state_mutex lock.
270	 */
271	mutex_lock(&dev->persist->device_state_mutex);
272
273	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
274		mutex_unlock(&dev->persist->device_state_mutex);
275		return -EIO;
276	}
277
278	priv->cmd.comm_toggle ^= 1;
279	val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
280	__raw_writel((__force u32) cpu_to_be32(val),
281		     &priv->mfunc.comm->slave_write);
282	mmiowb();
283	mutex_unlock(&dev->persist->device_state_mutex);
284	return 0;
285}
286
287static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
288		       unsigned long timeout)
289{
290	struct mlx4_priv *priv = mlx4_priv(dev);
291	unsigned long end;
292	int err = 0;
293	int ret_from_pending = 0;
294
295	/* First, verify that the master reports correct status */
296	if (comm_pending(dev)) {
297		mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
298			  priv->cmd.comm_toggle, cmd);
299		return -EAGAIN;
300	}
301
302	/* Write command */
303	down(&priv->cmd.poll_sem);
304	if (mlx4_comm_cmd_post(dev, cmd, param)) {
305		/* Only in case the device state is INTERNAL_ERROR,
306		 * mlx4_comm_cmd_post returns with an error
307		 */
308		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
309		goto out;
310	}
311
312	end = msecs_to_jiffies(timeout) + jiffies;
313	while (comm_pending(dev) && time_before(jiffies, end))
314		cond_resched();
315	ret_from_pending = comm_pending(dev);
316	if (ret_from_pending) {
317		/* check if the slave is trying to boot in the middle of
318		 * FLR process. The only non-zero result in the RESET command
319		 * is MLX4_DELAY_RESET_SLAVE*/
320		if ((MLX4_COMM_CMD_RESET == cmd)) {
321			err = MLX4_DELAY_RESET_SLAVE;
322			goto out;
323		} else {
324			mlx4_warn(dev, "Communication channel command 0x%x timed out\n",
325				  cmd);
326			err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
327		}
328	}
329
330	if (err)
331		mlx4_enter_error_state(dev->persist);
332out:
333	up(&priv->cmd.poll_sem);
334	return err;
335}
336
337static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 vhcr_cmd,
338			      u16 param, u16 op, unsigned long timeout)
339{
340	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
341	struct mlx4_cmd_context *context;
342	unsigned long end;
343	int err = 0;
344
345	down(&cmd->event_sem);
346
347	spin_lock(&cmd->context_lock);
348	BUG_ON(cmd->free_head < 0);
349	context = &cmd->context[cmd->free_head];
350	context->token += cmd->token_mask + 1;
351	cmd->free_head = context->next;
352	spin_unlock(&cmd->context_lock);
353
354	reinit_completion(&context->done);
355
356	if (mlx4_comm_cmd_post(dev, vhcr_cmd, param)) {
357		/* Only in case the device state is INTERNAL_ERROR,
358		 * mlx4_comm_cmd_post returns with an error
359		 */
360		err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
361		goto out;
362	}
363
364	if (!wait_for_completion_timeout(&context->done,
365					 msecs_to_jiffies(timeout))) {
366		mlx4_warn(dev, "communication channel command 0x%x (op=0x%x) timed out\n",
367			  vhcr_cmd, op);
368		goto out_reset;
369	}
370
371	err = context->result;
372	if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
373		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
374			 vhcr_cmd, context->fw_status);
375		if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
376			goto out_reset;
377	}
378
379	/* wait for comm channel ready
380	 * this is necessary for prevention the race
381	 * when switching between event to polling mode
382	 * Skipping this section in case the device is in FATAL_ERROR state,
383	 * In this state, no commands are sent via the comm channel until
384	 * the device has returned from reset.
385	 */
386	if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
387		end = msecs_to_jiffies(timeout) + jiffies;
388		while (comm_pending(dev) && time_before(jiffies, end))
389			cond_resched();
390	}
391	goto out;
392
393out_reset:
394	err = mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
395	mlx4_enter_error_state(dev->persist);
396out:
397	spin_lock(&cmd->context_lock);
398	context->next = cmd->free_head;
399	cmd->free_head = context - cmd->context;
400	spin_unlock(&cmd->context_lock);
401
402	up(&cmd->event_sem);
403	return err;
404}
405
406int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
407		  u16 op, unsigned long timeout)
408{
409	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
410		return mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
411
412	if (mlx4_priv(dev)->cmd.use_events)
413		return mlx4_comm_cmd_wait(dev, cmd, param, op, timeout);
414	return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
415}
416
417static int cmd_pending(struct mlx4_dev *dev)
418{
419	u32 status;
420
421	if (pci_channel_offline(dev->persist->pdev))
422		return -EIO;
423
424	status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
425
426	return (status & swab32(1 << HCR_GO_BIT)) ||
427		(mlx4_priv(dev)->cmd.toggle ==
428		 !!(status & swab32(1 << HCR_T_BIT)));
429}
430
431static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
432			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
433			 int event)
434{
435	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
436	u32 __iomem *hcr = cmd->hcr;
437	int ret = -EIO;
438	unsigned long end;
439
440	mutex_lock(&dev->persist->device_state_mutex);
441	/* To avoid writing to unknown addresses after the device state was
442	  * changed to internal error and the chip was reset,
443	  * check the INTERNAL_ERROR flag which is updated under
444	  * device_state_mutex lock.
445	  */
446	if (pci_channel_offline(dev->persist->pdev) ||
447	    (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)) {
448		/*
449		 * Device is going through error recovery
450		 * and cannot accept commands.
451		 */
452		goto out;
453	}
454
455	end = jiffies;
456	if (event)
457		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
458
459	while (cmd_pending(dev)) {
460		if (pci_channel_offline(dev->persist->pdev)) {
461			/*
462			 * Device is going through error recovery
463			 * and cannot accept commands.
464			 */
465			goto out;
466		}
467
468		if (time_after_eq(jiffies, end)) {
469			mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
470			goto out;
471		}
472		cond_resched();
473	}
474
475	/*
476	 * We use writel (instead of something like memcpy_toio)
477	 * because writes of less than 32 bits to the HCR don't work
478	 * (and some architectures such as ia64 implement memcpy_toio
479	 * in terms of writeb).
480	 */
481	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
482	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
483	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
484	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
485	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
486	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);
487
488	/* __raw_writel may not order writes. */
489	wmb();
490
491	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
492					       (cmd->toggle << HCR_T_BIT)	|
493					       (event ? (1 << HCR_E_BIT) : 0)	|
494					       (op_modifier << HCR_OPMOD_SHIFT) |
495					       op), hcr + 6);
496
497	/*
498	 * Make sure that our HCR writes don't get mixed in with
499	 * writes from another CPU starting a FW command.
500	 */
501	mmiowb();
502
503	cmd->toggle = cmd->toggle ^ 1;
504
505	ret = 0;
506
507out:
508	if (ret)
509		mlx4_warn(dev, "Could not post command 0x%x: ret=%d, in_param=0x%llx, in_mod=0x%x, op_mod=0x%x\n",
510			  op, ret, in_param, in_modifier, op_modifier);
511	mutex_unlock(&dev->persist->device_state_mutex);
512
513	return ret;
514}
515
516static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
517			  int out_is_imm, u32 in_modifier, u8 op_modifier,
518			  u16 op, unsigned long timeout)
519{
520	struct mlx4_priv *priv = mlx4_priv(dev);
521	struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
522	int ret;
523
524	mutex_lock(&priv->cmd.slave_cmd_mutex);
525
526	vhcr->in_param = cpu_to_be64(in_param);
527	vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
528	vhcr->in_modifier = cpu_to_be32(in_modifier);
529	vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
530	vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
531	vhcr->status = 0;
532	vhcr->flags = !!(priv->cmd.use_events) << 6;
533
534	if (mlx4_is_master(dev)) {
535		ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
536		if (!ret) {
537			if (out_is_imm) {
538				if (out_param)
539					*out_param =
540						be64_to_cpu(vhcr->out_param);
541				else {
542					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
543						 op);
544					vhcr->status = CMD_STAT_BAD_PARAM;
545				}
546			}
547			ret = mlx4_status_to_errno(vhcr->status);
548		}
549		if (ret &&
550		    dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
551			ret = mlx4_internal_err_ret_value(dev, op, op_modifier);
552	} else {
553		ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0, op,
554				    MLX4_COMM_TIME + timeout);
555		if (!ret) {
556			if (out_is_imm) {
557				if (out_param)
558					*out_param =
559						be64_to_cpu(vhcr->out_param);
560				else {
561					mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
562						 op);
563					vhcr->status = CMD_STAT_BAD_PARAM;
564				}
565			}
566			ret = mlx4_status_to_errno(vhcr->status);
567		} else {
568			if (dev->persist->state &
569			    MLX4_DEVICE_STATE_INTERNAL_ERROR)
570				ret = mlx4_internal_err_ret_value(dev, op,
571								  op_modifier);
572			else
573				mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n", op);
574		}
575	}
576
577	mutex_unlock(&priv->cmd.slave_cmd_mutex);
578	return ret;
579}
580
581static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
582			 int out_is_imm, u32 in_modifier, u8 op_modifier,
583			 u16 op, unsigned long timeout)
584{
585	struct mlx4_priv *priv = mlx4_priv(dev);
586	void __iomem *hcr = priv->cmd.hcr;
587	int err = 0;
588	unsigned long end;
589	u32 stat;
590
591	down(&priv->cmd.poll_sem);
592
593	if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
594		/*
595		 * Device is going through error recovery
596		 * and cannot accept commands.
597		 */
598		err = mlx4_internal_err_ret_value(dev, op, op_modifier);
599		goto out;
600	}
601
602	if (out_is_imm && !out_param) {
603		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
604			 op);
605		err = -EINVAL;
606		goto out;
607	}
608
609	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
610			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
611	if (err)
612		goto out_reset;
613
614	end = msecs_to_jiffies(timeout) + jiffies;
615	while (cmd_pending(dev) && time_before(jiffies, end)) {
616		if (pci_channel_offline(dev->persist->pdev)) {
617			/*
618			 * Device is going through error recovery
619			 * and cannot accept commands.
620			 */
621			err = -EIO;
622			goto out_reset;
623		}
624
625		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
626			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
627			goto out;
628		}
629
630		cond_resched();
631	}
632
633	if (cmd_pending(dev)) {
634		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
635			  op);
636		err = -EIO;
637		goto out_reset;
638	}
639
640	if (out_is_imm)
641		*out_param =
642			(u64) be32_to_cpu((__force __be32)
643					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
644			(u64) be32_to_cpu((__force __be32)
645					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
646	stat = be32_to_cpu((__force __be32)
647			   __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
648	err = mlx4_status_to_errno(stat);
649	if (err) {
650		mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
651			 op, stat);
652		if (mlx4_closing_cmd_fatal_error(op, stat))
653			goto out_reset;
654		goto out;
655	}
656
657out_reset:
658	if (err)
659		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
660out:
661	up(&priv->cmd.poll_sem);
662	return err;
663}
664
665void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
666{
667	struct mlx4_priv *priv = mlx4_priv(dev);
668	struct mlx4_cmd_context *context =
669		&priv->cmd.context[token & priv->cmd.token_mask];
670
671	/* previously timed out command completing at long last */
672	if (token != context->token)
673		return;
674
675	context->fw_status = status;
676	context->result    = mlx4_status_to_errno(status);
677	context->out_param = out_param;
678
679	complete(&context->done);
680}
681
682static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
683			 int out_is_imm, u32 in_modifier, u8 op_modifier,
684			 u16 op, unsigned long timeout)
685{
686	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
687	struct mlx4_cmd_context *context;
688	int err = 0;
689
690	down(&cmd->event_sem);
691
692	spin_lock(&cmd->context_lock);
693	BUG_ON(cmd->free_head < 0);
694	context = &cmd->context[cmd->free_head];
695	context->token += cmd->token_mask + 1;
696	cmd->free_head = context->next;
697	spin_unlock(&cmd->context_lock);
698
699	if (out_is_imm && !out_param) {
700		mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
701			 op);
702		err = -EINVAL;
703		goto out;
704	}
705
706	reinit_completion(&context->done);
707
708	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
709			    in_modifier, op_modifier, op, context->token, 1);
710	if (err)
711		goto out_reset;
712
713	if (!wait_for_completion_timeout(&context->done,
714					 msecs_to_jiffies(timeout))) {
715		mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
716			  op);
717		if (op == MLX4_CMD_NOP) {
718			err = -EBUSY;
719			goto out;
720		} else {
721			err = -EIO;
722			goto out_reset;
723		}
724	}
725
726	err = context->result;
727	if (err) {
728		/* Since we do not want to have this error message always
729		 * displayed at driver start when there are ConnectX2 HCAs
730		 * on the host, we deprecate the error message for this
731		 * specific command/input_mod/opcode_mod/fw-status to be debug.
732		 */
733		if (op == MLX4_CMD_SET_PORT &&
734		    (in_modifier == 1 || in_modifier == 2) &&
735		    op_modifier == MLX4_SET_PORT_IB_OPCODE &&
736		    context->fw_status == CMD_STAT_BAD_SIZE)
737			mlx4_dbg(dev, "command 0x%x failed: fw status = 0x%x\n",
738				 op, context->fw_status);
739		else
740			mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
741				 op, context->fw_status);
742		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
743			err = mlx4_internal_err_ret_value(dev, op, op_modifier);
744		else if (mlx4_closing_cmd_fatal_error(op, context->fw_status))
745			goto out_reset;
746
747		goto out;
748	}
749
750	if (out_is_imm)
751		*out_param = context->out_param;
752
753out_reset:
754	if (err)
755		err = mlx4_cmd_reset_flow(dev, op, op_modifier, err);
756out:
757	spin_lock(&cmd->context_lock);
758	context->next = cmd->free_head;
759	cmd->free_head = context - cmd->context;
760	spin_unlock(&cmd->context_lock);
761
762	up(&cmd->event_sem);
763	return err;
764}
765
766int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
767	       int out_is_imm, u32 in_modifier, u8 op_modifier,
768	       u16 op, unsigned long timeout, int native)
769{
770	if (pci_channel_offline(dev->persist->pdev))
771		return mlx4_cmd_reset_flow(dev, op, op_modifier, -EIO);
772
773	if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
774		if (dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR)
775			return mlx4_internal_err_ret_value(dev, op,
776							  op_modifier);
777		if (mlx4_priv(dev)->cmd.use_events)
778			return mlx4_cmd_wait(dev, in_param, out_param,
779					     out_is_imm, in_modifier,
780					     op_modifier, op, timeout);
781		else
782			return mlx4_cmd_poll(dev, in_param, out_param,
783					     out_is_imm, in_modifier,
784					     op_modifier, op, timeout);
785	}
786	return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
787			      in_modifier, op_modifier, op, timeout);
788}
789EXPORT_SYMBOL_GPL(__mlx4_cmd);
790
791
792int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
793{
794	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
795			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
796}
797
798static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
799			   int slave, u64 slave_addr,
800			   int size, int is_read)
801{
802	u64 in_param;
803	u64 out_param;
804
805	if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
806	    (slave & ~0x7f) | (size & 0xff)) {
807		mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
808			 slave_addr, master_addr, slave, size);
809		return -EINVAL;
810	}
811
812	if (is_read) {
813		in_param = (u64) slave | slave_addr;
814		out_param = (u64) dev->caps.function | master_addr;
815	} else {
816		in_param = (u64) dev->caps.function | master_addr;
817		out_param = (u64) slave | slave_addr;
818	}
819
820	return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
821			    MLX4_CMD_ACCESS_MEM,
822			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
823}
824
825static int query_pkey_block(struct mlx4_dev *dev, u8 port, u16 index, u16 *pkey,
826			       struct mlx4_cmd_mailbox *inbox,
827			       struct mlx4_cmd_mailbox *outbox)
828{
829	struct ib_smp *in_mad = (struct ib_smp *)(inbox->buf);
830	struct ib_smp *out_mad = (struct ib_smp *)(outbox->buf);
831	int err;
832	int i;
833
834	if (index & 0x1f)
835		return -EINVAL;
836
837	in_mad->attr_mod = cpu_to_be32(index / 32);
838
839	err = mlx4_cmd_box(dev, inbox->dma, outbox->dma, port, 3,
840			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
841			   MLX4_CMD_NATIVE);
842	if (err)
843		return err;
844
845	for (i = 0; i < 32; ++i)
846		pkey[i] = be16_to_cpu(((__be16 *) out_mad->data)[i]);
847
848	return err;
849}
850
851static int get_full_pkey_table(struct mlx4_dev *dev, u8 port, u16 *table,
852			       struct mlx4_cmd_mailbox *inbox,
853			       struct mlx4_cmd_mailbox *outbox)
854{
855	int i;
856	int err;
857
858	for (i = 0; i < dev->caps.pkey_table_len[port]; i += 32) {
859		err = query_pkey_block(dev, port, i, table + i, inbox, outbox);
860		if (err)
861			return err;
862	}
863
864	return 0;
865}
866#define PORT_CAPABILITY_LOCATION_IN_SMP 20
867#define PORT_STATE_OFFSET 32
868
869static enum ib_port_state vf_port_state(struct mlx4_dev *dev, int port, int vf)
870{
871	if (mlx4_get_slave_port_state(dev, vf, port) == SLAVE_PORT_UP)
872		return IB_PORT_ACTIVE;
873	else
874		return IB_PORT_DOWN;
875}
876
877static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
878				struct mlx4_vhcr *vhcr,
879				struct mlx4_cmd_mailbox *inbox,
880				struct mlx4_cmd_mailbox *outbox,
881				struct mlx4_cmd_info *cmd)
882{
883	struct ib_smp *smp = inbox->buf;
884	u32 index;
885	u8 port;
886	u8 opcode_modifier;
887	u16 *table;
888	int err;
889	int vidx, pidx;
890	int network_view;
891	struct mlx4_priv *priv = mlx4_priv(dev);
892	struct ib_smp *outsmp = outbox->buf;
893	__be16 *outtab = (__be16 *)(outsmp->data);
894	__be32 slave_cap_mask;
895	__be64 slave_node_guid;
896
897	port = vhcr->in_modifier;
898
899	/* network-view bit is for driver use only, and should not be passed to FW */
900	opcode_modifier = vhcr->op_modifier & ~0x8; /* clear netw view bit */
901	network_view = !!(vhcr->op_modifier & 0x8);
902
903	if (smp->base_version == 1 &&
904	    smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
905	    smp->class_version == 1) {
906		/* host view is paravirtualized */
907		if (!network_view && smp->method == IB_MGMT_METHOD_GET) {
908			if (smp->attr_id == IB_SMP_ATTR_PKEY_TABLE) {
909				index = be32_to_cpu(smp->attr_mod);
910				if (port < 1 || port > dev->caps.num_ports)
911					return -EINVAL;
912				table = kcalloc((dev->caps.pkey_table_len[port] / 32) + 1,
913						sizeof(*table) * 32, GFP_KERNEL);
914
915				if (!table)
916					return -ENOMEM;
917				/* need to get the full pkey table because the paravirtualized
918				 * pkeys may be scattered among several pkey blocks.
919				 */
920				err = get_full_pkey_table(dev, port, table, inbox, outbox);
921				if (!err) {
922					for (vidx = index * 32; vidx < (index + 1) * 32; ++vidx) {
923						pidx = priv->virt2phys_pkey[slave][port - 1][vidx];
924						outtab[vidx % 32] = cpu_to_be16(table[pidx]);
925					}
926				}
927				kfree(table);
928				return err;
929			}
930			if (smp->attr_id == IB_SMP_ATTR_PORT_INFO) {
931				/*get the slave specific caps:*/
932				/*do the command */
933				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
934					    vhcr->in_modifier, opcode_modifier,
935					    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
936				/* modify the response for slaves */
937				if (!err && slave != mlx4_master_func_num(dev)) {
938					u8 *state = outsmp->data + PORT_STATE_OFFSET;
939
940					*state = (*state & 0xf0) | vf_port_state(dev, port, slave);
941					slave_cap_mask = priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
942					memcpy(outsmp->data + PORT_CAPABILITY_LOCATION_IN_SMP, &slave_cap_mask, 4);
943				}
944				return err;
945			}
946			if (smp->attr_id == IB_SMP_ATTR_GUID_INFO) {
947				__be64 guid = mlx4_get_admin_guid(dev, slave,
948								  port);
949
950				/* set the PF admin guid to the FW/HW burned
951				 * GUID, if it wasn't yet set
952				 */
953				if (slave == 0 && guid == 0) {
954					smp->attr_mod = 0;
955					err = mlx4_cmd_box(dev,
956							   inbox->dma,
957							   outbox->dma,
958							   vhcr->in_modifier,
959							   opcode_modifier,
960							   vhcr->op,
961							   MLX4_CMD_TIME_CLASS_C,
962							   MLX4_CMD_NATIVE);
963					if (err)
964						return err;
965					mlx4_set_admin_guid(dev,
966							    *(__be64 *)outsmp->
967							    data, slave, port);
968				} else {
969					memcpy(outsmp->data, &guid, 8);
970				}
971
972				/* clean all other gids */
973				memset(outsmp->data + 8, 0, 56);
974				return 0;
975			}
976			if (smp->attr_id == IB_SMP_ATTR_NODE_INFO) {
977				err = mlx4_cmd_box(dev, inbox->dma, outbox->dma,
978					     vhcr->in_modifier, opcode_modifier,
979					     vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
980				if (!err) {
981					slave_node_guid =  mlx4_get_slave_node_guid(dev, slave);
982					memcpy(outsmp->data + 12, &slave_node_guid, 8);
983				}
984				return err;
985			}
986		}
987	}
988
989	/* Non-privileged VFs are only allowed "host" view LID-routed 'Get' MADs.
990	 * These are the MADs used by ib verbs (such as ib_query_gids).
991	 */
992	if (slave != mlx4_master_func_num(dev) &&
993	    !mlx4_vf_smi_enabled(dev, slave, port)) {
994		if (!(smp->mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED &&
995		      smp->method == IB_MGMT_METHOD_GET) || network_view) {
996			mlx4_err(dev, "Unprivileged slave %d is trying to execute a Subnet MGMT MAD, class 0x%x, method 0x%x, view=%s for attr 0x%x. Rejecting\n",
997				 slave, smp->method, smp->mgmt_class,
998				 network_view ? "Network" : "Host",
999				 be16_to_cpu(smp->attr_id));
1000			return -EPERM;
1001		}
1002	}
1003
1004	return mlx4_cmd_box(dev, inbox->dma, outbox->dma,
1005				    vhcr->in_modifier, opcode_modifier,
1006				    vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
1007}
1008
1009static int mlx4_CMD_EPERM_wrapper(struct mlx4_dev *dev, int slave,
1010		     struct mlx4_vhcr *vhcr,
1011		     struct mlx4_cmd_mailbox *inbox,
1012		     struct mlx4_cmd_mailbox *outbox,
1013		     struct mlx4_cmd_info *cmd)
1014{
1015	return -EPERM;
1016}
1017
1018int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
1019		     struct mlx4_vhcr *vhcr,
1020		     struct mlx4_cmd_mailbox *inbox,
1021		     struct mlx4_cmd_mailbox *outbox,
1022		     struct mlx4_cmd_info *cmd)
1023{
1024	u64 in_param;
1025	u64 out_param;
1026	int err;
1027
1028	in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
1029	out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
1030	if (cmd->encode_slave_id) {
1031		in_param &= 0xffffffffffffff00ll;
1032		in_param |= slave;
1033	}
1034
1035	err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
1036			 vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
1037			 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
1038
1039	if (cmd->out_is_imm)
1040		vhcr->out_param = out_param;
1041
1042	return err;
1043}
1044
1045static struct mlx4_cmd_info cmd_info[] = {
1046	{
1047		.opcode = MLX4_CMD_QUERY_FW,
1048		.has_inbox = false,
1049		.has_outbox = true,
1050		.out_is_imm = false,
1051		.encode_slave_id = false,
1052		.verify = NULL,
1053		.wrapper = mlx4_QUERY_FW_wrapper
1054	},
1055	{
1056		.opcode = MLX4_CMD_QUERY_HCA,
1057		.has_inbox = false,
1058		.has_outbox = true,
1059		.out_is_imm = false,
1060		.encode_slave_id = false,
1061		.verify = NULL,
1062		.wrapper = NULL
1063	},
1064	{
1065		.opcode = MLX4_CMD_QUERY_DEV_CAP,
1066		.has_inbox = false,
1067		.has_outbox = true,
1068		.out_is_imm = false,
1069		.encode_slave_id = false,
1070		.verify = NULL,
1071		.wrapper = mlx4_QUERY_DEV_CAP_wrapper
1072	},
1073	{
1074		.opcode = MLX4_CMD_QUERY_FUNC_CAP,
1075		.has_inbox = false,
1076		.has_outbox = true,
1077		.out_is_imm = false,
1078		.encode_slave_id = false,
1079		.verify = NULL,
1080		.wrapper = mlx4_QUERY_FUNC_CAP_wrapper
1081	},
1082	{
1083		.opcode = MLX4_CMD_QUERY_ADAPTER,
1084		.has_inbox = false,
1085		.has_outbox = true,
1086		.out_is_imm = false,
1087		.encode_slave_id = false,
1088		.verify = NULL,
1089		.wrapper = NULL
1090	},
1091	{
1092		.opcode = MLX4_CMD_INIT_PORT,
1093		.has_inbox = false,
1094		.has_outbox = false,
1095		.out_is_imm = false,
1096		.encode_slave_id = false,
1097		.verify = NULL,
1098		.wrapper = mlx4_INIT_PORT_wrapper
1099	},
1100	{
1101		.opcode = MLX4_CMD_CLOSE_PORT,
1102		.has_inbox = false,
1103		.has_outbox = false,
1104		.out_is_imm  = false,
1105		.encode_slave_id = false,
1106		.verify = NULL,
1107		.wrapper = mlx4_CLOSE_PORT_wrapper
1108	},
1109	{
1110		.opcode = MLX4_CMD_QUERY_PORT,
1111		.has_inbox = false,
1112		.has_outbox = true,
1113		.out_is_imm = false,
1114		.encode_slave_id = false,
1115		.verify = NULL,
1116		.wrapper = mlx4_QUERY_PORT_wrapper
1117	},
1118	{
1119		.opcode = MLX4_CMD_SET_PORT,
1120		.has_inbox = true,
1121		.has_outbox = false,
1122		.out_is_imm = false,
1123		.encode_slave_id = false,
1124		.verify = NULL,
1125		.wrapper = mlx4_SET_PORT_wrapper
1126	},
1127	{
1128		.opcode = MLX4_CMD_MAP_EQ,
1129		.has_inbox = false,
1130		.has_outbox = false,
1131		.out_is_imm = false,
1132		.encode_slave_id = false,
1133		.verify = NULL,
1134		.wrapper = mlx4_MAP_EQ_wrapper
1135	},
1136	{
1137		.opcode = MLX4_CMD_SW2HW_EQ,
1138		.has_inbox = true,
1139		.has_outbox = false,
1140		.out_is_imm = false,
1141		.encode_slave_id = true,
1142		.verify = NULL,
1143		.wrapper = mlx4_SW2HW_EQ_wrapper
1144	},
1145	{
1146		.opcode = MLX4_CMD_HW_HEALTH_CHECK,
1147		.has_inbox = false,
1148		.has_outbox = false,
1149		.out_is_imm = false,
1150		.encode_slave_id = false,
1151		.verify = NULL,
1152		.wrapper = NULL
1153	},
1154	{
1155		.opcode = MLX4_CMD_NOP,
1156		.has_inbox = false,
1157		.has_outbox = false,
1158		.out_is_imm = false,
1159		.encode_slave_id = false,
1160		.verify = NULL,
1161		.wrapper = NULL
1162	},
1163	{
1164		.opcode = MLX4_CMD_CONFIG_DEV,
1165		.has_inbox = false,
1166		.has_outbox = true,
1167		.out_is_imm = false,
1168		.encode_slave_id = false,
1169		.verify = NULL,
1170		.wrapper = mlx4_CONFIG_DEV_wrapper
1171	},
1172	{
1173		.opcode = MLX4_CMD_ALLOC_RES,
1174		.has_inbox = false,
1175		.has_outbox = false,
1176		.out_is_imm = true,
1177		.encode_slave_id = false,
1178		.verify = NULL,
1179		.wrapper = mlx4_ALLOC_RES_wrapper
1180	},
1181	{
1182		.opcode = MLX4_CMD_FREE_RES,
1183		.has_inbox = false,
1184		.has_outbox = false,
1185		.out_is_imm = false,
1186		.encode_slave_id = false,
1187		.verify = NULL,
1188		.wrapper = mlx4_FREE_RES_wrapper
1189	},
1190	{
1191		.opcode = MLX4_CMD_SW2HW_MPT,
1192		.has_inbox = true,
1193		.has_outbox = false,
1194		.out_is_imm = false,
1195		.encode_slave_id = true,
1196		.verify = NULL,
1197		.wrapper = mlx4_SW2HW_MPT_wrapper
1198	},
1199	{
1200		.opcode = MLX4_CMD_QUERY_MPT,
1201		.has_inbox = false,
1202		.has_outbox = true,
1203		.out_is_imm = false,
1204		.encode_slave_id = false,
1205		.verify = NULL,
1206		.wrapper = mlx4_QUERY_MPT_wrapper
1207	},
1208	{
1209		.opcode = MLX4_CMD_HW2SW_MPT,
1210		.has_inbox = false,
1211		.has_outbox = false,
1212		.out_is_imm = false,
1213		.encode_slave_id = false,
1214		.verify = NULL,
1215		.wrapper = mlx4_HW2SW_MPT_wrapper
1216	},
1217	{
1218		.opcode = MLX4_CMD_READ_MTT,
1219		.has_inbox = false,
1220		.has_outbox = true,
1221		.out_is_imm = false,
1222		.encode_slave_id = false,
1223		.verify = NULL,
1224		.wrapper = NULL
1225	},
1226	{
1227		.opcode = MLX4_CMD_WRITE_MTT,
1228		.has_inbox = true,
1229		.has_outbox = false,
1230		.out_is_imm = false,
1231		.encode_slave_id = false,
1232		.verify = NULL,
1233		.wrapper = mlx4_WRITE_MTT_wrapper
1234	},
1235	{
1236		.opcode = MLX4_CMD_SYNC_TPT,
1237		.has_inbox = true,
1238		.has_outbox = false,
1239		.out_is_imm = false,
1240		.encode_slave_id = false,
1241		.verify = NULL,
1242		.wrapper = NULL
1243	},
1244	{
1245		.opcode = MLX4_CMD_HW2SW_EQ,
1246		.has_inbox = false,
1247		.has_outbox = false,
1248		.out_is_imm = false,
1249		.encode_slave_id = true,
1250		.verify = NULL,
1251		.wrapper = mlx4_HW2SW_EQ_wrapper
1252	},
1253	{
1254		.opcode = MLX4_CMD_QUERY_EQ,
1255		.has_inbox = false,
1256		.has_outbox = true,
1257		.out_is_imm = false,
1258		.encode_slave_id = true,
1259		.verify = NULL,
1260		.wrapper = mlx4_QUERY_EQ_wrapper
1261	},
1262	{
1263		.opcode = MLX4_CMD_SW2HW_CQ,
1264		.has_inbox = true,
1265		.has_outbox = false,
1266		.out_is_imm = false,
1267		.encode_slave_id = true,
1268		.verify = NULL,
1269		.wrapper = mlx4_SW2HW_CQ_wrapper
1270	},
1271	{
1272		.opcode = MLX4_CMD_HW2SW_CQ,
1273		.has_inbox = false,
1274		.has_outbox = false,
1275		.out_is_imm = false,
1276		.encode_slave_id = false,
1277		.verify = NULL,
1278		.wrapper = mlx4_HW2SW_CQ_wrapper
1279	},
1280	{
1281		.opcode = MLX4_CMD_QUERY_CQ,
1282		.has_inbox = false,
1283		.has_outbox = true,
1284		.out_is_imm = false,
1285		.encode_slave_id = false,
1286		.verify = NULL,
1287		.wrapper = mlx4_QUERY_CQ_wrapper
1288	},
1289	{
1290		.opcode = MLX4_CMD_MODIFY_CQ,
1291		.has_inbox = true,
1292		.has_outbox = false,
1293		.out_is_imm = true,
1294		.encode_slave_id = false,
1295		.verify = NULL,
1296		.wrapper = mlx4_MODIFY_CQ_wrapper
1297	},
1298	{
1299		.opcode = MLX4_CMD_SW2HW_SRQ,
1300		.has_inbox = true,
1301		.has_outbox = false,
1302		.out_is_imm = false,
1303		.encode_slave_id = true,
1304		.verify = NULL,
1305		.wrapper = mlx4_SW2HW_SRQ_wrapper
1306	},
1307	{
1308		.opcode = MLX4_CMD_HW2SW_SRQ,
1309		.has_inbox = false,
1310		.has_outbox = false,
1311		.out_is_imm = false,
1312		.encode_slave_id = false,
1313		.verify = NULL,
1314		.wrapper = mlx4_HW2SW_SRQ_wrapper
1315	},
1316	{
1317		.opcode = MLX4_CMD_QUERY_SRQ,
1318		.has_inbox = false,
1319		.has_outbox = true,
1320		.out_is_imm = false,
1321		.encode_slave_id = false,
1322		.verify = NULL,
1323		.wrapper = mlx4_QUERY_SRQ_wrapper
1324	},
1325	{
1326		.opcode = MLX4_CMD_ARM_SRQ,
1327		.has_inbox = false,
1328		.has_outbox = false,
1329		.out_is_imm = false,
1330		.encode_slave_id = false,
1331		.verify = NULL,
1332		.wrapper = mlx4_ARM_SRQ_wrapper
1333	},
1334	{
1335		.opcode = MLX4_CMD_RST2INIT_QP,
1336		.has_inbox = true,
1337		.has_outbox = false,
1338		.out_is_imm = false,
1339		.encode_slave_id = true,
1340		.verify = NULL,
1341		.wrapper = mlx4_RST2INIT_QP_wrapper
1342	},
1343	{
1344		.opcode = MLX4_CMD_INIT2INIT_QP,
1345		.has_inbox = true,
1346		.has_outbox = false,
1347		.out_is_imm = false,
1348		.encode_slave_id = false,
1349		.verify = NULL,
1350		.wrapper = mlx4_INIT2INIT_QP_wrapper
1351	},
1352	{
1353		.opcode = MLX4_CMD_INIT2RTR_QP,
1354		.has_inbox = true,
1355		.has_outbox = false,
1356		.out_is_imm = false,
1357		.encode_slave_id = false,
1358		.verify = NULL,
1359		.wrapper = mlx4_INIT2RTR_QP_wrapper
1360	},
1361	{
1362		.opcode = MLX4_CMD_RTR2RTS_QP,
1363		.has_inbox = true,
1364		.has_outbox = false,
1365		.out_is_imm = false,
1366		.encode_slave_id = false,
1367		.verify = NULL,
1368		.wrapper = mlx4_RTR2RTS_QP_wrapper
1369	},
1370	{
1371		.opcode = MLX4_CMD_RTS2RTS_QP,
1372		.has_inbox = true,
1373		.has_outbox = false,
1374		.out_is_imm = false,
1375		.encode_slave_id = false,
1376		.verify = NULL,
1377		.wrapper = mlx4_RTS2RTS_QP_wrapper
1378	},
1379	{
1380		.opcode = MLX4_CMD_SQERR2RTS_QP,
1381		.has_inbox = true,
1382		.has_outbox = false,
1383		.out_is_imm = false,
1384		.encode_slave_id = false,
1385		.verify = NULL,
1386		.wrapper = mlx4_SQERR2RTS_QP_wrapper
1387	},
1388	{
1389		.opcode = MLX4_CMD_2ERR_QP,
1390		.has_inbox = false,
1391		.has_outbox = false,
1392		.out_is_imm = false,
1393		.encode_slave_id = false,
1394		.verify = NULL,
1395		.wrapper = mlx4_GEN_QP_wrapper
1396	},
1397	{
1398		.opcode = MLX4_CMD_RTS2SQD_QP,
1399		.has_inbox = false,
1400		.has_outbox = false,
1401		.out_is_imm = false,
1402		.encode_slave_id = false,
1403		.verify = NULL,
1404		.wrapper = mlx4_GEN_QP_wrapper
1405	},
1406	{
1407		.opcode = MLX4_CMD_SQD2SQD_QP,
1408		.has_inbox = true,
1409		.has_outbox = false,
1410		.out_is_imm = false,
1411		.encode_slave_id = false,
1412		.verify = NULL,
1413		.wrapper = mlx4_SQD2SQD_QP_wrapper
1414	},
1415	{
1416		.opcode = MLX4_CMD_SQD2RTS_QP,
1417		.has_inbox = true,
1418		.has_outbox = false,
1419		.out_is_imm = false,
1420		.encode_slave_id = false,
1421		.verify = NULL,
1422		.wrapper = mlx4_SQD2RTS_QP_wrapper
1423	},
1424	{
1425		.opcode = MLX4_CMD_2RST_QP,
1426		.has_inbox = false,
1427		.has_outbox = false,
1428		.out_is_imm = false,
1429		.encode_slave_id = false,
1430		.verify = NULL,
1431		.wrapper = mlx4_2RST_QP_wrapper
1432	},
1433	{
1434		.opcode = MLX4_CMD_QUERY_QP,
1435		.has_inbox = false,
1436		.has_outbox = true,
1437		.out_is_imm = false,
1438		.encode_slave_id = false,
1439		.verify = NULL,
1440		.wrapper = mlx4_GEN_QP_wrapper
1441	},
1442	{
1443		.opcode = MLX4_CMD_SUSPEND_QP,
1444		.has_inbox = false,
1445		.has_outbox = false,
1446		.out_is_imm = false,
1447		.encode_slave_id = false,
1448		.verify = NULL,
1449		.wrapper = mlx4_GEN_QP_wrapper
1450	},
1451	{
1452		.opcode = MLX4_CMD_UNSUSPEND_QP,
1453		.has_inbox = false,
1454		.has_outbox = false,
1455		.out_is_imm = false,
1456		.encode_slave_id = false,
1457		.verify = NULL,
1458		.wrapper = mlx4_GEN_QP_wrapper
1459	},
1460	{
1461		.opcode = MLX4_CMD_UPDATE_QP,
1462		.has_inbox = true,
1463		.has_outbox = false,
1464		.out_is_imm = false,
1465		.encode_slave_id = false,
1466		.verify = NULL,
1467		.wrapper = mlx4_UPDATE_QP_wrapper
1468	},
1469	{
1470		.opcode = MLX4_CMD_GET_OP_REQ,
1471		.has_inbox = false,
1472		.has_outbox = false,
1473		.out_is_imm = false,
1474		.encode_slave_id = false,
1475		.verify = NULL,
1476		.wrapper = mlx4_CMD_EPERM_wrapper,
1477	},
1478	{
1479		.opcode = MLX4_CMD_ALLOCATE_VPP,
1480		.has_inbox = false,
1481		.has_outbox = true,
1482		.out_is_imm = false,
1483		.encode_slave_id = false,
1484		.verify = NULL,
1485		.wrapper = mlx4_CMD_EPERM_wrapper,
1486	},
1487	{
1488		.opcode = MLX4_CMD_SET_VPORT_QOS,
1489		.has_inbox = false,
1490		.has_outbox = true,
1491		.out_is_imm = false,
1492		.encode_slave_id = false,
1493		.verify = NULL,
1494		.wrapper = mlx4_CMD_EPERM_wrapper,
1495	},
1496	{
1497		.opcode = MLX4_CMD_CONF_SPECIAL_QP,
1498		.has_inbox = false,
1499		.has_outbox = false,
1500		.out_is_imm = false,
1501		.encode_slave_id = false,
1502		.verify = NULL, /* XXX verify: only demux can do this */
1503		.wrapper = NULL
1504	},
1505	{
1506		.opcode = MLX4_CMD_MAD_IFC,
1507		.has_inbox = true,
1508		.has_outbox = true,
1509		.out_is_imm = false,
1510		.encode_slave_id = false,
1511		.verify = NULL,
1512		.wrapper = mlx4_MAD_IFC_wrapper
1513	},
1514	{
1515		.opcode = MLX4_CMD_MAD_DEMUX,
1516		.has_inbox = false,
1517		.has_outbox = false,
1518		.out_is_imm = false,
1519		.encode_slave_id = false,
1520		.verify = NULL,
1521		.wrapper = mlx4_CMD_EPERM_wrapper
1522	},
1523	{
1524		.opcode = MLX4_CMD_QUERY_IF_STAT,
1525		.has_inbox = false,
1526		.has_outbox = true,
1527		.out_is_imm = false,
1528		.encode_slave_id = false,
1529		.verify = NULL,
1530		.wrapper = mlx4_QUERY_IF_STAT_wrapper
1531	},
1532	{
1533		.opcode = MLX4_CMD_ACCESS_REG,
1534		.has_inbox = true,
1535		.has_outbox = true,
1536		.out_is_imm = false,
1537		.encode_slave_id = false,
1538		.verify = NULL,
1539		.wrapper = mlx4_ACCESS_REG_wrapper,
1540	},
1541	{
1542		.opcode = MLX4_CMD_CONGESTION_CTRL_OPCODE,
1543		.has_inbox = false,
1544		.has_outbox = false,
1545		.out_is_imm = false,
1546		.encode_slave_id = false,
1547		.verify = NULL,
1548		.wrapper = mlx4_CMD_EPERM_wrapper,
1549	},
1550	/* Native multicast commands are not available for guests */
1551	{
1552		.opcode = MLX4_CMD_QP_ATTACH,
1553		.has_inbox = true,
1554		.has_outbox = false,
1555		.out_is_imm = false,
1556		.encode_slave_id = false,
1557		.verify = NULL,
1558		.wrapper = mlx4_QP_ATTACH_wrapper
1559	},
1560	{
1561		.opcode = MLX4_CMD_PROMISC,
1562		.has_inbox = false,
1563		.has_outbox = false,
1564		.out_is_imm = false,
1565		.encode_slave_id = false,
1566		.verify = NULL,
1567		.wrapper = mlx4_PROMISC_wrapper
1568	},
1569	/* Ethernet specific commands */
1570	{
1571		.opcode = MLX4_CMD_SET_VLAN_FLTR,
1572		.has_inbox = true,
1573		.has_outbox = false,
1574		.out_is_imm = false,
1575		.encode_slave_id = false,
1576		.verify = NULL,
1577		.wrapper = mlx4_SET_VLAN_FLTR_wrapper
1578	},
1579	{
1580		.opcode = MLX4_CMD_SET_MCAST_FLTR,
1581		.has_inbox = false,
1582		.has_outbox = false,
1583		.out_is_imm = false,
1584		.encode_slave_id = false,
1585		.verify = NULL,
1586		.wrapper = mlx4_SET_MCAST_FLTR_wrapper
1587	},
1588	{
1589		.opcode = MLX4_CMD_DUMP_ETH_STATS,
1590		.has_inbox = false,
1591		.has_outbox = true,
1592		.out_is_imm = false,
1593		.encode_slave_id = false,
1594		.verify = NULL,
1595		.wrapper = mlx4_DUMP_ETH_STATS_wrapper
1596	},
1597	{
1598		.opcode = MLX4_CMD_INFORM_FLR_DONE,
1599		.has_inbox = false,
1600		.has_outbox = false,
1601		.out_is_imm = false,
1602		.encode_slave_id = false,
1603		.verify = NULL,
1604		.wrapper = NULL
1605	},
1606	/* flow steering commands */
1607	{
1608		.opcode = MLX4_QP_FLOW_STEERING_ATTACH,
1609		.has_inbox = true,
1610		.has_outbox = false,
1611		.out_is_imm = true,
1612		.encode_slave_id = false,
1613		.verify = NULL,
1614		.wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper
1615	},
1616	{
1617		.opcode = MLX4_QP_FLOW_STEERING_DETACH,
1618		.has_inbox = false,
1619		.has_outbox = false,
1620		.out_is_imm = false,
1621		.encode_slave_id = false,
1622		.verify = NULL,
1623		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
1624	},
1625	{
1626		.opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
1627		.has_inbox = false,
1628		.has_outbox = false,
1629		.out_is_imm = false,
1630		.encode_slave_id = false,
1631		.verify = NULL,
1632		.wrapper = mlx4_CMD_EPERM_wrapper
1633	},
1634	{
1635		.opcode = MLX4_CMD_VIRT_PORT_MAP,
1636		.has_inbox = false,
1637		.has_outbox = false,
1638		.out_is_imm = false,
1639		.encode_slave_id = false,
1640		.verify = NULL,
1641		.wrapper = mlx4_CMD_EPERM_wrapper
1642	},
1643};
1644
1645static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
1646				    struct mlx4_vhcr_cmd *in_vhcr)
1647{
1648	struct mlx4_priv *priv = mlx4_priv(dev);
1649	struct mlx4_cmd_info *cmd = NULL;
1650	struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
1651	struct mlx4_vhcr *vhcr;
1652	struct mlx4_cmd_mailbox *inbox = NULL;
1653	struct mlx4_cmd_mailbox *outbox = NULL;
1654	u64 in_param;
1655	u64 out_param;
1656	int ret = 0;
1657	int i;
1658	int err = 0;
1659
1660	/* Create sw representation of Virtual HCR */
1661	vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
1662	if (!vhcr)
1663		return -ENOMEM;
1664
1665	/* DMA in the vHCR */
1666	if (!in_vhcr) {
1667		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1668				      priv->mfunc.master.slave_state[slave].vhcr_dma,
1669				      ALIGN(sizeof(struct mlx4_vhcr_cmd),
1670					    MLX4_ACCESS_MEM_ALIGN), 1);
1671		if (ret) {
1672			if (!(dev->persist->state &
1673			    MLX4_DEVICE_STATE_INTERNAL_ERROR))
1674				mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
1675					 __func__, ret);
1676			kfree(vhcr);
1677			return ret;
1678		}
1679	}
1680
1681	/* Fill SW VHCR fields */
1682	vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
1683	vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
1684	vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
1685	vhcr->token = be16_to_cpu(vhcr_cmd->token);
1686	vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
1687	vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
1688	vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
1689
1690	/* Lookup command */
1691	for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
1692		if (vhcr->op == cmd_info[i].opcode) {
1693			cmd = &cmd_info[i];
1694			break;
1695		}
1696	}
1697	if (!cmd) {
1698		mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
1699			 vhcr->op, slave);
1700		vhcr_cmd->status = CMD_STAT_BAD_PARAM;
1701		goto out_status;
1702	}
1703
1704	/* Read inbox */
1705	if (cmd->has_inbox) {
1706		vhcr->in_param &= INBOX_MASK;
1707		inbox = mlx4_alloc_cmd_mailbox(dev);
1708		if (IS_ERR(inbox)) {
1709			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1710			inbox = NULL;
1711			goto out_status;
1712		}
1713
1714		ret = mlx4_ACCESS_MEM(dev, inbox->dma, slave,
1715				      vhcr->in_param,
1716				      MLX4_MAILBOX_SIZE, 1);
1717		if (ret) {
1718			if (!(dev->persist->state &
1719			    MLX4_DEVICE_STATE_INTERNAL_ERROR))
1720				mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
1721					 __func__, cmd->opcode);
1722			vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
1723			goto out_status;
1724		}
1725	}
1726
1727	/* Apply permission and bound checks if applicable */
1728	if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
1729		mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
1730			  vhcr->op, slave, vhcr->in_modifier);
1731		vhcr_cmd->status = CMD_STAT_BAD_OP;
1732		goto out_status;
1733	}
1734
1735	/* Allocate outbox */
1736	if (cmd->has_outbox) {
1737		outbox = mlx4_alloc_cmd_mailbox(dev);
1738		if (IS_ERR(outbox)) {
1739			vhcr_cmd->status = CMD_STAT_BAD_SIZE;
1740			outbox = NULL;
1741			goto out_status;
1742		}
1743	}
1744
1745	/* Execute the command! */
1746	if (cmd->wrapper) {
1747		err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
1748				   cmd);
1749		if (cmd->out_is_imm)
1750			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1751	} else {
1752		in_param = cmd->has_inbox ? (u64) inbox->dma :
1753			vhcr->in_param;
1754		out_param = cmd->has_outbox ? (u64) outbox->dma :
1755			vhcr->out_param;
1756		err = __mlx4_cmd(dev, in_param, &out_param,
1757				 cmd->out_is_imm, vhcr->in_modifier,
1758				 vhcr->op_modifier, vhcr->op,
1759				 MLX4_CMD_TIME_CLASS_A,
1760				 MLX4_CMD_NATIVE);
1761
1762		if (cmd->out_is_imm) {
1763			vhcr->out_param = out_param;
1764			vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
1765		}
1766	}
1767
1768	if (err) {
1769		if (!(dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR))
1770			mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
1771				  vhcr->op, slave, vhcr->errno, err);
1772		vhcr_cmd->status = mlx4_errno_to_status(err);
1773		goto out_status;
1774	}
1775
1776
1777	/* Write outbox if command completed successfully */
1778	if (cmd->has_outbox && !vhcr_cmd->status) {
1779		ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
1780				      vhcr->out_param,
1781				      MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
1782		if (ret) {
1783			/* If we failed to write back the outbox after the
1784			 *command was successfully executed, we must fail this
1785			 * slave, as it is now in undefined state */
1786			if (!(dev->persist->state &
1787			    MLX4_DEVICE_STATE_INTERNAL_ERROR))
1788				mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
1789			goto out;
1790		}
1791	}
1792
1793out_status:
1794	/* DMA back vhcr result */
1795	if (!in_vhcr) {
1796		ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
1797				      priv->mfunc.master.slave_state[slave].vhcr_dma,
1798				      ALIGN(sizeof(struct mlx4_vhcr),
1799					    MLX4_ACCESS_MEM_ALIGN),
1800				      MLX4_CMD_WRAPPED);
1801		if (ret)
1802			mlx4_err(dev, "%s:Failed writing vhcr result\n",
1803				 __func__);
1804		else if (vhcr->e_bit &&
1805			 mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
1806				mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
1807					  slave);
1808	}
1809
1810out:
1811	kfree(vhcr);
1812	mlx4_free_cmd_mailbox(dev, inbox);
1813	mlx4_free_cmd_mailbox(dev, outbox);
1814	return ret;
1815}
1816
1817static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
1818					    int slave, int port)
1819{
1820	struct mlx4_vport_oper_state *vp_oper;
1821	struct mlx4_vport_state *vp_admin;
1822	struct mlx4_vf_immed_vlan_work *work;
1823	struct mlx4_dev *dev = &(priv->dev);
1824	int err;
1825	int admin_vlan_ix = NO_INDX;
1826
1827	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1828	vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1829
1830	if (vp_oper->state.default_vlan == vp_admin->default_vlan &&
1831	    vp_oper->state.default_qos == vp_admin->default_qos &&
1832	    vp_oper->state.link_state == vp_admin->link_state &&
1833	    vp_oper->state.qos_vport == vp_admin->qos_vport)
1834		return 0;
1835
1836	if (!(priv->mfunc.master.slave_state[slave].active &&
1837	      dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)) {
1838		/* even if the UPDATE_QP command isn't supported, we still want
1839		 * to set this VF link according to the admin directive
1840		 */
1841		vp_oper->state.link_state = vp_admin->link_state;
1842		return -1;
1843	}
1844
1845	mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
1846		 slave, port);
1847	mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
1848		 vp_admin->default_vlan, vp_admin->default_qos,
1849		 vp_admin->link_state);
1850
1851	work = kzalloc(sizeof(*work), GFP_KERNEL);
1852	if (!work)
1853		return -ENOMEM;
1854
1855	if (vp_oper->state.default_vlan != vp_admin->default_vlan) {
1856		if (MLX4_VGT != vp_admin->default_vlan) {
1857			err = __mlx4_register_vlan(&priv->dev, port,
1858						   vp_admin->default_vlan,
1859						   &admin_vlan_ix);
1860			if (err) {
1861				kfree(work);
1862				mlx4_warn(&priv->dev,
1863					  "No vlan resources slave %d, port %d\n",
1864					  slave, port);
1865				return err;
1866			}
1867		} else {
1868			admin_vlan_ix = NO_INDX;
1869		}
1870		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
1871		mlx4_dbg(&priv->dev,
1872			 "alloc vlan %d idx  %d slave %d port %d\n",
1873			 (int)(vp_admin->default_vlan),
1874			 admin_vlan_ix, slave, port);
1875	}
1876
1877	/* save original vlan ix and vlan id */
1878	work->orig_vlan_id = vp_oper->state.default_vlan;
1879	work->orig_vlan_ix = vp_oper->vlan_idx;
1880
1881	/* handle new qos */
1882	if (vp_oper->state.default_qos != vp_admin->default_qos)
1883		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_QOS;
1884
1885	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN)
1886		vp_oper->vlan_idx = admin_vlan_ix;
1887
1888	vp_oper->state.default_vlan = vp_admin->default_vlan;
1889	vp_oper->state.default_qos = vp_admin->default_qos;
1890	vp_oper->state.link_state = vp_admin->link_state;
1891	vp_oper->state.qos_vport = vp_admin->qos_vport;
1892
1893	if (vp_admin->link_state == IFLA_VF_LINK_STATE_DISABLE)
1894		work->flags |= MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE;
1895
1896	/* iterate over QPs owned by this slave, using UPDATE_QP */
1897	work->port = port;
1898	work->slave = slave;
1899	work->qos = vp_oper->state.default_qos;
1900	work->qos_vport = vp_oper->state.qos_vport;
1901	work->vlan_id = vp_oper->state.default_vlan;
1902	work->vlan_ix = vp_oper->vlan_idx;
1903	work->priv = priv;
1904	INIT_WORK(&work->work, mlx4_vf_immed_vlan_work_handler);
1905	queue_work(priv->mfunc.master.comm_wq, &work->work);
1906
1907	return 0;
1908}
1909
1910static void mlx4_set_default_port_qos(struct mlx4_dev *dev, int port)
1911{
1912	struct mlx4_qos_manager *port_qos_ctl;
1913	struct mlx4_priv *priv = mlx4_priv(dev);
1914
1915	port_qos_ctl = &priv->mfunc.master.qos_ctl[port];
1916	bitmap_zero(port_qos_ctl->priority_bm, MLX4_NUM_UP);
1917
1918	/* Enable only default prio at PF init routine */
1919	set_bit(MLX4_DEFAULT_QOS_PRIO, port_qos_ctl->priority_bm);
1920}
1921
1922static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
1923{
1924	int i;
1925	int err;
1926	int num_vfs;
1927	u16 availible_vpp;
1928	u8 vpp_param[MLX4_NUM_UP];
1929	struct mlx4_qos_manager *port_qos;
1930	struct mlx4_priv *priv = mlx4_priv(dev);
1931
1932	err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
1933	if (err) {
1934		mlx4_info(dev, "Failed query availible VPPs\n");
1935		return;
1936	}
1937
1938	port_qos = &priv->mfunc.master.qos_ctl[port];
1939	num_vfs = (availible_vpp /
1940		   bitmap_weight(port_qos->priority_bm, MLX4_NUM_UP));
1941
1942	for (i = 0; i < MLX4_NUM_UP; i++) {
1943		if (test_bit(i, port_qos->priority_bm))
1944			vpp_param[i] = num_vfs;
1945	}
1946
1947	err = mlx4_ALLOCATE_VPP_set(dev, port, vpp_param);
1948	if (err) {
1949		mlx4_info(dev, "Failed allocating VPPs\n");
1950		return;
1951	}
1952
1953	/* Query actual allocated VPP, just to make sure */
1954	err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
1955	if (err) {
1956		mlx4_info(dev, "Failed query availible VPPs\n");
1957		return;
1958	}
1959
1960	port_qos->num_of_qos_vfs = num_vfs;
1961	mlx4_dbg(dev, "Port %d Availible VPPs %d\n", port, availible_vpp);
1962
1963	for (i = 0; i < MLX4_NUM_UP; i++)
1964		mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
1965			 vpp_param[i]);
1966}
1967
1968static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
1969{
1970	int port, err;
1971	struct mlx4_vport_state *vp_admin;
1972	struct mlx4_vport_oper_state *vp_oper;
1973	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
1974			&priv->dev, slave);
1975	int min_port = find_first_bit(actv_ports.ports,
1976				      priv->dev.caps.num_ports) + 1;
1977	int max_port = min_port - 1 +
1978		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
1979
1980	for (port = min_port; port <= max_port; port++) {
1981		if (!test_bit(port - 1, actv_ports.ports))
1982			continue;
1983		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
1984			priv->mfunc.master.vf_admin[slave].enable_smi[port];
1985		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
1986		vp_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
1987		vp_oper->state = *vp_admin;
1988		if (MLX4_VGT != vp_admin->default_vlan) {
1989			err = __mlx4_register_vlan(&priv->dev, port,
1990						   vp_admin->default_vlan, &(vp_oper->vlan_idx));
1991			if (err) {
1992				vp_oper->vlan_idx = NO_INDX;
1993				mlx4_warn(&priv->dev,
1994					  "No vlan resources slave %d, port %d\n",
1995					  slave, port);
1996				return err;
1997			}
1998			mlx4_dbg(&priv->dev, "alloc vlan %d idx  %d slave %d port %d\n",
1999				 (int)(vp_oper->state.default_vlan),
2000				 vp_oper->vlan_idx, slave, port);
2001		}
2002		if (vp_admin->spoofchk) {
2003			vp_oper->mac_idx = __mlx4_register_mac(&priv->dev,
2004							       port,
2005							       vp_admin->mac);
2006			if (0 > vp_oper->mac_idx) {
2007				err = vp_oper->mac_idx;
2008				vp_oper->mac_idx = NO_INDX;
2009				mlx4_warn(&priv->dev,
2010					  "No mac resources slave %d, port %d\n",
2011					  slave, port);
2012				return err;
2013			}
2014			mlx4_dbg(&priv->dev, "alloc mac %llx idx  %d slave %d port %d\n",
2015				 vp_oper->state.mac, vp_oper->mac_idx, slave, port);
2016		}
2017	}
2018	return 0;
2019}
2020
2021static void mlx4_master_deactivate_admin_state(struct mlx4_priv *priv, int slave)
2022{
2023	int port;
2024	struct mlx4_vport_oper_state *vp_oper;
2025	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(
2026			&priv->dev, slave);
2027	int min_port = find_first_bit(actv_ports.ports,
2028				      priv->dev.caps.num_ports) + 1;
2029	int max_port = min_port - 1 +
2030		bitmap_weight(actv_ports.ports, priv->dev.caps.num_ports);
2031
2032
2033	for (port = min_port; port <= max_port; port++) {
2034		if (!test_bit(port - 1, actv_ports.ports))
2035			continue;
2036		priv->mfunc.master.vf_oper[slave].smi_enabled[port] =
2037			MLX4_VF_SMI_DISABLED;
2038		vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
2039		if (NO_INDX != vp_oper->vlan_idx) {
2040			__mlx4_unregister_vlan(&priv->dev,
2041					       port, vp_oper->state.default_vlan);
2042			vp_oper->vlan_idx = NO_INDX;
2043		}
2044		if (NO_INDX != vp_oper->mac_idx) {
2045			__mlx4_unregister_mac(&priv->dev, port, vp_oper->state.mac);
2046			vp_oper->mac_idx = NO_INDX;
2047		}
2048	}
2049	return;
2050}
2051
2052static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
2053			       u16 param, u8 toggle)
2054{
2055	struct mlx4_priv *priv = mlx4_priv(dev);
2056	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2057	u32 reply;
2058	u8 is_going_down = 0;
2059	int i;
2060	unsigned long flags;
2061
2062	slave_state[slave].comm_toggle ^= 1;
2063	reply = (u32) slave_state[slave].comm_toggle << 31;
2064	if (toggle != slave_state[slave].comm_toggle) {
2065		mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
2066			  toggle, slave);
2067		goto reset_slave;
2068	}
2069	if (cmd == MLX4_COMM_CMD_RESET) {
2070		mlx4_warn(dev, "Received reset from slave:%d\n", slave);
2071		slave_state[slave].active = false;
2072		slave_state[slave].old_vlan_api = false;
2073		mlx4_master_deactivate_admin_state(priv, slave);
2074		for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
2075				slave_state[slave].event_eq[i].eqn = -1;
2076				slave_state[slave].event_eq[i].token = 0;
2077		}
2078		/*check if we are in the middle of FLR process,
2079		if so return "retry" status to the slave*/
2080		if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd)
2081			goto inform_slave_state;
2082
2083		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_SHUTDOWN, slave);
2084
2085		/* write the version in the event field */
2086		reply |= mlx4_comm_get_version();
2087
2088		goto reset_slave;
2089	}
2090	/*command from slave in the middle of FLR*/
2091	if (cmd != MLX4_COMM_CMD_RESET &&
2092	    MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
2093		mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
2094			  slave, cmd);
2095		return;
2096	}
2097
2098	switch (cmd) {
2099	case MLX4_COMM_CMD_VHCR0:
2100		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
2101			goto reset_slave;
2102		slave_state[slave].vhcr_dma = ((u64) param) << 48;
2103		priv->mfunc.master.slave_state[slave].cookie = 0;
2104		break;
2105	case MLX4_COMM_CMD_VHCR1:
2106		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
2107			goto reset_slave;
2108		slave_state[slave].vhcr_dma |= ((u64) param) << 32;
2109		break;
2110	case MLX4_COMM_CMD_VHCR2:
2111		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
2112			goto reset_slave;
2113		slave_state[slave].vhcr_dma |= ((u64) param) << 16;
2114		break;
2115	case MLX4_COMM_CMD_VHCR_EN:
2116		if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
2117			goto reset_slave;
2118		slave_state[slave].vhcr_dma |= param;
2119		if (mlx4_master_activate_admin_state(priv, slave))
2120				goto reset_slave;
2121		slave_state[slave].active = true;
2122		mlx4_dispatch_event(dev, MLX4_DEV_EVENT_SLAVE_INIT, slave);
2123		break;
2124	case MLX4_COMM_CMD_VHCR_POST:
2125		if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
2126		    (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST)) {
2127			mlx4_warn(dev, "slave:%d is out of sync, cmd=0x%x, last command=0x%x, reset is needed\n",
2128				  slave, cmd, slave_state[slave].last_cmd);
2129			goto reset_slave;
2130		}
2131
2132		mutex_lock(&priv->cmd.slave_cmd_mutex);
2133		if (mlx4_master_process_vhcr(dev, slave, NULL)) {
2134			mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
2135				 slave);
2136			mutex_unlock(&priv->cmd.slave_cmd_mutex);
2137			goto reset_slave;
2138		}
2139		mutex_unlock(&priv->cmd.slave_cmd_mutex);
2140		break;
2141	default:
2142		mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
2143		goto reset_slave;
2144	}
2145	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2146	if (!slave_state[slave].is_slave_going_down)
2147		slave_state[slave].last_cmd = cmd;
2148	else
2149		is_going_down = 1;
2150	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2151	if (is_going_down) {
2152		mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
2153			  cmd, slave);
2154		return;
2155	}
2156	__raw_writel((__force u32) cpu_to_be32(reply),
2157		     &priv->mfunc.comm[slave].slave_read);
2158	mmiowb();
2159
2160	return;
2161
2162reset_slave:
2163	/* cleanup any slave resources */
2164	if (dev->persist->interface_state & MLX4_INTERFACE_STATE_UP)
2165		mlx4_delete_all_resources_for_slave(dev, slave);
2166
2167	if (cmd != MLX4_COMM_CMD_RESET) {
2168		mlx4_warn(dev, "Turn on internal error to force reset, slave=%d, cmd=0x%x\n",
2169			  slave, cmd);
2170		/* Turn on internal error letting slave reset itself immeditaly,
2171		 * otherwise it might take till timeout on command is passed
2172		 */
2173		reply |= ((u32)COMM_CHAN_EVENT_INTERNAL_ERR);
2174	}
2175
2176	spin_lock_irqsave(&priv->mfunc.master.slave_state_lock, flags);
2177	if (!slave_state[slave].is_slave_going_down)
2178		slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
2179	spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
2180	/*with slave in the middle of flr, no need to clean resources again.*/
2181inform_slave_state:
2182	memset(&slave_state[slave].event_eq, 0,
2183	       sizeof(struct mlx4_slave_event_eq_info));
2184	__raw_writel((__force u32) cpu_to_be32(reply),
2185		     &priv->mfunc.comm[slave].slave_read);
2186	wmb();
2187}
2188
2189/* master command processing */
2190void mlx4_master_comm_channel(struct work_struct *work)
2191{
2192	struct mlx4_mfunc_master_ctx *master =
2193		container_of(work,
2194			     struct mlx4_mfunc_master_ctx,
2195			     comm_work);
2196	struct mlx4_mfunc *mfunc =
2197		container_of(master, struct mlx4_mfunc, master);
2198	struct mlx4_priv *priv =
2199		container_of(mfunc, struct mlx4_priv, mfunc);
2200	struct mlx4_dev *dev = &priv->dev;
2201	__be32 *bit_vec;
2202	u32 comm_cmd;
2203	u32 vec;
2204	int i, j, slave;
2205	int toggle;
2206	int served = 0;
2207	int reported = 0;
2208	u32 slt;
2209
2210	bit_vec = master->comm_arm_bit_vector;
2211	for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
2212		vec = be32_to_cpu(bit_vec[i]);
2213		for (j = 0; j < 32; j++) {
2214			if (!(vec & (1 << j)))
2215				continue;
2216			++reported;
2217			slave = (i * 32) + j;
2218			comm_cmd = swab32(readl(
2219					  &mfunc->comm[slave].slave_write));
2220			slt = swab32(readl(&mfunc->comm[slave].slave_read))
2221				     >> 31;
2222			toggle = comm_cmd >> 31;
2223			if (toggle != slt) {
2224				if (master->slave_state[slave].comm_toggle
2225				    != slt) {
2226					pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
2227						slave, slt,
2228						master->slave_state[slave].comm_toggle);
2229					master->slave_state[slave].comm_toggle =
2230						slt;
2231				}
2232				mlx4_master_do_cmd(dev, slave,
2233						   comm_cmd >> 16 & 0xff,
2234						   comm_cmd & 0xffff, toggle);
2235				++served;
2236			}
2237		}
2238	}
2239
2240	if (reported && reported != served)
2241		mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
2242			  reported, served);
2243
2244	if (mlx4_ARM_COMM_CHANNEL(dev))
2245		mlx4_warn(dev, "Failed to arm comm channel events\n");
2246}
2247
2248static int sync_toggles(struct mlx4_dev *dev)
2249{
2250	struct mlx4_priv *priv = mlx4_priv(dev);
2251	u32 wr_toggle;
2252	u32 rd_toggle;
2253	unsigned long end;
2254
2255	wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write));
2256	if (wr_toggle == 0xffffffff)
2257		end = jiffies + msecs_to_jiffies(30000);
2258	else
2259		end = jiffies + msecs_to_jiffies(5000);
2260
2261	while (time_before(jiffies, end)) {
2262		rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read));
2263		if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) {
2264			/* PCI might be offline */
2265			msleep(100);
2266			wr_toggle = swab32(readl(&priv->mfunc.comm->
2267					   slave_write));
2268			continue;
2269		}
2270
2271		if (rd_toggle >> 31 == wr_toggle >> 31) {
2272			priv->cmd.comm_toggle = rd_toggle >> 31;
2273			return 0;
2274		}
2275
2276		cond_resched();
2277	}
2278
2279	/*
2280	 * we could reach here if for example the previous VM using this
2281	 * function misbehaved and left the channel with unsynced state. We
2282	 * should fix this here and give this VM a chance to use a properly
2283	 * synced channel
2284	 */
2285	mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
2286	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
2287	__raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
2288	priv->cmd.comm_toggle = 0;
2289
2290	return 0;
2291}
2292
2293int mlx4_multi_func_init(struct mlx4_dev *dev)
2294{
2295	struct mlx4_priv *priv = mlx4_priv(dev);
2296	struct mlx4_slave_state *s_state;
2297	int i, j, err, port;
2298
2299	if (mlx4_is_master(dev))
2300		priv->mfunc.comm =
2301		ioremap(pci_resource_start(dev->persist->pdev,
2302					   priv->fw.comm_bar) +
2303			priv->fw.comm_base, MLX4_COMM_PAGESIZE);
2304	else
2305		priv->mfunc.comm =
2306		ioremap(pci_resource_start(dev->persist->pdev, 2) +
2307			MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
2308	if (!priv->mfunc.comm) {
2309		mlx4_err(dev, "Couldn't map communication vector\n");
2310		goto err_vhcr;
2311	}
2312
2313	if (mlx4_is_master(dev)) {
2314		struct mlx4_vf_oper_state *vf_oper;
2315		struct mlx4_vf_admin_state *vf_admin;
2316
2317		priv->mfunc.master.slave_state =
2318			kzalloc(dev->num_slaves *
2319				sizeof(struct mlx4_slave_state), GFP_KERNEL);
2320		if (!priv->mfunc.master.slave_state)
2321			goto err_comm;
2322
2323		priv->mfunc.master.vf_admin =
2324			kzalloc(dev->num_slaves *
2325				sizeof(struct mlx4_vf_admin_state), GFP_KERNEL);
2326		if (!priv->mfunc.master.vf_admin)
2327			goto err_comm_admin;
2328
2329		priv->mfunc.master.vf_oper =
2330			kzalloc(dev->num_slaves *
2331				sizeof(struct mlx4_vf_oper_state), GFP_KERNEL);
2332		if (!priv->mfunc.master.vf_oper)
2333			goto err_comm_oper;
2334
2335		for (i = 0; i < dev->num_slaves; ++i) {
2336			vf_admin = &priv->mfunc.master.vf_admin[i];
2337			vf_oper = &priv->mfunc.master.vf_oper[i];
2338			s_state = &priv->mfunc.master.slave_state[i];
2339			s_state->last_cmd = MLX4_COMM_CMD_RESET;
2340			mutex_init(&priv->mfunc.master.gen_eqe_mutex[i]);
2341			for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
2342				s_state->event_eq[j].eqn = -1;
2343			__raw_writel((__force u32) 0,
2344				     &priv->mfunc.comm[i].slave_write);
2345			__raw_writel((__force u32) 0,
2346				     &priv->mfunc.comm[i].slave_read);
2347			mmiowb();
2348			for (port = 1; port <= MLX4_MAX_PORTS; port++) {
2349				struct mlx4_vport_state *admin_vport;
2350				struct mlx4_vport_state *oper_vport;
2351
2352				s_state->vlan_filter[port] =
2353					kzalloc(sizeof(struct mlx4_vlan_fltr),
2354						GFP_KERNEL);
2355				if (!s_state->vlan_filter[port]) {
2356					if (--port)
2357						kfree(s_state->vlan_filter[port]);
2358					goto err_slaves;
2359				}
2360
2361				admin_vport = &vf_admin->vport[port];
2362				oper_vport = &vf_oper->vport[port].state;
2363				INIT_LIST_HEAD(&s_state->mcast_filters[port]);
2364				admin_vport->default_vlan = MLX4_VGT;
2365				oper_vport->default_vlan = MLX4_VGT;
2366				admin_vport->qos_vport =
2367						MLX4_VPP_DEFAULT_VPORT;
2368				oper_vport->qos_vport = MLX4_VPP_DEFAULT_VPORT;
2369				vf_oper->vport[port].vlan_idx = NO_INDX;
2370				vf_oper->vport[port].mac_idx = NO_INDX;
2371				mlx4_set_random_admin_guid(dev, i, port);
2372			}
2373			spin_lock_init(&s_state->lock);
2374		}
2375
2376		if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP) {
2377			for (port = 1; port <= dev->caps.num_ports; port++) {
2378				if (mlx4_is_eth(dev, port)) {
2379					mlx4_set_default_port_qos(dev, port);
2380					mlx4_allocate_port_vpps(dev, port);
2381				}
2382			}
2383		}
2384
2385		memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
2386		priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
2387		INIT_WORK(&priv->mfunc.master.comm_work,
2388			  mlx4_master_comm_channel);
2389		INIT_WORK(&priv->mfunc.master.slave_event_work,
2390			  mlx4_gen_slave_eqe);
2391		INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
2392			  mlx4_master_handle_slave_flr);
2393		spin_lock_init(&priv->mfunc.master.slave_state_lock);
2394		spin_lock_init(&priv->mfunc.master.slave_eq.event_lock);
2395		priv->mfunc.master.comm_wq =
2396			create_singlethread_workqueue("mlx4_comm");
2397		if (!priv->mfunc.master.comm_wq)
2398			goto err_slaves;
2399
2400		if (mlx4_init_resource_tracker(dev))
2401			goto err_thread;
2402
2403	} else {
2404		err = sync_toggles(dev);
2405		if (err) {
2406			mlx4_err(dev, "Couldn't sync toggles\n");
2407			goto err_comm;
2408		}
2409	}
2410	return 0;
2411
2412err_thread:
2413	flush_workqueue(priv->mfunc.master.comm_wq);
2414	destroy_workqueue(priv->mfunc.master.comm_wq);
2415err_slaves:
2416	while (--i) {
2417		for (port = 1; port <= MLX4_MAX_PORTS; port++)
2418			kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2419	}
2420	kfree(priv->mfunc.master.vf_oper);
2421err_comm_oper:
2422	kfree(priv->mfunc.master.vf_admin);
2423err_comm_admin:
2424	kfree(priv->mfunc.master.slave_state);
2425err_comm:
2426	iounmap(priv->mfunc.comm);
2427err_vhcr:
2428	dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2429			  priv->mfunc.vhcr,
2430			  priv->mfunc.vhcr_dma);
2431	priv->mfunc.vhcr = NULL;
2432	return -ENOMEM;
2433}
2434
2435int mlx4_cmd_init(struct mlx4_dev *dev)
2436{
2437	struct mlx4_priv *priv = mlx4_priv(dev);
2438	int flags = 0;
2439
2440	if (!priv->cmd.initialized) {
2441		mutex_init(&priv->cmd.slave_cmd_mutex);
2442		sema_init(&priv->cmd.poll_sem, 1);
2443		priv->cmd.use_events = 0;
2444		priv->cmd.toggle     = 1;
2445		priv->cmd.initialized = 1;
2446		flags |= MLX4_CMD_CLEANUP_STRUCT;
2447	}
2448
2449	if (!mlx4_is_slave(dev) && !priv->cmd.hcr) {
2450		priv->cmd.hcr = ioremap(pci_resource_start(dev->persist->pdev,
2451					0) + MLX4_HCR_BASE, MLX4_HCR_SIZE);
2452		if (!priv->cmd.hcr) {
2453			mlx4_err(dev, "Couldn't map command register\n");
2454			goto err;
2455		}
2456		flags |= MLX4_CMD_CLEANUP_HCR;
2457	}
2458
2459	if (mlx4_is_mfunc(dev) && !priv->mfunc.vhcr) {
2460		priv->mfunc.vhcr = dma_alloc_coherent(&dev->persist->pdev->dev,
2461						      PAGE_SIZE,
2462						      &priv->mfunc.vhcr_dma,
2463						      GFP_KERNEL);
2464		if (!priv->mfunc.vhcr)
2465			goto err;
2466
2467		flags |= MLX4_CMD_CLEANUP_VHCR;
2468	}
2469
2470	if (!priv->cmd.pool) {
2471		priv->cmd.pool = pci_pool_create("mlx4_cmd",
2472						 dev->persist->pdev,
2473						 MLX4_MAILBOX_SIZE,
2474						 MLX4_MAILBOX_SIZE, 0);
2475		if (!priv->cmd.pool)
2476			goto err;
2477
2478		flags |= MLX4_CMD_CLEANUP_POOL;
2479	}
2480
2481	return 0;
2482
2483err:
2484	mlx4_cmd_cleanup(dev, flags);
2485	return -ENOMEM;
2486}
2487
2488void mlx4_report_internal_err_comm_event(struct mlx4_dev *dev)
2489{
2490	struct mlx4_priv *priv = mlx4_priv(dev);
2491	int slave;
2492	u32 slave_read;
2493
2494	/* Report an internal error event to all
2495	 * communication channels.
2496	 */
2497	for (slave = 0; slave < dev->num_slaves; slave++) {
2498		slave_read = swab32(readl(&priv->mfunc.comm[slave].slave_read));
2499		slave_read |= (u32)COMM_CHAN_EVENT_INTERNAL_ERR;
2500		__raw_writel((__force u32)cpu_to_be32(slave_read),
2501			     &priv->mfunc.comm[slave].slave_read);
2502		/* Make sure that our comm channel write doesn't
2503		 * get mixed in with writes from another CPU.
2504		 */
2505		mmiowb();
2506	}
2507}
2508
2509void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
2510{
2511	struct mlx4_priv *priv = mlx4_priv(dev);
2512	int i, port;
2513
2514	if (mlx4_is_master(dev)) {
2515		flush_workqueue(priv->mfunc.master.comm_wq);
2516		destroy_workqueue(priv->mfunc.master.comm_wq);
2517		for (i = 0; i < dev->num_slaves; i++) {
2518			for (port = 1; port <= MLX4_MAX_PORTS; port++)
2519				kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2520		}
2521		kfree(priv->mfunc.master.slave_state);
2522		kfree(priv->mfunc.master.vf_admin);
2523		kfree(priv->mfunc.master.vf_oper);
2524		dev->num_slaves = 0;
2525	}
2526
2527	iounmap(priv->mfunc.comm);
2528}
2529
2530void mlx4_cmd_cleanup(struct mlx4_dev *dev, int cleanup_mask)
2531{
2532	struct mlx4_priv *priv = mlx4_priv(dev);
2533
2534	if (priv->cmd.pool && (cleanup_mask & MLX4_CMD_CLEANUP_POOL)) {
2535		pci_pool_destroy(priv->cmd.pool);
2536		priv->cmd.pool = NULL;
2537	}
2538
2539	if (!mlx4_is_slave(dev) && priv->cmd.hcr &&
2540	    (cleanup_mask & MLX4_CMD_CLEANUP_HCR)) {
2541		iounmap(priv->cmd.hcr);
2542		priv->cmd.hcr = NULL;
2543	}
2544	if (mlx4_is_mfunc(dev) && priv->mfunc.vhcr &&
2545	    (cleanup_mask & MLX4_CMD_CLEANUP_VHCR)) {
2546		dma_free_coherent(&dev->persist->pdev->dev, PAGE_SIZE,
2547				  priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
2548		priv->mfunc.vhcr = NULL;
2549	}
2550	if (priv->cmd.initialized && (cleanup_mask & MLX4_CMD_CLEANUP_STRUCT))
2551		priv->cmd.initialized = 0;
2552}
2553
2554/*
2555 * Switch to using events to issue FW commands (can only be called
2556 * after event queue for command events has been initialized).
2557 */
2558int mlx4_cmd_use_events(struct mlx4_dev *dev)
2559{
2560	struct mlx4_priv *priv = mlx4_priv(dev);
2561	int i;
2562	int err = 0;
2563
2564	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
2565				   sizeof (struct mlx4_cmd_context),
2566				   GFP_KERNEL);
2567	if (!priv->cmd.context)
2568		return -ENOMEM;
2569
2570	for (i = 0; i < priv->cmd.max_cmds; ++i) {
2571		priv->cmd.context[i].token = i;
2572		priv->cmd.context[i].next  = i + 1;
2573		/* To support fatal error flow, initialize all
2574		 * cmd contexts to allow simulating completions
2575		 * with complete() at any time.
2576		 */
2577		init_completion(&priv->cmd.context[i].done);
2578	}
2579
2580	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
2581	priv->cmd.free_head = 0;
2582
2583	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
2584	spin_lock_init(&priv->cmd.context_lock);
2585
2586	for (priv->cmd.token_mask = 1;
2587	     priv->cmd.token_mask < priv->cmd.max_cmds;
2588	     priv->cmd.token_mask <<= 1)
2589		; /* nothing */
2590	--priv->cmd.token_mask;
2591
2592	down(&priv->cmd.poll_sem);
2593	priv->cmd.use_events = 1;
2594
2595	return err;
2596}
2597
2598/*
2599 * Switch back to polling (used when shutting down the device)
2600 */
2601void mlx4_cmd_use_polling(struct mlx4_dev *dev)
2602{
2603	struct mlx4_priv *priv = mlx4_priv(dev);
2604	int i;
2605
2606	priv->cmd.use_events = 0;
2607
2608	for (i = 0; i < priv->cmd.max_cmds; ++i)
2609		down(&priv->cmd.event_sem);
2610
2611	kfree(priv->cmd.context);
2612
2613	up(&priv->cmd.poll_sem);
2614}
2615
2616struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
2617{
2618	struct mlx4_cmd_mailbox *mailbox;
2619
2620	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
2621	if (!mailbox)
2622		return ERR_PTR(-ENOMEM);
2623
2624	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
2625				      &mailbox->dma);
2626	if (!mailbox->buf) {
2627		kfree(mailbox);
2628		return ERR_PTR(-ENOMEM);
2629	}
2630
2631	memset(mailbox->buf, 0, MLX4_MAILBOX_SIZE);
2632
2633	return mailbox;
2634}
2635EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
2636
2637void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
2638			   struct mlx4_cmd_mailbox *mailbox)
2639{
2640	if (!mailbox)
2641		return;
2642
2643	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
2644	kfree(mailbox);
2645}
2646EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
2647
2648u32 mlx4_comm_get_version(void)
2649{
2650	 return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
2651}
2652
2653static int mlx4_get_slave_indx(struct mlx4_dev *dev, int vf)
2654{
2655	if ((vf < 0) || (vf >= dev->persist->num_vfs)) {
2656		mlx4_err(dev, "Bad vf number:%d (number of activated vf: %d)\n",
2657			 vf, dev->persist->num_vfs);
2658		return -EINVAL;
2659	}
2660
2661	return vf+1;
2662}
2663
2664int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave)
2665{
2666	if (slave < 1 || slave > dev->persist->num_vfs) {
2667		mlx4_err(dev,
2668			 "Bad slave number:%d (number of activated slaves: %lu)\n",
2669			 slave, dev->num_slaves);
2670		return -EINVAL;
2671	}
2672	return slave - 1;
2673}
2674
2675void mlx4_cmd_wake_completions(struct mlx4_dev *dev)
2676{
2677	struct mlx4_priv *priv = mlx4_priv(dev);
2678	struct mlx4_cmd_context *context;
2679	int i;
2680
2681	spin_lock(&priv->cmd.context_lock);
2682	if (priv->cmd.context) {
2683		for (i = 0; i < priv->cmd.max_cmds; ++i) {
2684			context = &priv->cmd.context[i];
2685			context->fw_status = CMD_STAT_INTERNAL_ERR;
2686			context->result    =
2687				mlx4_status_to_errno(CMD_STAT_INTERNAL_ERR);
2688			complete(&context->done);
2689		}
2690	}
2691	spin_unlock(&priv->cmd.context_lock);
2692}
2693
2694struct mlx4_active_ports mlx4_get_active_ports(struct mlx4_dev *dev, int slave)
2695{
2696	struct mlx4_active_ports actv_ports;
2697	int vf;
2698
2699	bitmap_zero(actv_ports.ports, MLX4_MAX_PORTS);
2700
2701	if (slave == 0) {
2702		bitmap_fill(actv_ports.ports, dev->caps.num_ports);
2703		return actv_ports;
2704	}
2705
2706	vf = mlx4_get_vf_indx(dev, slave);
2707	if (vf < 0)
2708		return actv_ports;
2709
2710	bitmap_set(actv_ports.ports, dev->dev_vfs[vf].min_port - 1,
2711		   min((int)dev->dev_vfs[mlx4_get_vf_indx(dev, slave)].n_ports,
2712		   dev->caps.num_ports));
2713
2714	return actv_ports;
2715}
2716EXPORT_SYMBOL_GPL(mlx4_get_active_ports);
2717
2718int mlx4_slave_convert_port(struct mlx4_dev *dev, int slave, int port)
2719{
2720	unsigned n;
2721	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2722	unsigned m = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2723
2724	if (port <= 0 || port > m)
2725		return -EINVAL;
2726
2727	n = find_first_bit(actv_ports.ports, dev->caps.num_ports);
2728	if (port <= n)
2729		port = n + 1;
2730
2731	return port;
2732}
2733EXPORT_SYMBOL_GPL(mlx4_slave_convert_port);
2734
2735int mlx4_phys_to_slave_port(struct mlx4_dev *dev, int slave, int port)
2736{
2737	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2738	if (test_bit(port - 1, actv_ports.ports))
2739		return port -
2740			find_first_bit(actv_ports.ports, dev->caps.num_ports);
2741
2742	return -1;
2743}
2744EXPORT_SYMBOL_GPL(mlx4_phys_to_slave_port);
2745
2746struct mlx4_slaves_pport mlx4_phys_to_slaves_pport(struct mlx4_dev *dev,
2747						   int port)
2748{
2749	unsigned i;
2750	struct mlx4_slaves_pport slaves_pport;
2751
2752	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2753
2754	if (port <= 0 || port > dev->caps.num_ports)
2755		return slaves_pport;
2756
2757	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2758		struct mlx4_active_ports actv_ports =
2759			mlx4_get_active_ports(dev, i);
2760		if (test_bit(port - 1, actv_ports.ports))
2761			set_bit(i, slaves_pport.slaves);
2762	}
2763
2764	return slaves_pport;
2765}
2766EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport);
2767
2768struct mlx4_slaves_pport mlx4_phys_to_slaves_pport_actv(
2769		struct mlx4_dev *dev,
2770		const struct mlx4_active_ports *crit_ports)
2771{
2772	unsigned i;
2773	struct mlx4_slaves_pport slaves_pport;
2774
2775	bitmap_zero(slaves_pport.slaves, MLX4_MFUNC_MAX);
2776
2777	for (i = 0; i < dev->persist->num_vfs + 1; i++) {
2778		struct mlx4_active_ports actv_ports =
2779			mlx4_get_active_ports(dev, i);
2780		if (bitmap_equal(crit_ports->ports, actv_ports.ports,
2781				 dev->caps.num_ports))
2782			set_bit(i, slaves_pport.slaves);
2783	}
2784
2785	return slaves_pport;
2786}
2787EXPORT_SYMBOL_GPL(mlx4_phys_to_slaves_pport_actv);
2788
2789static int mlx4_slaves_closest_port(struct mlx4_dev *dev, int slave, int port)
2790{
2791	struct mlx4_active_ports actv_ports = mlx4_get_active_ports(dev, slave);
2792	int min_port = find_first_bit(actv_ports.ports, dev->caps.num_ports)
2793			+ 1;
2794	int max_port = min_port +
2795		bitmap_weight(actv_ports.ports, dev->caps.num_ports);
2796
2797	if (port < min_port)
2798		port = min_port;
2799	else if (port >= max_port)
2800		port = max_port - 1;
2801
2802	return port;
2803}
2804
2805static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
2806			      int max_tx_rate)
2807{
2808	int i;
2809	int err;
2810	struct mlx4_qos_manager *port_qos;
2811	struct mlx4_dev *dev = &priv->dev;
2812	struct mlx4_vport_qos_param vpp_qos[MLX4_NUM_UP];
2813
2814	port_qos = &priv->mfunc.master.qos_ctl[port];
2815	memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
2816
2817	if (slave > port_qos->num_of_qos_vfs) {
2818		mlx4_info(dev, "No availible VPP resources for this VF\n");
2819		return -EINVAL;
2820	}
2821
2822	/* Query for default QoS values from Vport 0 is needed */
2823	err = mlx4_SET_VPORT_QOS_get(dev, port, 0, vpp_qos);
2824	if (err) {
2825		mlx4_info(dev, "Failed to query Vport 0 QoS values\n");
2826		return err;
2827	}
2828
2829	for (i = 0; i < MLX4_NUM_UP; i++) {
2830		if (test_bit(i, port_qos->priority_bm) && max_tx_rate) {
2831			vpp_qos[i].max_avg_bw = max_tx_rate;
2832			vpp_qos[i].enable = 1;
2833		} else {
2834			/* if user supplied tx_rate == 0, meaning no rate limit
2835			 * configuration is required. so we are leaving the
2836			 * value of max_avg_bw as queried from Vport 0.
2837			 */
2838			vpp_qos[i].enable = 0;
2839		}
2840	}
2841
2842	err = mlx4_SET_VPORT_QOS_set(dev, port, slave, vpp_qos);
2843	if (err) {
2844		mlx4_info(dev, "Failed to set Vport %d QoS values\n", slave);
2845		return err;
2846	}
2847
2848	return 0;
2849}
2850
2851static bool mlx4_is_vf_vst_and_prio_qos(struct mlx4_dev *dev, int port,
2852					struct mlx4_vport_state *vf_admin)
2853{
2854	struct mlx4_qos_manager *info;
2855	struct mlx4_priv *priv = mlx4_priv(dev);
2856
2857	if (!mlx4_is_master(dev) ||
2858	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2859		return false;
2860
2861	info = &priv->mfunc.master.qos_ctl[port];
2862
2863	if (vf_admin->default_vlan != MLX4_VGT &&
2864	    test_bit(vf_admin->default_qos, info->priority_bm))
2865		return true;
2866
2867	return false;
2868}
2869
2870static bool mlx4_valid_vf_state_change(struct mlx4_dev *dev, int port,
2871				       struct mlx4_vport_state *vf_admin,
2872				       int vlan, int qos)
2873{
2874	struct mlx4_vport_state dummy_admin = {0};
2875
2876	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) ||
2877	    !vf_admin->tx_rate)
2878		return true;
2879
2880	dummy_admin.default_qos = qos;
2881	dummy_admin.default_vlan = vlan;
2882
2883	/* VF wants to move to other VST state which is valid with current
2884	 * rate limit. Either differnt default vlan in VST or other
2885	 * supported QoS priority. Otherwise we don't allow this change when
2886	 * the TX rate is still configured.
2887	 */
2888	if (mlx4_is_vf_vst_and_prio_qos(dev, port, &dummy_admin))
2889		return true;
2890
2891	mlx4_info(dev, "Cannot change VF state to %s while rate is set\n",
2892		  (vlan == MLX4_VGT) ? "VGT" : "VST");
2893
2894	if (vlan != MLX4_VGT)
2895		mlx4_info(dev, "VST priority %d not supported for QoS\n", qos);
2896
2897	mlx4_info(dev, "Please set rate to 0 prior to this VF state change\n");
2898
2899	return false;
2900}
2901
2902int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
2903{
2904	struct mlx4_priv *priv = mlx4_priv(dev);
2905	struct mlx4_vport_state *s_info;
2906	int slave;
2907
2908	if (!mlx4_is_master(dev))
2909		return -EPROTONOSUPPORT;
2910
2911	slave = mlx4_get_slave_indx(dev, vf);
2912	if (slave < 0)
2913		return -EINVAL;
2914
2915	port = mlx4_slaves_closest_port(dev, slave, port);
2916	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
2917	s_info->mac = mac;
2918	mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
2919		  vf, port, s_info->mac);
2920	return 0;
2921}
2922EXPORT_SYMBOL_GPL(mlx4_set_vf_mac);
2923
2924
2925int mlx4_set_vf_vlan(struct mlx4_dev *dev, int port, int vf, u16 vlan, u8 qos)
2926{
2927	struct mlx4_priv *priv = mlx4_priv(dev);
2928	struct mlx4_vport_state *vf_admin;
2929	int slave;
2930
2931	if ((!mlx4_is_master(dev)) ||
2932	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VLAN_CONTROL))
2933		return -EPROTONOSUPPORT;
2934
2935	if ((vlan > 4095) || (qos > 7))
2936		return -EINVAL;
2937
2938	slave = mlx4_get_slave_indx(dev, vf);
2939	if (slave < 0)
2940		return -EINVAL;
2941
2942	port = mlx4_slaves_closest_port(dev, slave, port);
2943	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2944
2945	if (!mlx4_valid_vf_state_change(dev, port, vf_admin, vlan, qos))
2946		return -EPERM;
2947
2948	if ((0 == vlan) && (0 == qos))
2949		vf_admin->default_vlan = MLX4_VGT;
2950	else
2951		vf_admin->default_vlan = vlan;
2952	vf_admin->default_qos = qos;
2953
2954	/* If rate was configured prior to VST, we saved the configured rate
2955	 * in vf_admin->rate and now, if priority supported we enforce the QoS
2956	 */
2957	if (mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin) &&
2958	    vf_admin->tx_rate)
2959		vf_admin->qos_vport = slave;
2960
2961	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
2962		mlx4_info(dev,
2963			  "updating vf %d port %d config will take effect on next VF restart\n",
2964			  vf, port);
2965	return 0;
2966}
2967EXPORT_SYMBOL_GPL(mlx4_set_vf_vlan);
2968
2969int mlx4_set_vf_rate(struct mlx4_dev *dev, int port, int vf, int min_tx_rate,
2970		     int max_tx_rate)
2971{
2972	int err;
2973	int slave;
2974	struct mlx4_vport_state *vf_admin;
2975	struct mlx4_priv *priv = mlx4_priv(dev);
2976
2977	if (!mlx4_is_master(dev) ||
2978	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP))
2979		return -EPROTONOSUPPORT;
2980
2981	if (min_tx_rate) {
2982		mlx4_info(dev, "Minimum BW share not supported\n");
2983		return -EPROTONOSUPPORT;
2984	}
2985
2986	slave = mlx4_get_slave_indx(dev, vf);
2987	if (slave < 0)
2988		return -EINVAL;
2989
2990	port = mlx4_slaves_closest_port(dev, slave, port);
2991	vf_admin = &priv->mfunc.master.vf_admin[slave].vport[port];
2992
2993	err = mlx4_set_vport_qos(priv, slave, port, max_tx_rate);
2994	if (err) {
2995		mlx4_info(dev, "vf %d failed to set rate %d\n", vf,
2996			  max_tx_rate);
2997		return err;
2998	}
2999
3000	vf_admin->tx_rate = max_tx_rate;
3001	/* if VF is not in supported mode (VST with supported prio),
3002	 * we do not change vport configuration for its QPs, but save
3003	 * the rate, so it will be enforced when it moves to supported
3004	 * mode next time.
3005	 */
3006	if (!mlx4_is_vf_vst_and_prio_qos(dev, port, vf_admin)) {
3007		mlx4_info(dev,
3008			  "rate set for VF %d when not in valid state\n", vf);
3009
3010		if (vf_admin->default_vlan != MLX4_VGT)
3011			mlx4_info(dev, "VST priority not supported by QoS\n");
3012		else
3013			mlx4_info(dev, "VF in VGT mode (needed VST)\n");
3014
3015		mlx4_info(dev,
3016			  "rate %d take affect when VF moves to valid state\n",
3017			  max_tx_rate);
3018		return 0;
3019	}
3020
3021	/* If user sets rate 0 assigning default vport for its QPs */
3022	vf_admin->qos_vport = max_tx_rate ? slave : MLX4_VPP_DEFAULT_VPORT;
3023
3024	if (priv->mfunc.master.slave_state[slave].active &&
3025	    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP)
3026		mlx4_master_immediate_activate_vlan_qos(priv, slave, port);
3027
3028	return 0;
3029}
3030EXPORT_SYMBOL_GPL(mlx4_set_vf_rate);
3031
3032 /* mlx4_get_slave_default_vlan -
3033 * return true if VST ( default vlan)
3034 * if VST, will return vlan & qos (if not NULL)
3035 */
3036bool mlx4_get_slave_default_vlan(struct mlx4_dev *dev, int port, int slave,
3037				 u16 *vlan, u8 *qos)
3038{
3039	struct mlx4_vport_oper_state *vp_oper;
3040	struct mlx4_priv *priv;
3041
3042	priv = mlx4_priv(dev);
3043	port = mlx4_slaves_closest_port(dev, slave, port);
3044	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
3045
3046	if (MLX4_VGT != vp_oper->state.default_vlan) {
3047		if (vlan)
3048			*vlan = vp_oper->state.default_vlan;
3049		if (qos)
3050			*qos = vp_oper->state.default_qos;
3051		return true;
3052	}
3053	return false;
3054}
3055EXPORT_SYMBOL_GPL(mlx4_get_slave_default_vlan);
3056
3057int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
3058{
3059	struct mlx4_priv *priv = mlx4_priv(dev);
3060	struct mlx4_vport_state *s_info;
3061	int slave;
3062
3063	if ((!mlx4_is_master(dev)) ||
3064	    !(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FSM))
3065		return -EPROTONOSUPPORT;
3066
3067	slave = mlx4_get_slave_indx(dev, vf);
3068	if (slave < 0)
3069		return -EINVAL;
3070
3071	port = mlx4_slaves_closest_port(dev, slave, port);
3072	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3073	s_info->spoofchk = setting;
3074
3075	return 0;
3076}
3077EXPORT_SYMBOL_GPL(mlx4_set_vf_spoofchk);
3078
3079int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_info *ivf)
3080{
3081	struct mlx4_priv *priv = mlx4_priv(dev);
3082	struct mlx4_vport_state *s_info;
3083	int slave;
3084
3085	if (!mlx4_is_master(dev))
3086		return -EPROTONOSUPPORT;
3087
3088	slave = mlx4_get_slave_indx(dev, vf);
3089	if (slave < 0)
3090		return -EINVAL;
3091
3092	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3093	ivf->vf = vf;
3094
3095	/* need to convert it to a func */
3096	ivf->mac[0] = ((s_info->mac >> (5*8)) & 0xff);
3097	ivf->mac[1] = ((s_info->mac >> (4*8)) & 0xff);
3098	ivf->mac[2] = ((s_info->mac >> (3*8)) & 0xff);
3099	ivf->mac[3] = ((s_info->mac >> (2*8)) & 0xff);
3100	ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
3101	ivf->mac[5] = ((s_info->mac)  & 0xff);
3102
3103	ivf->vlan		= s_info->default_vlan;
3104	ivf->qos		= s_info->default_qos;
3105
3106	if (mlx4_is_vf_vst_and_prio_qos(dev, port, s_info))
3107		ivf->max_tx_rate = s_info->tx_rate;
3108	else
3109		ivf->max_tx_rate = 0;
3110
3111	ivf->min_tx_rate	= 0;
3112	ivf->spoofchk		= s_info->spoofchk;
3113	ivf->linkstate		= s_info->link_state;
3114
3115	return 0;
3116}
3117EXPORT_SYMBOL_GPL(mlx4_get_vf_config);
3118
3119int mlx4_set_vf_link_state(struct mlx4_dev *dev, int port, int vf, int link_state)
3120{
3121	struct mlx4_priv *priv = mlx4_priv(dev);
3122	struct mlx4_vport_state *s_info;
3123	int slave;
3124	u8 link_stat_event;
3125
3126	slave = mlx4_get_slave_indx(dev, vf);
3127	if (slave < 0)
3128		return -EINVAL;
3129
3130	port = mlx4_slaves_closest_port(dev, slave, port);
3131	switch (link_state) {
3132	case IFLA_VF_LINK_STATE_AUTO:
3133		/* get current link state */
3134		if (!priv->sense.do_sense_port[port])
3135			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
3136		else
3137			link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
3138	    break;
3139
3140	case IFLA_VF_LINK_STATE_ENABLE:
3141		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_ACTIVE;
3142	    break;
3143
3144	case IFLA_VF_LINK_STATE_DISABLE:
3145		link_stat_event = MLX4_PORT_CHANGE_SUBTYPE_DOWN;
3146	    break;
3147
3148	default:
3149		mlx4_warn(dev, "unknown value for link_state %02x on slave %d port %d\n",
3150			  link_state, slave, port);
3151		return -EINVAL;
3152	};
3153	s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
3154	s_info->link_state = link_state;
3155
3156	/* send event */
3157	mlx4_gen_port_state_change_eqe(dev, slave, port, link_stat_event);
3158
3159	if (mlx4_master_immediate_activate_vlan_qos(priv, slave, port))
3160		mlx4_dbg(dev,
3161			 "updating vf %d port %d no link state HW enforcment\n",
3162			 vf, port);
3163	return 0;
3164}
3165EXPORT_SYMBOL_GPL(mlx4_set_vf_link_state);
3166
3167int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port)
3168{
3169	struct mlx4_priv *priv = mlx4_priv(dev);
3170
3171	if (slave < 1 || slave >= dev->num_slaves ||
3172	    port < 1 || port > MLX4_MAX_PORTS)
3173		return 0;
3174
3175	return priv->mfunc.master.vf_oper[slave].smi_enabled[port] ==
3176		MLX4_VF_SMI_ENABLED;
3177}
3178EXPORT_SYMBOL_GPL(mlx4_vf_smi_enabled);
3179
3180int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port)
3181{
3182	struct mlx4_priv *priv = mlx4_priv(dev);
3183
3184	if (slave == mlx4_master_func_num(dev))
3185		return 1;
3186
3187	if (slave < 1 || slave >= dev->num_slaves ||
3188	    port < 1 || port > MLX4_MAX_PORTS)
3189		return 0;
3190
3191	return priv->mfunc.master.vf_admin[slave].enable_smi[port] ==
3192		MLX4_VF_SMI_ENABLED;
3193}
3194EXPORT_SYMBOL_GPL(mlx4_vf_get_enable_smi_admin);
3195
3196int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
3197				 int enabled)
3198{
3199	struct mlx4_priv *priv = mlx4_priv(dev);
3200
3201	if (slave == mlx4_master_func_num(dev))
3202		return 0;
3203
3204	if (slave < 1 || slave >= dev->num_slaves ||
3205	    port < 1 || port > MLX4_MAX_PORTS ||
3206	    enabled < 0 || enabled > 1)
3207		return -EINVAL;
3208
3209	priv->mfunc.master.vf_admin[slave].enable_smi[port] = enabled;
3210	return 0;
3211}
3212EXPORT_SYMBOL_GPL(mlx4_vf_set_enable_smi_admin);
3213