/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/errno.h>

#include "mlx4.h"

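/*
 * Per-device, per-interface binding: ties a registered mlx4_interface to
 * the driver-private context it returned from ->add() for that device.
 */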
struct mlx4_device_context {
	struct list_head	list;
	struct list_head	bond_list;
	struct mlx4_interface  *intf;
	void		       *context;
};

static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

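/*
 * Attach one interface to one device: invoke the interface's ->add() hook
 * and, if it returns a context, track the pairing on the device's ctx_list.
 * Called with intf_mutex held; allocation or ->add() failures are ignored.
 */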
static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
	struct mlx4_device_context *dev_ctx;

	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf    = intf;
	dev_ctx->context = intf->add(&priv->dev);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

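/*
 * Undo mlx4_add_device(): find the context created for @intf on this
 * device, unlink it and let the interface tear it down via ->remove().
 * Called with intf_mutex held.
 */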
static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
	struct mlx4_device_context *dev_ctx;

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(&priv->dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

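/*
 * Register a consumer of mlx4 devices (such as the Ethernet or InfiniBand
 * driver) and attach it to every device that has already been registered.
 * If any device runs in multi-function (SRIOV) mode, the interface's
 * bonding (HA) support is disabled before attaching.
 *
 * Minimal usage sketch of a consumer; the foo_* names are illustrative
 * only, and a real consumer fills in whichever callbacks it needs:
 *
 *	static void *foo_add(struct mlx4_dev *dev);	    per-device state,
 *							    NULL on failure
 *	static void foo_remove(struct mlx4_dev *dev, void *context);
 *
 *	static struct mlx4_interface foo_interface = {
 *		.add		= foo_add,
 *		.remove		= foo_remove,
 *		.protocol	= MLX4_PROT_ETH,
 *	};
 *
 *	err = mlx4_register_interface(&foo_interface);
 *	...
 *	mlx4_unregister_interface(&foo_interface);
 */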
int mlx4_register_interface(struct mlx4_interface *intf)
{
	struct mlx4_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);

	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list) {
		if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
			mlx4_dbg(&priv->dev,
				 "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
			intf->flags &= ~MLX4_INTFF_BONDING;
		}
		mlx4_add_device(intf, priv);
	}

	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_register_interface);

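/*
 * Detach @intf from every registered device and remove it from the global
 * interface list.
 */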
void mlx4_unregister_interface(struct mlx4_interface *intf)
{
	struct mlx4_priv *priv;

	mutex_lock(&intf_mutex);

	list_for_each_entry(priv, &dev_list, dev_list)
		mlx4_remove_device(intf, priv);

	list_del(&intf->list);

	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_interface);

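/*
 * Move the device in or out of port-remap ("bonded"/HA) mode: toggle the
 * RX port check, restore the default virt-to-phys port mapping when
 * leaving bonded mode, and restart every bonding-aware interface so it
 * re-attaches with the new mode in effect.
 */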
int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
	unsigned long flags;
	int ret;
	LIST_HEAD(bond_list);

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -ENOTSUPP;

	ret = mlx4_disable_rx_port_check(dev, enable);
	if (ret) {
		mlx4_err(dev, "Failed to %s rx port check\n",
			 enable ? "enable" : "disable");
		return ret;
	}
	if (enable) {
		dev->flags |= MLX4_FLAG_BONDED;
	} else {
		ret = mlx4_virt2phy_port_map(dev, 1, 2);
		if (ret) {
			mlx4_err(dev, "Failed to reset port map\n");
			return ret;
		}
		dev->flags &= ~MLX4_FLAG_BONDED;
	}

	spin_lock_irqsave(&priv->ctx_lock, flags);
	list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
		if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
			list_add_tail(&dev_ctx->bond_list, &bond_list);
			list_del(&dev_ctx->list);
		}
	}
	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &bond_list, bond_list) {
		dev_ctx->intf->remove(dev, dev_ctx->context);
		dev_ctx->context = dev_ctx->intf->add(dev);

		spin_lock_irqsave(&priv->ctx_lock, flags);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irqrestore(&priv->ctx_lock, flags);

		mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
			 dev_ctx->intf->protocol, enable ?
			 "enabled" : "disabled");
	}
	return 0;
}

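/*
 * Fan an asynchronous device event out to every interface that supplied an
 * ->event() callback.  Runs under ctx_lock with interrupts disabled, so the
 * callbacks must not sleep.
 */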
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
			 unsigned long param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, type, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

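/*
 * Called once a device is fully initialized: mark it as up, add it to the
 * global device list, attach all currently registered interfaces to it and
 * start catastrophic-error polling.
 */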
int mlx4_register_device(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_interface *intf;

	mutex_lock(&intf_mutex);

	dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx4_add_device(intf, priv);

	mutex_unlock(&intf_mutex);
	mlx4_start_catas_poll(dev);

	return 0;
}

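/*
 * Reverse of mlx4_register_device(): stop catastrophic-error polling and
 * detach every interface before taking the device off the global list.
 */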
void mlx4_unregister_device(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_interface *intf;

	mlx4_stop_catas_poll(dev);
	mutex_lock(&intf_mutex);

	list_for_each_entry(intf, &intf_list, list)
		mlx4_remove_device(intf, priv);

	list_del(&priv->dev_list);
	dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;

	mutex_unlock(&intf_mutex);
}

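/*
 * Return the protocol-specific device (for example a net_device) that the
 * interface handling @proto exposes for @port, via its ->get_dev() hook,
 * or NULL if no such interface is attached.
 */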
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);