/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/errno.h>

#include "mlx4.h"

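/* Per-(device, interface) binding: tracks the context returned by the
 * interface's add() callback so it can be handed back to remove(),
 * event() and get_dev().
 */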
struct mlx4_device_context {
	struct list_head	list;
	struct list_head	bond_list;
	struct mlx4_interface  *intf;
	void		       *context;
};

static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

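/* Bind one interface to one device: allocate a device context, call the
 * interface's add() callback and, if it returns a context, link the
 * context onto the device's ctx_list and activate it when the interface
 * provides an activate() callback.
 */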
static void mlx4_add_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
	struct mlx4_device_context *dev_ctx;

	dev_ctx = kmalloc(sizeof *dev_ctx, GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf = intf;
	dev_ctx->context = intf->add(&priv->dev);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
		if (intf->activate)
			intf->activate(&priv->dev, dev_ctx->context);
	} else
		kfree(dev_ctx);
}

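/* Undo mlx4_add_device(): unlink the matching device context from
 * ctx_list and call the interface's remove() callback.
 */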
static void mlx4_remove_device(struct mlx4_interface *intf, struct mlx4_priv *priv)
{
	struct mlx4_device_context *dev_ctx;

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(&priv->dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}

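/* Register a protocol interface and attach it to every device already on
 * dev_list.  Bonding (MLX4_INTFF_BONDING) is dropped for the interface
 * when a device runs in multi-function (SRIOV) mode.
 */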
int mlx4_register_interface(struct mlx4_interface *intf)
{
	struct mlx4_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);

	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list) {
		if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
			mlx4_dbg(&priv->dev,
				 "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
			intf->flags &= ~MLX4_INTFF_BONDING;
		}
		mlx4_add_device(intf, priv);
	}

	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(mlx4_register_interface);

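/* Detach an interface from every device and drop it from intf_list. */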
void mlx4_unregister_interface(struct mlx4_interface *intf)
{
	struct mlx4_priv *priv;

	mutex_lock(&intf_mutex);

	list_for_each_entry(priv, &dev_list, dev_list)
		mlx4_remove_device(intf, priv);

	list_del(&intf->list);

	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL_GPL(mlx4_unregister_interface);

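/* Enable or disable bonded (port-remap/HA) mode.  Interfaces that set
 * MLX4_INTFF_BONDING are removed and re-added so they restart with the
 * new mode; all other interfaces keep running untouched.
 */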
int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx;
	unsigned long flags;
	int ret;
	LIST_HEAD(bond_list);

	if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP))
		return -ENOTSUPP;

	ret = mlx4_disable_rx_port_check(dev, enable);
	if (ret) {
		mlx4_err(dev, "Failed to %s rx port check\n",
			 enable ? "enable" : "disable");
		return ret;
	}
	if (enable) {
		dev->flags |= MLX4_FLAG_BONDED;
	} else {
		ret = mlx4_virt2phy_port_map(dev, 1, 2);
		if (ret) {
			mlx4_err(dev, "Failed to reset port map\n");
			return ret;
		}
		dev->flags &= ~MLX4_FLAG_BONDED;
	}

	spin_lock_irqsave(&priv->ctx_lock, flags);
	list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) {
		if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) {
			list_add_tail(&dev_ctx->bond_list, &bond_list);
			list_del(&dev_ctx->list);
		}
	}
	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &bond_list, bond_list) {
		dev_ctx->intf->remove(dev, dev_ctx->context);
		dev_ctx->context = dev_ctx->intf->add(dev);

		spin_lock_irqsave(&priv->ctx_lock, flags);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irqrestore(&priv->ctx_lock, flags);

		mlx4_dbg(dev, "Interface for protocol %d restarted with bonded mode %s\n",
			 dev_ctx->intf->protocol, enable ?
			 "enabled" : "disabled");
	}
	return 0;
}

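/* Notify every interface attached to @dev of a device event via its
 * event() callback, if one is registered.
 */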
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
			 unsigned long param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, type, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

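/* Announce a new device: mark it up, add it to dev_list, attach every
 * registered interface to it and start catastrophic-error polling.
 */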
int mlx4_register_device(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_interface *intf;

	mutex_lock(&intf_mutex);

	dev->persist->interface_state |= MLX4_INTERFACE_STATE_UP;
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx4_add_device(intf, priv);

	mutex_unlock(&intf_mutex);
	mlx4_start_catas_poll(dev);

	return 0;
}

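/* Tear down a device: stop catastrophic-error polling, detach every
 * registered interface and remove the device from dev_list.
 */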
void mlx4_unregister_device(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_interface *intf;

	mlx4_stop_catas_poll(dev);
	mutex_lock(&intf_mutex);

	list_for_each_entry(intf, &intf_list, list)
		mlx4_remove_device(intf, priv);

	list_del(&priv->dev_list);
	dev->persist->interface_state &= ~MLX4_INTERFACE_STATE_UP;

	mutex_unlock(&intf_mutex);
}

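/* Look up the protocol-specific device object that the interface for
 * @proto returns for @port via its get_dev() callback, or NULL if no
 * such interface is attached.
 */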
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->protocol == proto && dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev, dev_ctx->context, port);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL_GPL(mlx4_get_protocol_dev);