This source file includes the following definitions.
- mlxsw_sp_bridge_device_find
- mlxsw_sp_bridge_device_is_offloaded
- mlxsw_sp_bridge_device_upper_rif_destroy
- mlxsw_sp_bridge_device_rifs_destroy
- mlxsw_sp_bridge_device_create
- mlxsw_sp_bridge_device_destroy
- mlxsw_sp_bridge_device_get
- mlxsw_sp_bridge_device_put
- __mlxsw_sp_bridge_port_find
- mlxsw_sp_bridge_port_find
- mlxsw_sp_bridge_port_create
- mlxsw_sp_bridge_port_destroy
- mlxsw_sp_bridge_port_get
- mlxsw_sp_bridge_port_put
- mlxsw_sp_port_vlan_find_by_bridge
- mlxsw_sp_port_vlan_find_by_fid
- mlxsw_sp_bridge_vlan_find
- mlxsw_sp_bridge_vlan_create
- mlxsw_sp_bridge_vlan_destroy
- mlxsw_sp_bridge_vlan_get
- mlxsw_sp_bridge_vlan_put
- mlxsw_sp_port_bridge_vlan_stp_set
- mlxsw_sp_port_attr_stp_state_set
- mlxsw_sp_port_bridge_vlan_flood_set
- mlxsw_sp_bridge_port_flood_table_set
- mlxsw_sp_port_bridge_vlan_learning_set
- mlxsw_sp_bridge_port_learning_set
- mlxsw_sp_port_attr_br_pre_flags_set
- mlxsw_sp_port_attr_br_flags_set
- mlxsw_sp_ageing_set
- mlxsw_sp_port_attr_br_ageing_set
- mlxsw_sp_port_attr_br_vlan_set
- mlxsw_sp_port_attr_mrouter_set
- mlxsw_sp_mc_flood
- mlxsw_sp_port_mc_disabled_set
- mlxsw_sp_smid_router_port_set
- mlxsw_sp_bridge_mrouter_update_mdb
- mlxsw_sp_port_attr_br_mrouter_set
- mlxsw_sp_port_attr_set
- mlxsw_sp_port_vlan_fid_join
- mlxsw_sp_port_vlan_fid_leave
- mlxsw_sp_port_pvid_determine
- mlxsw_sp_port_vlan_bridge_join
- mlxsw_sp_port_vlan_bridge_leave
- mlxsw_sp_bridge_port_vlan_add
- mlxsw_sp_br_ban_rif_pvid_change
- mlxsw_sp_port_vlans_add
- mlxsw_sp_fdb_flush_type
- mlxsw_sp_bridge_port_fdb_flush
- mlxsw_sp_sfd_rec_policy
- mlxsw_sp_sfd_op
- mlxsw_sp_port_fdb_tunnel_uc_op
- __mlxsw_sp_port_fdb_uc_op
- mlxsw_sp_port_fdb_uc_op
- mlxsw_sp_rif_fdb_op
- mlxsw_sp_port_fdb_uc_lag_op
- mlxsw_sp_port_fdb_set
- mlxsw_sp_port_mdb_op
- mlxsw_sp_port_smid_full_entry
- mlxsw_sp_port_smid_set
- __mlxsw_sp_mc_get
- mlxsw_sp_bridge_port_get_ports_bitmap
- mlxsw_sp_mc_get_mrouters_bitmap
- mlxsw_sp_mc_write_mdb_entry
- mlxsw_sp_mc_remove_mdb_entry
- __mlxsw_sp_mc_alloc
- mlxsw_sp_port_remove_from_mid
- mlxsw_sp_port_mdb_add
- mlxsw_sp_bridge_mdb_mc_enable_sync
- mlxsw_sp_port_mrouter_update_mdb
- mlxsw_sp_span_respin_work
- mlxsw_sp_span_respin_schedule
- mlxsw_sp_port_obj_add
- mlxsw_sp_bridge_port_vlan_del
- mlxsw_sp_port_vlans_del
- __mlxsw_sp_port_mdb_del
- mlxsw_sp_port_mdb_del
- mlxsw_sp_bridge_port_mdb_flush
- mlxsw_sp_port_obj_del
- mlxsw_sp_lag_rep_port
- mlxsw_sp_bridge_8021q_port_join
- mlxsw_sp_bridge_8021q_port_leave
- mlxsw_sp_bridge_8021q_vxlan_join
- mlxsw_sp_bridge_8021q_vxlan_dev_find
- mlxsw_sp_bridge_8021q_fid_get
- mlxsw_sp_bridge_8021q_fid_lookup
- mlxsw_sp_bridge_8021q_fid_vid
- mlxsw_sp_port_is_br_member
- mlxsw_sp_bridge_8021d_port_join
- mlxsw_sp_bridge_8021d_port_leave
- mlxsw_sp_bridge_8021d_vxlan_join
- mlxsw_sp_bridge_8021d_fid_get
- mlxsw_sp_bridge_8021d_fid_lookup
- mlxsw_sp_bridge_8021d_fid_vid
- mlxsw_sp_port_bridge_join
- mlxsw_sp_port_bridge_leave
- mlxsw_sp_bridge_vxlan_join
- mlxsw_sp_bridge_vxlan_leave
- mlxsw_sp_bridge_fid_get
- mlxsw_sp_switchdev_vxlan_addr_convert
- mlxsw_sp_switchdev_addr_vxlan_convert
- mlxsw_sp_fdb_vxlan_call_notifiers
- mlxsw_sp_fdb_nve_call_notifiers
- mlxsw_sp_fdb_call_notifiers
- mlxsw_sp_fdb_notify_mac_process
- mlxsw_sp_fdb_notify_mac_lag_process
- __mlxsw_sp_fdb_notify_mac_uc_tunnel_process
- mlxsw_sp_fdb_notify_mac_uc_tunnel_process
- mlxsw_sp_fdb_notify_rec_process
- mlxsw_sp_fdb_notify_work_schedule
- mlxsw_sp_fdb_notify_work
- mlxsw_sp_switchdev_bridge_vxlan_fdb_event
- mlxsw_sp_switchdev_bridge_nve_fdb_event
- mlxsw_sp_switchdev_bridge_fdb_event_work
- mlxsw_sp_switchdev_vxlan_fdb_add
- mlxsw_sp_switchdev_vxlan_fdb_del
- mlxsw_sp_switchdev_vxlan_fdb_event_work
- mlxsw_sp_switchdev_vxlan_work_prepare
- mlxsw_sp_switchdev_event
- mlxsw_sp_switchdev_vxlan_vlan_add
- mlxsw_sp_switchdev_vxlan_vlan_del
- mlxsw_sp_switchdev_vxlan_vlans_add
- mlxsw_sp_switchdev_vxlan_vlans_del
- mlxsw_sp_switchdev_handle_vxlan_obj_add
- mlxsw_sp_switchdev_handle_vxlan_obj_del
- mlxsw_sp_switchdev_blocking_event
- mlxsw_sp_bridge_port_stp_state
- mlxsw_sp_fdb_init
- mlxsw_sp_fdb_fini
- mlxsw_sp_switchdev_init
- mlxsw_sp_switchdev_fini
1
2
3
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/slab.h>
9 #include <linux/device.h>
10 #include <linux/skbuff.h>
11 #include <linux/if_vlan.h>
12 #include <linux/if_bridge.h>
13 #include <linux/workqueue.h>
14 #include <linux/jiffies.h>
15 #include <linux/rtnetlink.h>
16 #include <linux/netlink.h>
17 #include <net/switchdev.h>
18 #include <net/vxlan.h>
19
20 #include "spectrum_span.h"
21 #include "spectrum_switchdev.h"
22 #include "spectrum.h"
23 #include "core.h"
24 #include "reg.h"
25
26 struct mlxsw_sp_bridge_ops;
27
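/* Bridge offload bookkeeping: a single mlxsw_sp_bridge instance exists per
 * ASIC and owns the FDB notification work, the ageing time and the list of
 * offloaded bridges. Each Linux bridge is tracked as an
 * mlxsw_sp_bridge_device, each bridge port (front-panel port or LAG) as an
 * mlxsw_sp_bridge_port, and each VLAN configured on a bridge port as an
 * mlxsw_sp_bridge_vlan.
 */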
28 struct mlxsw_sp_bridge {
29 struct mlxsw_sp *mlxsw_sp;
30 struct {
31 struct delayed_work dw;
32 #define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
33 unsigned int interval;
34 } fdb_notify;
35 #define MLXSW_SP_MIN_AGEING_TIME 10
36 #define MLXSW_SP_MAX_AGEING_TIME 1000000
37 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
38 u32 ageing_time;
39 bool vlan_enabled_exists;
40 struct list_head bridges_list;
41 DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
42 const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
43 const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
44 };
45
46 struct mlxsw_sp_bridge_device {
47 struct net_device *dev;
48 struct list_head list;
49 struct list_head ports_list;
50 struct list_head mids_list;
51 u8 vlan_enabled:1,
52 multicast_enabled:1,
53 mrouter:1;
54 const struct mlxsw_sp_bridge_ops *ops;
55 };
56
57 struct mlxsw_sp_bridge_port {
58 struct net_device *dev;
59 struct mlxsw_sp_bridge_device *bridge_device;
60 struct list_head list;
61 struct list_head vlans_list;
62 unsigned int ref_count;
63 u8 stp_state;
64 unsigned long flags;
65 bool mrouter;
66 bool lagged;
67 union {
68 u16 lag_id;
69 u16 system_port;
70 };
71 };
72
73 struct mlxsw_sp_bridge_vlan {
74 struct list_head list;
75 struct list_head port_vlan_list;
76 u16 vid;
77 };
78
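/* Per-bridge-type operations. VLAN-aware (802.1Q) and VLAN-unaware (802.1D)
 * bridges differ in how VIDs map to FIDs, so port join/leave, VXLAN join and
 * FID lookup are dispatched through bridge_device->ops, which is set to
 * bridge_8021q_ops or bridge_8021d_ops when the bridge device is created.
 */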
79 struct mlxsw_sp_bridge_ops {
80 int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
81 struct mlxsw_sp_bridge_port *bridge_port,
82 struct mlxsw_sp_port *mlxsw_sp_port,
83 struct netlink_ext_ack *extack);
84 void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
85 struct mlxsw_sp_bridge_port *bridge_port,
86 struct mlxsw_sp_port *mlxsw_sp_port);
87 int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
88 const struct net_device *vxlan_dev, u16 vid,
89 struct netlink_ext_ack *extack);
90 struct mlxsw_sp_fid *
91 (*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
92 u16 vid, struct netlink_ext_ack *extack);
93 struct mlxsw_sp_fid *
94 (*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
95 u16 vid);
96 u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
97 const struct mlxsw_sp_fid *fid);
98 };
99
100 static int
101 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
102 struct mlxsw_sp_bridge_port *bridge_port,
103 u16 fid_index);
104
105 static void
106 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
107 struct mlxsw_sp_bridge_port *bridge_port);
108
109 static void
110 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
111 struct mlxsw_sp_bridge_device
112 *bridge_device);
113
114 static void
115 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
116 struct mlxsw_sp_bridge_port *bridge_port,
117 bool add);
118
119 static struct mlxsw_sp_bridge_device *
120 mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
121 const struct net_device *br_dev)
122 {
123 struct mlxsw_sp_bridge_device *bridge_device;
124
125 list_for_each_entry(bridge_device, &bridge->bridges_list, list)
126 if (bridge_device->dev == br_dev)
127 return bridge_device;
128
129 return NULL;
130 }
131
132 bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
133 const struct net_device *br_dev)
134 {
135 return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
136 }
137
138 static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
139 void *data)
140 {
141 struct mlxsw_sp *mlxsw_sp = data;
142
143 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
144 return 0;
145 }
146
147 static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
148 struct net_device *dev)
149 {
150 mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
151 netdev_walk_all_upper_dev_rcu(dev,
152 mlxsw_sp_bridge_device_upper_rif_destroy,
153 mlxsw_sp);
154 }
155
156 static struct mlxsw_sp_bridge_device *
157 mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
158 struct net_device *br_dev)
159 {
160 struct device *dev = bridge->mlxsw_sp->bus_info->dev;
161 struct mlxsw_sp_bridge_device *bridge_device;
162 bool vlan_enabled = br_vlan_enabled(br_dev);
163
164 if (vlan_enabled && bridge->vlan_enabled_exists) {
165 dev_err(dev, "Only one VLAN-aware bridge is supported\n");
166 return ERR_PTR(-EINVAL);
167 }
168
169 bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
170 if (!bridge_device)
171 return ERR_PTR(-ENOMEM);
172
173 bridge_device->dev = br_dev;
174 bridge_device->vlan_enabled = vlan_enabled;
175 bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
176 bridge_device->mrouter = br_multicast_router(br_dev);
177 INIT_LIST_HEAD(&bridge_device->ports_list);
178 if (vlan_enabled) {
179 bridge->vlan_enabled_exists = true;
180 bridge_device->ops = bridge->bridge_8021q_ops;
181 } else {
182 bridge_device->ops = bridge->bridge_8021d_ops;
183 }
184 INIT_LIST_HEAD(&bridge_device->mids_list);
185 list_add(&bridge_device->list, &bridge->bridges_list);
186
187 return bridge_device;
188 }
189
190 static void
191 mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
192 struct mlxsw_sp_bridge_device *bridge_device)
193 {
194 mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
195 bridge_device->dev);
196 list_del(&bridge_device->list);
197 if (bridge_device->vlan_enabled)
198 bridge->vlan_enabled_exists = false;
199 WARN_ON(!list_empty(&bridge_device->ports_list));
200 WARN_ON(!list_empty(&bridge_device->mids_list));
201 kfree(bridge_device);
202 }
203
204 static struct mlxsw_sp_bridge_device *
205 mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
206 struct net_device *br_dev)
207 {
208 struct mlxsw_sp_bridge_device *bridge_device;
209
210 bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
211 if (bridge_device)
212 return bridge_device;
213
214 return mlxsw_sp_bridge_device_create(bridge, br_dev);
215 }
216
217 static void
218 mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
219 struct mlxsw_sp_bridge_device *bridge_device)
220 {
221 if (list_empty(&bridge_device->ports_list))
222 mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
223 }
224
225 static struct mlxsw_sp_bridge_port *
226 __mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
227 const struct net_device *brport_dev)
228 {
229 struct mlxsw_sp_bridge_port *bridge_port;
230
231 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
232 if (bridge_port->dev == brport_dev)
233 return bridge_port;
234 }
235
236 return NULL;
237 }
238
239 struct mlxsw_sp_bridge_port *
240 mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
241 struct net_device *brport_dev)
242 {
243 struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
244 struct mlxsw_sp_bridge_device *bridge_device;
245
246 if (!br_dev)
247 return NULL;
248
249 bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
250 if (!bridge_device)
251 return NULL;
252
253 return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
254 }
255
256 static struct mlxsw_sp_bridge_port *
257 mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
258 struct net_device *brport_dev)
259 {
260 struct mlxsw_sp_bridge_port *bridge_port;
261 struct mlxsw_sp_port *mlxsw_sp_port;
262
263 bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
264 if (!bridge_port)
265 return NULL;
266
267 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
268 bridge_port->lagged = mlxsw_sp_port->lagged;
269 if (bridge_port->lagged)
270 bridge_port->lag_id = mlxsw_sp_port->lag_id;
271 else
272 bridge_port->system_port = mlxsw_sp_port->local_port;
273 bridge_port->dev = brport_dev;
274 bridge_port->bridge_device = bridge_device;
275 bridge_port->stp_state = BR_STATE_DISABLED;
276 bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
277 BR_MCAST_FLOOD;
278 INIT_LIST_HEAD(&bridge_port->vlans_list);
279 list_add(&bridge_port->list, &bridge_device->ports_list);
280 bridge_port->ref_count = 1;
281
282 return bridge_port;
283 }
284
285 static void
286 mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
287 {
288 list_del(&bridge_port->list);
289 WARN_ON(!list_empty(&bridge_port->vlans_list));
290 kfree(bridge_port);
291 }
292
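/* Bridge ports are reference counted: _get() either takes another reference
 * on an existing port or creates it (creating the bridge device on first
 * use), and _put() tears both down once the last reference is dropped. A
 * rough usage sketch (caller and variable names are illustrative only):
 *
 *	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
 *	if (IS_ERR(bridge_port))
 *		return PTR_ERR(bridge_port);
 *	...
 *	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
 */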
293 static struct mlxsw_sp_bridge_port *
294 mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
295 struct net_device *brport_dev)
296 {
297 struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
298 struct mlxsw_sp_bridge_device *bridge_device;
299 struct mlxsw_sp_bridge_port *bridge_port;
300 int err;
301
302 bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
303 if (bridge_port) {
304 bridge_port->ref_count++;
305 return bridge_port;
306 }
307
308 bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
309 if (IS_ERR(bridge_device))
310 return ERR_CAST(bridge_device);
311
312 bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
313 if (!bridge_port) {
314 err = -ENOMEM;
315 goto err_bridge_port_create;
316 }
317
318 return bridge_port;
319
320 err_bridge_port_create:
321 mlxsw_sp_bridge_device_put(bridge, bridge_device);
322 return ERR_PTR(err);
323 }
324
325 static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
326 struct mlxsw_sp_bridge_port *bridge_port)
327 {
328 struct mlxsw_sp_bridge_device *bridge_device;
329
330 if (--bridge_port->ref_count != 0)
331 return;
332 bridge_device = bridge_port->bridge_device;
333 mlxsw_sp_bridge_port_destroy(bridge_port);
334 mlxsw_sp_bridge_device_put(bridge, bridge_device);
335 }
336
337 static struct mlxsw_sp_port_vlan *
338 mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
339 const struct mlxsw_sp_bridge_device *
340 bridge_device,
341 u16 vid)
342 {
343 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
344
345 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
346 list) {
347 if (!mlxsw_sp_port_vlan->bridge_port)
348 continue;
349 if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
350 bridge_device)
351 continue;
352 if (bridge_device->vlan_enabled &&
353 mlxsw_sp_port_vlan->vid != vid)
354 continue;
355 return mlxsw_sp_port_vlan;
356 }
357
358 return NULL;
359 }
360
361 static struct mlxsw_sp_port_vlan *
362 mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
363 u16 fid_index)
364 {
365 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
366
367 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
368 list) {
369 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
370
371 if (fid && mlxsw_sp_fid_index(fid) == fid_index)
372 return mlxsw_sp_port_vlan;
373 }
374
375 return NULL;
376 }
377
378 static struct mlxsw_sp_bridge_vlan *
379 mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
380 u16 vid)
381 {
382 struct mlxsw_sp_bridge_vlan *bridge_vlan;
383
384 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
385 if (bridge_vlan->vid == vid)
386 return bridge_vlan;
387 }
388
389 return NULL;
390 }
391
392 static struct mlxsw_sp_bridge_vlan *
393 mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
394 {
395 struct mlxsw_sp_bridge_vlan *bridge_vlan;
396
397 bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
398 if (!bridge_vlan)
399 return NULL;
400
401 INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
402 bridge_vlan->vid = vid;
403 list_add(&bridge_vlan->list, &bridge_port->vlans_list);
404
405 return bridge_vlan;
406 }
407
408 static void
409 mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
410 {
411 list_del(&bridge_vlan->list);
412 WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
413 kfree(bridge_vlan);
414 }
415
416 static struct mlxsw_sp_bridge_vlan *
417 mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
418 {
419 struct mlxsw_sp_bridge_vlan *bridge_vlan;
420
421 bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
422 if (bridge_vlan)
423 return bridge_vlan;
424
425 return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
426 }
427
428 static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
429 {
430 if (list_empty(&bridge_vlan->port_vlan_list))
431 mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
432 }
433
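/* The mlxsw_sp_port_bridge_vlan_*() helpers below share a pattern: walk the
 * bridge VLAN's port_vlan_list, and when an entry belongs to the given local
 * port, program that single {port, VID} via the per-VID helpers (STP state,
 * flood table membership, learning). The bridge-port-level setters then loop
 * over all VLANs of the port and roll back in reverse order on failure.
 */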
434 static int
435 mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
436 struct mlxsw_sp_bridge_vlan *bridge_vlan,
437 u8 state)
438 {
439 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
440
441 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
442 bridge_vlan_node) {
443 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
444 continue;
445 return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
446 bridge_vlan->vid, state);
447 }
448
449 return 0;
450 }
451
452 static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
453 struct switchdev_trans *trans,
454 struct net_device *orig_dev,
455 u8 state)
456 {
457 struct mlxsw_sp_bridge_port *bridge_port;
458 struct mlxsw_sp_bridge_vlan *bridge_vlan;
459 int err;
460
461 if (switchdev_trans_ph_prepare(trans))
462 return 0;
463
464
465
466
467 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
468 orig_dev);
469 if (!bridge_port)
470 return 0;
471
472 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
473 err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
474 bridge_vlan, state);
475 if (err)
476 goto err_port_bridge_vlan_stp_set;
477 }
478
479 bridge_port->stp_state = state;
480
481 return 0;
482
483 err_port_bridge_vlan_stp_set:
484 list_for_each_entry_continue_reverse(bridge_vlan,
485 &bridge_port->vlans_list, list)
486 mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
487 bridge_port->stp_state);
488 return err;
489 }
490
491 static int
492 mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
493 struct mlxsw_sp_bridge_vlan *bridge_vlan,
494 enum mlxsw_sp_flood_type packet_type,
495 bool member)
496 {
497 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
498
499 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
500 bridge_vlan_node) {
501 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
502 continue;
503 return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
504 packet_type,
505 mlxsw_sp_port->local_port,
506 member);
507 }
508
509 return 0;
510 }
511
512 static int
513 mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
514 struct mlxsw_sp_bridge_port *bridge_port,
515 enum mlxsw_sp_flood_type packet_type,
516 bool member)
517 {
518 struct mlxsw_sp_bridge_vlan *bridge_vlan;
519 int err;
520
521 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
522 err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
523 bridge_vlan,
524 packet_type,
525 member);
526 if (err)
527 goto err_port_bridge_vlan_flood_set;
528 }
529
530 return 0;
531
532 err_port_bridge_vlan_flood_set:
533 list_for_each_entry_continue_reverse(bridge_vlan,
534 &bridge_port->vlans_list, list)
535 mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
536 packet_type, !member);
537 return err;
538 }
539
540 static int
541 mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
542 struct mlxsw_sp_bridge_vlan *bridge_vlan,
543 bool set)
544 {
545 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
546 u16 vid = bridge_vlan->vid;
547
548 list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
549 bridge_vlan_node) {
550 if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
551 continue;
552 return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
553 }
554
555 return 0;
556 }
557
558 static int
559 mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
560 struct mlxsw_sp_bridge_port *bridge_port,
561 bool set)
562 {
563 struct mlxsw_sp_bridge_vlan *bridge_vlan;
564 int err;
565
566 list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
567 err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
568 bridge_vlan, set);
569 if (err)
570 goto err_port_bridge_vlan_learning_set;
571 }
572
573 return 0;
574
575 err_port_bridge_vlan_learning_set:
576 list_for_each_entry_continue_reverse(bridge_vlan,
577 &bridge_port->vlans_list, list)
578 mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
579 bridge_vlan, !set);
580 return err;
581 }
582
583 static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
584 *mlxsw_sp_port,
585 struct switchdev_trans *trans,
586 unsigned long brport_flags)
587 {
588 if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
589 return -EINVAL;
590
591 return 0;
592 }
593
594 static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
595 struct switchdev_trans *trans,
596 struct net_device *orig_dev,
597 unsigned long brport_flags)
598 {
599 struct mlxsw_sp_bridge_port *bridge_port;
600 int err;
601
602 if (switchdev_trans_ph_prepare(trans))
603 return 0;
604
605 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
606 orig_dev);
607 if (!bridge_port)
608 return 0;
609
610 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
611 MLXSW_SP_FLOOD_TYPE_UC,
612 brport_flags & BR_FLOOD);
613 if (err)
614 return err;
615
616 err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
617 brport_flags & BR_LEARNING);
618 if (err)
619 return err;
620
621 if (bridge_port->bridge_device->multicast_enabled)
622 goto out;
623
624 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
625 MLXSW_SP_FLOOD_TYPE_MC,
626 brport_flags &
627 BR_MCAST_FLOOD);
628 if (err)
629 return err;
630
631 out:
632 memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
633 return 0;
634 }
635
636 static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
637 {
638 char sfdat_pl[MLXSW_REG_SFDAT_LEN];
639 int err;
640
641 mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
642 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
643 if (err)
644 return err;
645 mlxsw_sp->bridge->ageing_time = ageing_time;
646 return 0;
647 }
648
649 static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
650 struct switchdev_trans *trans,
651 unsigned long ageing_clock_t)
652 {
653 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
654 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
655 u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
656
657 if (switchdev_trans_ph_prepare(trans)) {
658 if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
659 ageing_time > MLXSW_SP_MAX_AGEING_TIME)
660 return -ERANGE;
661 else
662 return 0;
663 }
664
665 return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
666 }
667
668 static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
669 struct switchdev_trans *trans,
670 struct net_device *orig_dev,
671 bool vlan_enabled)
672 {
673 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
674 struct mlxsw_sp_bridge_device *bridge_device;
675
676 if (!switchdev_trans_ph_prepare(trans))
677 return 0;
678
679 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
680 if (WARN_ON(!bridge_device))
681 return -EINVAL;
682
683 if (bridge_device->vlan_enabled == vlan_enabled)
684 return 0;
685
686 netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
687 return -EINVAL;
688 }
689
690 static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
691 struct switchdev_trans *trans,
692 struct net_device *orig_dev,
693 bool is_port_mrouter)
694 {
695 struct mlxsw_sp_bridge_port *bridge_port;
696 int err;
697
698 if (switchdev_trans_ph_prepare(trans))
699 return 0;
700
701 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
702 orig_dev);
703 if (!bridge_port)
704 return 0;
705
706 if (!bridge_port->bridge_device->multicast_enabled)
707 goto out;
708
709 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
710 MLXSW_SP_FLOOD_TYPE_MC,
711 is_port_mrouter);
712 if (err)
713 return err;
714
715 mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
716 is_port_mrouter);
717 out:
718 bridge_port->mrouter = is_port_mrouter;
719 return 0;
720 }
721
722 static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
723 {
724 const struct mlxsw_sp_bridge_device *bridge_device;
725
726 bridge_device = bridge_port->bridge_device;
727 return bridge_device->multicast_enabled ? bridge_port->mrouter :
728 bridge_port->flags & BR_MCAST_FLOOD;
729 }
730
731 static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
732 struct switchdev_trans *trans,
733 struct net_device *orig_dev,
734 bool mc_disabled)
735 {
736 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
737 struct mlxsw_sp_bridge_device *bridge_device;
738 struct mlxsw_sp_bridge_port *bridge_port;
739 int err;
740
741 if (switchdev_trans_ph_prepare(trans))
742 return 0;
743
744
745
746
747 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
748 if (!bridge_device)
749 return 0;
750
751 if (bridge_device->multicast_enabled != !mc_disabled) {
752 bridge_device->multicast_enabled = !mc_disabled;
753 mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
754 bridge_device);
755 }
756
757 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
758 enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
759 bool member = mlxsw_sp_mc_flood(bridge_port);
760
761 err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
762 bridge_port,
763 packet_type, member);
764 if (err)
765 return err;
766 }
767
768 bridge_device->multicast_enabled = !mc_disabled;
769
770 return 0;
771 }
772
773 static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
774 u16 mid_idx, bool add)
775 {
776 char *smid_pl;
777 int err;
778
779 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
780 if (!smid_pl)
781 return -ENOMEM;
782
783 mlxsw_reg_smid_pack(smid_pl, mid_idx,
784 mlxsw_sp_router_port(mlxsw_sp), add);
785 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
786 kfree(smid_pl);
787 return err;
788 }
789
790 static void
791 mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
792 struct mlxsw_sp_bridge_device *bridge_device,
793 bool add)
794 {
795 struct mlxsw_sp_mid *mid;
796
797 list_for_each_entry(mid, &bridge_device->mids_list, list)
798 mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
799 }
800
801 static int
802 mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
803 struct switchdev_trans *trans,
804 struct net_device *orig_dev,
805 bool is_mrouter)
806 {
807 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
808 struct mlxsw_sp_bridge_device *bridge_device;
809
810 if (switchdev_trans_ph_prepare(trans))
811 return 0;
812
813
814
815
816 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
817 if (!bridge_device)
818 return 0;
819
820 if (bridge_device->mrouter != is_mrouter)
821 mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
822 is_mrouter);
823 bridge_device->mrouter = is_mrouter;
824 return 0;
825 }
826
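/* switchdev attribute handler. Attributes arrive in two transaction phases
 * (prepare/commit): most handlers here are no-ops in prepare and program the
 * hardware in commit, while ageing time and VLAN filtering are validated in
 * prepare. After the commit phase, SPAN mirror sessions are re-resolved.
 */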
827 static int mlxsw_sp_port_attr_set(struct net_device *dev,
828 const struct switchdev_attr *attr,
829 struct switchdev_trans *trans)
830 {
831 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
832 int err;
833
834 switch (attr->id) {
835 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
836 err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
837 attr->orig_dev,
838 attr->u.stp_state);
839 break;
840 case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
841 err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
842 trans,
843 attr->u.brport_flags);
844 break;
845 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
846 err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
847 attr->orig_dev,
848 attr->u.brport_flags);
849 break;
850 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
851 err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
852 attr->u.ageing_time);
853 break;
854 case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
855 err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
856 attr->orig_dev,
857 attr->u.vlan_filtering);
858 break;
859 case SWITCHDEV_ATTR_ID_PORT_MROUTER:
860 err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
861 attr->orig_dev,
862 attr->u.mrouter);
863 break;
864 case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
865 err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
866 attr->orig_dev,
867 attr->u.mc_disabled);
868 break;
869 case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
870 err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
871 attr->orig_dev,
872 attr->u.mrouter);
873 break;
874 default:
875 err = -EOPNOTSUPP;
876 break;
877 }
878
879 if (switchdev_trans_ph_commit(trans))
880 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
881
882 return err;
883 }
884
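/* Joining a FID for a {port, VID}: look up or create the FID through the
 * bridge-type ops, enable unicast and multicast flooding for the local port
 * according to the bridge port state, always enable broadcast flooding, and
 * finally map the {port, VID} pair to the FID. Failures unwind in reverse,
 * and mlxsw_sp_port_vlan_fid_leave() undoes the whole sequence.
 */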
885 static int
886 mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
887 struct mlxsw_sp_bridge_port *bridge_port,
888 struct netlink_ext_ack *extack)
889 {
890 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
891 struct mlxsw_sp_bridge_device *bridge_device;
892 u8 local_port = mlxsw_sp_port->local_port;
893 u16 vid = mlxsw_sp_port_vlan->vid;
894 struct mlxsw_sp_fid *fid;
895 int err;
896
897 bridge_device = bridge_port->bridge_device;
898 fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
899 if (IS_ERR(fid))
900 return PTR_ERR(fid);
901
902 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
903 bridge_port->flags & BR_FLOOD);
904 if (err)
905 goto err_fid_uc_flood_set;
906
907 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
908 mlxsw_sp_mc_flood(bridge_port));
909 if (err)
910 goto err_fid_mc_flood_set;
911
912 err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
913 true);
914 if (err)
915 goto err_fid_bc_flood_set;
916
917 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
918 if (err)
919 goto err_fid_port_vid_map;
920
921 mlxsw_sp_port_vlan->fid = fid;
922
923 return 0;
924
925 err_fid_port_vid_map:
926 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
927 err_fid_bc_flood_set:
928 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
929 err_fid_mc_flood_set:
930 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
931 err_fid_uc_flood_set:
932 mlxsw_sp_fid_put(fid);
933 return err;
934 }
935
936 static void
937 mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
938 {
939 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
940 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
941 u8 local_port = mlxsw_sp_port->local_port;
942 u16 vid = mlxsw_sp_port_vlan->vid;
943
944 mlxsw_sp_port_vlan->fid = NULL;
945 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
946 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
947 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
948 mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
949 mlxsw_sp_fid_put(fid);
950 }
951
952 static u16
953 mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
954 u16 vid, bool is_pvid)
955 {
956 if (is_pvid)
957 return vid;
958 else if (mlxsw_sp_port->pvid == vid)
959 return 0;
960 else
961 return mlxsw_sp_port->pvid;
962 }
963
964 static int
965 mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
966 struct mlxsw_sp_bridge_port *bridge_port,
967 struct netlink_ext_ack *extack)
968 {
969 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
970 struct mlxsw_sp_bridge_vlan *bridge_vlan;
971 u16 vid = mlxsw_sp_port_vlan->vid;
972 int err;
973
974
975 if (mlxsw_sp_port_vlan->bridge_port)
976 return 0;
977
978 err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
979 extack);
980 if (err)
981 return err;
982
983 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
984 bridge_port->flags & BR_LEARNING);
985 if (err)
986 goto err_port_vid_learning_set;
987
988 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
989 bridge_port->stp_state);
990 if (err)
991 goto err_port_vid_stp_set;
992
993 bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
994 if (!bridge_vlan) {
995 err = -ENOMEM;
996 goto err_bridge_vlan_get;
997 }
998
999 list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
1000 &bridge_vlan->port_vlan_list);
1001
1002 mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
1003 bridge_port->dev);
1004 mlxsw_sp_port_vlan->bridge_port = bridge_port;
1005
1006 return 0;
1007
1008 err_bridge_vlan_get:
1009 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1010 err_port_vid_stp_set:
1011 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1012 err_port_vid_learning_set:
1013 mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1014 return err;
1015 }
1016
1017 void
1018 mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1019 {
1020 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1021 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1022 struct mlxsw_sp_bridge_vlan *bridge_vlan;
1023 struct mlxsw_sp_bridge_port *bridge_port;
1024 u16 vid = mlxsw_sp_port_vlan->vid;
1025 bool last_port, last_vlan;
1026
1027 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
1028 mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
1029 return;
1030
1031 bridge_port = mlxsw_sp_port_vlan->bridge_port;
1032 last_vlan = list_is_singular(&bridge_port->vlans_list);
1033 bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
1034 last_port = list_is_singular(&bridge_vlan->port_vlan_list);
1035
1036 list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
1037 mlxsw_sp_bridge_vlan_put(bridge_vlan);
1038 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
1039 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
1040 if (last_port)
1041 mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
1042 bridge_port,
1043 mlxsw_sp_fid_index(fid));
1044 if (last_vlan)
1045 mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);
1046
1047 mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
1048
1049 mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
1050 mlxsw_sp_port_vlan->bridge_port = NULL;
1051 }
1052
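/* Adding a VLAN to a bridge port: create the port VLAN if it does not exist
 * yet, add the VLAN to the port's filter (tagged or untagged), update the
 * PVID if requested, and then join the bridge (FID mapping, learning and STP
 * state). Each step is rolled back in reverse order on failure.
 */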
1053 static int
1054 mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
1055 struct mlxsw_sp_bridge_port *bridge_port,
1056 u16 vid, bool is_untagged, bool is_pvid,
1057 struct netlink_ext_ack *extack)
1058 {
1059 u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
1060 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1061 u16 old_pvid = mlxsw_sp_port->pvid;
1062 int err;
1063
1064
1065
1066
1067
1068 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1069 if (mlxsw_sp_port_vlan &&
1070 mlxsw_sp_port_vlan->bridge_port != bridge_port)
1071 return -EEXIST;
1072
1073 if (!mlxsw_sp_port_vlan) {
1074 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
1075 vid);
1076 if (IS_ERR(mlxsw_sp_port_vlan))
1077 return PTR_ERR(mlxsw_sp_port_vlan);
1078 }
1079
1080 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
1081 is_untagged);
1082 if (err)
1083 goto err_port_vlan_set;
1084
1085 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1086 if (err)
1087 goto err_port_pvid_set;
1088
1089 err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
1090 extack);
1091 if (err)
1092 goto err_port_vlan_bridge_join;
1093
1094 return 0;
1095
1096 err_port_vlan_bridge_join:
1097 mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
1098 err_port_pvid_set:
1099 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1100 err_port_vlan_set:
1101 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1102 return err;
1103 }
1104
1105 static int
1106 mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1107 const struct net_device *br_dev,
1108 const struct switchdev_obj_port_vlan *vlan)
1109 {
1110 struct mlxsw_sp_rif *rif;
1111 struct mlxsw_sp_fid *fid;
1112 u16 pvid;
1113 u16 vid;
1114
1115 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
1116 if (!rif)
1117 return 0;
1118 fid = mlxsw_sp_rif_fid(rif);
1119 pvid = mlxsw_sp_fid_8021q_vid(fid);
1120
1121 for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1122 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1123 if (vid != pvid) {
1124 netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1125 return -EBUSY;
1126 }
1127 } else {
1128 if (vid == pvid) {
1129 netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1130 return -EBUSY;
1131 }
1132 }
1133 }
1134
1135 return 0;
1136 }
1137
1138 static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
1139 const struct switchdev_obj_port_vlan *vlan,
1140 struct switchdev_trans *trans,
1141 struct netlink_ext_ack *extack)
1142 {
1143 bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
1144 bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1145 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1146 struct net_device *orig_dev = vlan->obj.orig_dev;
1147 struct mlxsw_sp_bridge_port *bridge_port;
1148 u16 vid;
1149
1150 if (netif_is_bridge_master(orig_dev)) {
1151 int err = 0;
1152
1153 if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
1154 br_vlan_enabled(orig_dev) &&
1155 switchdev_trans_ph_prepare(trans))
1156 err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
1157 orig_dev, vlan);
1158 if (!err)
1159 err = -EOPNOTSUPP;
1160 return err;
1161 }
1162
1163 if (switchdev_trans_ph_commit(trans))
1164 return 0;
1165
1166 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1167 if (WARN_ON(!bridge_port))
1168 return -EINVAL;
1169
1170 if (!bridge_port->bridge_device->vlan_enabled)
1171 return 0;
1172
1173 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1174 int err;
1175
1176 err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
1177 vid, flag_untagged,
1178 flag_pvid, extack);
1179 if (err)
1180 return err;
1181 }
1182
1183 return 0;
1184 }
1185
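/* FDB flushing is done via the SFDF register, keyed by FID together with
 * either the system port or the LAG ID, depending on whether the bridge port
 * is a LAG.
 */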
1186 static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1187 {
1188 return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1189 MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1190 }
1191
1192 static int
1193 mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1194 struct mlxsw_sp_bridge_port *bridge_port,
1195 u16 fid_index)
1196 {
1197 bool lagged = bridge_port->lagged;
1198 char sfdf_pl[MLXSW_REG_SFDF_LEN];
1199 u16 system_port;
1200
1201 system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1202 mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1203 mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1204 mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1205
1206 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1207 }
1208
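/* FDB records are written and removed through the SFD register: unicast,
 * unicast-LAG, unicast-tunnel and multicast record layouts share the same
 * edit/remove flow. After the write, the number of processed records is
 * compared against the number packed; a mismatch means the device did not
 * consume the record and is reported as -EBUSY.
 */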
1209 static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1210 {
1211 return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1212 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
1213 }
1214
1215 static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1216 {
1217 return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1218 MLXSW_REG_SFD_OP_WRITE_REMOVE;
1219 }
1220
1221 static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
1222 const char *mac, u16 fid,
1223 enum mlxsw_sp_l3proto proto,
1224 const union mlxsw_sp_l3addr *addr,
1225 bool adding, bool dynamic)
1226 {
1227 enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
1228 char *sfd_pl;
1229 u8 num_rec;
1230 u32 uip;
1231 int err;
1232
1233 switch (proto) {
1234 case MLXSW_SP_L3_PROTO_IPV4:
1235 uip = be32_to_cpu(addr->addr4);
1236 sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
1237 break;
1238 case MLXSW_SP_L3_PROTO_IPV6:
1239 default:
1240 WARN_ON(1);
1241 return -EOPNOTSUPP;
1242 }
1243
1244 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1245 if (!sfd_pl)
1246 return -ENOMEM;
1247
1248 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1249 mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
1250 mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
1251 MLXSW_REG_SFD_REC_ACTION_NOP, uip,
1252 sfd_proto);
1253 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1254 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1255 if (err)
1256 goto out;
1257
1258 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1259 err = -EBUSY;
1260
1261 out:
1262 kfree(sfd_pl);
1263 return err;
1264 }
1265
1266 static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1267 const char *mac, u16 fid, bool adding,
1268 enum mlxsw_reg_sfd_rec_action action,
1269 enum mlxsw_reg_sfd_rec_policy policy)
1270 {
1271 char *sfd_pl;
1272 u8 num_rec;
1273 int err;
1274
1275 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1276 if (!sfd_pl)
1277 return -ENOMEM;
1278
1279 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1280 mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
1281 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1282 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1283 if (err)
1284 goto out;
1285
1286 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1287 err = -EBUSY;
1288
1289 out:
1290 kfree(sfd_pl);
1291 return err;
1292 }
1293
1294 static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1295 const char *mac, u16 fid, bool adding,
1296 bool dynamic)
1297 {
1298 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1299 MLXSW_REG_SFD_REC_ACTION_NOP,
1300 mlxsw_sp_sfd_rec_policy(dynamic));
1301 }
1302
1303 int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1304 bool adding)
1305 {
1306 return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
1307 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1308 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1309 }
1310
1311 static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1312 const char *mac, u16 fid, u16 lag_vid,
1313 bool adding, bool dynamic)
1314 {
1315 char *sfd_pl;
1316 u8 num_rec;
1317 int err;
1318
1319 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1320 if (!sfd_pl)
1321 return -ENOMEM;
1322
1323 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1324 mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1325 mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1326 lag_vid, lag_id);
1327 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1328 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1329 if (err)
1330 goto out;
1331
1332 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1333 err = -EBUSY;
1334
1335 out:
1336 kfree(sfd_pl);
1337 return err;
1338 }
1339
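/* Learning sync from the bridge: a switchdev FDB notification carries the
 * bridge port device and VID. These are translated to the matching port VLAN
 * and FID index, and the entry is then programmed as a per-port or per-LAG
 * unicast FDB record.
 */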
1340 static int
1341 mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1342 struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1343 {
1344 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1345 struct net_device *orig_dev = fdb_info->info.dev;
1346 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1347 struct mlxsw_sp_bridge_device *bridge_device;
1348 struct mlxsw_sp_bridge_port *bridge_port;
1349 u16 fid_index, vid;
1350
1351 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1352 if (!bridge_port)
1353 return -EINVAL;
1354
1355 bridge_device = bridge_port->bridge_device;
1356 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1357 bridge_device,
1358 fdb_info->vid);
1359 if (!mlxsw_sp_port_vlan)
1360 return 0;
1361
1362 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1363 vid = mlxsw_sp_port_vlan->vid;
1364
1365 if (!bridge_port->lagged)
1366 return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1367 bridge_port->system_port,
1368 fdb_info->addr, fid_index,
1369 adding, false);
1370 else
1371 return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1372 bridge_port->lag_id,
1373 fdb_info->addr, fid_index,
1374 vid, adding, false);
1375 }
1376
1377 static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
1378 u16 fid, u16 mid_idx, bool adding)
1379 {
1380 char *sfd_pl;
1381 u8 num_rec;
1382 int err;
1383
1384 sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1385 if (!sfd_pl)
1386 return -ENOMEM;
1387
1388 mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1389 mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
1390 MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
1391 num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1392 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1393 if (err)
1394 goto out;
1395
1396 if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1397 err = -EBUSY;
1398
1399 out:
1400 kfree(sfd_pl);
1401 return err;
1402 }
1403
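/* Multicast group (MID) entries are programmed via the SMID register. The
 * port mask selects which ports' membership bits a write updates: the full
 * entry write below masks in all existing ports plus the router port and
 * sets the bits for the requested flood bitmap (and optionally the router
 * port), while the single-port variant toggles one local port only.
 */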
1404 static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
1405 long *ports_bitmap,
1406 bool set_router_port)
1407 {
1408 char *smid_pl;
1409 int err, i;
1410
1411 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1412 if (!smid_pl)
1413 return -ENOMEM;
1414
1415 mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
1416 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
1417 if (mlxsw_sp->ports[i])
1418 mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
1419 }
1420
1421 mlxsw_reg_smid_port_mask_set(smid_pl,
1422 mlxsw_sp_router_port(mlxsw_sp), 1);
1423
1424 for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
1425 mlxsw_reg_smid_port_set(smid_pl, i, 1);
1426
1427 mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
1428 set_router_port);
1429
1430 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1431 kfree(smid_pl);
1432 return err;
1433 }
1434
1435 static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1436 u16 mid_idx, bool add)
1437 {
1438 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1439 char *smid_pl;
1440 int err;
1441
1442 smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1443 if (!smid_pl)
1444 return -ENOMEM;
1445
1446 mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1447 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1448 kfree(smid_pl);
1449 return err;
1450 }
1451
1452 static struct
1453 mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1454 const unsigned char *addr,
1455 u16 fid)
1456 {
1457 struct mlxsw_sp_mid *mid;
1458
1459 list_for_each_entry(mid, &bridge_device->mids_list, list) {
1460 if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1461 return mid;
1462 }
1463 return NULL;
1464 }
1465
1466 static void
1467 mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1468 struct mlxsw_sp_bridge_port *bridge_port,
1469 unsigned long *ports_bitmap)
1470 {
1471 struct mlxsw_sp_port *mlxsw_sp_port;
1472 u64 max_lag_members, i;
1473 int lag_id;
1474
1475 if (!bridge_port->lagged) {
1476 set_bit(bridge_port->system_port, ports_bitmap);
1477 } else {
1478 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1479 MAX_LAG_MEMBERS);
1480 lag_id = bridge_port->lag_id;
1481 for (i = 0; i < max_lag_members; i++) {
1482 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1483 lag_id, i);
1484 if (mlxsw_sp_port)
1485 set_bit(mlxsw_sp_port->local_port,
1486 ports_bitmap);
1487 }
1488 }
1489 }
1490
1491 static void
1492 mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1493 struct mlxsw_sp_bridge_device *bridge_device,
1494 struct mlxsw_sp *mlxsw_sp)
1495 {
1496 struct mlxsw_sp_bridge_port *bridge_port;
1497
1498 list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1499 if (bridge_port->mrouter) {
1500 mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1501 bridge_port,
1502 flood_bitmap);
1503 }
1504 }
1505 }
1506
1507 static bool
1508 mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1509 struct mlxsw_sp_mid *mid,
1510 struct mlxsw_sp_bridge_device *bridge_device)
1511 {
1512 long *flood_bitmap;
1513 int num_of_ports;
1514 int alloc_size;
1515 u16 mid_idx;
1516 int err;
1517
1518 mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
1519 MLXSW_SP_MID_MAX);
1520 if (mid_idx == MLXSW_SP_MID_MAX)
1521 return false;
1522
1523 num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
1524 alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
1525 flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
1526 if (!flood_bitmap)
1527 return false;
1528
1529 bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports);
1530 mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);
1531
1532 mid->mid = mid_idx;
1533 err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
1534 bridge_device->mrouter);
1535 kfree(flood_bitmap);
1536 if (err)
1537 return false;
1538
1539 err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
1540 true);
1541 if (err)
1542 return false;
1543
1544 set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
1545 mid->in_hw = true;
1546 return true;
1547 }
1548
1549 static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1550 struct mlxsw_sp_mid *mid)
1551 {
1552 if (!mid->in_hw)
1553 return 0;
1554
1555 clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1556 mid->in_hw = false;
1557 return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1558 false);
1559 }
1560
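/* Allocation of a software MID: the member-port bitmap is always maintained,
 * but the entry is only committed to hardware (MID index allocation, SMID
 * programming and the multicast SFD record) while multicast snooping is
 * enabled on the bridge; mlxsw_sp_bridge_mdb_mc_enable_sync() re-syncs all
 * entries when snooping is toggled.
 */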
1561 static struct
1562 mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
1563 struct mlxsw_sp_bridge_device *bridge_device,
1564 const unsigned char *addr,
1565 u16 fid)
1566 {
1567 struct mlxsw_sp_mid *mid;
1568 size_t alloc_size;
1569
1570 mid = kzalloc(sizeof(*mid), GFP_KERNEL);
1571 if (!mid)
1572 return NULL;
1573
1574 alloc_size = sizeof(unsigned long) *
1575 BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
1576
1577 mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
1578 if (!mid->ports_in_mid)
1579 goto err_ports_in_mid_alloc;
1580
1581 ether_addr_copy(mid->addr, addr);
1582 mid->fid = fid;
1583 mid->in_hw = false;
1584
1585 if (!bridge_device->multicast_enabled)
1586 goto out;
1587
1588 if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
1589 goto err_write_mdb_entry;
1590
1591 out:
1592 list_add_tail(&mid->list, &bridge_device->mids_list);
1593 return mid;
1594
1595 err_write_mdb_entry:
1596 kfree(mid->ports_in_mid);
1597 err_ports_in_mid_alloc:
1598 kfree(mid);
1599 return NULL;
1600 }
1601
1602 static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1603 struct mlxsw_sp_mid *mid)
1604 {
1605 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1606 int err = 0;
1607
1608 clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1609 if (bitmap_empty(mid->ports_in_mid,
1610 mlxsw_core_max_ports(mlxsw_sp->core))) {
1611 err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1612 list_del(&mid->list);
1613 kfree(mid->ports_in_mid);
1614 kfree(mid);
1615 }
1616 return err;
1617 }
1618
1619 static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
1620 const struct switchdev_obj_port_mdb *mdb,
1621 struct switchdev_trans *trans)
1622 {
1623 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1624 struct net_device *orig_dev = mdb->obj.orig_dev;
1625 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1626 struct net_device *dev = mlxsw_sp_port->dev;
1627 struct mlxsw_sp_bridge_device *bridge_device;
1628 struct mlxsw_sp_bridge_port *bridge_port;
1629 struct mlxsw_sp_mid *mid;
1630 u16 fid_index;
1631 int err = 0;
1632
1633 if (switchdev_trans_ph_commit(trans))
1634 return 0;
1635
1636 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1637 if (!bridge_port)
1638 return 0;
1639
1640 bridge_device = bridge_port->bridge_device;
1641 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1642 bridge_device,
1643 mdb->vid);
1644 if (!mlxsw_sp_port_vlan)
1645 return 0;
1646
1647 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1648
1649 mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1650 if (!mid) {
1651 mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
1652 fid_index);
1653 if (!mid) {
1654 netdev_err(dev, "Unable to allocate MC group\n");
1655 return -ENOMEM;
1656 }
1657 }
1658 set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1659
1660 if (!bridge_device->multicast_enabled)
1661 return 0;
1662
1663 if (bridge_port->mrouter)
1664 return 0;
1665
1666 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
1667 if (err) {
1668 netdev_err(dev, "Unable to set SMID\n");
1669 goto err_out;
1670 }
1671
1672 return 0;
1673
1674 err_out:
1675 mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1676 return err;
1677 }
1678
1679 static void
1680 mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1681 struct mlxsw_sp_bridge_device
1682 *bridge_device)
1683 {
1684 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1685 struct mlxsw_sp_mid *mid;
1686 bool mc_enabled;
1687
1688 mc_enabled = bridge_device->multicast_enabled;
1689
1690 list_for_each_entry(mid, &bridge_device->mids_list, list) {
1691 if (mc_enabled)
1692 mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1693 bridge_device);
1694 else
1695 mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1696 }
1697 }
1698
1699 static void
1700 mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1701 struct mlxsw_sp_bridge_port *bridge_port,
1702 bool add)
1703 {
1704 struct mlxsw_sp_bridge_device *bridge_device;
1705 struct mlxsw_sp_mid *mid;
1706
1707 bridge_device = bridge_port->bridge_device;
1708
1709 list_for_each_entry(mid, &bridge_device->mids_list, list) {
1710 if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1711 mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
1712 }
1713 }
1714
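/* SPAN (mirroring) re-resolution is deferred to the driver workqueue: the
 * work item takes RTNL and respins mirror sessions against the updated
 * bridge/VLAN state. The work struct is allocated with GFP_ATOMIC so that
 * scheduling is safe from any context.
 */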
1715 struct mlxsw_sp_span_respin_work {
1716 struct work_struct work;
1717 struct mlxsw_sp *mlxsw_sp;
1718 };
1719
1720 static void mlxsw_sp_span_respin_work(struct work_struct *work)
1721 {
1722 struct mlxsw_sp_span_respin_work *respin_work =
1723 container_of(work, struct mlxsw_sp_span_respin_work, work);
1724
1725 rtnl_lock();
1726 mlxsw_sp_span_respin(respin_work->mlxsw_sp);
1727 rtnl_unlock();
1728 kfree(respin_work);
1729 }
1730
1731 static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
1732 {
1733 struct mlxsw_sp_span_respin_work *respin_work;
1734
1735 respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
1736 if (!respin_work)
1737 return;
1738
1739 INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
1740 respin_work->mlxsw_sp = mlxsw_sp;
1741
1742 mlxsw_core_schedule_work(&respin_work->work);
1743 }
1744
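/* switchdev object add/del handlers. Port VLANs and port MDB entries are the
 * only supported objects; everything else returns -EOPNOTSUPP. For VLAN
 * additions, SPAN re-resolution is scheduled during the prepare phase so it
 * runs only after the bridge has applied the change.
 */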
1745 static int mlxsw_sp_port_obj_add(struct net_device *dev,
1746 const struct switchdev_obj *obj,
1747 struct switchdev_trans *trans,
1748 struct netlink_ext_ack *extack)
1749 {
1750 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1751 const struct switchdev_obj_port_vlan *vlan;
1752 int err = 0;
1753
1754 switch (obj->id) {
1755 case SWITCHDEV_OBJ_ID_PORT_VLAN:
1756 vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
1757 err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans,
1758 extack);
1759
1760 if (switchdev_trans_ph_prepare(trans)) {
1761
1762
1763
1764
1765
1766 mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
1767 }
1768 break;
1769 case SWITCHDEV_OBJ_ID_PORT_MDB:
1770 err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
1771 SWITCHDEV_OBJ_PORT_MDB(obj),
1772 trans);
1773 break;
1774 default:
1775 err = -EOPNOTSUPP;
1776 break;
1777 }
1778
1779 return err;
1780 }
1781
1782 static void
1783 mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
1784 struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
1785 {
1786 u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
1787 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1788
1789 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1790 if (WARN_ON(!mlxsw_sp_port_vlan))
1791 return;
1792
1793 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1794 mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
1795 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1796 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1797 }
1798
1799 static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
1800 const struct switchdev_obj_port_vlan *vlan)
1801 {
1802 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1803 struct net_device *orig_dev = vlan->obj.orig_dev;
1804 struct mlxsw_sp_bridge_port *bridge_port;
1805 u16 vid;
1806
1807 if (netif_is_bridge_master(orig_dev))
1808 return -EOPNOTSUPP;
1809
1810 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1811 if (WARN_ON(!bridge_port))
1812 return -EINVAL;
1813
1814 if (!bridge_port->bridge_device->vlan_enabled)
1815 return 0;
1816
1817 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
1818 mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);
1819
1820 return 0;
1821 }
1822
1823 static int
1824 __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1825 struct mlxsw_sp_bridge_port *bridge_port,
1826 struct mlxsw_sp_mid *mid)
1827 {
1828 struct net_device *dev = mlxsw_sp_port->dev;
1829 int err;
1830
1831 if (bridge_port->bridge_device->multicast_enabled &&
1832 !bridge_port->mrouter) {
1833 err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1834 if (err)
1835 netdev_err(dev, "Unable to remove port from SMID\n");
1836 }
1837
1838 err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
1839 if (err)
1840 netdev_err(dev, "Unable to remove MC SFD\n");
1841
1842 return err;
1843 }
1844
1845 static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
1846 const struct switchdev_obj_port_mdb *mdb)
1847 {
1848 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1849 struct net_device *orig_dev = mdb->obj.orig_dev;
1850 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1851 struct mlxsw_sp_bridge_device *bridge_device;
1852 struct net_device *dev = mlxsw_sp_port->dev;
1853 struct mlxsw_sp_bridge_port *bridge_port;
1854 struct mlxsw_sp_mid *mid;
1855 u16 fid_index;
1856
1857 bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1858 if (!bridge_port)
1859 return 0;
1860
1861 bridge_device = bridge_port->bridge_device;
1862 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1863 bridge_device,
1864 mdb->vid);
1865 if (!mlxsw_sp_port_vlan)
1866 return 0;
1867
1868 fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1869
1870 mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
1871 if (!mid) {
1872 netdev_err(dev, "Unable to remove port from MC DB\n");
1873 return -EINVAL;
1874 }
1875
1876 return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
1877 }
1878
1879 static void
1880 mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1881 struct mlxsw_sp_bridge_port *bridge_port)
1882 {
1883 struct mlxsw_sp_bridge_device *bridge_device;
1884 struct mlxsw_sp_mid *mid, *tmp;
1885
1886 bridge_device = bridge_port->bridge_device;
1887
1888 list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
1889 if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
1890 __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
1891 mid);
1892 } else if (bridge_device->multicast_enabled &&
1893 bridge_port->mrouter) {
1894 mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
1895 }
1896 }
1897 }
1898
1899 static int mlxsw_sp_port_obj_del(struct net_device *dev,
1900 const struct switchdev_obj *obj)
1901 {
1902 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1903 int err = 0;
1904
1905 switch (obj->id) {
1906 case SWITCHDEV_OBJ_ID_PORT_VLAN:
1907 err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
1908 SWITCHDEV_OBJ_PORT_VLAN(obj));
1909 break;
1910 case SWITCHDEV_OBJ_ID_PORT_MDB:
1911 err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
1912 SWITCHDEV_OBJ_PORT_MDB(obj));
1913 break;
1914 default:
1915 err = -EOPNOTSUPP;
1916 break;
1917 }
1918
1919 mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
1920
1921 return err;
1922 }
1923
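/* Return the first member port of the LAG, used as the LAG's
 * representor when processing FDB notifications keyed by LAG ID.
 */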
1924 static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1925 u16 lag_id)
1926 {
1927 struct mlxsw_sp_port *mlxsw_sp_port;
1928 u64 max_lag_members;
1929 int i;
1930
1931 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1932 MAX_LAG_MEMBERS);
1933 for (i = 0; i < max_lag_members; i++) {
1934 mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1935 if (mlxsw_sp_port)
1936 return mlxsw_sp_port;
1937 }
1938 return NULL;
1939 }
1940
1941 static int
1942 mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1943 struct mlxsw_sp_bridge_port *bridge_port,
1944 struct mlxsw_sp_port *mlxsw_sp_port,
1945 struct netlink_ext_ack *extack)
1946 {
1947 if (is_vlan_dev(bridge_port->dev)) {
1948 NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
1949 return -EINVAL;
1950 }
1951
1952 /* Port is no longer usable as a router interface */
1953 if (mlxsw_sp_port->default_vlan->fid)
1954 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
1955
1956 return 0;
1957 }
1958
1959 static void
1960 mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
1961 struct mlxsw_sp_bridge_port *bridge_port,
1962 struct mlxsw_sp_port *mlxsw_sp_port)
1963 {
1964 /* Make sure untagged frames are allowed to ingress */
1965 mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
1966 }
1967
1968 static int
1969 mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
1970 const struct net_device *vxlan_dev, u16 vid,
1971 struct netlink_ext_ack *extack)
1972 {
1973 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
1974 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
1975 struct mlxsw_sp_nve_params params = {
1976 .type = MLXSW_SP_NVE_TYPE_VXLAN,
1977 .vni = vxlan->cfg.vni,
1978 .dev = vxlan_dev,
1979 };
1980 struct mlxsw_sp_fid *fid;
1981 int err;
1982
1983 /* If the VLAN is 0, we need to find the VLAN that is configured as
1984  * PVID and egress untagged on the bridge port of the VxLAN device.
1985  * It is possible no such VLAN exists
1986  */
1987 if (!vid) {
1988 err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
1989 if (err || !vid)
1990 return err;
1991 }
1992
1993 /* If no other port is member in the VLAN, then the FID does not
1994  * exist. NVE will be enabled on the FID once a port joins the VLAN
1995  */
1996 fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
1997 if (!fid)
1998 return 0;
1999
2000 if (mlxsw_sp_fid_vni_is_set(fid)) {
2001 NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2002 err = -EINVAL;
2003 goto err_vni_exists;
2004 }
2005
2006 err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2007 if (err)
2008 goto err_nve_fid_enable;
2009
2010 /* The tunnel port does not hold a reference on the FID. Only the
2011  * bridge port or the VLAN device on top of it holds one
2012  */
2013 mlxsw_sp_fid_put(fid);
2014
2015 return 0;
2016
2017 err_nve_fid_enable:
2018 err_vni_exists:
2019 mlxsw_sp_fid_put(fid);
2020 return err;
2021 }
2022
2023 static struct net_device *
2024 mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2025 {
2026 struct net_device *dev;
2027 struct list_head *iter;
2028
2029 netdev_for_each_lower_dev(br_dev, dev, iter) {
2030 u16 pvid;
2031 int err;
2032
2033 if (!netif_is_vxlan(dev))
2034 continue;
2035
2036 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2037 if (err || pvid != vid)
2038 continue;
2039
2040 return dev;
2041 }
2042
2043 return NULL;
2044 }
2045
2046 static struct mlxsw_sp_fid *
2047 mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2048 u16 vid, struct netlink_ext_ack *extack)
2049 {
2050 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2051 struct net_device *vxlan_dev;
2052 struct mlxsw_sp_fid *fid;
2053 int err;
2054
2055 fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
2056 if (IS_ERR(fid))
2057 return fid;
2058
2059 if (mlxsw_sp_fid_vni_is_set(fid))
2060 return fid;
2061
2062 /* Find the VxLAN device that has the specified VLAN configured as
2063  * PVID and egress untagged. There can be at most one such device
2064  */
2065 vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev,
2066 vid);
2067 if (!vxlan_dev)
2068 return fid;
2069
2070 if (!netif_running(vxlan_dev))
2071 return fid;
2072
2073 err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
2074 extack);
2075 if (err)
2076 goto err_vxlan_join;
2077
2078 return fid;
2079
2080 err_vxlan_join:
2081 mlxsw_sp_fid_put(fid);
2082 return ERR_PTR(err);
2083 }
2084
2085 static struct mlxsw_sp_fid *
2086 mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2087 u16 vid)
2088 {
2089 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2090
2091 return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
2092 }
2093
2094 static u16
2095 mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2096 const struct mlxsw_sp_fid *fid)
2097 {
2098 return mlxsw_sp_fid_8021q_vid(fid);
2099 }
2100
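/* Bridge operations for VLAN-aware (802.1Q) bridges, where each VLAN
 * is backed by a per-VID 802.1Q FID.
 */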
2101 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
2102 .port_join = mlxsw_sp_bridge_8021q_port_join,
2103 .port_leave = mlxsw_sp_bridge_8021q_port_leave,
2104 .vxlan_join = mlxsw_sp_bridge_8021q_vxlan_join,
2105 .fid_get = mlxsw_sp_bridge_8021q_fid_get,
2106 .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup,
2107 .fid_vid = mlxsw_sp_bridge_8021q_fid_vid,
2108 };
2109
2110 static bool
2111 mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2112 const struct net_device *br_dev)
2113 {
2114 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2115
2116 list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2117 list) {
2118 if (mlxsw_sp_port_vlan->bridge_port &&
2119 mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2120 br_dev)
2121 return true;
2122 }
2123
2124 return false;
2125 }
2126
2127 static int
2128 mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2129 struct mlxsw_sp_bridge_port *bridge_port,
2130 struct mlxsw_sp_port *mlxsw_sp_port,
2131 struct netlink_ext_ack *extack)
2132 {
2133 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2134 struct net_device *dev = bridge_port->dev;
2135 u16 vid;
2136
2137 vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2138 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2139 if (WARN_ON(!mlxsw_sp_port_vlan))
2140 return -EINVAL;
2141
2142 if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
2143 NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
2144 return -EINVAL;
2145 }
2146
2147 /* Port is no longer usable as a router interface */
2148 if (mlxsw_sp_port_vlan->fid)
2149 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
2150
2151 return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
2152 extack);
2153 }
2154
2155 static void
2156 mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2157 struct mlxsw_sp_bridge_port *bridge_port,
2158 struct mlxsw_sp_port *mlxsw_sp_port)
2159 {
2160 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2161 struct net_device *dev = bridge_port->dev;
2162 u16 vid;
2163
2164 vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2165 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2166 if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
2167 return;
2168
2169 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2170 }
2171
2172 static int
2173 mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2174 const struct net_device *vxlan_dev, u16 vid,
2175 struct netlink_ext_ack *extack)
2176 {
2177 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2178 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2179 struct mlxsw_sp_nve_params params = {
2180 .type = MLXSW_SP_NVE_TYPE_VXLAN,
2181 .vni = vxlan->cfg.vni,
2182 .dev = vxlan_dev,
2183 };
2184 struct mlxsw_sp_fid *fid;
2185 int err;
2186
2187 fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2188 if (!fid) {
2189 NL_SET_ERR_MSG_MOD(extack, "Did not find a corresponding FID");
2190 return -EINVAL;
2191 }
2192
2193 if (mlxsw_sp_fid_vni_is_set(fid)) {
2194 NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2195 err = -EINVAL;
2196 goto err_vni_exists;
2197 }
2198
2199 err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2200 if (err)
2201 goto err_nve_fid_enable;
2202
2203 /* The tunnel port does not hold a reference on the FID. Only the
2204  * bridge port or the VLAN device on top of it holds one
2205  */
2206 mlxsw_sp_fid_put(fid);
2207
2208 return 0;
2209
2210 err_nve_fid_enable:
2211 err_vni_exists:
2212 mlxsw_sp_fid_put(fid);
2213 return err;
2214 }
2215
2216 static struct mlxsw_sp_fid *
2217 mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
2218 u16 vid, struct netlink_ext_ack *extack)
2219 {
2220 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2221 struct net_device *vxlan_dev;
2222 struct mlxsw_sp_fid *fid;
2223 int err;
2224
2225 fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2226 if (IS_ERR(fid))
2227 return fid;
2228
2229 if (mlxsw_sp_fid_vni_is_set(fid))
2230 return fid;
2231
2232 vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
2233 if (!vxlan_dev)
2234 return fid;
2235
2236 if (!netif_running(vxlan_dev))
2237 return fid;
2238
2239 err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0,
2240 extack);
2241 if (err)
2242 goto err_vxlan_join;
2243
2244 return fid;
2245
2246 err_vxlan_join:
2247 mlxsw_sp_fid_put(fid);
2248 return ERR_PTR(err);
2249 }
2250
2251 static struct mlxsw_sp_fid *
2252 mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2253 u16 vid)
2254 {
2255 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2256
2257 /* The only valid VLAN for a VLAN-unaware bridge is 0 */
2258 if (vid)
2259 return NULL;
2260
2261 return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2262 }
2263
2264 static u16
2265 mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
2266 const struct mlxsw_sp_fid *fid)
2267 {
2268 return 0;
2269 }
2270
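/* Bridge operations for VLAN-unaware (802.1D) bridges, where the whole
 * bridge maps to a single 802.1D FID keyed by the bridge ifindex and
 * the only valid VLAN in its context is 0.
 */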
2271 static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
2272 .port_join = mlxsw_sp_bridge_8021d_port_join,
2273 .port_leave = mlxsw_sp_bridge_8021d_port_leave,
2274 .vxlan_join = mlxsw_sp_bridge_8021d_vxlan_join,
2275 .fid_get = mlxsw_sp_bridge_8021d_fid_get,
2276 .fid_lookup = mlxsw_sp_bridge_8021d_fid_lookup,
2277 .fid_vid = mlxsw_sp_bridge_8021d_fid_vid,
2278 };
2279
2280 int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
2281 struct net_device *brport_dev,
2282 struct net_device *br_dev,
2283 struct netlink_ext_ack *extack)
2284 {
2285 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2286 struct mlxsw_sp_bridge_device *bridge_device;
2287 struct mlxsw_sp_bridge_port *bridge_port;
2288 int err;
2289
2290 bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
2291 if (IS_ERR(bridge_port))
2292 return PTR_ERR(bridge_port);
2293 bridge_device = bridge_port->bridge_device;
2294
2295 err = bridge_device->ops->port_join(bridge_device, bridge_port,
2296 mlxsw_sp_port, extack);
2297 if (err)
2298 goto err_port_join;
2299
2300 return 0;
2301
2302 err_port_join:
2303 mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2304 return err;
2305 }
2306
2307 void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2308 struct net_device *brport_dev,
2309 struct net_device *br_dev)
2310 {
2311 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2312 struct mlxsw_sp_bridge_device *bridge_device;
2313 struct mlxsw_sp_bridge_port *bridge_port;
2314
2315 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2316 if (!bridge_device)
2317 return;
2318 bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
2319 if (!bridge_port)
2320 return;
2321
2322 bridge_device->ops->port_leave(bridge_device, bridge_port,
2323 mlxsw_sp_port);
2324 mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
2325 }
2326
2327 int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
2328 const struct net_device *br_dev,
2329 const struct net_device *vxlan_dev, u16 vid,
2330 struct netlink_ext_ack *extack)
2331 {
2332 struct mlxsw_sp_bridge_device *bridge_device;
2333
2334 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2335 if (WARN_ON(!bridge_device))
2336 return -EINVAL;
2337
2338 return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
2339 extack);
2340 }
2341
2342 void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
2343 const struct net_device *vxlan_dev)
2344 {
2345 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2346 struct mlxsw_sp_fid *fid;
2347
2348 /* If the VxLAN device is down, then the FID does not have a VNI */
2349 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
2350 if (!fid)
2351 return;
2352
2353 mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
2354 mlxsw_sp_fid_put(fid);
2355 }
2356
2357 struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
2358 const struct net_device *br_dev,
2359 u16 vid,
2360 struct netlink_ext_ack *extack)
2361 {
2362 struct mlxsw_sp_bridge_device *bridge_device;
2363
2364 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2365 if (WARN_ON(!bridge_device))
2366 return ERR_PTR(-EINVAL);
2367
2368 return bridge_device->ops->fid_get(bridge_device, vid, extack);
2369 }
2370
2371 static void
2372 mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2373 enum mlxsw_sp_l3proto *proto,
2374 union mlxsw_sp_l3addr *addr)
2375 {
2376 if (vxlan_addr->sa.sa_family == AF_INET) {
2377 addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2378 *proto = MLXSW_SP_L3_PROTO_IPV4;
2379 } else {
2380 addr->addr6 = vxlan_addr->sin6.sin6_addr;
2381 *proto = MLXSW_SP_L3_PROTO_IPV6;
2382 }
2383 }
2384
2385 static void
2386 mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2387 const union mlxsw_sp_l3addr *addr,
2388 union vxlan_addr *vxlan_addr)
2389 {
2390 switch (proto) {
2391 case MLXSW_SP_L3_PROTO_IPV4:
2392 vxlan_addr->sa.sa_family = AF_INET;
2393 vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2394 break;
2395 case MLXSW_SP_L3_PROTO_IPV6:
2396 vxlan_addr->sa.sa_family = AF_INET6;
2397 vxlan_addr->sin6.sin6_addr = addr->addr6;
2398 break;
2399 }
2400 }
2401
2402 static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
2403 const char *mac,
2404 enum mlxsw_sp_l3proto proto,
2405 union mlxsw_sp_l3addr *addr,
2406 __be32 vni, bool adding)
2407 {
2408 struct switchdev_notifier_vxlan_fdb_info info;
2409 struct vxlan_dev *vxlan = netdev_priv(dev);
2410 enum switchdev_notifier_type type;
2411
2412 type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
2413 SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
2414 mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
2415 info.remote_port = vxlan->cfg.dst_port;
2416 info.remote_vni = vni;
2417 info.remote_ifindex = 0;
2418 ether_addr_copy(info.eth_addr, mac);
2419 info.vni = vni;
2420 info.offloaded = adding;
2421 call_switchdev_notifiers(type, dev, &info.info, NULL);
2422 }
2423
2424 static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
2425 const char *mac,
2426 enum mlxsw_sp_l3proto proto,
2427 union mlxsw_sp_l3addr *addr,
2428 __be32 vni,
2429 bool adding)
2430 {
2431 if (netif_is_vxlan(dev))
2432 mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
2433 adding);
2434 }
2435
2436 static void
2437 mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2438 const char *mac, u16 vid,
2439 struct net_device *dev, bool offloaded)
2440 {
2441 struct switchdev_notifier_fdb_info info;
2442
2443 info.addr = mac;
2444 info.vid = vid;
2445 info.offloaded = offloaded;
2446 call_switchdev_notifiers(type, dev, &info.info, NULL);
2447 }
2448
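/* Process a learned / aged-out MAC record from an SFN notification:
 * resolve the {port, FID} to a bridge port, re-program the entry in
 * the device and notify the bridge. If the record cannot be resolved,
 * the entry is removed from the device without notifying.
 */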
2449 static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
2450 char *sfn_pl, int rec_index,
2451 bool adding)
2452 {
2453 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2454 struct mlxsw_sp_bridge_device *bridge_device;
2455 struct mlxsw_sp_bridge_port *bridge_port;
2456 struct mlxsw_sp_port *mlxsw_sp_port;
2457 enum switchdev_notifier_type type;
2458 char mac[ETH_ALEN];
2459 u8 local_port;
2460 u16 vid, fid;
2461 bool do_notification = true;
2462 int err;
2463
2464 mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
2465 mlxsw_sp_port = mlxsw_sp->ports[local_port];
2466 if (!mlxsw_sp_port) {
2467 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
2468 goto just_remove;
2469 }
2470
2471 if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
2472 goto just_remove;
2473
2474 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2475 if (!mlxsw_sp_port_vlan) {
2476 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2477 goto just_remove;
2478 }
2479
2480 bridge_port = mlxsw_sp_port_vlan->bridge_port;
2481 if (!bridge_port) {
2482 netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2483 goto just_remove;
2484 }
2485
2486 bridge_device = bridge_port->bridge_device;
2487 vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2488
2489 do_fdb_op:
2490 err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
2491 adding, true);
2492 if (err) {
2493 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2494 return;
2495 }
2496
2497 if (!do_notification)
2498 return;
2499 type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2500 mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2501
2502 return;
2503
2504 just_remove:
2505 adding = false;
2506 do_notification = false;
2507 goto do_fdb_op;
2508 }
2509
2510 static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
2511 char *sfn_pl, int rec_index,
2512 bool adding)
2513 {
2514 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2515 struct mlxsw_sp_bridge_device *bridge_device;
2516 struct mlxsw_sp_bridge_port *bridge_port;
2517 struct mlxsw_sp_port *mlxsw_sp_port;
2518 enum switchdev_notifier_type type;
2519 char mac[ETH_ALEN];
2520 u16 lag_vid = 0;
2521 u16 lag_id;
2522 u16 vid, fid;
2523 bool do_notification = true;
2524 int err;
2525
2526 mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
2527 mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
2528 if (!mlxsw_sp_port) {
2529 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
2530 goto just_remove;
2531 }
2532
2533 if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
2534 goto just_remove;
2535
2536 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
2537 if (!mlxsw_sp_port_vlan) {
2538 netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
2539 goto just_remove;
2540 }
2541
2542 bridge_port = mlxsw_sp_port_vlan->bridge_port;
2543 if (!bridge_port) {
2544 netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
2545 goto just_remove;
2546 }
2547
2548 bridge_device = bridge_port->bridge_device;
2549 vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
2550 lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
2551 mlxsw_sp_port_vlan->vid : 0;
2552
2553 do_fdb_op:
2554 err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
2555 adding, true);
2556 if (err) {
2557 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
2558 return;
2559 }
2560
2561 if (!do_notification)
2562 return;
2563 type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
2564 mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);
2565
2566 return;
2567
2568 just_remove:
2569 adding = false;
2570 do_notification = false;
2571 goto do_fdb_op;
2572 }
2573
2574 static int
2575 __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2576 const struct mlxsw_sp_fid *fid,
2577 bool adding,
2578 struct net_device **nve_dev,
2579 u16 *p_vid, __be32 *p_vni)
2580 {
2581 struct mlxsw_sp_bridge_device *bridge_device;
2582 struct net_device *br_dev, *dev;
2583 int nve_ifindex;
2584 int err;
2585
2586 err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
2587 if (err)
2588 return err;
2589
2590 err = mlxsw_sp_fid_vni(fid, p_vni);
2591 if (err)
2592 return err;
2593
2594 dev = __dev_get_by_index(&init_net, nve_ifindex);
2595 if (!dev)
2596 return -EINVAL;
2597 *nve_dev = dev;
2598
2599 if (!netif_running(dev))
2600 return -EINVAL;
2601
2602 if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
2603 return -EINVAL;
2604
2605 if (adding && netif_is_vxlan(dev)) {
2606 struct vxlan_dev *vxlan = netdev_priv(dev);
2607
2608 if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
2609 return -EINVAL;
2610 }
2611
2612 br_dev = netdev_master_upper_dev_get(dev);
2613 if (!br_dev)
2614 return -EINVAL;
2615
2616 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2617 if (!bridge_device)
2618 return -EINVAL;
2619
2620 *p_vid = bridge_device->ops->fid_vid(bridge_device, fid);
2621
2622 return 0;
2623 }
2624
2625 static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
2626 char *sfn_pl,
2627 int rec_index,
2628 bool adding)
2629 {
2630 enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
2631 enum switchdev_notifier_type type;
2632 struct net_device *nve_dev;
2633 union mlxsw_sp_l3addr addr;
2634 struct mlxsw_sp_fid *fid;
2635 char mac[ETH_ALEN];
2636 u16 fid_index, vid;
2637 __be32 vni;
2638 u32 uip;
2639 int err;
2640
2641 mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
2642 &uip, &sfn_proto);
2643
2644 fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
2645 if (!fid)
2646 goto err_fid_lookup;
2647
2648 err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
2649 (enum mlxsw_sp_l3proto) sfn_proto,
2650 &addr);
2651 if (err)
2652 goto err_ip_resolve;
2653
2654 err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
2655 &nve_dev, &vid, &vni);
2656 if (err)
2657 goto err_fdb_process;
2658
2659 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2660 (enum mlxsw_sp_l3proto) sfn_proto,
2661 &addr, adding, true);
2662 if (err)
2663 goto err_fdb_op;
2664
2665 mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
2666 (enum mlxsw_sp_l3proto) sfn_proto,
2667 &addr, vni, adding);
2668
2669 type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
2670 SWITCHDEV_FDB_DEL_TO_BRIDGE;
2671 mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);
2672
2673 mlxsw_sp_fid_put(fid);
2674
2675 return;
2676
2677 err_fdb_op:
2678 err_fdb_process:
2679 err_ip_resolve:
2680 mlxsw_sp_fid_put(fid);
2681 err_fid_lookup:
2682 /* Remove the FDB entry in case it could not be processed, otherwise
2683  * the device will keep sending the same notification over and over
2684  */
2685 mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
2686 (enum mlxsw_sp_l3proto) sfn_proto, &addr,
2687 false, true);
2688 }
2689
2690 static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2691 char *sfn_pl, int rec_index)
2692 {
2693 switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2694 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2695 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2696 rec_index, true);
2697 break;
2698 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2699 mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2700 rec_index, false);
2701 break;
2702 case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2703 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2704 rec_index, true);
2705 break;
2706 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2707 mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2708 rec_index, false);
2709 break;
2710 case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
2711 mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2712 rec_index, true);
2713 break;
2714 case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
2715 mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2716 rec_index, false);
2717 break;
2718 }
2719 }
2720
2721 static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
2722 {
2723 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2724
2725 mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2726 msecs_to_jiffies(bridge->fdb_notify.interval));
2727 }
2728
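/* Delayed work that polls the device for FDB notifications via the SFN
 * register, processes every returned record and then re-arms itself
 * according to the configured learning interval.
 */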
2729 static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2730 {
2731 struct mlxsw_sp_bridge *bridge;
2732 struct mlxsw_sp *mlxsw_sp;
2733 char *sfn_pl;
2734 u8 num_rec;
2735 int i;
2736 int err;
2737
2738 sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2739 if (!sfn_pl)
2740 return;
2741
2742 bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2743 mlxsw_sp = bridge->mlxsw_sp;
2744
2745 rtnl_lock();
2746 mlxsw_reg_sfn_pack(sfn_pl);
2747 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2748 if (err) {
2749 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2750 goto out;
2751 }
2752 num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2753 for (i = 0; i < num_rec; i++)
2754 mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
2755
2756 out:
2757 rtnl_unlock();
2758 kfree(sfn_pl);
2759 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
2760 }
2761
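/* Context for switchdev FDB events processed in deferred work. Bridge
 * and VxLAN FDB events carry different notifier info, hence the union.
 */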
2762 struct mlxsw_sp_switchdev_event_work {
2763 struct work_struct work;
2764 union {
2765 struct switchdev_notifier_fdb_info fdb_info;
2766 struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2767 };
2768 struct net_device *dev;
2769 unsigned long event;
2770 };
2771
2772 static void
2773 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
2774 struct mlxsw_sp_switchdev_event_work *
2775 switchdev_work,
2776 struct mlxsw_sp_fid *fid, __be32 vni)
2777 {
2778 struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
2779 struct switchdev_notifier_fdb_info *fdb_info;
2780 struct net_device *dev = switchdev_work->dev;
2781 enum mlxsw_sp_l3proto proto;
2782 union mlxsw_sp_l3addr addr;
2783 int err;
2784
2785 fdb_info = &switchdev_work->fdb_info;
2786 err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
2787 if (err)
2788 return;
2789
2790 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
2791 &proto, &addr);
2792
2793 switch (switchdev_work->event) {
2794 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2795 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2796 vxlan_fdb_info.eth_addr,
2797 mlxsw_sp_fid_index(fid),
2798 proto, &addr, true, false);
2799 if (err)
2800 return;
2801 vxlan_fdb_info.offloaded = true;
2802 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2803 &vxlan_fdb_info.info, NULL);
2804 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2805 vxlan_fdb_info.eth_addr,
2806 fdb_info->vid, dev, true);
2807 break;
2808 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2809 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
2810 vxlan_fdb_info.eth_addr,
2811 mlxsw_sp_fid_index(fid),
2812 proto, &addr, false,
2813 false);
2814 vxlan_fdb_info.offloaded = false;
2815 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2816 &vxlan_fdb_info.info, NULL);
2817 break;
2818 }
2819 }
2820
2821 static void
2822 mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
2823 switchdev_work)
2824 {
2825 struct mlxsw_sp_bridge_device *bridge_device;
2826 struct net_device *dev = switchdev_work->dev;
2827 struct net_device *br_dev;
2828 struct mlxsw_sp *mlxsw_sp;
2829 struct mlxsw_sp_fid *fid;
2830 __be32 vni;
2831 int err;
2832
2833 if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
2834 switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
2835 return;
2836
2837 if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
2838 !switchdev_work->fdb_info.added_by_user)
2839 return;
2840
2841 if (!netif_running(dev))
2842 return;
2843 br_dev = netdev_master_upper_dev_get(dev);
2844 if (!br_dev)
2845 return;
2846 if (!netif_is_bridge_master(br_dev))
2847 return;
2848 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
2849 if (!mlxsw_sp)
2850 return;
2851 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2852 if (!bridge_device)
2853 return;
2854
2855 fid = bridge_device->ops->fid_lookup(bridge_device,
2856 switchdev_work->fdb_info.vid);
2857 if (!fid)
2858 return;
2859
2860 err = mlxsw_sp_fid_vni(fid, &vni);
2861 if (err)
2862 goto out;
2863
2864 mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
2865 vni);
2866
2867 out:
2868 mlxsw_sp_fid_put(fid);
2869 }
2870
2871 static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
2872 {
2873 struct mlxsw_sp_switchdev_event_work *switchdev_work =
2874 container_of(work, struct mlxsw_sp_switchdev_event_work, work);
2875 struct net_device *dev = switchdev_work->dev;
2876 struct switchdev_notifier_fdb_info *fdb_info;
2877 struct mlxsw_sp_port *mlxsw_sp_port;
2878 int err;
2879
2880 rtnl_lock();
2881 if (netif_is_vxlan(dev)) {
2882 mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
2883 goto out;
2884 }
2885
2886 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
2887 if (!mlxsw_sp_port)
2888 goto out;
2889
2890 switch (switchdev_work->event) {
2891 case SWITCHDEV_FDB_ADD_TO_DEVICE:
2892 fdb_info = &switchdev_work->fdb_info;
2893 if (!fdb_info->added_by_user)
2894 break;
2895 err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
2896 if (err)
2897 break;
2898 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2899 fdb_info->addr,
2900 fdb_info->vid, dev, true);
2901 break;
2902 case SWITCHDEV_FDB_DEL_TO_DEVICE:
2903 fdb_info = &switchdev_work->fdb_info;
2904 mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
2905 break;
2906 case SWITCHDEV_FDB_ADD_TO_BRIDGE:
2907 case SWITCHDEV_FDB_DEL_TO_BRIDGE:
2908 /* These events are generated by this driver towards the bridge
2909  * and require no further handling beyond the SPAN respin below
2910  */
2911 break;
2912 }
2913
2914 mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
2915
2916 out:
2917 rtnl_unlock();
2918 kfree(switchdev_work->fdb_info.addr);
2919 kfree(switchdev_work);
2920 dev_put(dev);
2921 }
2922
2923 static void
2924 mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
2925 struct mlxsw_sp_switchdev_event_work *
2926 switchdev_work)
2927 {
2928 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
2929 struct mlxsw_sp_bridge_device *bridge_device;
2930 struct net_device *dev = switchdev_work->dev;
2931 u8 all_zeros_mac[ETH_ALEN] = { 0 };
2932 enum mlxsw_sp_l3proto proto;
2933 union mlxsw_sp_l3addr addr;
2934 struct net_device *br_dev;
2935 struct mlxsw_sp_fid *fid;
2936 u16 vid;
2937 int err;
2938
2939 vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
2940 br_dev = netdev_master_upper_dev_get(dev);
2941
2942 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2943 if (!bridge_device)
2944 return;
2945
2946 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
2947 if (!fid)
2948 return;
2949
2950 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
2951 &proto, &addr);
2952
2953 if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
2954 err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
2955 if (err) {
2956 mlxsw_sp_fid_put(fid);
2957 return;
2958 }
2959 vxlan_fdb_info->offloaded = true;
2960 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2961 &vxlan_fdb_info->info, NULL);
2962 mlxsw_sp_fid_put(fid);
2963 return;
2964 }
2965
2966 /* The device has a single FDB table, whereas Linux has two - one
2967  * in the bridge driver and another in the VxLAN driver. Only
2968  * program the entry to the device if the bridge's FDB table
2969  * points the MAC at the VxLAN device
2970  */
2971 vid = bridge_device->ops->fid_vid(bridge_device, fid);
2972 if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
2973 goto err_br_fdb_find;
2974
2975 err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
2976 mlxsw_sp_fid_index(fid), proto,
2977 &addr, true, false);
2978 if (err)
2979 goto err_fdb_tunnel_uc_op;
2980 vxlan_fdb_info->offloaded = true;
2981 call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
2982 &vxlan_fdb_info->info, NULL);
2983 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
2984 vxlan_fdb_info->eth_addr, vid, dev, true);
2985
2986 mlxsw_sp_fid_put(fid);
2987
2988 return;
2989
2990 err_fdb_tunnel_uc_op:
2991 err_br_fdb_find:
2992 mlxsw_sp_fid_put(fid);
2993 }
2994
2995 static void
2996 mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
2997 struct mlxsw_sp_switchdev_event_work *
2998 switchdev_work)
2999 {
3000 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3001 struct mlxsw_sp_bridge_device *bridge_device;
3002 struct net_device *dev = switchdev_work->dev;
3003 struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3004 u8 all_zeros_mac[ETH_ALEN] = { 0 };
3005 enum mlxsw_sp_l3proto proto;
3006 union mlxsw_sp_l3addr addr;
3007 struct mlxsw_sp_fid *fid;
3008 u16 vid;
3009
3010 vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
3011
3012 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3013 if (!bridge_device)
3014 return;
3015
3016 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
3017 if (!fid)
3018 return;
3019
3020 mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
3021 &proto, &addr);
3022
3023 if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
3024 mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
3025 mlxsw_sp_fid_put(fid);
3026 return;
3027 }
3028
3029 mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3030 mlxsw_sp_fid_index(fid), proto, &addr,
3031 false, false);
3032 vid = bridge_device->ops->fid_vid(bridge_device, fid);
3033 mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3034 vxlan_fdb_info->eth_addr, vid, dev, false);
3035
3036 mlxsw_sp_fid_put(fid);
3037 }
3038
3039 static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
3040 {
3041 struct mlxsw_sp_switchdev_event_work *switchdev_work =
3042 container_of(work, struct mlxsw_sp_switchdev_event_work, work);
3043 struct net_device *dev = switchdev_work->dev;
3044 struct mlxsw_sp *mlxsw_sp;
3045 struct net_device *br_dev;
3046
3047 rtnl_lock();
3048
3049 if (!netif_running(dev))
3050 goto out;
3051 br_dev = netdev_master_upper_dev_get(dev);
3052 if (!br_dev)
3053 goto out;
3054 if (!netif_is_bridge_master(br_dev))
3055 goto out;
3056 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3057 if (!mlxsw_sp)
3058 goto out;
3059
3060 switch (switchdev_work->event) {
3061 case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3062 mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
3063 break;
3064 case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3065 mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
3066 break;
3067 }
3068
3069 out:
3070 rtnl_unlock();
3071 kfree(switchdev_work);
3072 dev_put(dev);
3073 }
3074
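/* Validate a VxLAN FDB notification against the device's offload
 * constraints (default destination port and VNI, no local interface,
 * unicast MAC and unicast destination IP) and copy it into the work
 * item for deferred processing.
 */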
3075 static int
3076 mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
3077 switchdev_work,
3078 struct switchdev_notifier_info *info)
3079 {
3080 struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
3081 struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3082 struct vxlan_config *cfg = &vxlan->cfg;
3083 struct netlink_ext_ack *extack;
3084
3085 extack = switchdev_notifier_info_to_extack(info);
3086 vxlan_fdb_info = container_of(info,
3087 struct switchdev_notifier_vxlan_fdb_info,
3088 info);
3089
3090 if (vxlan_fdb_info->remote_port != cfg->dst_port) {
3091 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
3092 return -EOPNOTSUPP;
3093 }
3094 if (vxlan_fdb_info->remote_vni != cfg->vni ||
3095 vxlan_fdb_info->vni != cfg->vni) {
3096 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
3097 return -EOPNOTSUPP;
3098 }
3099 if (vxlan_fdb_info->remote_ifindex) {
3100 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
3101 return -EOPNOTSUPP;
3102 }
3103 if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
3104 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
3105 return -EOPNOTSUPP;
3106 }
3107 if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
3108 NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
3109 return -EOPNOTSUPP;
3110 }
3111
3112 switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
3113
3114 return 0;
3115 }
3116
3117
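/* Non-blocking switchdev notifier. FDB events arrive in atomic context
 * and are therefore copied and handled in deferred work; port attribute
 * changes are dispatched synchronously through the switchdev helpers.
 */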
3118 static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
3119 unsigned long event, void *ptr)
3120 {
3121 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3122 struct mlxsw_sp_switchdev_event_work *switchdev_work;
3123 struct switchdev_notifier_fdb_info *fdb_info;
3124 struct switchdev_notifier_info *info = ptr;
3125 struct net_device *br_dev;
3126 int err;
3127
3128 if (event == SWITCHDEV_PORT_ATTR_SET) {
3129 err = switchdev_handle_port_attr_set(dev, ptr,
3130 mlxsw_sp_port_dev_check,
3131 mlxsw_sp_port_attr_set);
3132 return notifier_from_errno(err);
3133 }
3134
3135 /* Tunnel devices are not our uppers, so check their master instead */
3136 br_dev = netdev_master_upper_dev_get_rcu(dev);
3137 if (!br_dev)
3138 return NOTIFY_DONE;
3139 if (!netif_is_bridge_master(br_dev))
3140 return NOTIFY_DONE;
3141 if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
3142 return NOTIFY_DONE;
3143
3144 switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
3145 if (!switchdev_work)
3146 return NOTIFY_BAD;
3147
3148 switchdev_work->dev = dev;
3149 switchdev_work->event = event;
3150
3151 switch (event) {
3152 case SWITCHDEV_FDB_ADD_TO_DEVICE:
3153 case SWITCHDEV_FDB_DEL_TO_DEVICE:
3154 case SWITCHDEV_FDB_ADD_TO_BRIDGE:
3155 case SWITCHDEV_FDB_DEL_TO_BRIDGE:
3156 fdb_info = container_of(info,
3157 struct switchdev_notifier_fdb_info,
3158 info);
3159 INIT_WORK(&switchdev_work->work,
3160 mlxsw_sp_switchdev_bridge_fdb_event_work);
3161 memcpy(&switchdev_work->fdb_info, ptr,
3162 sizeof(switchdev_work->fdb_info));
3163 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
3164 if (!switchdev_work->fdb_info.addr)
3165 goto err_addr_alloc;
3166 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
3167 fdb_info->addr);
3168 /* Take a reference on the device. It can be either an upper device
3169  * containing a mlxsw_sp_port or a standalone mlxsw_sp_port. The
3170  * reference is released once the deferred work completes
3171  */
3172 dev_hold(dev);
3173 break;
3174 case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3175 case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3176 INIT_WORK(&switchdev_work->work,
3177 mlxsw_sp_switchdev_vxlan_fdb_event_work);
3178 err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
3179 info);
3180 if (err)
3181 goto err_vxlan_work_prepare;
3182 dev_hold(dev);
3183 break;
3184 default:
3185 kfree(switchdev_work);
3186 return NOTIFY_DONE;
3187 }
3188
3189 mlxsw_core_schedule_work(&switchdev_work->work);
3190
3191 return NOTIFY_DONE;
3192
3193 err_vxlan_work_prepare:
3194 err_addr_alloc:
3195 kfree(switchdev_work);
3196 return NOTIFY_BAD;
3197 }
3198
3199 struct notifier_block mlxsw_sp_switchdev_notifier = {
3200 .notifier_call = mlxsw_sp_switchdev_event,
3201 };
3202
3203 static int
3204 mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
3205 struct mlxsw_sp_bridge_device *bridge_device,
3206 const struct net_device *vxlan_dev, u16 vid,
3207 bool flag_untagged, bool flag_pvid,
3208 struct netlink_ext_ack *extack)
3209 {
3210 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3211 __be32 vni = vxlan->cfg.vni;
3212 struct mlxsw_sp_fid *fid;
3213 u16 old_vid;
3214 int err;
3215
3216 /* The same VLAN cannot be configured as PVID and egress untagged
3217  * on multiple VxLAN devices. Note that the notification arrives
3218  * before the VLAN is actually added to the bridge's database, so
3219  * the lookup cannot return 'vxlan_dev' itself
3220  */
3221 if (flag_untagged && flag_pvid &&
3222 mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
3223 NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
3224 return -EINVAL;
3225 }
3226
3227 if (!netif_running(vxlan_dev))
3228 return 0;
3229
3230 /* The VNI is not mapped to a FID yet. Create the mapping only if
3231  * the new VLAN is configured as both PVID and egress untagged;
3232  * otherwise there is nothing to offload at this point
3233  */
3234 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3235 if (!fid) {
3236 if (!flag_untagged || !flag_pvid)
3237 return 0;
3238 return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device,
3239 vxlan_dev, vid, extack);
3240 }
3241
3242 /* The VNI is already mapped to a FID. If the notified VLAN is the
3243  * one currently mapped to it, the only possible change is that it
3244  * stopped being PVID or egress untagged (both flags being set
3245  * again is a driver bug), so the VNI should be unmapped
3246  */
3247 old_vid = mlxsw_sp_fid_8021q_vid(fid);
3248 if (vid == old_vid) {
3249 if (WARN_ON(flag_untagged && flag_pvid)) {
3250 mlxsw_sp_fid_put(fid);
3251 return -EINVAL;
3252 }
3253 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3254 mlxsw_sp_fid_put(fid);
3255 return 0;
3256 }
3257
3258 /* A different VLAN is currently mapped to the VNI. If the new
3259  * VLAN is not PVID, it cannot take over the mapping
3260  */
3261 if (!flag_pvid) {
3262 mlxsw_sp_fid_put(fid);
3263 return 0;
3264 }
3265
3266 /* The new VLAN is PVID, so the VLAN currently mapped to the VNI
3267  * must be unmapped first
3268  */
3269 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3270 mlxsw_sp_fid_put(fid);
3271
3272 /* Only map the VNI to the new VLAN if it is also configured as
3273  * egress untagged
3274  */
3275 if (!flag_untagged)
3276 return 0;
3277
3278 err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
3279 extack);
3280 if (err)
3281 goto err_vxlan_join;
3282
3283 return 0;
3284
3285 err_vxlan_join:
3286 mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid,
3287 NULL);
3288 return err;
3289 }
3290
3291 static void
3292 mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3293 struct mlxsw_sp_bridge_device *bridge_device,
3294 const struct net_device *vxlan_dev, u16 vid)
3295 {
3296 struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3297 __be32 vni = vxlan->cfg.vni;
3298 struct mlxsw_sp_fid *fid;
3299
3300 if (!netif_running(vxlan_dev))
3301 return;
3302
3303 fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3304 if (!fid)
3305 return;
3306
3307 /* The deleted VLAN is not the one currently mapped to the VNI */
3308 if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3309 goto out;
3310
3311 mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3312
3313 out:
3314 mlxsw_sp_fid_put(fid);
3315 }
3316
3317 static int
3318 mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3319 struct switchdev_notifier_port_obj_info *
3320 port_obj_info)
3321 {
3322 struct switchdev_obj_port_vlan *vlan =
3323 SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3324 bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3325 bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3326 struct switchdev_trans *trans = port_obj_info->trans;
3327 struct mlxsw_sp_bridge_device *bridge_device;
3328 struct netlink_ext_ack *extack;
3329 struct mlxsw_sp *mlxsw_sp;
3330 struct net_device *br_dev;
3331 u16 vid;
3332
3333 extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
3334 br_dev = netdev_master_upper_dev_get(vxlan_dev);
3335 if (!br_dev)
3336 return 0;
3337
3338 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3339 if (!mlxsw_sp)
3340 return 0;
3341
3342 port_obj_info->handled = true;
3343
3344 if (switchdev_trans_ph_commit(trans))
3345 return 0;
3346
3347 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3348 if (!bridge_device)
3349 return -EINVAL;
3350
3351 if (!bridge_device->vlan_enabled)
3352 return 0;
3353
3354 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
3355 int err;
3356
3357 err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
3358 vxlan_dev, vid,
3359 flag_untagged,
3360 flag_pvid, extack);
3361 if (err)
3362 return err;
3363 }
3364
3365 return 0;
3366 }
3367
3368 static void
3369 mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3370 struct switchdev_notifier_port_obj_info *
3371 port_obj_info)
3372 {
3373 struct switchdev_obj_port_vlan *vlan =
3374 SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3375 struct mlxsw_sp_bridge_device *bridge_device;
3376 struct mlxsw_sp *mlxsw_sp;
3377 struct net_device *br_dev;
3378 u16 vid;
3379
3380 br_dev = netdev_master_upper_dev_get(vxlan_dev);
3381 if (!br_dev)
3382 return;
3383
3384 mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3385 if (!mlxsw_sp)
3386 return;
3387
3388 port_obj_info->handled = true;
3389
3390 bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3391 if (!bridge_device)
3392 return;
3393
3394 if (!bridge_device->vlan_enabled)
3395 return;
3396
3397 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
3398 mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
3399 vxlan_dev, vid);
3400 }
3401
3402 static int
3403 mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
3404 struct switchdev_notifier_port_obj_info *
3405 port_obj_info)
3406 {
3407 int err = 0;
3408
3409 switch (port_obj_info->obj->id) {
3410 case SWITCHDEV_OBJ_ID_PORT_VLAN:
3411 err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
3412 port_obj_info);
3413 break;
3414 default:
3415 break;
3416 }
3417
3418 return err;
3419 }
3420
3421 static void
3422 mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
3423 struct switchdev_notifier_port_obj_info *
3424 port_obj_info)
3425 {
3426 switch (port_obj_info->obj->id) {
3427 case SWITCHDEV_OBJ_ID_PORT_VLAN:
3428 mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
3429 break;
3430 default:
3431 break;
3432 }
3433 }
3434
3435 static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
3436 unsigned long event, void *ptr)
3437 {
3438 struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3439 int err = 0;
3440
3441 switch (event) {
3442 case SWITCHDEV_PORT_OBJ_ADD:
3443 if (netif_is_vxlan(dev))
3444 err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
3445 else
3446 err = switchdev_handle_port_obj_add(dev, ptr,
3447 mlxsw_sp_port_dev_check,
3448 mlxsw_sp_port_obj_add);
3449 return notifier_from_errno(err);
3450 case SWITCHDEV_PORT_OBJ_DEL:
3451 if (netif_is_vxlan(dev))
3452 mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
3453 else
3454 err = switchdev_handle_port_obj_del(dev, ptr,
3455 mlxsw_sp_port_dev_check,
3456 mlxsw_sp_port_obj_del);
3457 return notifier_from_errno(err);
3458 case SWITCHDEV_PORT_ATTR_SET:
3459 err = switchdev_handle_port_attr_set(dev, ptr,
3460 mlxsw_sp_port_dev_check,
3461 mlxsw_sp_port_attr_set);
3462 return notifier_from_errno(err);
3463 }
3464
3465 return NOTIFY_DONE;
3466 }
3467
3468 static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
3469 .notifier_call = mlxsw_sp_switchdev_blocking_event,
3470 };
3471
3472 u8
3473 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
3474 {
3475 return bridge_port->stp_state;
3476 }
3477
3478 static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
3479 {
3480 struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
3481 struct notifier_block *nb;
3482 int err;
3483
3484 err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
3485 if (err) {
3486 dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
3487 return err;
3488 }
3489
3490 err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3491 if (err) {
3492 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
3493 return err;
3494 }
3495
3496 nb = &mlxsw_sp_switchdev_blocking_notifier;
3497 err = register_switchdev_blocking_notifier(nb);
3498 if (err) {
3499 dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
3500 goto err_register_switchdev_blocking_notifier;
3501 }
3502
3503 INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
3504 bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
3505 mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
3506 return 0;
3507
3508 err_register_switchdev_blocking_notifier:
3509 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3510 return err;
3511 }
3512
3513 static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3514 {
3515 struct notifier_block *nb;
3516
3517 cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3518
3519 nb = &mlxsw_sp_switchdev_blocking_notifier;
3520 unregister_switchdev_blocking_notifier(nb);
3521
3522 unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3523 }
3524
3525 int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3526 {
3527 struct mlxsw_sp_bridge *bridge;
3528
3529 bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
3530 if (!bridge)
3531 return -ENOMEM;
3532 mlxsw_sp->bridge = bridge;
3533 bridge->mlxsw_sp = mlxsw_sp;
3534
3535 INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
3536
3537 bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
3538 bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
3539
3540 return mlxsw_sp_fdb_init(mlxsw_sp);
3541 }
3542
3543 void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
3544 {
3545 mlxsw_sp_fdb_fini(mlxsw_sp);
3546 WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
3547 kfree(mlxsw_sp->bridge);
3548 }
3549