This source file includes the following definitions:
- mlx5e_rep_get_drvinfo
- mlx5e_uplink_rep_get_drvinfo
- mlx5e_rep_get_strings
- mlx5e_rep_update_hw_counters
- mlx5e_uplink_rep_update_hw_counters
- mlx5e_rep_update_sw_counters
- mlx5e_rep_get_ethtool_stats
- mlx5e_rep_get_sset_count
- mlx5e_rep_get_ringparam
- mlx5e_rep_set_ringparam
- mlx5e_replace_rep_vport_rx_rule
- mlx5e_rep_get_channels
- mlx5e_rep_set_channels
- mlx5e_rep_get_coalesce
- mlx5e_rep_set_coalesce
- mlx5e_rep_get_rxfh_key_size
- mlx5e_rep_get_rxfh_indir_size
- mlx5e_uplink_rep_get_pauseparam
- mlx5e_uplink_rep_set_pauseparam
- mlx5e_uplink_rep_get_link_ksettings
- mlx5e_uplink_rep_set_link_ksettings
- mlx5e_rep_get_port_parent_id
- mlx5e_sqs2vport_stop
- mlx5e_sqs2vport_start
- mlx5e_add_sqs_fwd_rules
- mlx5e_remove_sqs_fwd_rules
- mlx5e_rep_ipv6_interval
- mlx5e_rep_neigh_update_init_interval
- mlx5e_rep_queue_neigh_stats_work
- mlx5e_rep_neigh_entry_hold
- mlx5e_rep_neigh_entry_release
- mlx5e_get_next_nhe
- mlx5e_rep_neigh_stats_work
- mlx5e_rep_update_flows
- mlx5e_rep_neigh_update
- mlx5e_rep_indr_block_priv_lookup
- mlx5e_rep_indr_clean_block_privs
- mlx5e_rep_indr_offload
- mlx5e_rep_indr_setup_block_cb
- mlx5e_rep_indr_tc_block_unbind
- mlx5e_rep_indr_setup_tc_block
- mlx5e_rep_indr_setup_tc_cb
- mlx5e_rep_indr_register_block
- mlx5e_rep_indr_unregister_block
- mlx5e_nic_rep_netdevice_event
- mlx5e_rep_queue_neigh_update_work
- mlx5e_rep_netevent_event
- mlx5e_rep_neigh_init
- mlx5e_rep_neigh_cleanup
- mlx5e_rep_neigh_entry_insert
- mlx5e_rep_neigh_entry_remove
- mlx5e_rep_neigh_entry_lookup
- mlx5e_rep_neigh_entry_create
- mlx5e_rep_encap_entry_attach
- mlx5e_rep_encap_entry_detach
- mlx5e_rep_open
- mlx5e_rep_close
- mlx5e_rep_setup_tc_cls_flower
- mlx5e_rep_setup_tc_cls_matchall
- mlx5e_rep_setup_tc_cb
- mlx5e_rep_setup_tc
- mlx5e_is_uplink_rep
- mlx5e_rep_has_offload_stats
- mlx5e_get_sw_stats64
- mlx5e_rep_get_offload_stats
- mlx5e_rep_get_stats
- mlx5e_rep_change_mtu
- mlx5e_uplink_rep_change_mtu
- mlx5e_uplink_rep_set_mac
- mlx5e_uplink_rep_set_vf_vlan
- mlx5e_get_devlink_port
- mlx5e_eswitch_rep
- mlx5e_build_rep_params
- mlx5e_build_rep_netdev
- mlx5e_init_rep
- mlx5e_cleanup_rep
- mlx5e_create_rep_ttc_table
- mlx5e_create_rep_vport_rx_rule
- mlx5e_init_rep_rx
- mlx5e_cleanup_rep_rx
- mlx5e_init_rep_tx
- mlx5e_cleanup_rep_tx
- mlx5e_rep_enable
- mlx5e_update_rep_rx
- uplink_rep_async_event
- mlx5e_uplink_rep_enable
- mlx5e_uplink_rep_disable
- is_devlink_port_supported
- vport_to_devlink_port_index
- register_devlink_port
- unregister_devlink_port
- mlx5e_vport_rep_load
- mlx5e_vport_rep_unload
- mlx5e_vport_rep_get_proto_dev
- mlx5e_rep_register_vport_reps
- mlx5e_rep_unregister_vport_reps
1 /*
2 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses.  You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <generated/utsrelease.h>
34 #include <linux/mlx5/fs.h>
35 #include <net/switchdev.h>
36 #include <net/pkt_cls.h>
37 #include <net/act_api.h>
38 #include <net/netevent.h>
39 #include <net/arp.h>
40 #include <net/devlink.h>
41 #include <net/ipv6_stubs.h>
42
43 #include "eswitch.h"
44 #include "en.h"
45 #include "en_rep.h"
46 #include "en_tc.h"
47 #include "en/tc_tun.h"
48 #include "fs_core.h"
49 #include "lib/port_tun.h"
50 #define CREATE_TRACE_POINTS
51 #include "diag/en_rep_tracepoint.h"
52
53 #define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
54 max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
55 #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1
56
57 static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
58
59 struct mlx5e_rep_indr_block_priv {
60 struct net_device *netdev;
61 struct mlx5e_rep_priv *rpriv;
62
63 struct list_head list;
64 };
65
66 static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
67 struct net_device *netdev);
68
69 static void mlx5e_rep_get_drvinfo(struct net_device *dev,
70 struct ethtool_drvinfo *drvinfo)
71 {
72 struct mlx5e_priv *priv = netdev_priv(dev);
73 struct mlx5_core_dev *mdev = priv->mdev;
74
75 strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
76 sizeof(drvinfo->driver));
77 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
78 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
79 "%d.%d.%04d (%.16s)",
80 fw_rev_maj(mdev), fw_rev_min(mdev),
81 fw_rev_sub(mdev), mdev->board_id);
82 }
83
84 static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
85 struct ethtool_drvinfo *drvinfo)
86 {
87 struct mlx5e_priv *priv = netdev_priv(dev);
88
89 mlx5e_rep_get_drvinfo(dev, drvinfo);
90 strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
91 sizeof(drvinfo->bus_info));
92 }
93
94 static const struct counter_desc sw_rep_stats_desc[] = {
95 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
96 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
97 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
98 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
99 };
100
101 struct vport_stats {
102 u64 vport_rx_packets;
103 u64 vport_tx_packets;
104 u64 vport_rx_bytes;
105 u64 vport_tx_bytes;
106 };
107
108 static const struct counter_desc vport_rep_stats_desc[] = {
109 { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
110 { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
111 { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
112 { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
113 };
114
115 #define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
116 #define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
117
118 static void mlx5e_rep_get_strings(struct net_device *dev,
119 u32 stringset, uint8_t *data)
120 {
121 int i, j;
122
123 switch (stringset) {
124 case ETH_SS_STATS:
125 for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
126 strcpy(data + (i * ETH_GSTRING_LEN),
127 sw_rep_stats_desc[i].format);
128 for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
129 strcpy(data + (i * ETH_GSTRING_LEN),
130 vport_rep_stats_desc[j].format);
131 break;
132 }
133 }
134
135 static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
136 {
137 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
138 struct mlx5e_rep_priv *rpriv = priv->ppriv;
139 struct mlx5_eswitch_rep *rep = rpriv->rep;
140 struct rtnl_link_stats64 *vport_stats;
141 struct ifla_vf_stats vf_stats;
142 int err;
143
144 err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
145 if (err) {
146 pr_warn("vport %d error %d reading stats\n", rep->vport, err);
147 return;
148 }
149
150 vport_stats = &priv->stats.vf_vport;
151 /* flip tx/rx as we are reporting the counters for the switch vport */
152 vport_stats->rx_packets = vf_stats.tx_packets;
153 vport_stats->rx_bytes = vf_stats.tx_bytes;
154 vport_stats->tx_packets = vf_stats.rx_packets;
155 vport_stats->tx_bytes = vf_stats.rx_bytes;
156 }
157
158 static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
159 {
160 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
161 struct rtnl_link_stats64 *vport_stats;
162
163 mlx5e_grp_802_3_update_stats(priv);
164
165 vport_stats = &priv->stats.vf_vport;
166
167 vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
168 vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
169 vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
170 vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
171 }
172
173 static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
174 {
175 struct mlx5e_sw_stats *s = &priv->stats.sw;
176 struct rtnl_link_stats64 stats64 = {};
177
178 memset(s, 0, sizeof(*s));
179 mlx5e_fold_sw_stats64(priv, &stats64);
180
181 s->rx_packets = stats64.rx_packets;
182 s->rx_bytes = stats64.rx_bytes;
183 s->tx_packets = stats64.tx_packets;
184 s->tx_bytes = stats64.tx_bytes;
185 s->tx_queue_dropped = stats64.tx_dropped;
186 }
187
188 static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
189 struct ethtool_stats *stats, u64 *data)
190 {
191 struct mlx5e_priv *priv = netdev_priv(dev);
192 int i, j;
193
194 if (!data)
195 return;
196
197 mutex_lock(&priv->state_lock);
198 mlx5e_rep_update_sw_counters(priv);
199 priv->profile->update_stats(priv);
200 mutex_unlock(&priv->state_lock);
201
202 for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
203 data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
204 sw_rep_stats_desc, i);
205
206 for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
207 data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
208 vport_rep_stats_desc, j);
209 }
210
211 static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
212 {
213 switch (sset) {
214 case ETH_SS_STATS:
215 return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
216 default:
217 return -EOPNOTSUPP;
218 }
219 }
220
221 static void mlx5e_rep_get_ringparam(struct net_device *dev,
222 struct ethtool_ringparam *param)
223 {
224 struct mlx5e_priv *priv = netdev_priv(dev);
225
226 mlx5e_ethtool_get_ringparam(priv, param);
227 }
228
229 static int mlx5e_rep_set_ringparam(struct net_device *dev,
230 struct ethtool_ringparam *param)
231 {
232 struct mlx5e_priv *priv = netdev_priv(dev);
233
234 return mlx5e_ethtool_set_ringparam(priv, param);
235 }
236
237 static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
238 struct mlx5_flow_destination *dest)
239 {
240 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
241 struct mlx5e_rep_priv *rpriv = priv->ppriv;
242 struct mlx5_eswitch_rep *rep = rpriv->rep;
243 struct mlx5_flow_handle *flow_rule;
244
245 flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
246 rep->vport,
247 dest);
248 if (IS_ERR(flow_rule))
249 return PTR_ERR(flow_rule);
250
251 mlx5_del_flow_rules(rpriv->vport_rx_rule);
252 rpriv->vport_rx_rule = flow_rule;
253 return 0;
254 }
255
256 static void mlx5e_rep_get_channels(struct net_device *dev,
257 struct ethtool_channels *ch)
258 {
259 struct mlx5e_priv *priv = netdev_priv(dev);
260
261 mlx5e_ethtool_get_channels(priv, ch);
262 }
263
264 static int mlx5e_rep_set_channels(struct net_device *dev,
265 struct ethtool_channels *ch)
266 {
267 struct mlx5e_priv *priv = netdev_priv(dev);
268 u16 curr_channels_amount = priv->channels.params.num_channels;
269 u32 new_channels_amount = ch->combined_count;
270 struct mlx5_flow_destination new_dest;
271 int err = 0;
272
273 err = mlx5e_ethtool_set_channels(priv, ch);
274 if (err)
275 return err;
276
277 if (curr_channels_amount == 1 && new_channels_amount > 1) {
278 new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
279 new_dest.ft = priv->fs.ttc.ft.t;
280 } else if (new_channels_amount == 1 && curr_channels_amount > 1) {
281 new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
282 new_dest.tir_num = priv->direct_tir[0].tirn;
283 } else {
284 return 0;
285 }
286
287 err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
288 if (err) {
289 netdev_warn(priv->netdev, "Failed to update vport rx rule when going from (%d) to (%d) channels\n",
290 curr_channels_amount, new_channels_amount);
291 return err;
292 }
293
294 return 0;
295 }
296
297 static int mlx5e_rep_get_coalesce(struct net_device *netdev,
298 struct ethtool_coalesce *coal)
299 {
300 struct mlx5e_priv *priv = netdev_priv(netdev);
301
302 return mlx5e_ethtool_get_coalesce(priv, coal);
303 }
304
305 static int mlx5e_rep_set_coalesce(struct net_device *netdev,
306 struct ethtool_coalesce *coal)
307 {
308 struct mlx5e_priv *priv = netdev_priv(netdev);
309
310 return mlx5e_ethtool_set_coalesce(priv, coal);
311 }
312
313 static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
314 {
315 struct mlx5e_priv *priv = netdev_priv(netdev);
316
317 return mlx5e_ethtool_get_rxfh_key_size(priv);
318 }
319
320 static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
321 {
322 struct mlx5e_priv *priv = netdev_priv(netdev);
323
324 return mlx5e_ethtool_get_rxfh_indir_size(priv);
325 }
326
327 static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
328 struct ethtool_pauseparam *pauseparam)
329 {
330 struct mlx5e_priv *priv = netdev_priv(netdev);
331
332 mlx5e_ethtool_get_pauseparam(priv, pauseparam);
333 }
334
335 static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
336 struct ethtool_pauseparam *pauseparam)
337 {
338 struct mlx5e_priv *priv = netdev_priv(netdev);
339
340 return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
341 }
342
343 static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
344 struct ethtool_link_ksettings *link_ksettings)
345 {
346 struct mlx5e_priv *priv = netdev_priv(netdev);
347
348 return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
349 }
350
351 static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
352 const struct ethtool_link_ksettings *link_ksettings)
353 {
354 struct mlx5e_priv *priv = netdev_priv(netdev);
355
356 return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
357 }
358
359 static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
360 .get_drvinfo = mlx5e_rep_get_drvinfo,
361 .get_link = ethtool_op_get_link,
362 .get_strings = mlx5e_rep_get_strings,
363 .get_sset_count = mlx5e_rep_get_sset_count,
364 .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
365 .get_ringparam = mlx5e_rep_get_ringparam,
366 .set_ringparam = mlx5e_rep_set_ringparam,
367 .get_channels = mlx5e_rep_get_channels,
368 .set_channels = mlx5e_rep_set_channels,
369 .get_coalesce = mlx5e_rep_get_coalesce,
370 .set_coalesce = mlx5e_rep_set_coalesce,
371 .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
372 .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
373 };
374
375 static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
376 .get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
377 .get_link = ethtool_op_get_link,
378 .get_strings = mlx5e_rep_get_strings,
379 .get_sset_count = mlx5e_rep_get_sset_count,
380 .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
381 .get_ringparam = mlx5e_rep_get_ringparam,
382 .set_ringparam = mlx5e_rep_set_ringparam,
383 .get_channels = mlx5e_rep_get_channels,
384 .set_channels = mlx5e_rep_set_channels,
385 .get_coalesce = mlx5e_rep_get_coalesce,
386 .set_coalesce = mlx5e_rep_set_coalesce,
387 .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
388 .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
389 .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
390 .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
391 .get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
392 .set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
393 };
394
395 static void mlx5e_rep_get_port_parent_id(struct net_device *dev,
396 struct netdev_phys_item_id *ppid)
397 {
398 struct mlx5e_priv *priv;
399 u64 parent_id;
400
401 priv = netdev_priv(dev);
402
403 parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
404 ppid->id_len = sizeof(parent_id);
405 memcpy(ppid->id, &parent_id, sizeof(parent_id));
406 }
407
408 static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
409 struct mlx5_eswitch_rep *rep)
410 {
411 struct mlx5e_rep_sq *rep_sq, *tmp;
412 struct mlx5e_rep_priv *rpriv;
413
414 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
415 return;
416
417 rpriv = mlx5e_rep_to_rep_priv(rep);
418 list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
419 mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
420 list_del(&rep_sq->list);
421 kfree(rep_sq);
422 }
423 }
424
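/* The pair below installs/removes one e-switch send-to-vport rule per
* representor SQ, so traffic transmitted on the representor is steered
* back to the vport it represents.
*/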
425 static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
426 struct mlx5_eswitch_rep *rep,
427 u32 *sqns_array, int sqns_num)
428 {
429 struct mlx5_flow_handle *flow_rule;
430 struct mlx5e_rep_priv *rpriv;
431 struct mlx5e_rep_sq *rep_sq;
432 int err;
433 int i;
434
435 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
436 return 0;
437
438 rpriv = mlx5e_rep_to_rep_priv(rep);
439 for (i = 0; i < sqns_num; i++) {
440 rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
441 if (!rep_sq) {
442 err = -ENOMEM;
443 goto out_err;
444 }
445
446 /* Add re-inject rule to the PF/representor sqs */
447 flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
448 rep->vport,
449 sqns_array[i]);
450 if (IS_ERR(flow_rule)) {
451 err = PTR_ERR(flow_rule);
452 kfree(rep_sq);
453 goto out_err;
454 }
455 rep_sq->send_to_vport_rule = flow_rule;
456 list_add(&rep_sq->list, &rpriv->vport_sqs_list);
457 }
458 return 0;
459
460 out_err:
461 mlx5e_sqs2vport_stop(esw, rep);
462 return err;
463 }
464
465 int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
466 {
467 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
468 struct mlx5e_rep_priv *rpriv = priv->ppriv;
469 struct mlx5_eswitch_rep *rep = rpriv->rep;
470 struct mlx5e_channel *c;
471 int n, tc, num_sqs = 0;
472 int err = -ENOMEM;
473 u32 *sqs;
474
475 sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
476 if (!sqs)
477 goto out;
478
479 for (n = 0; n < priv->channels.num; n++) {
480 c = priv->channels.c[n];
481 for (tc = 0; tc < c->num_tc; tc++)
482 sqs[num_sqs++] = c->sq[tc].sqn;
483 }
484
485 err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
486 kfree(sqs);
487
488 out:
489 if (err)
490 netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
491 return err;
492 }
493
494 void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
495 {
496 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
497 struct mlx5e_rep_priv *rpriv = priv->ppriv;
498 struct mlx5_eswitch_rep *rep = rpriv->rep;
499
500 mlx5e_sqs2vport_stop(esw, rep);
501 }
502
503 static unsigned long mlx5e_rep_ipv6_interval(void)
504 {
505 if (IS_ENABLED(CONFIG_IPV6) && ipv6_stub->nd_tbl)
506 return NEIGH_VAR(&ipv6_stub->nd_tbl->parms, DELAY_PROBE_TIME);
507
508 return ~0UL;
509 }
510
511 static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
512 {
513 unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
514 unsigned long ipv6_interval = mlx5e_rep_ipv6_interval();
515 struct net_device *netdev = rpriv->netdev;
516 struct mlx5e_priv *priv = netdev_priv(netdev);
517
518 rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
519 mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
520 }
521
522 void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
523 {
524 struct mlx5e_rep_priv *rpriv = priv->ppriv;
525 struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
526
527 mlx5_fc_queue_stats_work(priv->mdev,
528 &neigh_update->neigh_stats_work,
529 neigh_update->min_interval);
530 }
531
532 static bool mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
533 {
534 return refcount_inc_not_zero(&nhe->refcnt);
535 }
536
537 static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe);
538
539 static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
540 {
541 if (refcount_dec_and_test(&nhe->refcnt)) {
542 mlx5e_rep_neigh_entry_remove(nhe);
543 kfree_rcu(nhe, rcu);
544 }
545 }
546
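/* Iterate the neigh hash entry list: take a reference on the returned
* entry and release the reference on the entry passed in, so callers can
* walk the list without holding the RCU read lock between iterations.
*/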
547 static struct mlx5e_neigh_hash_entry *
548 mlx5e_get_next_nhe(struct mlx5e_rep_priv *rpriv,
549 struct mlx5e_neigh_hash_entry *nhe)
550 {
551 struct mlx5e_neigh_hash_entry *next = NULL;
552
553 rcu_read_lock();
554
555 for (next = nhe ?
556 list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
557 &nhe->neigh_list,
558 struct mlx5e_neigh_hash_entry,
559 neigh_list) :
560 list_first_or_null_rcu(&rpriv->neigh_update.neigh_list,
561 struct mlx5e_neigh_hash_entry,
562 neigh_list);
563 next;
564 next = list_next_or_null_rcu(&rpriv->neigh_update.neigh_list,
565 &next->neigh_list,
566 struct mlx5e_neigh_hash_entry,
567 neigh_list))
568 if (mlx5e_rep_neigh_entry_hold(next))
569 break;
570
571 rcu_read_unlock();
572
573 if (nhe)
574 mlx5e_rep_neigh_entry_release(nhe);
575
576 return next;
577 }
578
579 static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
580 {
581 struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
582 neigh_update.neigh_stats_work.work);
583 struct net_device *netdev = rpriv->netdev;
584 struct mlx5e_priv *priv = netdev_priv(netdev);
585 struct mlx5e_neigh_hash_entry *nhe = NULL;
586
587 rtnl_lock();
588 if (!list_empty(&rpriv->neigh_update.neigh_list))
589 mlx5e_rep_queue_neigh_stats_work(priv);
590
591 while ((nhe = mlx5e_get_next_nhe(rpriv, nhe)) != NULL)
592 mlx5e_tc_update_neigh_used_value(nhe);
593
594 rtnl_unlock();
595 }
596
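/* Re-offload or remove the flows of an encap entry after a neighbour
* update, depending on whether the neighbour became valid and whether its
* hardware address changed. Must be called with RTNL held.
*/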
597 static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
598 struct mlx5e_encap_entry *e,
599 bool neigh_connected,
600 unsigned char ha[ETH_ALEN])
601 {
602 struct ethhdr *eth = (struct ethhdr *)e->encap_header;
603 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
604 bool encap_connected;
605 LIST_HEAD(flow_list);
606
607 ASSERT_RTNL();
608
609 /* wait for encap to be fully initialized */
610 wait_for_completion(&e->res_ready);
611
612 mutex_lock(&esw->offloads.encap_tbl_lock);
613 encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
614 if (e->compl_result < 0 || (encap_connected == neigh_connected &&
615 ether_addr_equal(e->h_dest, ha)))
616 goto unlock;
617
618 mlx5e_take_all_encap_flows(e, &flow_list);
619
620 if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
621 (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
622 mlx5e_tc_encap_flows_del(priv, e, &flow_list);
623
624 if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
625 ether_addr_copy(e->h_dest, ha);
626 ether_addr_copy(eth->h_dest, ha);
627 /* Update the encap source mac, in case that we delete
628 * the flows when encap source mac changed.
629 */
630 ether_addr_copy(eth->h_source, e->route_dev->dev_addr);
631
632 mlx5e_tc_encap_flows_add(priv, e, &flow_list);
633 }
634 unlock:
635 mutex_unlock(&esw->offloads.encap_tbl_lock);
636 mlx5e_put_encap_flow_list(priv, &flow_list);
637 }
638
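/* Work handler queued from the netevent notifier: snapshot the neighbour
* state and propagate it to every encap entry on the hash entry, then
* drop the references taken when the work was queued.
*/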
639 static void mlx5e_rep_neigh_update(struct work_struct *work)
640 {
641 struct mlx5e_neigh_hash_entry *nhe =
642 container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
643 struct neighbour *n = nhe->n;
644 struct mlx5e_encap_entry *e;
645 unsigned char ha[ETH_ALEN];
646 struct mlx5e_priv *priv;
647 bool neigh_connected;
648 u8 nud_state, dead;
649
650 rtnl_lock();
651
652 /* If these parameters are changed after we release the lock,
653 * we'll receive another event letting us know about it.
654 * We use this lock to avoid inconsistency between the neigh validity
655 * and its hw address.
656 */
657 read_lock_bh(&n->lock);
658 memcpy(ha, n->ha, ETH_ALEN);
659 nud_state = n->nud_state;
660 dead = n->dead;
661 read_unlock_bh(&n->lock);
662
663 neigh_connected = (nud_state & NUD_VALID) && !dead;
664
665 trace_mlx5e_rep_neigh_update(nhe, ha, neigh_connected);
666
667 list_for_each_entry(e, &nhe->encap_list, encap_list) {
668 if (!mlx5e_encap_take(e))
669 continue;
670
671 priv = netdev_priv(e->out_dev);
672 mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
673 mlx5e_encap_put(priv, e);
674 }
675 mlx5e_rep_neigh_entry_release(nhe);
676 rtnl_unlock();
677 neigh_release(n);
678 }
679
680 static struct mlx5e_rep_indr_block_priv *
681 mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
682 struct net_device *netdev)
683 {
684 struct mlx5e_rep_indr_block_priv *cb_priv;
685
686 /* All callback list access should be protected by RTNL. */
687 ASSERT_RTNL();
688
689 list_for_each_entry(cb_priv,
690 &rpriv->uplink_priv.tc_indr_block_priv_list,
691 list)
692 if (cb_priv->netdev == netdev)
693 return cb_priv;
694
695 return NULL;
696 }
697
698 static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
699 {
700 struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
701 struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;
702
703 list_for_each_entry_safe(cb_priv, temp, head, list) {
704 mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
705 kfree(cb_priv);
706 }
707 }
708
709 static int
710 mlx5e_rep_indr_offload(struct net_device *netdev,
711 struct flow_cls_offload *flower,
712 struct mlx5e_rep_indr_block_priv *indr_priv)
713 {
714 unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
715 struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
716 int err = 0;
717
718 switch (flower->command) {
719 case FLOW_CLS_REPLACE:
720 err = mlx5e_configure_flower(netdev, priv, flower, flags);
721 break;
722 case FLOW_CLS_DESTROY:
723 err = mlx5e_delete_flower(netdev, priv, flower, flags);
724 break;
725 case FLOW_CLS_STATS:
726 err = mlx5e_stats_flower(netdev, priv, flower, flags);
727 break;
728 default:
729 err = -EOPNOTSUPP;
730 }
731
732 return err;
733 }
734
735 static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
736 void *type_data, void *indr_priv)
737 {
738 struct mlx5e_rep_indr_block_priv *priv = indr_priv;
739
740 switch (type) {
741 case TC_SETUP_CLSFLOWER:
742 return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
743 default:
744 return -EOPNOTSUPP;
745 }
746 }
747
748 static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
749 {
750 struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;
751
752 list_del(&indr_priv->list);
753 kfree(indr_priv);
754 }
755
756 static LIST_HEAD(mlx5e_block_cb_list);
757
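/* Bind or unbind an indirect flow block on behalf of a tunnel or VLAN
* upper device, so TC rules installed on that device can be offloaded
* through the uplink representor.
*/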
758 static int
759 mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
760 struct mlx5e_rep_priv *rpriv,
761 struct flow_block_offload *f)
762 {
763 struct mlx5e_rep_indr_block_priv *indr_priv;
764 struct flow_block_cb *block_cb;
765
766 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
767 return -EOPNOTSUPP;
768
769 f->unlocked_driver_cb = true;
770 f->driver_block_list = &mlx5e_block_cb_list;
771
772 switch (f->command) {
773 case FLOW_BLOCK_BIND:
774 indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
775 if (indr_priv)
776 return -EEXIST;
777
778 indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
779 if (!indr_priv)
780 return -ENOMEM;
781
782 indr_priv->netdev = netdev;
783 indr_priv->rpriv = rpriv;
784 list_add(&indr_priv->list,
785 &rpriv->uplink_priv.tc_indr_block_priv_list);
786
787 block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
788 indr_priv, indr_priv,
789 mlx5e_rep_indr_tc_block_unbind);
790 if (IS_ERR(block_cb)) {
791 list_del(&indr_priv->list);
792 kfree(indr_priv);
793 return PTR_ERR(block_cb);
794 }
795 flow_block_cb_add(block_cb, f);
796 list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);
797
798 return 0;
799 case FLOW_BLOCK_UNBIND:
800 indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
801 if (!indr_priv)
802 return -ENOENT;
803
804 block_cb = flow_block_cb_lookup(f->block,
805 mlx5e_rep_indr_setup_block_cb,
806 indr_priv);
807 if (!block_cb)
808 return -ENOENT;
809
810 flow_block_cb_remove(block_cb, f);
811 list_del(&block_cb->driver_list);
812 return 0;
813 default:
814 return -EOPNOTSUPP;
815 }
816 return 0;
817 }
818
819 static
820 int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
821 enum tc_setup_type type, void *type_data)
822 {
823 switch (type) {
824 case TC_SETUP_BLOCK:
825 return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
826 type_data);
827 default:
828 return -EOPNOTSUPP;
829 }
830 }
831
832 static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
833 struct net_device *netdev)
834 {
835 int err;
836
837 err = __flow_indr_block_cb_register(netdev, rpriv,
838 mlx5e_rep_indr_setup_tc_cb,
839 rpriv);
840 if (err) {
841 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
842
843 mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
844 netdev_name(netdev), err);
845 }
846 return err;
847 }
848
849 static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
850 struct net_device *netdev)
851 {
852 __flow_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
853 rpriv);
854 }
855
856 static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
857 unsigned long event, void *ptr)
858 {
859 struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
860 uplink_priv.netdevice_nb);
861 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
862 struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
863
864 if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
865 !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
866 return NOTIFY_OK;
867
868 switch (event) {
869 case NETDEV_REGISTER:
870 mlx5e_rep_indr_register_block(rpriv, netdev);
871 break;
872 case NETDEV_UNREGISTER:
873 mlx5e_rep_indr_unregister_block(rpriv, netdev);
874 break;
875 }
876 return NOTIFY_OK;
877 }
878
879 static void
880 mlx5e_rep_queue_neigh_update_work(struct mlx5e_priv *priv,
881 struct mlx5e_neigh_hash_entry *nhe,
882 struct neighbour *n)
883 {
884 /* Take a reference to ensure the neighbour and mlx5 encap
885 * entry won't be destructed until we drop the reference in
886 * delayed work.
887 */
888 neigh_hold(n);
889
890 /* This assignment is valid as long as the neigh reference
891 * is taken
892 */
893 nhe->n = n;
894
895 if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
896 mlx5e_rep_neigh_entry_release(nhe);
897 neigh_release(n);
898 }
899 }
900
901 static struct mlx5e_neigh_hash_entry *
902 mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
903 struct mlx5e_neigh *m_neigh);
904
905 static int mlx5e_rep_netevent_event(struct notifier_block *nb,
906 unsigned long event, void *ptr)
907 {
908 struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
909 neigh_update.netevent_nb);
910 struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
911 struct net_device *netdev = rpriv->netdev;
912 struct mlx5e_priv *priv = netdev_priv(netdev);
913 struct mlx5e_neigh_hash_entry *nhe = NULL;
914 struct mlx5e_neigh m_neigh = {};
915 struct neigh_parms *p;
916 struct neighbour *n;
917 bool found = false;
918
919 switch (event) {
920 case NETEVENT_NEIGH_UPDATE:
921 n = ptr;
922 #if IS_ENABLED(CONFIG_IPV6)
923 if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
924 #else
925 if (n->tbl != &arp_tbl)
926 #endif
927 return NOTIFY_DONE;
928
929 m_neigh.dev = n->dev;
930 m_neigh.family = n->ops->family;
931 memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
932
933 rcu_read_lock();
934 nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
935 rcu_read_unlock();
936 if (!nhe)
937 return NOTIFY_DONE;
938
939 mlx5e_rep_queue_neigh_update_work(priv, nhe, n);
940 break;
941
942 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
943 p = ptr;
944
945 /* The notifier fires for any neigh_parms change in the system;
946 * only updates for the ARP/ND table parms of a device we track
947 * are of interest here.
948 */
949 #if IS_ENABLED(CONFIG_IPV6)
950 if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
951 #else
952 if (!p->dev || p->tbl != &arp_tbl)
953 #endif
954 return NOTIFY_DONE;
955
956 rcu_read_lock();
957 list_for_each_entry_rcu(nhe, &neigh_update->neigh_list,
958 neigh_list) {
959 if (p->dev == nhe->m_neigh.dev) {
960 found = true;
961 break;
962 }
963 }
964 rcu_read_unlock();
965 if (!found)
966 return NOTIFY_DONE;
967
968 neigh_update->min_interval = min_t(unsigned long,
969 NEIGH_VAR(p, DELAY_PROBE_TIME),
970 neigh_update->min_interval);
971 mlx5_fc_update_sampling_interval(priv->mdev,
972 neigh_update->min_interval);
973 break;
974 }
975 return NOTIFY_DONE;
976 }
977
978 static const struct rhashtable_params mlx5e_neigh_ht_params = {
979 .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
980 .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
981 .key_len = sizeof(struct mlx5e_neigh),
982 .automatic_shrinking = true,
983 };
984
985 static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
986 {
987 struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
988 int err;
989
990 err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
991 if (err)
992 return err;
993
994 INIT_LIST_HEAD(&neigh_update->neigh_list);
995 mutex_init(&neigh_update->encap_lock);
996 INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
997 mlx5e_rep_neigh_stats_work);
998 mlx5e_rep_neigh_update_init_interval(rpriv);
999
1000 rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
1001 err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
1002 if (err)
1003 goto out_err;
1004 return 0;
1005
1006 out_err:
1007 rhashtable_destroy(&neigh_update->neigh_ht);
1008 return err;
1009 }
1010
1011 static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
1012 {
1013 struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
1014 struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
1015
1016 unregister_netevent_notifier(&neigh_update->netevent_nb);
1017
1018 flush_workqueue(priv->wq);
1019
1020 cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);
1021
1022 mutex_destroy(&neigh_update->encap_lock);
1023 rhashtable_destroy(&neigh_update->neigh_ht);
1024 }
1025
1026 static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
1027 struct mlx5e_neigh_hash_entry *nhe)
1028 {
1029 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1030 int err;
1031
1032 err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
1033 &nhe->rhash_node,
1034 mlx5e_neigh_ht_params);
1035 if (err)
1036 return err;
1037
1038 list_add_rcu(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);
1039
1040 return err;
1041 }
1042
1043 static void mlx5e_rep_neigh_entry_remove(struct mlx5e_neigh_hash_entry *nhe)
1044 {
1045 struct mlx5e_rep_priv *rpriv = nhe->priv->ppriv;
1046
1047 mutex_lock(&rpriv->neigh_update.encap_lock);
1048
1049 list_del_rcu(&nhe->neigh_list);
1050
1051 rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
1052 &nhe->rhash_node,
1053 mlx5e_neigh_ht_params);
1054 mutex_unlock(&rpriv->neigh_update.encap_lock);
1055 }
1056
1057 /* This function must only be called under the representor's encap_lock or
1058 * inside rcu read lock section.
1059 */
1060 static struct mlx5e_neigh_hash_entry *
1061 mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
1062 struct mlx5e_neigh *m_neigh)
1063 {
1064 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1065 struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
1066 struct mlx5e_neigh_hash_entry *nhe;
1067
1068 nhe = rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
1069 mlx5e_neigh_ht_params);
1070 return nhe && mlx5e_rep_neigh_entry_hold(nhe) ? nhe : NULL;
1071 }
1072
1073 static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
1074 struct mlx5e_encap_entry *e,
1075 struct mlx5e_neigh_hash_entry **nhe)
1076 {
1077 int err;
1078
1079 *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
1080 if (!*nhe)
1081 return -ENOMEM;
1082
1083 (*nhe)->priv = priv;
1084 memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
1085 INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
1086 spin_lock_init(&(*nhe)->encap_list_lock);
1087 INIT_LIST_HEAD(&(*nhe)->encap_list);
1088 refcount_set(&(*nhe)->refcnt, 1);
1089
1090 err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
1091 if (err)
1092 goto out_free;
1093 return 0;
1094
1095 out_free:
1096 kfree(*nhe);
1097 return err;
1098 }
1099
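/* Attach an encap entry to its neigh hash entry (creating one if needed)
* under encap_lock, taking a tunnel entropy reference for the entry's
* reformat type.
*/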
1100 int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
1101 struct mlx5e_encap_entry *e)
1102 {
1103 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1104 struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
1105 struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
1106 struct mlx5e_neigh_hash_entry *nhe;
1107 int err;
1108
1109 err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
1110 if (err)
1111 return err;
1112
1113 mutex_lock(&rpriv->neigh_update.encap_lock);
1114 nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
1115 if (!nhe) {
1116 err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
1117 if (err) {
1118 mutex_unlock(&rpriv->neigh_update.encap_lock);
1119 mlx5_tun_entropy_refcount_dec(tun_entropy,
1120 e->reformat_type);
1121 return err;
1122 }
1123 }
1124
1125 e->nhe = nhe;
1126 spin_lock(&nhe->encap_list_lock);
1127 list_add_rcu(&e->encap_list, &nhe->encap_list);
1128 spin_unlock(&nhe->encap_list_lock);
1129
1130 mutex_unlock(&rpriv->neigh_update.encap_lock);
1131
1132 return 0;
1133 }
1134
1135 void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
1136 struct mlx5e_encap_entry *e)
1137 {
1138 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1139 struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
1140 struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
1141
1142 if (!e->nhe)
1143 return;
1144
1145 spin_lock(&e->nhe->encap_list_lock);
1146 list_del_rcu(&e->encap_list);
1147 spin_unlock(&e->nhe->encap_list_lock);
1148
1149 mlx5e_rep_neigh_entry_release(e->nhe);
1150 e->nhe = NULL;
1151 mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
1152 }
1153
1154 static int mlx5e_rep_open(struct net_device *dev)
1155 {
1156 struct mlx5e_priv *priv = netdev_priv(dev);
1157 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1158 struct mlx5_eswitch_rep *rep = rpriv->rep;
1159 int err;
1160
1161 mutex_lock(&priv->state_lock);
1162 err = mlx5e_open_locked(dev);
1163 if (err)
1164 goto unlock;
1165
1166 if (!mlx5_modify_vport_admin_state(priv->mdev,
1167 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1168 rep->vport, 1,
1169 MLX5_VPORT_ADMIN_STATE_UP))
1170 netif_carrier_on(dev);
1171
1172 unlock:
1173 mutex_unlock(&priv->state_lock);
1174 return err;
1175 }
1176
1177 static int mlx5e_rep_close(struct net_device *dev)
1178 {
1179 struct mlx5e_priv *priv = netdev_priv(dev);
1180 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1181 struct mlx5_eswitch_rep *rep = rpriv->rep;
1182 int ret;
1183
1184 mutex_lock(&priv->state_lock);
1185 mlx5_modify_vport_admin_state(priv->mdev,
1186 MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
1187 rep->vport, 1,
1188 MLX5_VPORT_ADMIN_STATE_DOWN);
1189 ret = mlx5e_close_locked(dev);
1190 mutex_unlock(&priv->state_lock);
1191 return ret;
1192 }
1193
1194 static int
1195 mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
1196 struct flow_cls_offload *cls_flower, int flags)
1197 {
1198 switch (cls_flower->command) {
1199 case FLOW_CLS_REPLACE:
1200 return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
1201 flags);
1202 case FLOW_CLS_DESTROY:
1203 return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
1204 flags);
1205 case FLOW_CLS_STATS:
1206 return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
1207 flags);
1208 default:
1209 return -EOPNOTSUPP;
1210 }
1211 }
1212
1213 static
1214 int mlx5e_rep_setup_tc_cls_matchall(struct mlx5e_priv *priv,
1215 struct tc_cls_matchall_offload *ma)
1216 {
1217 switch (ma->command) {
1218 case TC_CLSMATCHALL_REPLACE:
1219 return mlx5e_tc_configure_matchall(priv, ma);
1220 case TC_CLSMATCHALL_DESTROY:
1221 return mlx5e_tc_delete_matchall(priv, ma);
1222 case TC_CLSMATCHALL_STATS:
1223 mlx5e_tc_stats_matchall(priv, ma);
1224 return 0;
1225 default:
1226 return -EOPNOTSUPP;
1227 }
1228 }
1229
1230 static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
1231 void *cb_priv)
1232 {
1233 unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
1234 struct mlx5e_priv *priv = cb_priv;
1235
1236 switch (type) {
1237 case TC_SETUP_CLSFLOWER:
1238 return mlx5e_rep_setup_tc_cls_flower(priv, type_data, flags);
1239 case TC_SETUP_CLSMATCHALL:
1240 return mlx5e_rep_setup_tc_cls_matchall(priv, type_data);
1241 default:
1242 return -EOPNOTSUPP;
1243 }
1244 }
1245
1246 static LIST_HEAD(mlx5e_rep_block_cb_list);
1247
1248 static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
1249 void *type_data)
1250 {
1251 struct mlx5e_priv *priv = netdev_priv(dev);
1252 struct flow_block_offload *f = type_data;
1253
1254 switch (type) {
1255 case TC_SETUP_BLOCK:
1256 f->unlocked_driver_cb = true;
1257 return flow_block_cb_setup_simple(type_data,
1258 &mlx5e_rep_block_cb_list,
1259 mlx5e_rep_setup_tc_cb,
1260 priv, priv, true);
1261 default:
1262 return -EOPNOTSUPP;
1263 }
1264 }
1265
1266 bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
1267 {
1268 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1269 struct mlx5_eswitch_rep *rep;
1270
1271 if (!MLX5_ESWITCH_MANAGER(priv->mdev))
1272 return false;
1273
1274 if (!rpriv)
1275 return false;
1276
1277 rep = rpriv->rep;
1278 return (rep->vport == MLX5_VPORT_UPLINK);
1279 }
1280
1281 static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
1282 {
1283 switch (attr_id) {
1284 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1285 return true;
1286 }
1287
1288 return false;
1289 }
1290
1291 static int
1292 mlx5e_get_sw_stats64(const struct net_device *dev,
1293 struct rtnl_link_stats64 *stats)
1294 {
1295 struct mlx5e_priv *priv = netdev_priv(dev);
1296
1297 mlx5e_fold_sw_stats64(priv, stats);
1298 return 0;
1299 }
1300
1301 static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
1302 void *sp)
1303 {
1304 switch (attr_id) {
1305 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1306 return mlx5e_get_sw_stats64(dev, sp);
1307 }
1308
1309 return -EINVAL;
1310 }
1311
1312 static void
1313 mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
1314 {
1315 struct mlx5e_priv *priv = netdev_priv(dev);
1316
1317 /* update HW stats in background for next time */
1318 mlx5e_queue_update_stats(priv);
1319 memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
1320 }
1321
1322 static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
1323 {
1324 return mlx5e_change_mtu(netdev, new_mtu, NULL);
1325 }
1326
1327 static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
1328 {
1329 return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
1330 }
1331
1332 static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
1333 {
1334 struct sockaddr *saddr = addr;
1335
1336 if (!is_valid_ether_addr(saddr->sa_data))
1337 return -EADDRNOTAVAIL;
1338
1339 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
1340 return 0;
1341 }
1342
1343 static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
1344 __be16 vlan_proto)
1345 {
1346 netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");
1347
1348 if (vlan != 0)
1349 return -EOPNOTSUPP;
1350
1351 /* allow setting 0-vid for compatibility with libvirt */
1352 return 0;
1353 }
1354
1355 static struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
1356 {
1357 struct mlx5e_priv *priv = netdev_priv(dev);
1358 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1359
1360 return &rpriv->dl_port;
1361 }
1362
1363 static const struct net_device_ops mlx5e_netdev_ops_rep = {
1364 .ndo_open = mlx5e_rep_open,
1365 .ndo_stop = mlx5e_rep_close,
1366 .ndo_start_xmit = mlx5e_xmit,
1367 .ndo_setup_tc = mlx5e_rep_setup_tc,
1368 .ndo_get_devlink_port = mlx5e_get_devlink_port,
1369 .ndo_get_stats64 = mlx5e_rep_get_stats,
1370 .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
1371 .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
1372 .ndo_change_mtu = mlx5e_rep_change_mtu,
1373 };
1374
1375 static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
1376 .ndo_open = mlx5e_open,
1377 .ndo_stop = mlx5e_close,
1378 .ndo_start_xmit = mlx5e_xmit,
1379 .ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
1380 .ndo_setup_tc = mlx5e_rep_setup_tc,
1381 .ndo_get_devlink_port = mlx5e_get_devlink_port,
1382 .ndo_get_stats64 = mlx5e_get_stats,
1383 .ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
1384 .ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
1385 .ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
1386 .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
1387 .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
1388 .ndo_features_check = mlx5e_features_check,
1389 .ndo_set_vf_mac = mlx5e_set_vf_mac,
1390 .ndo_set_vf_rate = mlx5e_set_vf_rate,
1391 .ndo_get_vf_config = mlx5e_get_vf_config,
1392 .ndo_get_vf_stats = mlx5e_get_vf_stats,
1393 .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
1394 .ndo_set_features = mlx5e_set_features,
1395 };
1396
1397 bool mlx5e_eswitch_rep(struct net_device *netdev)
1398 {
1399 if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
1400 netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
1401 return true;
1402
1403 return false;
1404 }
1405
1406 static void mlx5e_build_rep_params(struct net_device *netdev)
1407 {
1408 struct mlx5e_priv *priv = netdev_priv(netdev);
1409 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1410 struct mlx5_eswitch_rep *rep = rpriv->rep;
1411 struct mlx5_core_dev *mdev = priv->mdev;
1412 struct mlx5e_params *params;
1413
1414 u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
1415 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
1416 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
1417
1418 params = &priv->channels.params;
1419 params->hard_mtu = MLX5E_ETH_HARD_MTU;
1420 params->sw_mtu = netdev->mtu;
1421
1422 /* SQ */
1423 if (rep->vport == MLX5_VPORT_UPLINK)
1424 params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
1425 else
1426 params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;
1427
1428 /* RQ */
1429 mlx5e_build_rq_params(mdev, params);
1430
1431 /* CQ moderation params */
1432 params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
1433 mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
1434
1435 params->num_tc = 1;
1436 params->tunneled_offload_en = false;
1437
1438 mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
1439
1440 /* RSS */
1441 mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
1442 }
1443
1444 static void mlx5e_build_rep_netdev(struct net_device *netdev)
1445 {
1446 struct mlx5e_priv *priv = netdev_priv(netdev);
1447 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1448 struct mlx5_eswitch_rep *rep = rpriv->rep;
1449 struct mlx5_core_dev *mdev = priv->mdev;
1450
1451 if (rep->vport == MLX5_VPORT_UPLINK) {
1452 SET_NETDEV_DEV(netdev, mdev->device);
1453 netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
1454
1455 mlx5_query_mac_address(mdev, netdev->dev_addr);
1456 netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
1457 #ifdef CONFIG_MLX5_CORE_EN_DCB
1458 if (MLX5_CAP_GEN(mdev, qos))
1459 netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
1460 #endif
1461 } else {
1462 netdev->netdev_ops = &mlx5e_netdev_ops_rep;
1463 eth_hw_addr_random(netdev);
1464 netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
1465 }
1466
1467 netdev->watchdog_timeo = 15 * HZ;
1468
1469 netdev->features |= NETIF_F_NETNS_LOCAL;
1470
1471 netdev->hw_features |= NETIF_F_HW_TC;
1472 netdev->hw_features |= NETIF_F_SG;
1473 netdev->hw_features |= NETIF_F_IP_CSUM;
1474 netdev->hw_features |= NETIF_F_IPV6_CSUM;
1475 netdev->hw_features |= NETIF_F_GRO;
1476 netdev->hw_features |= NETIF_F_TSO;
1477 netdev->hw_features |= NETIF_F_TSO6;
1478 netdev->hw_features |= NETIF_F_RXCSUM;
1479
1480 if (rep->vport == MLX5_VPORT_UPLINK)
1481 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1482 else
1483 netdev->features |= NETIF_F_VLAN_CHALLENGED;
1484
1485 netdev->features |= netdev->hw_features;
1486 }
1487
1488 static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
1489 struct net_device *netdev,
1490 const struct mlx5e_profile *profile,
1491 void *ppriv)
1492 {
1493 struct mlx5e_priv *priv = netdev_priv(netdev);
1494 int err;
1495
1496 err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
1497 if (err)
1498 return err;
1499
1500 priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
1501
1502 mlx5e_build_rep_params(netdev);
1503 mlx5e_build_rep_netdev(netdev);
1504
1505 mlx5e_timestamp_init(priv);
1506
1507 return 0;
1508 }
1509
1510 static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
1511 {
1512 mlx5e_netdev_cleanup(priv->netdev, priv);
1513 }
1514
1515 static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
1516 {
1517 struct ttc_params ttc_params = {};
1518 int tt, err;
1519
1520 priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
1521 MLX5_FLOW_NAMESPACE_KERNEL);
1522
1523 /* The inner_ttc in the ttc params is intentionally not set */
1524 ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
1525 mlx5e_set_ttc_ft_params(&ttc_params);
1526 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
1527 ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
1528
1529 err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
1530 if (err) {
1531 netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
1532 return err;
1533 }
1534 return 0;
1535 }
1536
1537 static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
1538 {
1539 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1540 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1541 struct mlx5_eswitch_rep *rep = rpriv->rep;
1542 struct mlx5_flow_handle *flow_rule;
1543 struct mlx5_flow_destination dest;
1544
1545 dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
1546 dest.tir_num = priv->direct_tir[0].tirn;
1547 flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
1548 rep->vport,
1549 &dest);
1550 if (IS_ERR(flow_rule))
1551 return PTR_ERR(flow_rule);
1552 rpriv->vport_rx_rule = flow_rule;
1553 return 0;
1554 }
1555
1556 static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
1557 {
1558 struct mlx5_core_dev *mdev = priv->mdev;
1559 int err;
1560
1561 mlx5e_init_l2_addr(priv);
1562
1563 err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
1564 if (err) {
1565 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
1566 return err;
1567 }
1568
1569 err = mlx5e_create_indirect_rqt(priv);
1570 if (err)
1571 goto err_close_drop_rq;
1572
1573 err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
1574 if (err)
1575 goto err_destroy_indirect_rqts;
1576
1577 err = mlx5e_create_indirect_tirs(priv, false);
1578 if (err)
1579 goto err_destroy_direct_rqts;
1580
1581 err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
1582 if (err)
1583 goto err_destroy_indirect_tirs;
1584
1585 err = mlx5e_create_rep_ttc_table(priv);
1586 if (err)
1587 goto err_destroy_direct_tirs;
1588
1589 err = mlx5e_create_rep_vport_rx_rule(priv);
1590 if (err)
1591 goto err_destroy_ttc_table;
1592
1593 return 0;
1594
1595 err_destroy_ttc_table:
1596 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
1597 err_destroy_direct_tirs:
1598 mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
1599 err_destroy_indirect_tirs:
1600 mlx5e_destroy_indirect_tirs(priv);
1601 err_destroy_direct_rqts:
1602 mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
1603 err_destroy_indirect_rqts:
1604 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
1605 err_close_drop_rq:
1606 mlx5e_close_drop_rq(&priv->drop_rq);
1607 return err;
1608 }
1609
1610 static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
1611 {
1612 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1613
1614 mlx5_del_flow_rules(rpriv->vport_rx_rule);
1615 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
1616 mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
1617 mlx5e_destroy_indirect_tirs(priv);
1618 mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
1619 mlx5e_destroy_rqt(priv, &priv->indir_rqt);
1620 mlx5e_close_drop_rq(&priv->drop_rq);
1621 }
1622
1623 static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
1624 {
1625 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1626 struct mlx5_rep_uplink_priv *uplink_priv;
1627 int err;
1628
1629 err = mlx5e_create_tises(priv);
1630 if (err) {
1631 mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
1632 return err;
1633 }
1634
1635 if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
1636 uplink_priv = &rpriv->uplink_priv;
1637
1638 mutex_init(&uplink_priv->unready_flows_lock);
1639 INIT_LIST_HEAD(&uplink_priv->unready_flows);
1640
1641 /* init shared tc flow table */
1642 err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
1643 if (err)
1644 goto destroy_tises;
1645
1646 mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);
1647
1648 /* init indirect block notifications */
1649 INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
1650 uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
1651 err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
1652 if (err) {
1653 mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
1654 goto tc_esw_cleanup;
1655 }
1656 }
1657
1658 return 0;
1659
1660 tc_esw_cleanup:
1661 mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
1662 destroy_tises:
1663 mlx5e_destroy_tises(priv);
1664 return err;
1665 }
1666
1667 static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
1668 {
1669 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1670
1671 mlx5e_destroy_tises(priv);
1672
1673 if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
1674 /* clean indirect TC block notifications */
1675 unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
1676 mlx5e_rep_indr_clean_block_privs(rpriv);
1677
1678 /* delete shared tc flow table */
1679 mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
1680 mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
1681 }
1682 }
1683
1684 static void mlx5e_rep_enable(struct mlx5e_priv *priv)
1685 {
1686 mlx5e_set_netdev_mtu_boundaries(priv);
1687 }
1688
1689 static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
1690 {
1691 return 0;
1692 }
1693
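/* Async event handler for the uplink representor: port up/down events
* trigger a carrier update, and LAG port-affinity changes trigger
* re-offloading of the flows.
*/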
1694 static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
1695 {
1696 struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);
1697
1698 if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
1699 struct mlx5_eqe *eqe = data;
1700
1701 switch (eqe->sub_type) {
1702 case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
1703 case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
1704 queue_work(priv->wq, &priv->update_carrier_work);
1705 break;
1706 default:
1707 return NOTIFY_DONE;
1708 }
1709
1710 return NOTIFY_OK;
1711 }
1712
1713 if (event == MLX5_DEV_EVENT_PORT_AFFINITY) {
1714 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1715
1716 queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);
1717
1718 return NOTIFY_OK;
1719 }
1720
1721 return NOTIFY_DONE;
1722 }
1723
1724 static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
1725 {
1726 struct net_device *netdev = priv->netdev;
1727 struct mlx5_core_dev *mdev = priv->mdev;
1728 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1729 u16 max_mtu;
1730
1731 netdev->min_mtu = ETH_MIN_MTU;
1732 mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
1733 netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
1734 mlx5e_set_dev_port_mtu(priv);
1735
1736 INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
1737 mlx5e_tc_reoffload_flows_work);
1738
1739 mlx5_lag_add(mdev, netdev);
1740 priv->events_nb.notifier_call = uplink_rep_async_event;
1741 mlx5_notifier_register(mdev, &priv->events_nb);
1742 #ifdef CONFIG_MLX5_CORE_EN_DCB
1743 mlx5e_dcbnl_initialize(priv);
1744 mlx5e_dcbnl_init_app(priv);
1745 #endif
1746 }
1747
1748 static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
1749 {
1750 struct mlx5_core_dev *mdev = priv->mdev;
1751 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1752
1753 #ifdef CONFIG_MLX5_CORE_EN_DCB
1754 mlx5e_dcbnl_delete_app(priv);
1755 #endif
1756 mlx5_notifier_unregister(mdev, &priv->events_nb);
1757 cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
1758 mlx5_lag_remove(mdev);
1759 }
1760
1761 static const struct mlx5e_profile mlx5e_rep_profile = {
1762 .init = mlx5e_init_rep,
1763 .cleanup = mlx5e_cleanup_rep,
1764 .init_rx = mlx5e_init_rep_rx,
1765 .cleanup_rx = mlx5e_cleanup_rep_rx,
1766 .init_tx = mlx5e_init_rep_tx,
1767 .cleanup_tx = mlx5e_cleanup_rep_tx,
1768 .enable = mlx5e_rep_enable,
1769 .update_rx = mlx5e_update_rep_rx,
1770 .update_stats = mlx5e_rep_update_hw_counters,
1771 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
1772 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
1773 .max_tc = 1,
1774 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
1775 };
1776
1777 static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
1778 .init = mlx5e_init_rep,
1779 .cleanup = mlx5e_cleanup_rep,
1780 .init_rx = mlx5e_init_rep_rx,
1781 .cleanup_rx = mlx5e_cleanup_rep_rx,
1782 .init_tx = mlx5e_init_rep_tx,
1783 .cleanup_tx = mlx5e_cleanup_rep_tx,
1784 .enable = mlx5e_uplink_rep_enable,
1785 .disable = mlx5e_uplink_rep_disable,
1786 .update_rx = mlx5e_update_rep_rx,
1787 .update_stats = mlx5e_uplink_rep_update_hw_counters,
1788 .update_carrier = mlx5e_update_carrier,
1789 .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
1790 .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
1791 .max_tc = MLX5E_MAX_NUM_TC,
1792 .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
1793 };
1794
1795 static bool
1796 is_devlink_port_supported(const struct mlx5_core_dev *dev,
1797 const struct mlx5e_rep_priv *rpriv)
1798 {
1799 return rpriv->rep->vport == MLX5_VPORT_UPLINK ||
1800 rpriv->rep->vport == MLX5_VPORT_PF ||
1801 mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
1802 }
1803
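/* Build a device-unique devlink port index: the vhca_id in the high bits,
* the vport number in the low 16 bits.
*/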
1804 static unsigned int
1805 vport_to_devlink_port_index(const struct mlx5_core_dev *dev, u16 vport_num)
1806 {
1807 return (MLX5_CAP_GEN(dev, vhca_id) << 16) | vport_num;
1808 }
1809
1810 static int register_devlink_port(struct mlx5_core_dev *dev,
1811 struct mlx5e_rep_priv *rpriv)
1812 {
1813 struct devlink *devlink = priv_to_devlink(dev);
1814 struct mlx5_eswitch_rep *rep = rpriv->rep;
1815 struct netdev_phys_item_id ppid = {};
1816 unsigned int dl_port_index = 0;
1817 u16 pfnum;
1818
1819 if (!is_devlink_port_supported(dev, rpriv))
1820 return 0;
1821
1822 mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
1823 pfnum = PCI_FUNC(dev->pdev->devfn);
1824
1825 if (rep->vport == MLX5_VPORT_UPLINK) {
1826 devlink_port_attrs_set(&rpriv->dl_port,
1827 DEVLINK_PORT_FLAVOUR_PHYSICAL,
1828 pfnum, false, 0,
1829 &ppid.id[0], ppid.id_len);
1830 dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
1831 } else if (rep->vport == MLX5_VPORT_PF) {
1832 devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
1833 &ppid.id[0], ppid.id_len,
1834 pfnum);
1835 dl_port_index = rep->vport;
1836 } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch,
1837 rpriv->rep->vport)) {
1838 devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
1839 &ppid.id[0], ppid.id_len,
1840 pfnum, rep->vport - 1);
1841 dl_port_index = vport_to_devlink_port_index(dev, rep->vport);
1842 }
1843
1844 return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
1845 }
1846
1847 static void unregister_devlink_port(struct mlx5_core_dev *dev,
1848 struct mlx5e_rep_priv *rpriv)
1849 {
1850 if (is_devlink_port_supported(dev, rpriv))
1851 devlink_port_unregister(&rpriv->dl_port);
1852 }
1853
1854 /* e-Switch vport representors */
1855 static int
1856 mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
1857 {
1858 const struct mlx5e_profile *profile;
1859 struct mlx5e_rep_priv *rpriv;
1860 struct net_device *netdev;
1861 int nch, err;
1862
1863 rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
1864 if (!rpriv)
1865 return -ENOMEM;
1866
1867 /* rpriv->rep to be looked up when profile->init() is called */
1868 rpriv->rep = rep;
1869
1870 nch = mlx5e_get_max_num_channels(dev);
1871 profile = (rep->vport == MLX5_VPORT_UPLINK) ?
1872 &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
1873 netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
1874 if (!netdev) {
1875 pr_warn("Failed to create representor netdev for vport %d\n",
1876 rep->vport);
1877 kfree(rpriv);
1878 return -EINVAL;
1879 }
1880
1881 rpriv->netdev = netdev;
1882 rep->rep_data[REP_ETH].priv = rpriv;
1883 INIT_LIST_HEAD(&rpriv->vport_sqs_list);
1884
1885 if (rep->vport == MLX5_VPORT_UPLINK) {
1886 err = mlx5e_create_mdev_resources(dev);
1887 if (err)
1888 goto err_destroy_netdev;
1889 }
1890
1891 err = mlx5e_attach_netdev(netdev_priv(netdev));
1892 if (err) {
1893 pr_warn("Failed to attach representor netdev for vport %d\n",
1894 rep->vport);
1895 goto err_destroy_mdev_resources;
1896 }
1897
1898 err = mlx5e_rep_neigh_init(rpriv);
1899 if (err) {
1900 pr_warn("Failed to initialize neighbours handling for vport %d\n",
1901 rep->vport);
1902 goto err_detach_netdev;
1903 }
1904
1905 err = register_devlink_port(dev, rpriv);
1906 if (err) {
1907 esw_warn(dev, "Failed to register devlink port %d\n",
1908 rep->vport);
1909 goto err_neigh_cleanup;
1910 }
1911
1912 err = register_netdev(netdev);
1913 if (err) {
1914 pr_warn("Failed to register representor netdev for vport %d\n",
1915 rep->vport);
1916 goto err_devlink_cleanup;
1917 }
1918
1919 if (is_devlink_port_supported(dev, rpriv))
1920 devlink_port_type_eth_set(&rpriv->dl_port, netdev);
1921 return 0;
1922
1923 err_devlink_cleanup:
1924 unregister_devlink_port(dev, rpriv);
1925
1926 err_neigh_cleanup:
1927 mlx5e_rep_neigh_cleanup(rpriv);
1928
1929 err_detach_netdev:
1930 mlx5e_detach_netdev(netdev_priv(netdev));
1931
1932 err_destroy_mdev_resources:
1933 if (rep->vport == MLX5_VPORT_UPLINK)
1934 mlx5e_destroy_mdev_resources(dev);
1935
1936 err_destroy_netdev:
1937 mlx5e_destroy_netdev(netdev_priv(netdev));
1938 kfree(rpriv);
1939 return err;
1940 }
1941
1942 static void
1943 mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
1944 {
1945 struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
1946 struct net_device *netdev = rpriv->netdev;
1947 struct mlx5e_priv *priv = netdev_priv(netdev);
1948 struct mlx5_core_dev *dev = priv->mdev;
1949 void *ppriv = priv->ppriv;
1950
1951 if (is_devlink_port_supported(dev, rpriv))
1952 devlink_port_type_clear(&rpriv->dl_port);
1953 unregister_netdev(netdev);
1954 unregister_devlink_port(dev, rpriv);
1955 mlx5e_rep_neigh_cleanup(rpriv);
1956 mlx5e_detach_netdev(priv);
1957 if (rep->vport == MLX5_VPORT_UPLINK)
1958 mlx5e_destroy_mdev_resources(priv->mdev);
1959 mlx5e_destroy_netdev(priv);
1960 kfree(ppriv);
1961 }
1962
1963 static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
1964 {
1965 struct mlx5e_rep_priv *rpriv;
1966
1967 rpriv = mlx5e_rep_to_rep_priv(rep);
1968
1969 return rpriv->netdev;
1970 }
1971
1972 static const struct mlx5_eswitch_rep_ops rep_ops = {
1973 .load = mlx5e_vport_rep_load,
1974 .unload = mlx5e_vport_rep_unload,
1975 .get_proto_dev = mlx5e_vport_rep_get_proto_dev
1976 };
1977
1978 void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
1979 {
1980 struct mlx5_eswitch *esw = mdev->priv.eswitch;
1981
1982 mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
1983 }
1984
1985 void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
1986 {
1987 struct mlx5_eswitch *esw = mdev->priv.eswitch;
1988
1989 mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
1990 }