This source file includes the following definitions:
- nfp_repr_get_locked
- nfp_repr_inc_tx_stats
- nfp_repr_inc_rx_stats
- nfp_repr_phy_port_get_stats64
- nfp_repr_vnic_get_stats64
- nfp_repr_get_stats64
- nfp_repr_has_offload_stats
- nfp_repr_get_host_stats64
- nfp_repr_get_offload_stats
- nfp_repr_change_mtu
- nfp_repr_xmit
- nfp_repr_stop
- nfp_repr_open
- nfp_repr_fix_features
- nfp_repr_transfer_features
- nfp_repr_clean
- nfp_repr_init
- __nfp_repr_free
- nfp_repr_free
- nfp_repr_alloc_mqs
- nfp_repr_clean_and_free
- nfp_reprs_clean_and_free
- nfp_reprs_clean_and_free_by_type
- nfp_reprs_alloc
- nfp_reprs_resync_phys_ports
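
Together these implement the representor netdevs the NFP driver exposes for its physical, PF and VF ports: per-CPU software statistics, hardware statistics read from the MAC and vNIC control memories, a transmit path that redirects frames to the PF device through a metadata dst, and helpers for allocating, registering and tearing down whole sets of representors under RCU.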
4 #include <linux/etherdevice.h>
5 #include <linux/io-64-nonatomic-hi-lo.h>
6 #include <linux/lockdep.h>
7 #include <net/dst_metadata.h>
8
9 #include "nfpcore/nfp_cpp.h"
10 #include "nfpcore/nfp_nsp.h"
11 #include "nfp_app.h"
12 #include "nfp_main.h"
13 #include "nfp_net.h"
14 #include "nfp_net_ctrl.h"
15 #include "nfp_net_repr.h"
16 #include "nfp_net_sriov.h"
17 #include "nfp_port.h"
18
19 struct net_device *
20 nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
21 {
22 return rcu_dereference_protected(set->reprs[id],
23 lockdep_is_held(&app->pf->lock));
24 }
25
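/* Software TX accounting for a representor: frames accepted by the lower
 * device (NET_XMIT_SUCCESS or NET_XMIT_CN) are counted as packets and bytes,
 * anything else counts as a TX drop.  Counters are per-CPU; the packet and
 * byte counts are updated inside the u64_stats seqcount section so readers
 * see consistent 64-bit values.
 */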
26 static void
27 nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
28 int tx_status)
29 {
30 struct nfp_repr *repr = netdev_priv(netdev);
31 struct nfp_repr_pcpu_stats *stats;
32
33 if (unlikely(tx_status != NET_XMIT_SUCCESS &&
34 tx_status != NET_XMIT_CN)) {
35 this_cpu_inc(repr->stats->tx_drops);
36 return;
37 }
38
39 stats = this_cpu_ptr(repr->stats);
40 u64_stats_update_begin(&stats->syncp);
41 stats->tx_packets++;
42 stats->tx_bytes += len;
43 u64_stats_update_end(&stats->syncp);
44 }
45
46 void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
47 {
48 struct nfp_repr *repr = netdev_priv(netdev);
49 struct nfp_repr_pcpu_stats *stats;
50
51 stats = this_cpu_ptr(repr->stats);
52 u64_stats_update_begin(&stats->syncp);
53 stats->rx_packets++;
54 stats->rx_bytes += len;
55 u64_stats_update_end(&stats->syncp);
56 }
57
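/* Physical port representors report hardware counters read directly from
 * the MAC statistics area mapped at port->eth_stats.
 */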
58 static void
59 nfp_repr_phy_port_get_stats64(struct nfp_port *port,
60 struct rtnl_link_stats64 *stats)
61 {
62 u8 __iomem *mem = port->eth_stats;
63
64 stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
65 stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
66 stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
67
68 stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
69 stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
70 stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
71 }
72
73 static void
74 nfp_repr_vnic_get_stats64(struct nfp_port *port,
75 struct rtnl_link_stats64 *stats)
76 {
77 /* TX and RX counters are deliberately swapped: the representor reports
78  * traffic as seen from the switch side of the port, so the vNIC's RX
79  * counters become the representor's TX counters and vice versa. */
80 stats->tx_packets = readq(port->vnic + NFP_NET_CFG_STATS_RX_FRAMES);
81 stats->tx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_RX_OCTETS);
82 stats->tx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_RX_DISCARDS);
83
84 stats->rx_packets = readq(port->vnic + NFP_NET_CFG_STATS_TX_FRAMES);
85 stats->rx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_TX_OCTETS);
86 stats->rx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_TX_DISCARDS);
87 }
88
89 static void
90 nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
91 {
92 struct nfp_repr *repr = netdev_priv(netdev);
93
94 if (WARN_ON(!repr->port))
95 return;
96
97 switch (repr->port->type) {
98 case NFP_PORT_PHYS_PORT:
99 if (!__nfp_port_get_eth_port(repr->port))
100 break;
101 nfp_repr_phy_port_get_stats64(repr->port, stats);
102 break;
103 case NFP_PORT_PF_PORT:
104 case NFP_PORT_VF_PORT:
105 nfp_repr_vnic_get_stats64(repr->port, stats);
break;
106 default:
107 break;
108 }
109 }
110
111 static bool
112 nfp_repr_has_offload_stats(const struct net_device *dev, int attr_id)
113 {
114 switch (attr_id) {
115 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
116 return true;
117 }
118
119 return false;
120 }
121
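/* Sum the per-CPU software counters for IFLA_OFFLOAD_XSTATS_CPU_HIT, i.e.
 * the traffic which was handled by the host rather than forwarded purely
 * in hardware.
 */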
122 static int
123 nfp_repr_get_host_stats64(const struct net_device *netdev,
124 struct rtnl_link_stats64 *stats)
125 {
126 struct nfp_repr *repr = netdev_priv(netdev);
127 int i;
128
129 for_each_possible_cpu(i) {
130 u64 tbytes, tpkts, tdrops, rbytes, rpkts;
131 struct nfp_repr_pcpu_stats *repr_stats;
132 unsigned int start;
133
134 repr_stats = per_cpu_ptr(repr->stats, i);
135 do {
136 start = u64_stats_fetch_begin_irq(&repr_stats->syncp);
137 tbytes = repr_stats->tx_bytes;
138 tpkts = repr_stats->tx_packets;
139 tdrops = repr_stats->tx_drops;
140 rbytes = repr_stats->rx_bytes;
141 rpkts = repr_stats->rx_packets;
142 } while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start));
143
144 stats->tx_bytes += tbytes;
145 stats->tx_packets += tpkts;
146 stats->tx_dropped += tdrops;
147 stats->rx_bytes += rbytes;
148 stats->rx_packets += rpkts;
149 }
150
151 return 0;
152 }
153
154 static int
155 nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
156 void *stats)
157 {
158 switch (attr_id) {
159 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
160 return nfp_repr_get_host_stats64(dev, stats);
161 }
162
163 return -EINVAL;
164 }
165
166 static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
167 {
168 struct nfp_repr *repr = netdev_priv(netdev);
169 int err;
170
171 err = nfp_app_check_mtu(repr->app, netdev, new_mtu);
172 if (err)
173 return err;
174
175 err = nfp_app_repr_change_mtu(repr->app, netdev, new_mtu);
176 if (err)
177 return err;
178
179 netdev->mtu = new_mtu;
180
181 return 0;
182 }
183
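/* Transmit path of a representor: the pre-allocated METADATA_HW_PORT_MUX
 * dst (carrying the control message port id) is attached to the skb, which
 * is then requeued on the lower PF netdev.  The return value of
 * dev_queue_xmit() only feeds the software TX statistics; NETDEV_TX_OK is
 * always returned to the stack.
 */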
184 static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
185 {
186 struct nfp_repr *repr = netdev_priv(netdev);
187 unsigned int len = skb->len;
188 int ret;
189
190 skb_dst_drop(skb);
191 dst_hold((struct dst_entry *)repr->dst);
192 skb_dst_set(skb, (struct dst_entry *)repr->dst);
193 skb->dev = repr->dst->u.port_info.lower_dev;
194
195 ret = dev_queue_xmit(skb);
196 nfp_repr_inc_tx_stats(netdev, len, ret);
197
198 return NETDEV_TX_OK;
199 }
200
201 static int nfp_repr_stop(struct net_device *netdev)
202 {
203 struct nfp_repr *repr = netdev_priv(netdev);
204 int err;
205
206 err = nfp_app_repr_stop(repr->app, repr);
207 if (err)
208 return err;
209
210 nfp_port_configure(netdev, false);
211 return 0;
212 }
213
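/* Opening a representor first enables the underlying port and then lets the
 * app (e.g. flower) do its per-representor setup; if the app callback fails
 * the port is configured back down so open/stop stay balanced.
 */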
214 static int nfp_repr_open(struct net_device *netdev)
215 {
216 struct nfp_repr *repr = netdev_priv(netdev);
217 int err;
218
219 err = nfp_port_configure(netdev, true);
220 if (err)
221 return err;
222
223 err = nfp_app_repr_open(repr->app, repr);
224 if (err)
225 goto err_port_disable;
226
227 return 0;
228
229 err_port_disable:
230 nfp_port_configure(netdev, false);
231 return err;
232 }
233
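/* A representor can only offer features its lower (PF) device also supports,
 * so the requested feature set is intersected with the lower device's, with
 * IP/IPv6 checksum offload treated as equivalent to NETIF_F_HW_CSUM.
 * Software features, TC offload and LLTX are preserved regardless.
 */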
234 static netdev_features_t
235 nfp_repr_fix_features(struct net_device *netdev, netdev_features_t features)
236 {
237 struct nfp_repr *repr = netdev_priv(netdev);
238 netdev_features_t old_features = features;
239 netdev_features_t lower_features;
240 struct net_device *lower_dev;
241
242 lower_dev = repr->dst->u.port_info.lower_dev;
243
244 lower_features = lower_dev->features;
245 if (lower_features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
246 lower_features |= NETIF_F_HW_CSUM;
247
248 features = netdev_intersect_features(features, lower_features);
249 features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_HW_TC);
250 features |= NETIF_F_LLTX;
251
252 return features;
253 }
254
255 const struct net_device_ops nfp_repr_netdev_ops = {
256 .ndo_init = nfp_app_ndo_init,
257 .ndo_uninit = nfp_app_ndo_uninit,
258 .ndo_open = nfp_repr_open,
259 .ndo_stop = nfp_repr_stop,
260 .ndo_start_xmit = nfp_repr_xmit,
261 .ndo_change_mtu = nfp_repr_change_mtu,
262 .ndo_get_stats64 = nfp_repr_get_stats64,
263 .ndo_has_offload_stats = nfp_repr_has_offload_stats,
264 .ndo_get_offload_stats = nfp_repr_get_offload_stats,
265 .ndo_get_phys_port_name = nfp_port_get_phys_port_name,
266 .ndo_setup_tc = nfp_port_setup_tc,
267 .ndo_set_vf_mac = nfp_app_set_vf_mac,
268 .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
269 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
270 .ndo_set_vf_trust = nfp_app_set_vf_trust,
271 .ndo_get_vf_config = nfp_app_get_vf_config,
272 .ndo_set_vf_link_state = nfp_app_set_vf_link_state,
273 .ndo_fix_features = nfp_repr_fix_features,
274 .ndo_set_features = nfp_port_set_features,
275 .ndo_set_mac_address = eth_mac_addr,
276 .ndo_get_port_parent_id = nfp_port_get_port_parent_id,
277 .ndo_get_devlink_port = nfp_devlink_get_devlink_port,
278 };
279
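/* If @lower really is this representor's lower device, copy its GSO limits
 * and re-run feature negotiation: netdev_update_features() will invoke
 * nfp_repr_fix_features() against the new lower state.
 */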
280 void
281 nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower)
282 {
283 struct nfp_repr *repr = netdev_priv(netdev);
284
285 if (repr->dst->u.port_info.lower_dev != lower)
286 return;
287
288 netdev->gso_max_size = lower->gso_max_size;
289 netdev->gso_max_segs = lower->gso_max_segs;
290
291 netdev_update_features(netdev);
292 }
293
294 static void nfp_repr_clean(struct nfp_repr *repr)
295 {
296 unregister_netdev(repr->netdev);
297 nfp_app_repr_clean(repr->app, repr->netdev);
298 dst_release((struct dst_entry *)repr->dst);
299 nfp_port_free(repr->port);
300 }
301
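/* Bind an already-allocated representor netdev to its port: set up the
 * METADATA_HW_PORT_MUX dst pointing at the PF netdev, derive the feature
 * set from the PF vNIC's repr_cap TLV, and register the netdev.  A minimal
 * usage sketch of the expected sequence from an app callback (local variable
 * names and the error label are illustrative, not taken from this driver):
 *
 *	netdev = nfp_repr_alloc_mqs(app, 1, 1);
 *	if (!netdev)
 *		goto err;
 *	err = nfp_repr_init(app, netdev, cmsg_port_id, port, pf_netdev);
 *	if (err) {
 *		nfp_port_free(port);
 *		nfp_repr_free(netdev);
 *		goto err;
 *	}
 */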
302 int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
303 u32 cmsg_port_id, struct nfp_port *port,
304 struct net_device *pf_netdev)
305 {
306 struct nfp_repr *repr = netdev_priv(netdev);
307 struct nfp_net *nn = netdev_priv(pf_netdev);
308 u32 repr_cap = nn->tlv_caps.repr_cap;
309 int err;
310
311 repr->port = port;
312 repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
313 if (!repr->dst)
314 return -ENOMEM;
315 repr->dst->u.port_info.port_id = cmsg_port_id;
316 repr->dst->u.port_info.lower_dev = pf_netdev;
317
318 netdev->netdev_ops = &nfp_repr_netdev_ops;
319 netdev->ethtool_ops = &nfp_port_ethtool_ops;
320
321 netdev->max_mtu = pf_netdev->max_mtu;
322
323 /* Only advertise what the PF vNIC's capabilities (repr_cap) support. */
324 if (repr_cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
325 netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
326
327 netdev->hw_features = NETIF_F_HIGHDMA;
328 if (repr_cap & NFP_NET_CFG_CTRL_RXCSUM_ANY)
329 netdev->hw_features |= NETIF_F_RXCSUM;
330 if (repr_cap & NFP_NET_CFG_CTRL_TXCSUM)
331 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
332 if (repr_cap & NFP_NET_CFG_CTRL_GATHER)
333 netdev->hw_features |= NETIF_F_SG;
334 if ((repr_cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
335 repr_cap & NFP_NET_CFG_CTRL_LSO2)
336 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
337 if (repr_cap & NFP_NET_CFG_CTRL_RSS_ANY)
338 netdev->hw_features |= NETIF_F_RXHASH;
339 if (repr_cap & NFP_NET_CFG_CTRL_VXLAN) {
340 if (repr_cap & NFP_NET_CFG_CTRL_LSO)
341 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
342 }
343 if (repr_cap & NFP_NET_CFG_CTRL_NVGRE) {
344 if (repr_cap & NFP_NET_CFG_CTRL_LSO)
345 netdev->hw_features |= NETIF_F_GSO_GRE;
346 }
347 if (repr_cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
348 netdev->hw_enc_features = netdev->hw_features;
349
350 netdev->vlan_features = netdev->hw_features;
351
352 if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN)
353 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
354 if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN) {
355 if (repr_cap & NFP_NET_CFG_CTRL_LSO2)
356 netdev_warn(netdev, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
357 else
358 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
359 }
360 if (repr_cap & NFP_NET_CFG_CTRL_CTAG_FILTER)
361 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
362
363 netdev->features = netdev->hw_features;
364
365 /* Advertise TSO via hw_features but leave it disabled by default. */
366 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
367 netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
368
369 netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
370 netdev->features |= NETIF_F_LLTX;
371
372 if (nfp_app_has_tc(app)) {
373 netdev->features |= NETIF_F_HW_TC;
374 netdev->hw_features |= NETIF_F_HW_TC;
375 }
376
377 err = nfp_app_repr_init(app, netdev);
378 if (err)
379 goto err_clean;
380
381 err = register_netdev(netdev);
382 if (err)
383 goto err_repr_clean;
384
385 return 0;
386
387 err_repr_clean:
388 nfp_app_repr_clean(app, netdev);
389 err_clean:
390 dst_release((struct dst_entry *)repr->dst);
391 return err;
392 }
393
394 static void __nfp_repr_free(struct nfp_repr *repr)
395 {
396 free_percpu(repr->stats);
397 free_netdev(repr->netdev);
398 }
399
400 void nfp_repr_free(struct net_device *netdev)
401 {
402 __nfp_repr_free(netdev_priv(netdev));
403 }
404
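/* Allocate a representor netdev with the given TX/RX queue counts and its
 * per-CPU statistics area; carrier starts off.  Returns NULL on failure;
 * the result is released with nfp_repr_free().
 */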
405 struct net_device *
406 nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs)
407 {
408 struct net_device *netdev;
409 struct nfp_repr *repr;
410
411 netdev = alloc_etherdev_mqs(sizeof(*repr), txqs, rxqs);
412 if (!netdev)
413 return NULL;
414
415 netif_carrier_off(netdev);
416
417 repr = netdev_priv(netdev);
418 repr->netdev = netdev;
419 repr->app = app;
420
421 repr->stats = netdev_alloc_pcpu_stats(struct nfp_repr_pcpu_stats);
422 if (!repr->stats)
423 goto err_free_netdev;
424
425 return netdev;
426
427 err_free_netdev:
428 free_netdev(netdev);
429 return NULL;
430 }
431
432 void nfp_repr_clean_and_free(struct nfp_repr *repr)
433 {
434 nfp_info(repr->app->cpp, "Destroying Representor(%s)\n",
435 repr->netdev->name);
436 nfp_repr_clean(repr);
437 __nfp_repr_free(repr);
438 }
439
440 void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs)
441 {
442 struct net_device *netdev;
443 unsigned int i;
444
445 for (i = 0; i < reprs->num_reprs; i++) {
446 netdev = nfp_repr_get_locked(app, reprs, i);
447 if (netdev)
448 nfp_repr_clean_and_free(netdev_priv(netdev));
449 }
450
451 kfree(reprs);
452 }
453
454 void
455 nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
456 {
457 struct net_device *netdev;
458 struct nfp_reprs *reprs;
459 int i;
460
461 reprs = rcu_dereference_protected(app->reprs[type],
462 lockdep_is_held(&app->pf->lock));
463 if (!reprs)
464 return;
465
466 /* Preclean all representors before the reprs reference is removed from
467  * the app below and the netdevs are cleaned up and freed.
468  */
469 for (i = 0; i < reprs->num_reprs; i++) {
470 netdev = nfp_repr_get_locked(app, reprs, i);
471 if (netdev)
472 nfp_app_repr_preclean(app, netdev);
473 }
474
475 reprs = nfp_app_reprs_set(app, type, NULL);
476
477 synchronize_rcu();
478 nfp_reprs_clean_and_free(app, reprs);
479 }
480
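/* Allocate a representor set with room for num_reprs RCU-managed netdev
 * pointers; the slots start out NULL and are filled in by the caller.
 */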
481 struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
482 {
483 struct nfp_reprs *reprs;
484
485 reprs = kzalloc(sizeof(*reprs) +
486 num_reprs * sizeof(struct net_device *), GFP_KERNEL);
487 if (!reprs)
488 return NULL;
489 reprs->num_reprs = num_reprs;
490
491 return reprs;
492 }
493
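/* Walk the physical port representors and tear down any whose port has
 * become NFP_PORT_INVALID (presumably after a port reconfiguration), using
 * the same preclean / RCU-unpublish / synchronize_rcu sequence as a full
 * set teardown, but leaving the rest of the set in place.
 */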
494 int nfp_reprs_resync_phys_ports(struct nfp_app *app)
495 {
496 struct net_device *netdev;
497 struct nfp_reprs *reprs;
498 struct nfp_repr *repr;
499 int i;
500
501 reprs = nfp_reprs_get_locked(app, NFP_REPR_TYPE_PHYS_PORT);
502 if (!reprs)
503 return 0;
504
505 for (i = 0; i < reprs->num_reprs; i++) {
506 netdev = nfp_repr_get_locked(app, reprs, i);
507 if (!netdev)
508 continue;
509
510 repr = netdev_priv(netdev);
511 if (repr->port->type != NFP_PORT_INVALID)
512 continue;
513
514 nfp_app_repr_preclean(app, netdev);
515 rtnl_lock();
516 rcu_assign_pointer(reprs->reprs[i], NULL);
517 rtnl_unlock();
518 synchronize_rcu();
519 nfp_repr_clean(repr);
520 }
521
522 return 0;
523 }