This source file includes the following definitions:
- dpaa2_eth_get_drvinfo
- dpaa2_eth_get_link_ksettings
- dpaa2_eth_get_pauseparam
- dpaa2_eth_set_pauseparam
- dpaa2_eth_get_strings
- dpaa2_eth_get_sset_count
- dpaa2_eth_get_ethtool_stats
- prep_eth_rule
- prep_uip_rule
- prep_l4_rule
- prep_ext_rule
- prep_mac_ext_rule
- prep_cls_rule
- do_cls_rule
- num_rules
- update_cls_rule
- dpaa2_eth_get_rxnfc
- dpaa2_eth_set_rxnfc
- dpaa2_eth_get_ts_info
#include <linux/net_tstamp.h>
#include <linux/nospec.h>

#include "dpni.h"
#include "dpaa2-eth.h"

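/* Hardware counter strings; the order must match the DPNI statistics
 * pages copied out in dpaa2_eth_get_ethtool_stats() below.
 */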
static char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = {
        "[hw] rx frames",
        "[hw] rx bytes",
        "[hw] rx mcast frames",
        "[hw] rx mcast bytes",
        "[hw] rx bcast frames",
        "[hw] rx bcast bytes",
        "[hw] tx frames",
        "[hw] tx bytes",
        "[hw] tx mcast frames",
        "[hw] tx mcast bytes",
        "[hw] tx bcast frames",
        "[hw] tx bcast bytes",
        "[hw] rx filtered frames",
        "[hw] rx discarded frames",
        "[hw] rx nobuffer discards",
        "[hw] tx discarded frames",
        "[hw] tx confirmed frames",
        "[hw] tx dequeued bytes",
        "[hw] tx dequeued frames",
        "[hw] tx rejected bytes",
        "[hw] tx rejected frames",
        "[hw] tx pending frames",
};

#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats)

static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = {
        /* per-CPU driver stats, from priv->percpu_extras */
        "[drv] tx conf frames",
        "[drv] tx conf bytes",
        "[drv] tx sg frames",
        "[drv] tx sg bytes",
        "[drv] tx realloc frames",
        "[drv] rx sg frames",
        "[drv] rx sg bytes",
        "[drv] enqueue portal busy",
        /* per-channel stats */
        "[drv] dequeue portal busy",
        "[drv] channel pull errors",
        "[drv] cdan",
        "[drv] xdp drop",
        "[drv] xdp tx",
        "[drv] xdp tx errors",
        "[drv] xdp redirect",
        /* FQ and buffer pool counts, queried from QBMan */
        "[qbman] rx pending frames",
        "[qbman] rx pending bytes",
        "[qbman] tx conf pending frames",
        "[qbman] tx conf pending bytes",
        "[qbman] buffer count",
};

#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras)

static void dpaa2_eth_get_drvinfo(struct net_device *net_dev,
                                  struct ethtool_drvinfo *drvinfo)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

        strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));

        snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                 "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor);

        strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
                sizeof(drvinfo->bus_info));
}

static int
dpaa2_eth_get_link_ksettings(struct net_device *net_dev,
                             struct ethtool_link_ksettings *link_settings)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

        link_settings->base.autoneg = AUTONEG_DISABLE;
        if (!(priv->link_state.options & DPNI_LINK_OPT_HALF_DUPLEX))
                link_settings->base.duplex = DUPLEX_FULL;
        link_settings->base.speed = priv->link_state.rate;

        return 0;
}

static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
                                     struct ethtool_pauseparam *pause)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        u64 link_options = priv->link_state.options;

        /* PAUSE enables flow control in both directions; ASYM_PAUSE
         * inverts the Tx setting relative to Rx
         */
        pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
        pause->tx_pause = pause->rx_pause ^
                          !!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
        pause->autoneg = AUTONEG_DISABLE;
}

static int dpaa2_eth_set_pauseparam(struct net_device *net_dev,
                                    struct ethtool_pauseparam *pause)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        struct dpni_link_cfg cfg = {0};
        int err;

        if (!dpaa2_eth_has_pause_support(priv)) {
                netdev_info(net_dev, "No pause frame support for DPNI version < %d.%d\n",
                            DPNI_PAUSE_VER_MAJOR, DPNI_PAUSE_VER_MINOR);
                return -EOPNOTSUPP;
        }

        if (pause->autoneg)
                return -EOPNOTSUPP;

        cfg.rate = priv->link_state.rate;
        cfg.options = priv->link_state.options;
        if (pause->rx_pause)
                cfg.options |= DPNI_LINK_OPT_PAUSE;
        else
                cfg.options &= ~DPNI_LINK_OPT_PAUSE;
        if (!!pause->rx_pause ^ !!pause->tx_pause)
                cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
        else
                cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;

        if (cfg.options == priv->link_state.options)
                return 0;

        err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
        if (err) {
                netdev_err(net_dev, "dpni_set_link_cfg() failed\n");
                return err;
        }

        priv->link_state.options = cfg.options;

        return 0;
}

static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset,
                                  u8 *data)
{
        u8 *p = data;
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) {
                        strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) {
                        strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }
                break;
        }
}

static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS;
        default:
                return -EOPNOTSUPP;
        }
}

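/* Fill in hardware counters (from the DPNI statistics pages), driver
 * counters (per-CPU and per-channel) and instantaneous QBMan queue and
 * buffer counts, in the order of the string tables above.
 */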
static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
                                        struct ethtool_stats *stats,
                                        u64 *data)
{
        int i = 0;
        int j, k, err;
        int num_cnt;
        union dpni_statistics dpni_stats;
        u32 fcnt, bcnt;
        u32 fcnt_rx_total = 0, fcnt_tx_total = 0;
        u32 bcnt_rx_total = 0, bcnt_tx_total = 0;
        u32 buf_cnt;
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        struct dpaa2_eth_drv_stats *extras;
        struct dpaa2_eth_ch_stats *ch_stats;
        int dpni_stats_page_size[DPNI_STATISTICS_CNT] = {
                sizeof(dpni_stats.page_0),
                sizeof(dpni_stats.page_1),
                sizeof(dpni_stats.page_2),
                sizeof(dpni_stats.page_3),
                sizeof(dpni_stats.page_4),
                sizeof(dpni_stats.page_5),
                sizeof(dpni_stats.page_6),
        };

        memset(data, 0,
               sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS));

        /* Print standard counters, from DPNI statistics */
        for (j = 0; j <= 6; j++) {
                /* We're not interested in pages 4 & 5 for now */
                if (j == 4 || j == 5)
                        continue;
                err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token,
                                          j, &dpni_stats);
                if (err == -EINVAL)
                        /* Older firmware versions don't support all pages */
                        memset(&dpni_stats, 0, sizeof(dpni_stats));
                else if (err)
                        netdev_warn(net_dev, "dpni_get_statistics(%d) failed\n", j);

                num_cnt = dpni_stats_page_size[j] / sizeof(u64);
                for (k = 0; k < num_cnt; k++)
                        *(data + i++) = dpni_stats.raw.counter[k];
        }

        /* Print per-cpu extra stats */
        for_each_online_cpu(k) {
                extras = per_cpu_ptr(priv->percpu_extras, k);
                for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++)
                        *((__u64 *)data + i + j) += *((__u64 *)extras + j);
        }
        i += j;

        /* Print per-channel stats */
        for (k = 0; k < priv->num_channels; k++) {
                ch_stats = &priv->channel[k]->stats;
                for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++)
                        *((__u64 *)data + i + j) += *((__u64 *)ch_stats + j);
        }
        i += j;

        /* Print FQ instantaneous counts */
        for (j = 0; j < priv->num_fqs; j++) {
                err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
                                              &fcnt, &bcnt);
                if (err) {
                        netdev_warn(net_dev, "FQ query error %d\n", err);
                        return;
                }

                if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
                        fcnt_tx_total += fcnt;
                        bcnt_tx_total += bcnt;
                } else {
                        fcnt_rx_total += fcnt;
                        bcnt_rx_total += bcnt;
                }
        }

        *(data + i++) = fcnt_rx_total;
        *(data + i++) = bcnt_rx_total;
        *(data + i++) = fcnt_tx_total;
        *(data + i++) = bcnt_tx_total;

        err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt);
        if (err) {
                netdev_warn(net_dev, "Buffer count query error %d\n", err);
                return;
        }
        *(data + i++) = buf_cnt;
}

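/* The prep_*_rule() helpers below translate an ethtool flow spec into a
 * DPNI classification key/mask pair: each masked field is written at the
 * offset returned by dpaa2_eth_cls_fld_off() and recorded in *fields.
 */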
static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
                         void *key, void *mask, u64 *fields)
{
        int off;

        if (eth_mask->h_proto) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
                *(__be16 *)(key + off) = eth_value->h_proto;
                *(__be16 *)(mask + off) = eth_mask->h_proto;
                *fields |= DPAA2_ETH_DIST_ETHTYPE;
        }

        if (!is_zero_ether_addr(eth_mask->h_source)) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_SA);
                ether_addr_copy(key + off, eth_value->h_source);
                ether_addr_copy(mask + off, eth_mask->h_source);
                *fields |= DPAA2_ETH_DIST_ETHSRC;
        }

        if (!is_zero_ether_addr(eth_mask->h_dest)) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
                ether_addr_copy(key + off, eth_value->h_dest);
                ether_addr_copy(mask + off, eth_mask->h_dest);
                *fields |= DPAA2_ETH_DIST_ETHDST;
        }

        return 0;
}

static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
                         struct ethtool_usrip4_spec *uip_mask,
                         void *key, void *mask, u64 *fields)
{
        int off;
        u32 tmp_value, tmp_mask;

        if (uip_mask->tos || uip_mask->ip_ver)
                return -EOPNOTSUPP;

        if (uip_mask->ip4src) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
                *(__be32 *)(key + off) = uip_value->ip4src;
                *(__be32 *)(mask + off) = uip_mask->ip4src;
                *fields |= DPAA2_ETH_DIST_IPSRC;
        }

        if (uip_mask->ip4dst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
                *(__be32 *)(key + off) = uip_value->ip4dst;
                *(__be32 *)(mask + off) = uip_mask->ip4dst;
                *fields |= DPAA2_ETH_DIST_IPDST;
        }

        if (uip_mask->proto) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
                *(u8 *)(key + off) = uip_value->proto;
                *(u8 *)(mask + off) = uip_mask->proto;
                *fields |= DPAA2_ETH_DIST_IPPROTO;
        }

        if (uip_mask->l4_4_bytes) {
                /* First L4 word: bytes 0-1 hold the source port,
                 * bytes 2-3 the destination port
                 */
                tmp_value = be32_to_cpu(uip_value->l4_4_bytes);
                tmp_mask = be32_to_cpu(uip_mask->l4_4_bytes);

                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
                *(__be16 *)(key + off) = htons(tmp_value >> 16);
                *(__be16 *)(mask + off) = htons(tmp_mask >> 16);
                *fields |= DPAA2_ETH_DIST_L4SRC;

                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
                *(__be16 *)(key + off) = htons(tmp_value & 0xFFFF);
                *(__be16 *)(mask + off) = htons(tmp_mask & 0xFFFF);
                *fields |= DPAA2_ETH_DIST_L4DST;
        }

        /* Only apply the rule for IPv4 frames */
        off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
        *(__be16 *)(key + off) = htons(ETH_P_IP);
        *(__be16 *)(mask + off) = htons(0xFFFF);
        *fields |= DPAA2_ETH_DIST_ETHTYPE;

        return 0;
}

static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
                        struct ethtool_tcpip4_spec *l4_mask,
                        void *key, void *mask, u8 l4_proto, u64 *fields)
{
        int off;

        if (l4_mask->tos)
                return -EOPNOTSUPP;

        if (l4_mask->ip4src) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_SRC);
                *(__be32 *)(key + off) = l4_value->ip4src;
                *(__be32 *)(mask + off) = l4_mask->ip4src;
                *fields |= DPAA2_ETH_DIST_IPSRC;
        }

        if (l4_mask->ip4dst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_DST);
                *(__be32 *)(key + off) = l4_value->ip4dst;
                *(__be32 *)(mask + off) = l4_mask->ip4dst;
                *fields |= DPAA2_ETH_DIST_IPDST;
        }

        if (l4_mask->psrc) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
                *(__be16 *)(key + off) = l4_value->psrc;
                *(__be16 *)(mask + off) = l4_mask->psrc;
                *fields |= DPAA2_ETH_DIST_L4SRC;
        }

        if (l4_mask->pdst) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
                *(__be16 *)(key + off) = l4_value->pdst;
                *(__be16 *)(mask + off) = l4_mask->pdst;
                *fields |= DPAA2_ETH_DIST_L4DST;
        }

        /* Only apply the rule for IPv4 frames with the specified L4 proto */
        off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_TYPE);
        *(__be16 *)(key + off) = htons(ETH_P_IP);
        *(__be16 *)(mask + off) = htons(0xFFFF);
        *fields |= DPAA2_ETH_DIST_ETHTYPE;

        off = dpaa2_eth_cls_fld_off(NET_PROT_IP, NH_FLD_IP_PROTO);
        *(u8 *)(key + off) = l4_proto;
        *(u8 *)(mask + off) = 0xFF;
        *fields |= DPAA2_ETH_DIST_IPPROTO;

        return 0;
}

static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
                         struct ethtool_flow_ext *ext_mask,
                         void *key, void *mask, u64 *fields)
{
        int off;

        if (ext_mask->vlan_etype)
                return -EOPNOTSUPP;

        if (ext_mask->vlan_tci) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_VLAN, NH_FLD_VLAN_TCI);
                *(__be16 *)(key + off) = ext_value->vlan_tci;
                *(__be16 *)(mask + off) = ext_mask->vlan_tci;
                *fields |= DPAA2_ETH_DIST_VLAN;
        }

        return 0;
}

static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
                             struct ethtool_flow_ext *ext_mask,
                             void *key, void *mask, u64 *fields)
{
        int off;

        if (!is_zero_ether_addr(ext_mask->h_dest)) {
                off = dpaa2_eth_cls_fld_off(NET_PROT_ETH, NH_FLD_ETH_DA);
                ether_addr_copy(key + off, ext_value->h_dest);
                ether_addr_copy(mask + off, ext_mask->h_dest);
                *fields |= DPAA2_ETH_DIST_ETHDST;
        }

        return 0;
}

static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
                         u64 *fields)
{
        int err;

        switch (fs->flow_type & 0xFF) {
        case ETHER_FLOW:
                err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
                                    key, mask, fields);
                break;
        case IP_USER_FLOW:
                err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
                                    &fs->m_u.usr_ip4_spec, key, mask, fields);
                break;
        case TCP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
                                   key, mask, IPPROTO_TCP, fields);
                break;
        case UDP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
                                   key, mask, IPPROTO_UDP, fields);
                break;
        case SCTP_V4_FLOW:
                err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
                                   &fs->m_u.sctp_ip4_spec, key, mask,
                                   IPPROTO_SCTP, fields);
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (err)
                return err;

        if (fs->flow_type & FLOW_EXT) {
                err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
                if (err)
                        return err;
        }

        if (fs->flow_type & FLOW_MAC_EXT) {
                err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
                                        fields);
                if (err)
                        return err;
        }

        return 0;
}

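/* Add or remove (per the "add" argument) one flow steering entry. key_buf
 * holds the key followed by its mask, both DMA-mapped for the MC firmware.
 * When masking is not supported, the key is trimmed to only the fields in
 * use before being programmed.
 */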
static int do_cls_rule(struct net_device *net_dev,
                       struct ethtool_rx_flow_spec *fs,
                       bool add)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        struct device *dev = net_dev->dev.parent;
        struct dpni_rule_cfg rule_cfg = { 0 };
        struct dpni_fs_action_cfg fs_act = { 0 };
        dma_addr_t key_iova;
        u64 fields = 0;
        void *key_buf;
        int err;

        if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
            fs->ring_cookie >= dpaa2_eth_queue_count(priv))
                return -EINVAL;

        rule_cfg.key_size = dpaa2_eth_cls_key_size(DPAA2_ETH_DIST_ALL);

        /* allocate twice the key size, for the actual key and for mask */
        key_buf = kzalloc(rule_cfg.key_size * 2, GFP_KERNEL);
        if (!key_buf)
                return -ENOMEM;

        /* Fill the key and mask memory areas */
        err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
        if (err)
                goto free_mem;

        if (!dpaa2_eth_fs_mask_enabled(priv)) {
                /* Without masking support, the hardware key contains exactly
                 * the fields used by the rules, so all rules must share one
                 * field set. Program the key on the first rule, and from
                 * then on only accept rules using the same fields.
                 */
                if (!priv->rx_cls_fields) {
                        err = dpaa2_eth_set_cls(net_dev, fields);
                        if (err)
                                goto free_mem;

                        priv->rx_cls_fields = fields;
                } else if (priv->rx_cls_fields != fields) {
                        netdev_err(net_dev, "No support for multiple FS keys, need to delete existing rules\n");
                        err = -EOPNOTSUPP;
                        goto free_mem;
                }

                dpaa2_eth_cls_trim_rule(key_buf, fields);
                rule_cfg.key_size = dpaa2_eth_cls_key_size(fields);
        }

        key_iova = dma_map_single(dev, key_buf, rule_cfg.key_size * 2,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(dev, key_iova)) {
                err = -ENOMEM;
                goto free_mem;
        }

        rule_cfg.key_iova = key_iova;
        if (dpaa2_eth_fs_mask_enabled(priv))
                rule_cfg.mask_iova = key_iova + rule_cfg.key_size;

        if (add) {
                if (fs->ring_cookie == RX_CLS_FLOW_DISC)
                        fs_act.options |= DPNI_FS_OPT_DISCARD;
                else
                        fs_act.flow_id = fs->ring_cookie;
                err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
                                        fs->location, &rule_cfg, &fs_act);
        } else {
                err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
                                           &rule_cfg);
        }

        dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);

free_mem:
        kfree(key_buf);

        return err;
}

static int num_rules(struct dpaa2_eth_priv *priv)
{
        int i, rules = 0;

        for (i = 0; i < dpaa2_eth_fs_count(priv); i++)
                if (priv->cls_rules[i].in_use)
                        rules++;

        return rules;
}

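/* Rules are updated by first deleting any existing entry at "location";
 * passing a NULL new_fs implements ETHTOOL_SRXCLSRLDEL.
 */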
static int update_cls_rule(struct net_device *net_dev,
                           struct ethtool_rx_flow_spec *new_fs,
                           unsigned int location)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        struct dpaa2_eth_cls_rule *rule;
        int err = -EINVAL;

        if (!priv->rx_cls_enabled)
                return -EOPNOTSUPP;

        if (location >= dpaa2_eth_fs_count(priv))
                return -EINVAL;

        rule = &priv->cls_rules[location];

        /* If a rule is already present at this location, delete it first */
        if (rule->in_use) {
                err = do_cls_rule(net_dev, &rule->fs, false);
                if (err)
                        return err;

                rule->in_use = 0;

                /* The last rule is gone, so the FS key can be reprogrammed
                 * by the next rule added
                 */
                if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
                        priv->rx_cls_fields = 0;
        }

        /* If there is no new entry to add, we are done */
        if (!new_fs)
                return err;

        err = do_cls_rule(net_dev, new_fs, true);
        if (err)
                return err;

        rule->in_use = 1;
        rule->fs = *new_fs;

        return 0;
}

static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
                               struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
{
        struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
        int max_rules = dpaa2_eth_fs_count(priv);
        int i, j = 0;

        switch (rxnfc->cmd) {
        case ETHTOOL_GRXFH:
                /* we purposely ignore cmd->flow_type for now, because the
                 * classifier only supports a single set of fields for all
                 * protocols
                 */
                rxnfc->data = priv->rx_hash_fields;
                break;
        case ETHTOOL_GRXRINGS:
                rxnfc->data = dpaa2_eth_queue_count(priv);
                break;
        case ETHTOOL_GRXCLSRLCNT:
                rxnfc->rule_cnt = num_rules(priv);
                rxnfc->data = max_rules;
                break;
        case ETHTOOL_GRXCLSRULE:
                if (rxnfc->fs.location >= max_rules)
                        return -EINVAL;
                rxnfc->fs.location = array_index_nospec(rxnfc->fs.location,
                                                        max_rules);
                if (!priv->cls_rules[rxnfc->fs.location].in_use)
                        return -EINVAL;
                rxnfc->fs = priv->cls_rules[rxnfc->fs.location].fs;
                break;
        case ETHTOOL_GRXCLSRLALL:
                for (i = 0; i < max_rules; i++) {
                        if (!priv->cls_rules[i].in_use)
                                continue;
                        if (j == rxnfc->rule_cnt)
                                return -EMSGSIZE;
                        rule_locs[j++] = i;
                }
                rxnfc->rule_cnt = j;
                rxnfc->data = max_rules;
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
                               struct ethtool_rxnfc *rxnfc)
{
        int err = 0;

        switch (rxnfc->cmd) {
        case ETHTOOL_SRXFH:
                if ((rxnfc->data & DPAA2_RXH_SUPPORTED) != rxnfc->data)
                        return -EOPNOTSUPP;
                err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
                break;
        case ETHTOOL_SRXCLSRLINS:
                err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
                break;
        case ETHTOOL_SRXCLSRLDEL:
                err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
                break;
        default:
                err = -EOPNOTSUPP;
        }

        return err;
}

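/* Index of the PTP hardware clock, exported so the DPAA2 PTP driver can
 * set it; stays -1 (no PHC available) until that driver probes.
 */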
int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);

static int dpaa2_eth_get_ts_info(struct net_device *dev,
                                 struct ethtool_ts_info *info)
{
        info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
                                SOF_TIMESTAMPING_RX_HARDWARE |
                                SOF_TIMESTAMPING_RAW_HARDWARE;

        info->phc_index = dpaa2_phc_index;

        info->tx_types = (1 << HWTSTAMP_TX_OFF) |
                         (1 << HWTSTAMP_TX_ON);

        info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
                           (1 << HWTSTAMP_FILTER_ALL);

        return 0;
}

const struct ethtool_ops dpaa2_ethtool_ops = {
        .get_drvinfo = dpaa2_eth_get_drvinfo,
        .get_link = ethtool_op_get_link,
        .get_link_ksettings = dpaa2_eth_get_link_ksettings,
        .get_pauseparam = dpaa2_eth_get_pauseparam,
        .set_pauseparam = dpaa2_eth_set_pauseparam,
        .get_sset_count = dpaa2_eth_get_sset_count,
        .get_ethtool_stats = dpaa2_eth_get_ethtool_stats,
        .get_strings = dpaa2_eth_get_strings,
        .get_rxnfc = dpaa2_eth_get_rxnfc,
        .set_rxnfc = dpaa2_eth_set_rxnfc,
        .get_ts_info = dpaa2_eth_get_ts_info,
};