This source file includes the following definitions:
- ef4_get_uint_stat
- ef4_get_atomic_stat
- ef4_ethtool_phys_id
- ef4_ethtool_get_link_ksettings
- ef4_ethtool_set_link_ksettings
- ef4_ethtool_get_drvinfo
- ef4_ethtool_get_regs_len
- ef4_ethtool_get_regs
- ef4_ethtool_get_msglevel
- ef4_ethtool_set_msglevel
- ef4_fill_test
- ef4_fill_loopback_test
- ef4_ethtool_fill_self_tests
- ef4_describe_per_queue_stats
- ef4_ethtool_get_sset_count
- ef4_ethtool_get_strings
- ef4_ethtool_get_stats
- ef4_ethtool_self_test
- ef4_ethtool_nway_reset
- ef4_ethtool_get_coalesce
- ef4_ethtool_set_coalesce
- ef4_ethtool_get_ringparam
- ef4_ethtool_set_ringparam
- ef4_ethtool_set_pauseparam
- ef4_ethtool_get_pauseparam
- ef4_ethtool_get_wol
- ef4_ethtool_set_wol
- ef4_ethtool_reset
- ip6_fill_mask
- ef4_ethtool_get_class_rule
- ef4_ethtool_get_rxnfc
- ip6_mask_is_full
- ip6_mask_is_empty
- ef4_ethtool_set_class_rule
- ef4_ethtool_set_rxnfc
- ef4_ethtool_get_rxfh_indir_size
- ef4_ethtool_get_rxfh
- ef4_ethtool_set_rxfh
- ef4_ethtool_get_module_eeprom
- ef4_ethtool_get_module_info
8 #include <linux/netdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/rtnetlink.h>
11 #include <linux/in.h>
12 #include "net_driver.h"
13 #include "workarounds.h"
14 #include "selftest.h"
15 #include "efx.h"
16 #include "filter.h"
17 #include "nic.h"
18
19 struct ef4_sw_stat_desc {
20 const char *name;
21 enum {
22 EF4_ETHTOOL_STAT_SOURCE_nic,
23 EF4_ETHTOOL_STAT_SOURCE_channel,
24 EF4_ETHTOOL_STAT_SOURCE_tx_queue
25 } source;
26 unsigned offset;
27 u64(*get_stat) (void *field);
28 };
29
30
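/*
 * Initialiser for one entry of ef4_sw_stat_desc[].  The ternary used for
 * .offset looks redundant (both branches are the same offsetof()); the
 * pointer comparison against a NULL struct is there purely as a
 * compile-time check that the named field really has type field_type.
 */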
31 #define EF4_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
32 get_stat_function) { \
33 .name = #stat_name, \
34 .source = EF4_ETHTOOL_STAT_SOURCE_##source_name, \
35 .offset = ((((field_type *) 0) == \
36 &((struct ef4_##source_name *)0)->field) ? \
37 offsetof(struct ef4_##source_name, field) : \
38 offsetof(struct ef4_##source_name, field)), \
39 .get_stat = get_stat_function, \
40 }
41
42 static u64 ef4_get_uint_stat(void *field)
43 {
44 return *(unsigned int *)field;
45 }
46
47 static u64 ef4_get_atomic_stat(void *field)
48 {
49 return atomic_read((atomic_t *) field);
50 }
51
52 #define EF4_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
53 EF4_ETHTOOL_STAT(field, nic, field, \
54 atomic_t, ef4_get_atomic_stat)
55
56 #define EF4_ETHTOOL_UINT_CHANNEL_STAT(field) \
57 EF4_ETHTOOL_STAT(field, channel, n_##field, \
58 unsigned int, ef4_get_uint_stat)
59
60 #define EF4_ETHTOOL_UINT_TXQ_STAT(field) \
61 EF4_ETHTOOL_STAT(tx_##field, tx_queue, field, \
62 unsigned int, ef4_get_uint_stat)
63
64 static const struct ef4_sw_stat_desc ef4_sw_stat_desc[] = {
65 EF4_ETHTOOL_UINT_TXQ_STAT(merge_events),
66 EF4_ETHTOOL_UINT_TXQ_STAT(pushes),
67 EF4_ETHTOOL_UINT_TXQ_STAT(cb_packets),
68 EF4_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
69 EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
70 EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
71 EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
72 EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
73 EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
74 EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
75 EF4_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
76 };
77
78 #define EF4_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(ef4_sw_stat_desc)
79
80 #define EF4_ETHTOOL_EEPROM_MAGIC 0xEFAB
81
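/*
 * Identify the NIC by driving its identification LED through the board's
 * set_id_led() method.  Returning a positive value for ETHTOOL_ID_ACTIVE
 * asks the ethtool core to generate the periodic ON/OFF callbacks itself.
 */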
90 static int ef4_ethtool_phys_id(struct net_device *net_dev,
91 enum ethtool_phys_id_state state)
92 {
93 struct ef4_nic *efx = netdev_priv(net_dev);
94 enum ef4_led_mode mode = EF4_LED_DEFAULT;
95
96 switch (state) {
97 case ETHTOOL_ID_ON:
98 mode = EF4_LED_ON;
99 break;
100 case ETHTOOL_ID_OFF:
101 mode = EF4_LED_OFF;
102 break;
103 case ETHTOOL_ID_INACTIVE:
104 mode = EF4_LED_DEFAULT;
105 break;
106 case ETHTOOL_ID_ACTIVE:
107 return 1;
108 }
109
110 efx->type->set_id_led(efx, mode);
111 return 0;
112 }
113
114
115 static int
116 ef4_ethtool_get_link_ksettings(struct net_device *net_dev,
117 struct ethtool_link_ksettings *cmd)
118 {
119 struct ef4_nic *efx = netdev_priv(net_dev);
120 struct ef4_link_state *link_state = &efx->link_state;
121
122 mutex_lock(&efx->mac_lock);
123 efx->phy_op->get_link_ksettings(efx, cmd);
124 mutex_unlock(&efx->mac_lock);
125
126
127 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
128 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
129
130 if (LOOPBACK_INTERNAL(efx)) {
131 cmd->base.speed = link_state->speed;
132 cmd->base.duplex = link_state->fd ? DUPLEX_FULL : DUPLEX_HALF;
133 }
134
135 return 0;
136 }
137
138
139 static int
140 ef4_ethtool_set_link_ksettings(struct net_device *net_dev,
141 const struct ethtool_link_ksettings *cmd)
142 {
143 struct ef4_nic *efx = netdev_priv(net_dev);
144 int rc;
145
146
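	/* The driver does not support 1000Mbps half-duplex; reject it here
	 * rather than passing it down to the PHY operation.
	 */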
147 if ((cmd->base.speed == SPEED_1000) &&
148 (cmd->base.duplex != DUPLEX_FULL)) {
149 netif_dbg(efx, drv, efx->net_dev,
150 "rejecting unsupported 1000Mbps HD setting\n");
151 return -EINVAL;
152 }
153
154 mutex_lock(&efx->mac_lock);
155 rc = efx->phy_op->set_link_ksettings(efx, cmd);
156 mutex_unlock(&efx->mac_lock);
157 return rc;
158 }
159
160 static void ef4_ethtool_get_drvinfo(struct net_device *net_dev,
161 struct ethtool_drvinfo *info)
162 {
163 struct ef4_nic *efx = netdev_priv(net_dev);
164
165 strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
166 strlcpy(info->version, EF4_DRIVER_VERSION, sizeof(info->version));
167 strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
168 }
169
170 static int ef4_ethtool_get_regs_len(struct net_device *net_dev)
171 {
172 return ef4_nic_get_regs_len(netdev_priv(net_dev));
173 }
174
175 static void ef4_ethtool_get_regs(struct net_device *net_dev,
176 struct ethtool_regs *regs, void *buf)
177 {
178 struct ef4_nic *efx = netdev_priv(net_dev);
179
180 regs->version = efx->type->revision;
181 ef4_nic_get_regs(efx, buf);
182 }
183
184 static u32 ef4_ethtool_get_msglevel(struct net_device *net_dev)
185 {
186 struct ef4_nic *efx = netdev_priv(net_dev);
187 return efx->msg_enable;
188 }
189
190 static void ef4_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
191 {
192 struct ef4_nic *efx = netdev_priv(net_dev);
193 efx->msg_enable = msg_enable;
194 }
195
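/*
 * Fill in one self-test entry: store the result in the data array (when
 * supplied) and format a "<unit> <test>" name into the strings buffer
 * (when supplied), using the printf-style unit/test formats and ids
 * passed by the caller.
 */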
209 static void ef4_fill_test(unsigned int test_index, u8 *strings, u64 *data,
210 int *test, const char *unit_format, int unit_id,
211 const char *test_format, const char *test_id)
212 {
213 char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
214
215
216 if (data)
217 data[test_index] = *test;
218
219
220 if (strings) {
221 if (strchr(unit_format, '%'))
222 snprintf(unit_str, sizeof(unit_str),
223 unit_format, unit_id);
224 else
225 strcpy(unit_str, unit_format);
226 snprintf(test_str, sizeof(test_str), test_format, test_id);
227 snprintf(strings + test_index * ETH_GSTRING_LEN,
228 ETH_GSTRING_LEN,
229 "%-6s %-24s", unit_str, test_str);
230 }
231 }
232
233 #define EF4_CHANNEL_NAME(_channel) "chan%d", _channel->channel
234 #define EF4_TX_QUEUE_NAME(_tx_queue) "txq%d", _tx_queue->queue
235 #define EF4_RX_QUEUE_NAME(_rx_queue) "rxq%d", _rx_queue->queue
236 #define EF4_LOOPBACK_NAME(_mode, _counter) \
237 "loopback.%s." _counter, STRING_TABLE_LOOKUP(_mode, ef4_loopback_mode)
238
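/*
 * Fill in the self-test entries for one loopback mode: tx_sent/tx_done for
 * each TX queue on the first TX-capable channel, plus rx_good and rx_bad.
 * Returns the next free test index.
 */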
251 static int ef4_fill_loopback_test(struct ef4_nic *efx,
252 struct ef4_loopback_self_tests *lb_tests,
253 enum ef4_loopback_mode mode,
254 unsigned int test_index,
255 u8 *strings, u64 *data)
256 {
257 struct ef4_channel *channel =
258 ef4_get_channel(efx, efx->tx_channel_offset);
259 struct ef4_tx_queue *tx_queue;
260
261 ef4_for_each_channel_tx_queue(tx_queue, channel) {
262 ef4_fill_test(test_index++, strings, data,
263 &lb_tests->tx_sent[tx_queue->queue],
264 EF4_TX_QUEUE_NAME(tx_queue),
265 EF4_LOOPBACK_NAME(mode, "tx_sent"));
266 ef4_fill_test(test_index++, strings, data,
267 &lb_tests->tx_done[tx_queue->queue],
268 EF4_TX_QUEUE_NAME(tx_queue),
269 EF4_LOOPBACK_NAME(mode, "tx_done"));
270 }
271 ef4_fill_test(test_index++, strings, data,
272 &lb_tests->rx_good,
273 "rx", 0,
274 EF4_LOOPBACK_NAME(mode, "rx_good"));
275 ef4_fill_test(test_index++, strings, data,
276 &lb_tests->rx_bad,
277 "rx", 0,
278 EF4_LOOPBACK_NAME(mode, "rx_bad"));
279
280 return test_index;
281 }
282
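/*
 * Fill in the complete set of self-test names and/or results.  Any of
 * @tests, @strings and @data may be NULL; get_sset_count() and
 * get_strings() use this to count and name the tests without running them.
 * Returns the number of entries filled in.
 */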
296 static int ef4_ethtool_fill_self_tests(struct ef4_nic *efx,
297 struct ef4_self_tests *tests,
298 u8 *strings, u64 *data)
299 {
300 struct ef4_channel *channel;
301 unsigned int n = 0, i;
302 enum ef4_loopback_mode mode;
303
304 ef4_fill_test(n++, strings, data, &tests->phy_alive,
305 "phy", 0, "alive", NULL);
306 ef4_fill_test(n++, strings, data, &tests->nvram,
307 "core", 0, "nvram", NULL);
308 ef4_fill_test(n++, strings, data, &tests->interrupt,
309 "core", 0, "interrupt", NULL);
310
311
312 ef4_for_each_channel(channel, efx) {
313 ef4_fill_test(n++, strings, data,
314 &tests->eventq_dma[channel->channel],
315 EF4_CHANNEL_NAME(channel),
316 "eventq.dma", NULL);
317 ef4_fill_test(n++, strings, data,
318 &tests->eventq_int[channel->channel],
319 EF4_CHANNEL_NAME(channel),
320 "eventq.int", NULL);
321 }
322
323 ef4_fill_test(n++, strings, data, &tests->memory,
324 "core", 0, "memory", NULL);
325 ef4_fill_test(n++, strings, data, &tests->registers,
326 "core", 0, "registers", NULL);
327
328 if (efx->phy_op->run_tests != NULL) {
329 EF4_BUG_ON_PARANOID(efx->phy_op->test_name == NULL);
330
331 for (i = 0; true; ++i) {
332 const char *name;
333
334 EF4_BUG_ON_PARANOID(i >= EF4_MAX_PHY_TESTS);
335 name = efx->phy_op->test_name(efx, i);
336 if (name == NULL)
337 break;
338
339 ef4_fill_test(n++, strings, data, &tests->phy_ext[i],
340 "phy", 0, name, NULL);
341 }
342 }
343
344
345 for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
346 if (!(efx->loopback_modes & (1 << mode)))
347 continue;
348 n = ef4_fill_loopback_test(efx,
349 &tests->loopback[mode], mode, n,
350 strings, data);
351 }
352
353 return n;
354 }
355
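/*
 * Count (and, if @strings is non-NULL, name) the per-queue packet
 * counters: one "tx-<n>.tx_packets" per TX-capable channel and one
 * "rx-<n>.rx_packets" per RX-capable channel.
 */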
356 static size_t ef4_describe_per_queue_stats(struct ef4_nic *efx, u8 *strings)
357 {
358 size_t n_stats = 0;
359 struct ef4_channel *channel;
360
361 ef4_for_each_channel(channel, efx) {
362 if (ef4_channel_has_tx_queues(channel)) {
363 n_stats++;
364 if (strings != NULL) {
365 snprintf(strings, ETH_GSTRING_LEN,
366 "tx-%u.tx_packets",
367 channel->tx_queue[0].queue /
368 EF4_TXQ_TYPES);
369
370 strings += ETH_GSTRING_LEN;
371 }
372 }
373 }
374 ef4_for_each_channel(channel, efx) {
375 if (ef4_channel_has_rx_queue(channel)) {
376 n_stats++;
377 if (strings != NULL) {
378 snprintf(strings, ETH_GSTRING_LEN,
379 "rx-%d.rx_packets", channel->channel);
380 strings += ETH_GSTRING_LEN;
381 }
382 }
383 }
384 return n_stats;
385 }
386
387 static int ef4_ethtool_get_sset_count(struct net_device *net_dev,
388 int string_set)
389 {
390 struct ef4_nic *efx = netdev_priv(net_dev);
391
392 switch (string_set) {
393 case ETH_SS_STATS:
394 return efx->type->describe_stats(efx, NULL) +
395 EF4_ETHTOOL_SW_STAT_COUNT +
396 ef4_describe_per_queue_stats(efx, NULL);
397 case ETH_SS_TEST:
398 return ef4_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
399 default:
400 return -EINVAL;
401 }
402 }
403
404 static void ef4_ethtool_get_strings(struct net_device *net_dev,
405 u32 string_set, u8 *strings)
406 {
407 struct ef4_nic *efx = netdev_priv(net_dev);
408 int i;
409
410 switch (string_set) {
411 case ETH_SS_STATS:
412 strings += (efx->type->describe_stats(efx, strings) *
413 ETH_GSTRING_LEN);
414 for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++)
415 strlcpy(strings + i * ETH_GSTRING_LEN,
416 ef4_sw_stat_desc[i].name, ETH_GSTRING_LEN);
417 strings += EF4_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
418 strings += (ef4_describe_per_queue_stats(efx, strings) *
419 ETH_GSTRING_LEN);
420 break;
421 case ETH_SS_TEST:
422 ef4_ethtool_fill_self_tests(efx, NULL, strings, NULL);
423 break;
424 default:
425
426 break;
427 }
428 }
429
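/*
 * Gather statistics in the same order as the strings emitted by
 * ef4_ethtool_get_strings(): hardware stats first (under stats_lock),
 * then the software stats table, then the per-queue packet counts.
 */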
430 static void ef4_ethtool_get_stats(struct net_device *net_dev,
431 struct ethtool_stats *stats,
432 u64 *data)
433 {
434 struct ef4_nic *efx = netdev_priv(net_dev);
435 const struct ef4_sw_stat_desc *stat;
436 struct ef4_channel *channel;
437 struct ef4_tx_queue *tx_queue;
438 struct ef4_rx_queue *rx_queue;
439 int i;
440
441 spin_lock_bh(&efx->stats_lock);
442
443
444 data += efx->type->update_stats(efx, data, NULL);
445
446
447 for (i = 0; i < EF4_ETHTOOL_SW_STAT_COUNT; i++) {
448 stat = &ef4_sw_stat_desc[i];
449 switch (stat->source) {
450 case EF4_ETHTOOL_STAT_SOURCE_nic:
451 data[i] = stat->get_stat((void *)efx + stat->offset);
452 break;
453 case EF4_ETHTOOL_STAT_SOURCE_channel:
454 data[i] = 0;
455 ef4_for_each_channel(channel, efx)
456 data[i] += stat->get_stat((void *)channel +
457 stat->offset);
458 break;
459 case EF4_ETHTOOL_STAT_SOURCE_tx_queue:
460 data[i] = 0;
461 ef4_for_each_channel(channel, efx) {
462 ef4_for_each_channel_tx_queue(tx_queue, channel)
463 data[i] +=
464 stat->get_stat((void *)tx_queue
465 + stat->offset);
466 }
467 break;
468 }
469 }
470 data += EF4_ETHTOOL_SW_STAT_COUNT;
471
472 spin_unlock_bh(&efx->stats_lock);
473
474 ef4_for_each_channel(channel, efx) {
475 if (ef4_channel_has_tx_queues(channel)) {
476 *data = 0;
477 ef4_for_each_channel_tx_queue(tx_queue, channel) {
478 *data += tx_queue->tx_packets;
479 }
480 data++;
481 }
482 }
483 ef4_for_each_channel(channel, efx) {
484 if (ef4_channel_has_rx_queue(channel)) {
485 *data = 0;
486 ef4_for_each_channel_rx_queue(rx_queue, channel) {
487 *data += rx_queue->rx_packets;
488 }
489 data++;
490 }
491 }
492 }
493
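/*
 * Run the driver self-tests.  The interface is brought up temporarily if
 * it was down, results are reported through @data, and ETH_TEST_FL_FAILED
 * is set on any failure.
 */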
494 static void ef4_ethtool_self_test(struct net_device *net_dev,
495 struct ethtool_test *test, u64 *data)
496 {
497 struct ef4_nic *efx = netdev_priv(net_dev);
498 struct ef4_self_tests *ef4_tests;
499 bool already_up;
500 int rc = -ENOMEM;
501
502 ef4_tests = kzalloc(sizeof(*ef4_tests), GFP_KERNEL);
503 if (!ef4_tests)
504 goto fail;
505
506 if (efx->state != STATE_READY) {
507 rc = -EBUSY;
508 goto out;
509 }
510
511 netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
512 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
513
514
515 already_up = (efx->net_dev->flags & IFF_UP);
516 if (!already_up) {
517 rc = dev_open(efx->net_dev, NULL);
518 if (rc) {
519 netif_err(efx, drv, efx->net_dev,
520 "failed opening device.\n");
521 goto out;
522 }
523 }
524
525 rc = ef4_selftest(efx, ef4_tests, test->flags);
526
527 if (!already_up)
528 dev_close(efx->net_dev);
529
530 netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
531 rc == 0 ? "passed" : "failed",
532 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
533
534 out:
535 ef4_ethtool_fill_self_tests(efx, ef4_tests, NULL, data);
536 kfree(ef4_tests);
537 fail:
538 if (rc)
539 test->flags |= ETH_TEST_FL_FAILED;
540 }
541
542
543 static int ef4_ethtool_nway_reset(struct net_device *net_dev)
544 {
545 struct ef4_nic *efx = netdev_priv(net_dev);
546
547 return mdio45_nway_restart(&efx->mdio);
548 }
549
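/*
 * Interrupt moderation is configured per channel via
 * ef4_init_irq_moderation() and pushed to the hardware with
 * push_irq_moderation().  In set_coalesce(), the RX value is only allowed
 * to override the TX value when the caller left both TX usec fields
 * unchanged (rx_may_override_tx below).
 */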
579 static int ef4_ethtool_get_coalesce(struct net_device *net_dev,
580 struct ethtool_coalesce *coalesce)
581 {
582 struct ef4_nic *efx = netdev_priv(net_dev);
583 unsigned int tx_usecs, rx_usecs;
584 bool rx_adaptive;
585
586 ef4_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
587
588 coalesce->tx_coalesce_usecs = tx_usecs;
589 coalesce->tx_coalesce_usecs_irq = tx_usecs;
590 coalesce->rx_coalesce_usecs = rx_usecs;
591 coalesce->rx_coalesce_usecs_irq = rx_usecs;
592 coalesce->use_adaptive_rx_coalesce = rx_adaptive;
593
594 return 0;
595 }
596
597 static int ef4_ethtool_set_coalesce(struct net_device *net_dev,
598 struct ethtool_coalesce *coalesce)
599 {
600 struct ef4_nic *efx = netdev_priv(net_dev);
601 struct ef4_channel *channel;
602 unsigned int tx_usecs, rx_usecs;
603 bool adaptive, rx_may_override_tx;
604 int rc;
605
606 if (coalesce->use_adaptive_tx_coalesce)
607 return -EINVAL;
608
609 ef4_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
610
611 if (coalesce->rx_coalesce_usecs != rx_usecs)
612 rx_usecs = coalesce->rx_coalesce_usecs;
613 else
614 rx_usecs = coalesce->rx_coalesce_usecs_irq;
615
616 adaptive = coalesce->use_adaptive_rx_coalesce;
617
618
619
620
621 rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
622 coalesce->tx_coalesce_usecs_irq == tx_usecs);
623 if (coalesce->tx_coalesce_usecs != tx_usecs)
624 tx_usecs = coalesce->tx_coalesce_usecs;
625 else
626 tx_usecs = coalesce->tx_coalesce_usecs_irq;
627
628 rc = ef4_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
629 rx_may_override_tx);
630 if (rc != 0)
631 return rc;
632
633 ef4_for_each_channel(channel, efx)
634 efx->type->push_irq_moderation(channel);
635
636 return 0;
637 }
638
639 static void ef4_ethtool_get_ringparam(struct net_device *net_dev,
640 struct ethtool_ringparam *ring)
641 {
642 struct ef4_nic *efx = netdev_priv(net_dev);
643
644 ring->rx_max_pending = EF4_MAX_DMAQ_SIZE;
645 ring->tx_max_pending = EF4_MAX_DMAQ_SIZE;
646 ring->rx_pending = efx->rxq_entries;
647 ring->tx_pending = efx->txq_entries;
648 }
649
650 static int ef4_ethtool_set_ringparam(struct net_device *net_dev,
651 struct ethtool_ringparam *ring)
652 {
653 struct ef4_nic *efx = netdev_priv(net_dev);
654 u32 txq_entries;
655
656 if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
657 ring->rx_pending > EF4_MAX_DMAQ_SIZE ||
658 ring->tx_pending > EF4_MAX_DMAQ_SIZE)
659 return -EINVAL;
660
661 if (ring->rx_pending < EF4_RXQ_MIN_ENT) {
662 netif_err(efx, drv, efx->net_dev,
663 "RX queues cannot be smaller than %u\n",
664 EF4_RXQ_MIN_ENT);
665 return -EINVAL;
666 }
667
668 txq_entries = max(ring->tx_pending, EF4_TXQ_MIN_ENT(efx));
669 if (txq_entries != ring->tx_pending)
670 netif_warn(efx, drv, efx->net_dev,
671 "increasing TX queue size to minimum of %u\n",
672 txq_entries);
673
674 return ef4_realloc_channels(efx, ring->rx_pending, txq_entries);
675 }
676
677 static int ef4_ethtool_set_pauseparam(struct net_device *net_dev,
678 struct ethtool_pauseparam *pause)
679 {
680 struct ef4_nic *efx = netdev_priv(net_dev);
681 u8 wanted_fc, old_fc;
682 u32 old_adv;
683 int rc = 0;
684
685 mutex_lock(&efx->mac_lock);
686
687 wanted_fc = ((pause->rx_pause ? EF4_FC_RX : 0) |
688 (pause->tx_pause ? EF4_FC_TX : 0) |
689 (pause->autoneg ? EF4_FC_AUTO : 0));
690
691 if ((wanted_fc & EF4_FC_TX) && !(wanted_fc & EF4_FC_RX)) {
692 netif_dbg(efx, drv, efx->net_dev,
693 "Flow control unsupported: tx ON rx OFF\n");
694 rc = -EINVAL;
695 goto out;
696 }
697
698 if ((wanted_fc & EF4_FC_AUTO) && !efx->link_advertising) {
699 netif_dbg(efx, drv, efx->net_dev,
700 "Autonegotiation is disabled\n");
701 rc = -EINVAL;
702 goto out;
703 }
704
705
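	/* Give the NIC a chance to prepare before TX flow control is
	 * turned on, if it implements the hook.
	 */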
706 if (efx->type->prepare_enable_fc_tx &&
707 (wanted_fc & EF4_FC_TX) && !(efx->wanted_fc & EF4_FC_TX))
708 efx->type->prepare_enable_fc_tx(efx);
709
710 old_adv = efx->link_advertising;
711 old_fc = efx->wanted_fc;
712 ef4_link_set_wanted_fc(efx, wanted_fc);
713 if (efx->link_advertising != old_adv ||
714 (efx->wanted_fc ^ old_fc) & EF4_FC_AUTO) {
715 rc = efx->phy_op->reconfigure(efx);
716 if (rc) {
717 netif_err(efx, drv, efx->net_dev,
718 "Unable to advertise requested flow "
719 "control setting\n");
720 goto out;
721 }
722 }
723
724
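	/* Push the new flow-control settings to the MAC. */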
727 ef4_mac_reconfigure(efx);
728
729 out:
730 mutex_unlock(&efx->mac_lock);
731
732 return rc;
733 }
734
735 static void ef4_ethtool_get_pauseparam(struct net_device *net_dev,
736 struct ethtool_pauseparam *pause)
737 {
738 struct ef4_nic *efx = netdev_priv(net_dev);
739
740 pause->rx_pause = !!(efx->wanted_fc & EF4_FC_RX);
741 pause->tx_pause = !!(efx->wanted_fc & EF4_FC_TX);
742 pause->autoneg = !!(efx->wanted_fc & EF4_FC_AUTO);
743 }
744
745 static void ef4_ethtool_get_wol(struct net_device *net_dev,
746 struct ethtool_wolinfo *wol)
747 {
748 struct ef4_nic *efx = netdev_priv(net_dev);
749 return efx->type->get_wol(efx, wol);
750 }
751
752
753 static int ef4_ethtool_set_wol(struct net_device *net_dev,
754 struct ethtool_wolinfo *wol)
755 {
756 struct ef4_nic *efx = netdev_priv(net_dev);
757 return efx->type->set_wol(efx, wol->wolopts);
758 }
759
760 static int ef4_ethtool_reset(struct net_device *net_dev, u32 *flags)
761 {
762 struct ef4_nic *efx = netdev_priv(net_dev);
763 int rc;
764
765 rc = efx->type->map_reset_flags(flags);
766 if (rc < 0)
767 return rc;
768
769 return ef4_reset(efx, rc);
770 }
771
772
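/* Destination MAC mask that matches only the individual/group (I/G) bit */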
773 static const u8 mac_addr_ig_mask[ETH_ALEN] __aligned(2) = {0x01, 0, 0, 0, 0, 0};
774
775 #define IP4_ADDR_FULL_MASK ((__force __be32)~0)
776 #define IP_PROTO_FULL_MASK 0xFF
777 #define PORT_FULL_MASK ((__force __be16)~0)
778 #define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
779
780 static inline void ip6_fill_mask(__be32 *mask)
781 {
782 mask[0] = mask[1] = mask[2] = mask[3] = ~(__be32)0;
783 }
784
785 static int ef4_ethtool_get_class_rule(struct ef4_nic *efx,
786 struct ethtool_rx_flow_spec *rule)
787 {
788 struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
789 struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
790 struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
791 struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
792 struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
793 struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
794 struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
795 struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
796 struct ethhdr *mac_entry = &rule->h_u.ether_spec;
797 struct ethhdr *mac_mask = &rule->m_u.ether_spec;
798 struct ef4_filter_spec spec;
799 int rc;
800
801 rc = ef4_filter_get_filter_safe(efx, EF4_FILTER_PRI_MANUAL,
802 rule->location, &spec);
803 if (rc)
804 return rc;
805
806 if (spec.dmaq_id == EF4_FILTER_RX_DMAQ_ID_DROP)
807 rule->ring_cookie = RX_CLS_FLOW_DISC;
808 else
809 rule->ring_cookie = spec.dmaq_id;
810
811 if ((spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE) &&
812 spec.ether_type == htons(ETH_P_IP) &&
813 (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) &&
814 (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
815 !(spec.match_flags &
816 ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
817 EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
818 EF4_FILTER_MATCH_IP_PROTO |
819 EF4_FILTER_MATCH_LOC_PORT | EF4_FILTER_MATCH_REM_PORT))) {
820 rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
821 TCP_V4_FLOW : UDP_V4_FLOW);
822 if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
823 ip_entry->ip4dst = spec.loc_host[0];
824 ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
825 }
826 if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
827 ip_entry->ip4src = spec.rem_host[0];
828 ip_mask->ip4src = IP4_ADDR_FULL_MASK;
829 }
830 if (spec.match_flags & EF4_FILTER_MATCH_LOC_PORT) {
831 ip_entry->pdst = spec.loc_port;
832 ip_mask->pdst = PORT_FULL_MASK;
833 }
834 if (spec.match_flags & EF4_FILTER_MATCH_REM_PORT) {
835 ip_entry->psrc = spec.rem_port;
836 ip_mask->psrc = PORT_FULL_MASK;
837 }
838 } else if ((spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE) &&
839 spec.ether_type == htons(ETH_P_IPV6) &&
840 (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) &&
841 (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
842 !(spec.match_flags &
843 ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
844 EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
845 EF4_FILTER_MATCH_IP_PROTO |
846 EF4_FILTER_MATCH_LOC_PORT | EF4_FILTER_MATCH_REM_PORT))) {
847 rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
848 TCP_V6_FLOW : UDP_V6_FLOW);
849 if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
850 memcpy(ip6_entry->ip6dst, spec.loc_host,
851 sizeof(ip6_entry->ip6dst));
852 ip6_fill_mask(ip6_mask->ip6dst);
853 }
854 if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
855 memcpy(ip6_entry->ip6src, spec.rem_host,
856 sizeof(ip6_entry->ip6src));
857 ip6_fill_mask(ip6_mask->ip6src);
858 }
859 if (spec.match_flags & EF4_FILTER_MATCH_LOC_PORT) {
860 ip6_entry->pdst = spec.loc_port;
861 ip6_mask->pdst = PORT_FULL_MASK;
862 }
863 if (spec.match_flags & EF4_FILTER_MATCH_REM_PORT) {
864 ip6_entry->psrc = spec.rem_port;
865 ip6_mask->psrc = PORT_FULL_MASK;
866 }
867 } else if (!(spec.match_flags &
868 ~(EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG |
869 EF4_FILTER_MATCH_REM_MAC | EF4_FILTER_MATCH_ETHER_TYPE |
870 EF4_FILTER_MATCH_OUTER_VID))) {
871 rule->flow_type = ETHER_FLOW;
872 if (spec.match_flags &
873 (EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_LOC_MAC_IG)) {
874 ether_addr_copy(mac_entry->h_dest, spec.loc_mac);
875 if (spec.match_flags & EF4_FILTER_MATCH_LOC_MAC)
876 eth_broadcast_addr(mac_mask->h_dest);
877 else
878 ether_addr_copy(mac_mask->h_dest,
879 mac_addr_ig_mask);
880 }
881 if (spec.match_flags & EF4_FILTER_MATCH_REM_MAC) {
882 ether_addr_copy(mac_entry->h_source, spec.rem_mac);
883 eth_broadcast_addr(mac_mask->h_source);
884 }
885 if (spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE) {
886 mac_entry->h_proto = spec.ether_type;
887 mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
888 }
889 } else if (spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE &&
890 spec.ether_type == htons(ETH_P_IP) &&
891 !(spec.match_flags &
892 ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
893 EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
894 EF4_FILTER_MATCH_IP_PROTO))) {
895 rule->flow_type = IPV4_USER_FLOW;
896 uip_entry->ip_ver = ETH_RX_NFC_IP4;
897 if (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) {
898 uip_mask->proto = IP_PROTO_FULL_MASK;
899 uip_entry->proto = spec.ip_proto;
900 }
901 if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
902 uip_entry->ip4dst = spec.loc_host[0];
903 uip_mask->ip4dst = IP4_ADDR_FULL_MASK;
904 }
905 if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
906 uip_entry->ip4src = spec.rem_host[0];
907 uip_mask->ip4src = IP4_ADDR_FULL_MASK;
908 }
909 } else if (spec.match_flags & EF4_FILTER_MATCH_ETHER_TYPE &&
910 spec.ether_type == htons(ETH_P_IPV6) &&
911 !(spec.match_flags &
912 ~(EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_OUTER_VID |
913 EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_REM_HOST |
914 EF4_FILTER_MATCH_IP_PROTO))) {
915 rule->flow_type = IPV6_USER_FLOW;
916 if (spec.match_flags & EF4_FILTER_MATCH_IP_PROTO) {
917 uip6_mask->l4_proto = IP_PROTO_FULL_MASK;
918 uip6_entry->l4_proto = spec.ip_proto;
919 }
920 if (spec.match_flags & EF4_FILTER_MATCH_LOC_HOST) {
921 memcpy(uip6_entry->ip6dst, spec.loc_host,
922 sizeof(uip6_entry->ip6dst));
923 ip6_fill_mask(uip6_mask->ip6dst);
924 }
925 if (spec.match_flags & EF4_FILTER_MATCH_REM_HOST) {
926 memcpy(uip6_entry->ip6src, spec.rem_host,
927 sizeof(uip6_entry->ip6src));
928 ip6_fill_mask(uip6_mask->ip6src);
929 }
930 } else {
931
932 WARN_ON(1);
933 return -EINVAL;
934 }
935
936 if (spec.match_flags & EF4_FILTER_MATCH_OUTER_VID) {
937 rule->flow_type |= FLOW_EXT;
938 rule->h_ext.vlan_tci = spec.outer_vid;
939 rule->m_ext.vlan_tci = htons(0xfff);
940 }
941
942 return rc;
943 }
944
945 static int
946 ef4_ethtool_get_rxnfc(struct net_device *net_dev,
947 struct ethtool_rxnfc *info, u32 *rule_locs)
948 {
949 struct ef4_nic *efx = netdev_priv(net_dev);
950
951 switch (info->cmd) {
952 case ETHTOOL_GRXRINGS:
953 info->data = efx->n_rx_channels;
954 return 0;
955
956 case ETHTOOL_GRXFH: {
957 unsigned min_revision = 0;
958
959 info->data = 0;
960 switch (info->flow_type) {
961 case TCP_V4_FLOW:
962 info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
963
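		/* fall through: TCP flows also hash on IP src/dst */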
964 case UDP_V4_FLOW:
965 case SCTP_V4_FLOW:
966 case AH_ESP_V4_FLOW:
967 case IPV4_FLOW:
968 info->data |= RXH_IP_SRC | RXH_IP_DST;
969 min_revision = EF4_REV_FALCON_B0;
970 break;
971 default:
972 break;
973 }
974 if (ef4_nic_rev(efx) < min_revision)
975 info->data = 0;
976 return 0;
977 }
978
979 case ETHTOOL_GRXCLSRLCNT:
980 info->data = ef4_filter_get_rx_id_limit(efx);
981 if (info->data == 0)
982 return -EOPNOTSUPP;
983 info->data |= RX_CLS_LOC_SPECIAL;
984 info->rule_cnt =
985 ef4_filter_count_rx_used(efx, EF4_FILTER_PRI_MANUAL);
986 return 0;
987
988 case ETHTOOL_GRXCLSRULE:
989 if (ef4_filter_get_rx_id_limit(efx) == 0)
990 return -EOPNOTSUPP;
991 return ef4_ethtool_get_class_rule(efx, &info->fs);
992
993 case ETHTOOL_GRXCLSRLALL: {
994 s32 rc;
995 info->data = ef4_filter_get_rx_id_limit(efx);
996 if (info->data == 0)
997 return -EOPNOTSUPP;
998 rc = ef4_filter_get_rx_ids(efx, EF4_FILTER_PRI_MANUAL,
999 rule_locs, info->rule_cnt);
1000 if (rc < 0)
1001 return rc;
1002 info->rule_cnt = rc;
1003 return 0;
1004 }
1005
1006 default:
1007 return -EOPNOTSUPP;
1008 }
1009 }
1010
1011 static inline bool ip6_mask_is_full(__be32 mask[4])
1012 {
1013 return !~(mask[0] & mask[1] & mask[2] & mask[3]);
1014 }
1015
1016 static inline bool ip6_mask_is_empty(__be32 mask[4])
1017 {
1018 return !(mask[0] | mask[1] | mask[2] | mask[3]);
1019 }
1020
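/*
 * Convert an ethtool RX flow-steering rule into an ef4_filter_spec and
 * insert it; on success the chosen filter ID is returned to the user via
 * rule->location.
 */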
1021 static int ef4_ethtool_set_class_rule(struct ef4_nic *efx,
1022 struct ethtool_rx_flow_spec *rule)
1023 {
1024 struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
1025 struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
1026 struct ethtool_usrip4_spec *uip_entry = &rule->h_u.usr_ip4_spec;
1027 struct ethtool_usrip4_spec *uip_mask = &rule->m_u.usr_ip4_spec;
1028 struct ethtool_tcpip6_spec *ip6_entry = &rule->h_u.tcp_ip6_spec;
1029 struct ethtool_tcpip6_spec *ip6_mask = &rule->m_u.tcp_ip6_spec;
1030 struct ethtool_usrip6_spec *uip6_entry = &rule->h_u.usr_ip6_spec;
1031 struct ethtool_usrip6_spec *uip6_mask = &rule->m_u.usr_ip6_spec;
1032 struct ethhdr *mac_entry = &rule->h_u.ether_spec;
1033 struct ethhdr *mac_mask = &rule->m_u.ether_spec;
1034 struct ef4_filter_spec spec;
1035 int rc;
1036
1037
1038 if (rule->location != RX_CLS_LOC_ANY)
1039 return -EINVAL;
1040
1041
1042 if (rule->ring_cookie >= efx->n_rx_channels &&
1043 rule->ring_cookie != RX_CLS_FLOW_DISC)
1044 return -EINVAL;
1045
1046
1047 if ((rule->flow_type & FLOW_EXT) &&
1048 (rule->m_ext.vlan_etype || rule->m_ext.data[0] ||
1049 rule->m_ext.data[1]))
1050 return -EINVAL;
1051
1052 ef4_filter_init_rx(&spec, EF4_FILTER_PRI_MANUAL,
1053 efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0,
1054 (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
1055 EF4_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
1056
1057 switch (rule->flow_type & ~FLOW_EXT) {
1058 case TCP_V4_FLOW:
1059 case UDP_V4_FLOW:
1060 spec.match_flags = (EF4_FILTER_MATCH_ETHER_TYPE |
1061 EF4_FILTER_MATCH_IP_PROTO);
1062 spec.ether_type = htons(ETH_P_IP);
1063 spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
1064 IPPROTO_TCP : IPPROTO_UDP);
1065 if (ip_mask->ip4dst) {
1066 if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
1067 return -EINVAL;
1068 spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
1069 spec.loc_host[0] = ip_entry->ip4dst;
1070 }
1071 if (ip_mask->ip4src) {
1072 if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
1073 return -EINVAL;
1074 spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
1075 spec.rem_host[0] = ip_entry->ip4src;
1076 }
1077 if (ip_mask->pdst) {
1078 if (ip_mask->pdst != PORT_FULL_MASK)
1079 return -EINVAL;
1080 spec.match_flags |= EF4_FILTER_MATCH_LOC_PORT;
1081 spec.loc_port = ip_entry->pdst;
1082 }
1083 if (ip_mask->psrc) {
1084 if (ip_mask->psrc != PORT_FULL_MASK)
1085 return -EINVAL;
1086 spec.match_flags |= EF4_FILTER_MATCH_REM_PORT;
1087 spec.rem_port = ip_entry->psrc;
1088 }
1089 if (ip_mask->tos)
1090 return -EINVAL;
1091 break;
1092
1093 case TCP_V6_FLOW:
1094 case UDP_V6_FLOW:
1095 spec.match_flags = (EF4_FILTER_MATCH_ETHER_TYPE |
1096 EF4_FILTER_MATCH_IP_PROTO);
1097 spec.ether_type = htons(ETH_P_IPV6);
1098 spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V6_FLOW ?
1099 IPPROTO_TCP : IPPROTO_UDP);
1100 if (!ip6_mask_is_empty(ip6_mask->ip6dst)) {
1101 if (!ip6_mask_is_full(ip6_mask->ip6dst))
1102 return -EINVAL;
1103 spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
1104 memcpy(spec.loc_host, ip6_entry->ip6dst, sizeof(spec.loc_host));
1105 }
1106 if (!ip6_mask_is_empty(ip6_mask->ip6src)) {
1107 if (!ip6_mask_is_full(ip6_mask->ip6src))
1108 return -EINVAL;
1109 spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
1110 memcpy(spec.rem_host, ip6_entry->ip6src, sizeof(spec.rem_host));
1111 }
1112 if (ip6_mask->pdst) {
1113 if (ip6_mask->pdst != PORT_FULL_MASK)
1114 return -EINVAL;
1115 spec.match_flags |= EF4_FILTER_MATCH_LOC_PORT;
1116 spec.loc_port = ip6_entry->pdst;
1117 }
1118 if (ip6_mask->psrc) {
1119 if (ip6_mask->psrc != PORT_FULL_MASK)
1120 return -EINVAL;
1121 spec.match_flags |= EF4_FILTER_MATCH_REM_PORT;
1122 spec.rem_port = ip6_entry->psrc;
1123 }
1124 if (ip6_mask->tclass)
1125 return -EINVAL;
1126 break;
1127
1128 case IPV4_USER_FLOW:
1129 if (uip_mask->l4_4_bytes || uip_mask->tos || uip_mask->ip_ver ||
1130 uip_entry->ip_ver != ETH_RX_NFC_IP4)
1131 return -EINVAL;
1132 spec.match_flags = EF4_FILTER_MATCH_ETHER_TYPE;
1133 spec.ether_type = htons(ETH_P_IP);
1134 if (uip_mask->ip4dst) {
1135 if (uip_mask->ip4dst != IP4_ADDR_FULL_MASK)
1136 return -EINVAL;
1137 spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
1138 spec.loc_host[0] = uip_entry->ip4dst;
1139 }
1140 if (uip_mask->ip4src) {
1141 if (uip_mask->ip4src != IP4_ADDR_FULL_MASK)
1142 return -EINVAL;
1143 spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
1144 spec.rem_host[0] = uip_entry->ip4src;
1145 }
1146 if (uip_mask->proto) {
1147 if (uip_mask->proto != IP_PROTO_FULL_MASK)
1148 return -EINVAL;
1149 spec.match_flags |= EF4_FILTER_MATCH_IP_PROTO;
1150 spec.ip_proto = uip_entry->proto;
1151 }
1152 break;
1153
1154 case IPV6_USER_FLOW:
1155 if (uip6_mask->l4_4_bytes || uip6_mask->tclass)
1156 return -EINVAL;
1157 spec.match_flags = EF4_FILTER_MATCH_ETHER_TYPE;
1158 spec.ether_type = htons(ETH_P_IPV6);
1159 if (!ip6_mask_is_empty(uip6_mask->ip6dst)) {
1160 if (!ip6_mask_is_full(uip6_mask->ip6dst))
1161 return -EINVAL;
1162 spec.match_flags |= EF4_FILTER_MATCH_LOC_HOST;
1163 memcpy(spec.loc_host, uip6_entry->ip6dst, sizeof(spec.loc_host));
1164 }
1165 if (!ip6_mask_is_empty(uip6_mask->ip6src)) {
1166 if (!ip6_mask_is_full(uip6_mask->ip6src))
1167 return -EINVAL;
1168 spec.match_flags |= EF4_FILTER_MATCH_REM_HOST;
1169 memcpy(spec.rem_host, uip6_entry->ip6src, sizeof(spec.rem_host));
1170 }
1171 if (uip6_mask->l4_proto) {
1172 if (uip6_mask->l4_proto != IP_PROTO_FULL_MASK)
1173 return -EINVAL;
1174 spec.match_flags |= EF4_FILTER_MATCH_IP_PROTO;
1175 spec.ip_proto = uip6_entry->l4_proto;
1176 }
1177 break;
1178
1179 case ETHER_FLOW:
1180 if (!is_zero_ether_addr(mac_mask->h_dest)) {
1181 if (ether_addr_equal(mac_mask->h_dest,
1182 mac_addr_ig_mask))
1183 spec.match_flags |= EF4_FILTER_MATCH_LOC_MAC_IG;
1184 else if (is_broadcast_ether_addr(mac_mask->h_dest))
1185 spec.match_flags |= EF4_FILTER_MATCH_LOC_MAC;
1186 else
1187 return -EINVAL;
1188 ether_addr_copy(spec.loc_mac, mac_entry->h_dest);
1189 }
1190 if (!is_zero_ether_addr(mac_mask->h_source)) {
1191 if (!is_broadcast_ether_addr(mac_mask->h_source))
1192 return -EINVAL;
1193 spec.match_flags |= EF4_FILTER_MATCH_REM_MAC;
1194 ether_addr_copy(spec.rem_mac, mac_entry->h_source);
1195 }
1196 if (mac_mask->h_proto) {
1197 if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
1198 return -EINVAL;
1199 spec.match_flags |= EF4_FILTER_MATCH_ETHER_TYPE;
1200 spec.ether_type = mac_entry->h_proto;
1201 }
1202 break;
1203
1204 default:
1205 return -EINVAL;
1206 }
1207
1208 if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
1209 if (rule->m_ext.vlan_tci != htons(0xfff))
1210 return -EINVAL;
1211 spec.match_flags |= EF4_FILTER_MATCH_OUTER_VID;
1212 spec.outer_vid = rule->h_ext.vlan_tci;
1213 }
1214
1215 rc = ef4_filter_insert_filter(efx, &spec, true);
1216 if (rc < 0)
1217 return rc;
1218
1219 rule->location = rc;
1220 return 0;
1221 }
1222
1223 static int ef4_ethtool_set_rxnfc(struct net_device *net_dev,
1224 struct ethtool_rxnfc *info)
1225 {
1226 struct ef4_nic *efx = netdev_priv(net_dev);
1227
1228 if (ef4_filter_get_rx_id_limit(efx) == 0)
1229 return -EOPNOTSUPP;
1230
1231 switch (info->cmd) {
1232 case ETHTOOL_SRXCLSRLINS:
1233 return ef4_ethtool_set_class_rule(efx, &info->fs);
1234
1235 case ETHTOOL_SRXCLSRLDEL:
1236 return ef4_filter_remove_id_safe(efx, EF4_FILTER_PRI_MANUAL,
1237 info->fs.location);
1238
1239 default:
1240 return -EOPNOTSUPP;
1241 }
1242 }
1243
1244 static u32 ef4_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
1245 {
1246 struct ef4_nic *efx = netdev_priv(net_dev);
1247
1248 return ((ef4_nic_rev(efx) < EF4_REV_FALCON_B0 ||
1249 efx->n_rx_channels == 1) ?
1250 0 : ARRAY_SIZE(efx->rx_indir_table));
1251 }
1252
1253 static int ef4_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
1254 u8 *hfunc)
1255 {
1256 struct ef4_nic *efx = netdev_priv(net_dev);
1257
1258 if (hfunc)
1259 *hfunc = ETH_RSS_HASH_TOP;
1260 if (indir)
1261 memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
1262 return 0;
1263 }
1264
1265 static int ef4_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
1266 const u8 *key, const u8 hfunc)
1267 {
1268 struct ef4_nic *efx = netdev_priv(net_dev);
1269
1270
1271 if (key ||
1272 (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
1273 return -EOPNOTSUPP;
1274 if (!indir)
1275 return 0;
1276
1277 return efx->type->rx_push_rss_config(efx, true, indir);
1278 }
1279
1280 static int ef4_ethtool_get_module_eeprom(struct net_device *net_dev,
1281 struct ethtool_eeprom *ee,
1282 u8 *data)
1283 {
1284 struct ef4_nic *efx = netdev_priv(net_dev);
1285 int ret;
1286
1287 if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
1288 return -EOPNOTSUPP;
1289
1290 mutex_lock(&efx->mac_lock);
1291 ret = efx->phy_op->get_module_eeprom(efx, ee, data);
1292 mutex_unlock(&efx->mac_lock);
1293
1294 return ret;
1295 }
1296
1297 static int ef4_ethtool_get_module_info(struct net_device *net_dev,
1298 struct ethtool_modinfo *modinfo)
1299 {
1300 struct ef4_nic *efx = netdev_priv(net_dev);
1301 int ret;
1302
1303 if (!efx->phy_op || !efx->phy_op->get_module_info)
1304 return -EOPNOTSUPP;
1305
1306 mutex_lock(&efx->mac_lock);
1307 ret = efx->phy_op->get_module_info(efx, modinfo);
1308 mutex_unlock(&efx->mac_lock);
1309
1310 return ret;
1311 }
1312
1313 const struct ethtool_ops ef4_ethtool_ops = {
1314 .get_drvinfo = ef4_ethtool_get_drvinfo,
1315 .get_regs_len = ef4_ethtool_get_regs_len,
1316 .get_regs = ef4_ethtool_get_regs,
1317 .get_msglevel = ef4_ethtool_get_msglevel,
1318 .set_msglevel = ef4_ethtool_set_msglevel,
1319 .nway_reset = ef4_ethtool_nway_reset,
1320 .get_link = ethtool_op_get_link,
1321 .get_coalesce = ef4_ethtool_get_coalesce,
1322 .set_coalesce = ef4_ethtool_set_coalesce,
1323 .get_ringparam = ef4_ethtool_get_ringparam,
1324 .set_ringparam = ef4_ethtool_set_ringparam,
1325 .get_pauseparam = ef4_ethtool_get_pauseparam,
1326 .set_pauseparam = ef4_ethtool_set_pauseparam,
1327 .get_sset_count = ef4_ethtool_get_sset_count,
1328 .self_test = ef4_ethtool_self_test,
1329 .get_strings = ef4_ethtool_get_strings,
1330 .set_phys_id = ef4_ethtool_phys_id,
1331 .get_ethtool_stats = ef4_ethtool_get_stats,
1332 .get_wol = ef4_ethtool_get_wol,
1333 .set_wol = ef4_ethtool_set_wol,
1334 .reset = ef4_ethtool_reset,
1335 .get_rxnfc = ef4_ethtool_get_rxnfc,
1336 .set_rxnfc = ef4_ethtool_set_rxnfc,
1337 .get_rxfh_indir_size = ef4_ethtool_get_rxfh_indir_size,
1338 .get_rxfh = ef4_ethtool_get_rxfh,
1339 .set_rxfh = ef4_ethtool_set_rxfh,
1340 .get_module_info = ef4_ethtool_get_module_info,
1341 .get_module_eeprom = ef4_ethtool_get_module_eeprom,
1342 .get_link_ksettings = ef4_ethtool_get_link_ksettings,
1343 .set_link_ksettings = ef4_ethtool_set_link_ksettings,
1344 };
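/*
 * A minimal sketch of how this table is used (the actual hook-up lives in
 * the driver's netdev registration path, not in this file, so the call
 * site below is illustrative only):
 *
 *	net_dev->ethtool_ops = &ef4_ethtool_ops;
 *
 * After that, the ethtool core invokes these handlers under RTNL.
 */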