This source file includes the following definitions:
- efx_get_udp_tunnel_type_name
- efx_check_disabled
- efx_process_channel
- efx_update_irq_mod
- efx_poll
- efx_probe_eventq
- efx_init_eventq
- efx_start_eventq
- efx_stop_eventq
- efx_fini_eventq
- efx_remove_eventq
- efx_alloc_channel
- efx_copy_channel
- efx_probe_channel
- efx_get_channel_name
- efx_set_channel_names
- efx_probe_channels
- efx_start_datapath
- efx_stop_datapath
- efx_remove_channel
- efx_remove_channels
- efx_realloc_channels
- efx_schedule_slow_fill
- efx_default_channel_want_txqs
- efx_channel_dummy_op_int
- efx_channel_dummy_op_void
- efx_link_status_changed
- efx_link_set_advertising
- efx_link_clear_advertising
- efx_link_set_wanted_fc
- efx_mac_reconfigure
- __efx_reconfigure_port
- efx_reconfigure_port
- efx_mac_work
- efx_probe_port
- efx_init_port
- efx_start_port
- efx_stop_port
- efx_fini_port
- efx_remove_port
- efx_same_controller
- efx_associate
- efx_dissociate
- efx_init_io
- efx_fini_io
- efx_set_default_rx_indir_table
- efx_wanted_parallelism
- efx_probe_interrupts
- efx_set_interrupt_affinity
- efx_clear_interrupt_affinity
- efx_soft_enable_interrupts
- efx_soft_disable_interrupts
- efx_enable_interrupts
- efx_disable_interrupts
- efx_remove_interrupts
- efx_set_channels
- efx_probe_nic
- efx_remove_nic
- efx_probe_filters
- efx_remove_filters
- efx_probe_all
- efx_start_all
- efx_stop_all
- efx_remove_all
- efx_usecs_to_ticks
- efx_ticks_to_usecs
- efx_init_irq_moderation
- efx_get_irq_moderation
- efx_monitor
- efx_ioctl
- efx_init_napi_channel
- efx_init_napi
- efx_fini_napi_channel
- efx_fini_napi
- efx_net_open
- efx_net_stop
- efx_net_stats
- efx_watchdog
- efx_change_mtu
- efx_set_mac_address
- efx_set_rx_mode
- efx_set_features
- efx_get_phys_port_id
- efx_get_phys_port_name
- efx_vlan_rx_add_vid
- efx_vlan_rx_kill_vid
- efx_udp_tunnel_type_map
- efx_udp_tunnel_add
- efx_udp_tunnel_del
- efx_update_name
- efx_netdev_event
- show_phy_type
- show_mcdi_log
- set_mcdi_log
- efx_register_netdev
- efx_unregister_netdev
- efx_reset_down
- efx_reset_up
- efx_reset
- efx_try_recovery
- efx_wait_for_bist_end
- efx_reset_work
- efx_schedule_reset
- efx_port_dummy_op_int
- efx_port_dummy_op_void
- efx_port_dummy_op_poll
- efx_init_struct
- efx_fini_struct
- efx_update_sw_stats
- efx_filter_spec_equal
- efx_filter_spec_hash
- efx_rps_check_rule
- efx_rps_hash_bucket
- efx_rps_hash_find
- efx_rps_hash_add
- efx_rps_hash_del
- efx_alloc_rss_context_entry
- efx_find_rss_context_entry
- efx_free_rss_context_entry
- efx_pci_remove_main
- efx_pci_remove
- efx_probe_vpd_strings
- efx_pci_probe_main
- efx_pci_probe_post_io
- efx_pci_probe
- efx_pci_sriov_configure
- efx_pm_freeze
- efx_pm_thaw
- efx_pm_poweroff
- efx_pm_resume
- efx_pm_suspend
- efx_io_error_detected
- efx_io_slot_reset
- efx_io_resume
- efx_init_module
- efx_exit_module
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
#include <linux/aer.h>
#include <linux/interrupt.h>
#include "net_driver.h"
#include <net/gre.h>
#include <net/udp_tunnel.h>
#include "efx.h"
#include "nic.h"
#include "io.h"
#include "selftest.h"
#include "sriov.h"

#include "mcdi.h"
#include "mcdi_pcol.h"
#include "workarounds.h"

const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
const char *const efx_loopback_mode_names[] = {
	[LOOPBACK_NONE]		= "NONE",
	[LOOPBACK_DATA]		= "DATAPATH",
	[LOOPBACK_GMAC]		= "GMAC",
	[LOOPBACK_XGMII]	= "XGMII",
	[LOOPBACK_XGXS]		= "XGXS",
	[LOOPBACK_XAUI]		= "XAUI",
	[LOOPBACK_GMII]		= "GMII",
	[LOOPBACK_SGMII]	= "SGMII",
	[LOOPBACK_XGBR]		= "XGBR",
	[LOOPBACK_XFI]		= "XFI",
	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
	[LOOPBACK_GPHY]		= "GPHY",
	[LOOPBACK_PHYXS]	= "PHYXS",
	[LOOPBACK_PCS]		= "PCS",
	[LOOPBACK_PMAPMD]	= "PMA/PMD",
	[LOOPBACK_XPORT]	= "XPORT",
	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
	[LOOPBACK_XAUI_WS_FAR]	= "XAUI_WS_FAR",
	[LOOPBACK_XAUI_WS_NEAR]	= "XAUI_WS_NEAR",
	[LOOPBACK_GMII_WS]	= "GMII_WS",
	[LOOPBACK_XFI_WS]	= "XFI_WS",
	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
};

const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = {
	[RESET_TYPE_INVISIBLE]		= "INVISIBLE",
	[RESET_TYPE_ALL]		= "ALL",
	[RESET_TYPE_RECOVER_OR_ALL]	= "RECOVER_OR_ALL",
	[RESET_TYPE_WORLD]		= "WORLD",
	[RESET_TYPE_RECOVER_OR_DISABLE]	= "RECOVER_OR_DISABLE",
	[RESET_TYPE_DATAPATH]		= "DATAPATH",
	[RESET_TYPE_MC_BIST]		= "MC_BIST",
	[RESET_TYPE_DISABLE]		= "DISABLE",
	[RESET_TYPE_TX_WATCHDOG]	= "TX_WATCHDOG",
	[RESET_TYPE_INT_ERROR]		= "INT_ERROR",
	[RESET_TYPE_DMA_ERROR]		= "DMA_ERROR",
	[RESET_TYPE_TX_SKIP]		= "TX_SKIP",
	[RESET_TYPE_MC_FAILURE]		= "MC_FAILURE",
	[RESET_TYPE_MCDI_TIMEOUT]	= "MCDI_TIMEOUT (FLR)",
};

static const char *const efx_udp_tunnel_type_names[] = {
	[TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN] = "vxlan",
	[TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE] = "geneve",
};

void efx_get_udp_tunnel_type_name(u16 type, char *buf, size_t buflen)
{
	if (type < ARRAY_SIZE(efx_udp_tunnel_type_names) &&
	    efx_udp_tunnel_type_names[type] != NULL)
		snprintf(buf, buflen, "%s", efx_udp_tunnel_type_names[type]);
	else
		snprintf(buf, buflen, "type %d", type);
}

static struct workqueue_struct *reset_workqueue;

#define BIST_WAIT_DELAY_MS	100
#define BIST_WAIT_DELAY_COUNT	100

bool efx_separate_tx_channels;
module_param(efx_separate_tx_channels, bool, 0444);
MODULE_PARM_DESC(efx_separate_tx_channels,
		 "Use separate channels for TX and RX");

static int napi_weight = 64;

static unsigned int efx_monitor_interval = 1 * HZ;

static unsigned int rx_irq_mod_usec = 60;

static unsigned int tx_irq_mod_usec = 150;

static unsigned int interrupt_mode;

static unsigned int rss_cpus;
module_param(rss_cpus, uint, 0444);
MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");

static bool phy_flash_cfg;
module_param(phy_flash_cfg, bool, 0644);
MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");

static unsigned irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
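
/*
 * Editorial example (not from the original source): the writable parameters
 * above can be set at module load time, e.g.
 *
 *	modprobe sfc rss_cpus=4 efx_separate_tx_channels=1 debug=0x2047
 *
 * The module name "sfc" is assumed here from the PCI region name used in
 * efx_init_io() below; parameters with mode 0644 can also be changed later
 * via /sys/module/sfc/parameters/.
 */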

static int efx_soft_enable_interrupts(struct efx_nic *efx);
static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
static void efx_remove_port(struct efx_nic *efx);
static void efx_init_napi_channel(struct efx_channel *channel);
static void efx_fini_napi(struct efx_nic *efx);
static void efx_fini_napi_channel(struct efx_channel *channel);
static void efx_fini_struct(struct efx_nic *efx);
static void efx_start_all(struct efx_nic *efx);
static void efx_stop_all(struct efx_nic *efx);

#define EFX_ASSERT_RESET_SERIALISED(efx)		\
	do {						\
		if ((efx->state == STATE_READY) ||	\
		    (efx->state == STATE_RECOVERY) ||	\
		    (efx->state == STATE_DISABLED))	\
			ASSERT_RTNL();			\
	} while (0)

static int efx_check_disabled(struct efx_nic *efx)
{
	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
		netif_err(efx, drv, efx->net_dev,
			  "device is disabled due to earlier errors\n");
		return -EIO;
	}
	return 0;
}
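
/*
 * Editorial note: efx_check_disabled() is the usual guard at the top of
 * reconfiguration paths; callers are expected to bail out on its error
 * code, e.g.
 *
 *	rc = efx_check_disabled(efx);
 *	if (rc)
 *		return rc;
 */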

static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}
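
/*
 * Editorial note: efx_process_channel() follows the usual NAPI contract --
 * it returns the number of packets processed ("spent"), and a return value
 * strictly below the budget tells efx_poll() below that the event queue has
 * been drained and interrupts may safely be re-enabled.
 */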

static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}
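
/*
 * Worked example (editorial, using the default thresholds above): with
 * irq_adapt_low_thresh=8000 and irq_adapt_high_thresh=16000, a channel
 * whose event score over the last 1000 interrupts (see efx_poll() below)
 * stayed below 8000 has its moderation reduced by one step (more
 * interrupts, lower latency), while a score above 16000 increases it by
 * one step, capped at efx->irq_rx_moderation_us (fewer interrupts, higher
 * throughput).
 */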

static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		schedule_work(&channel->filter_work);
#endif

		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

static int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

static int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

static void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

static void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

static struct efx_channel *
efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = i * EFX_TXQ_TYPES + j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}

static struct efx_channel *
efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_TXQ_TYPES; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		tx_queue->cb_page = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}
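
/*
 * Editorial note: efx_copy_channel() does a shallow structure copy and then
 * deliberately clears every pointer to per-channel DMA state (buffer,
 * cb_page, txd/rxd, eventq) so that the copy can be re-probed from scratch
 * by efx_probe_channel() during efx_realloc_channels() without aliasing the
 * original channel's resources.
 */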

static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

static void
efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;
	if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (channel->channel < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

static void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

static int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	efx->next_buffer_table = 0;

	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

static void efx_start_datapath(struct efx_nic *efx)
{
	netdev_features_t old_features = efx->net_dev->features;
	bool old_rx_scatter = efx->rx_scatter;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	size_t rx_buf_len;

	efx->rx_dma_len = (efx->rx_prefix_size +
			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
			   efx->type->rx_buffer_padding);
	rx_buf_len = (sizeof(struct efx_rx_page_state) +
		      efx->rx_ip_align + efx->rx_dma_len);
	if (rx_buf_len <= PAGE_SIZE) {
		efx->rx_scatter = efx->type->always_rx_scatter;
		efx->rx_buffer_order = 0;
	} else if (efx->type->can_rx_scatter) {
		BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
			     2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
				       EFX_RX_BUF_ALIGNMENT) >
			     PAGE_SIZE);
		efx->rx_scatter = true;
		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
		efx->rx_buffer_order = 0;
	} else {
		efx->rx_scatter = false;
		efx->rx_buffer_order = get_order(rx_buf_len);
	}

	efx_rx_config_page_split(efx);
	if (efx->rx_buffer_order)
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u; page order=%u batch=%u\n",
			  efx->rx_dma_len, efx->rx_buffer_order,
			  efx->rx_pages_per_batch);
	else
		netif_dbg(efx, drv, efx->net_dev,
			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
			  efx->rx_dma_len, efx->rx_page_buf_step,
			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);

	efx->net_dev->hw_features |= efx->net_dev->features;
	efx->net_dev->hw_features &= ~efx->fixed_features;
	efx->net_dev->features |= efx->fixed_features;
	if (efx->net_dev->features != old_features)
		netdev_features_change(efx->net_dev);

	if (efx->rx_scatter != old_rx_scatter)
		efx->type->filter_update_rx_scatter(efx);

	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_stop_eventq(channel);
			efx_fast_push_rx_descriptors(rx_queue, false);
			efx_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}

	efx_ptp_start_datapath(efx);

	if (netif_device_present(efx->net_dev))
		netif_tx_wake_all_queues(efx->net_dev);
}
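
/*
 * Worked example (editorial): with a 1500-byte MTU, rx_dma_len is roughly
 * rx_prefix_size + EFX_MAX_FRAME_LEN(1500) + rx_buffer_padding, which fits
 * comfortably in a 4KiB page together with struct efx_rx_page_state and the
 * IP alignment, so rx_buffer_order stays 0.  With a 9000-byte MTU the sum
 * exceeds PAGE_SIZE; on hardware with can_rx_scatter the frame is instead
 * split across EFX_RX_USR_BUF_SIZE buffers, and only NICs without scatter
 * fall back to high-order page allocations.
 */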

static void efx_stop_datapath(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->port_enabled);

	efx_ptp_stop_datapath(efx);

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	rc = efx->type->fini_dmaq(efx);
	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

static void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

static void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);
}

int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	u32 old_rxq_entries, old_txq_entries;
	unsigned i, next_buffer_table = 0;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}
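
/*
 * Editorial note: the swap-and-rollback pattern above works because channel
 * reallocation happens entirely on shadow copies: new channels are built in
 * other_channel[], swapped into efx->channel[] in one pass, and on probe
 * failure the same swap is repeated to restore the originals, so the "out:"
 * path can unconditionally free whichever set ended up in other_channel[].
 */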

void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}

static bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
	       channel->efx->n_tx_channels;
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
	.keep_eventq		= false,
	.want_pio		= true,
};

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}

void efx_link_status_changed(struct efx_nic *efx)
{
	struct efx_link_state *link_state = &efx->link_state;

	if (!netif_running(efx->net_dev))
		return;

	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
		efx->n_link_state_changes++;

		if (link_state->up)
			netif_carrier_on(efx->net_dev);
		else
			netif_carrier_off(efx->net_dev);
	}

	if (link_state->up)
		netif_info(efx, link, efx->net_dev,
			   "link up at %uMbps %s-duplex (MTU %d)\n",
			   link_state->speed, link_state->fd ? "full" : "half",
			   efx->net_dev->mtu);
	else
		netif_info(efx, link, efx->net_dev, "link down\n");
}

void efx_link_set_advertising(struct efx_nic *efx,
			      const unsigned long *advertising)
{
	memcpy(efx->link_advertising, advertising,
	       sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));

	efx->link_advertising[0] |= ADVERTISED_Autoneg;
	if (advertising[0] & ADVERTISED_Pause)
		efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
	else
		efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
	if (advertising[0] & ADVERTISED_Asym_Pause)
		efx->wanted_fc ^= EFX_FC_TX;
}

void efx_link_clear_advertising(struct efx_nic *efx)
{
	bitmap_zero(efx->link_advertising, __ETHTOOL_LINK_MODE_MASK_NBITS);
	efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
}

void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
{
	efx->wanted_fc = wanted_fc;
	if (efx->link_advertising[0]) {
		if (wanted_fc & EFX_FC_RX)
			efx->link_advertising[0] |= (ADVERTISED_Pause |
						     ADVERTISED_Asym_Pause);
		else
			efx->link_advertising[0] &= ~(ADVERTISED_Pause |
						      ADVERTISED_Asym_Pause);
		if (wanted_fc & EFX_FC_TX)
			efx->link_advertising[0] ^= ADVERTISED_Asym_Pause;
	}
}
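
/*
 * Editorial note: the XOR above implements the standard 802.3
 * pause-advertising table.  Mapping wanted_fc -> (Pause, Asym_Pause):
 *
 *	RX+TX	-> Pause		(symmetric pause)
 *	RX	-> Pause|Asym_Pause	(receive pause, symmetric or not)
 *	TX	-> Asym_Pause		(only send pause frames)
 *	none	-> neither
 *
 * Setting both bits when RX is wanted and then toggling Asym_Pause when TX
 * is wanted produces exactly this table.
 */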

static void efx_fini_port(struct efx_nic *efx);

void efx_mac_reconfigure(struct efx_nic *efx)
{
	down_read(&efx->filter_sem);
	efx->type->reconfigure_mac(efx);
	up_read(&efx->filter_sem);
}

int __efx_reconfigure_port(struct efx_nic *efx)
{
	enum efx_phy_mode phy_mode;
	int rc;

	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	phy_mode = efx->phy_mode;
	if (LOOPBACK_INTERNAL(efx))
		efx->phy_mode |= PHY_MODE_TX_DISABLED;
	else
		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;

	rc = efx->type->reconfigure_port(efx);

	if (rc)
		efx->phy_mode = phy_mode;

	return rc;
}

int efx_reconfigure_port(struct efx_nic *efx)
{
	int rc;

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	rc = __efx_reconfigure_port(efx);
	mutex_unlock(&efx->mac_lock);

	return rc;
}

static void efx_mac_work(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);

	mutex_lock(&efx->mac_lock);
	if (efx->port_enabled)
		efx_mac_reconfigure(efx);
	mutex_unlock(&efx->mac_lock);
}

static int efx_probe_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "create port\n");

	if (phy_flash_cfg)
		efx->phy_mode = PHY_MODE_SPECIAL;

	rc = efx->type->probe_port(efx);
	if (rc)
		return rc;

	ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);

	return 0;
}

static int efx_init_port(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "init port\n");

	mutex_lock(&efx->mac_lock);

	rc = efx->phy_op->init(efx);
	if (rc)
		goto fail1;

	efx->port_initialized = true;

	efx_mac_reconfigure(efx);

	rc = efx->phy_op->reconfigure(efx);
	if (rc && rc != -EPERM)
		goto fail2;

	mutex_unlock(&efx->mac_lock);
	return 0;

fail2:
	efx->phy_op->fini(efx);
fail1:
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_start_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
	BUG_ON(efx->port_enabled);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = true;

	efx_mac_reconfigure(efx);

	mutex_unlock(&efx->mac_lock);
}

static void efx_stop_port(struct efx_nic *efx)
{
	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");

	EFX_ASSERT_RESET_SERIALISED(efx);

	mutex_lock(&efx->mac_lock);
	efx->port_enabled = false;
	mutex_unlock(&efx->mac_lock);

	netif_addr_lock_bh(efx->net_dev);
	netif_addr_unlock_bh(efx->net_dev);

	cancel_delayed_work_sync(&efx->monitor_work);
	efx_selftest_async_cancel(efx);
	cancel_work_sync(&efx->mac_work);
}

static void efx_fini_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");

	if (!efx->port_initialized)
		return;

	efx->phy_op->fini(efx);
	efx->port_initialized = false;

	efx->link_state.up = false;
	efx_link_status_changed(efx);
}

static void efx_remove_port(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");

	efx->type->remove_port(efx);
}

static LIST_HEAD(efx_primary_list);
static LIST_HEAD(efx_unassociated_list);

static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
{
	return left->type == right->type &&
	       left->vpd_sn && right->vpd_sn &&
	       !strcmp(left->vpd_sn, right->vpd_sn);
}

static void efx_associate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	if (efx->primary == efx) {
		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
		list_add_tail(&efx->node, &efx_primary_list);

		list_for_each_entry_safe(other, next, &efx_unassociated_list,
					 node) {
			if (efx_same_controller(efx, other)) {
				list_del(&other->node);
				netif_dbg(other, probe, other->net_dev,
					  "moving to secondary list of %s %s\n",
					  pci_name(efx->pci_dev),
					  efx->net_dev->name);
				list_add_tail(&other->node,
					      &efx->secondary_list);
				other->primary = efx;
			}
		}
	} else {
		list_for_each_entry(other, &efx_primary_list, node) {
			if (efx_same_controller(efx, other)) {
				netif_dbg(efx, probe, efx->net_dev,
					  "adding to secondary list of %s %s\n",
					  pci_name(other->pci_dev),
					  other->net_dev->name);
				list_add_tail(&efx->node,
					      &other->secondary_list);
				efx->primary = other;
				return;
			}
		}

		netif_dbg(efx, probe, efx->net_dev,
			  "adding to unassociated list\n");
		list_add_tail(&efx->node, &efx_unassociated_list);
	}
}

static void efx_dissociate(struct efx_nic *efx)
{
	struct efx_nic *other, *next;

	list_del(&efx->node);
	efx->primary = NULL;

	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
		list_del(&other->node);
		netif_dbg(other, probe, other->net_dev,
			  "moving to unassociated list\n");
		list_add_tail(&other->node, &efx_unassociated_list);
		other->primary = NULL;
	}
}

static int efx_init_io(struct efx_nic *efx)
{
	struct pci_dev *pci_dev = efx->pci_dev;
	dma_addr_t dma_mask = efx->type->max_dma_mask;
	unsigned int mem_map_size = efx->type->mem_map_size(efx);
	int rc, bar;

	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");

	bar = efx->type->mem_bar(efx);

	rc = pci_enable_device(pci_dev);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to enable PCI device\n");
		goto fail1;
	}

	pci_set_master(pci_dev);

	while (dma_mask > 0x7fffffffUL) {
		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
		if (rc == 0)
			break;
		dma_mask >>= 1;
	}
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "could not find a suitable DMA mask\n");
		goto fail2;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "using DMA mask %llx\n", (unsigned long long) dma_mask);

	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
	rc = pci_request_region(pci_dev, bar, "sfc");
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "request for memory BAR failed\n");
		rc = -EIO;
		goto fail3;
	}
	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
	if (!efx->membase) {
		netif_err(efx, probe, efx->net_dev,
			  "could not map memory BAR at %llx+%x\n",
			  (unsigned long long)efx->membase_phys, mem_map_size);
		rc = -ENOMEM;
		goto fail4;
	}
	netif_dbg(efx, probe, efx->net_dev,
		  "memory BAR at %llx+%x (virtual %p)\n",
		  (unsigned long long)efx->membase_phys, mem_map_size,
		  efx->membase);

	return 0;

fail4:
	pci_release_region(efx->pci_dev, bar);
fail3:
	efx->membase_phys = 0;
fail2:
	pci_disable_device(efx->pci_dev);
fail1:
	return rc;
}
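
/*
 * Editorial note: the DMA-mask loop above starts from the NIC type's
 * maximum and halves the mask on each failure, stopping before it would
 * drop below the 31-bit floor (0x7fffffff).  Halving an all-ones mask is
 * equivalent to asking for one less address bit, so the loop settles on the
 * widest mask the platform's DMA layer will accept.
 */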

static void efx_fini_io(struct efx_nic *efx)
{
	int bar;

	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");

	if (efx->membase) {
		iounmap(efx->membase);
		efx->membase = NULL;
	}

	if (efx->membase_phys) {
		bar = efx->type->mem_bar(efx);
		pci_release_region(efx->pci_dev, bar);
		efx->membase_phys = 0;
	}

	if (!pci_vfs_assigned(efx->pci_dev))
		pci_disable_device(efx->pci_dev);
}

void efx_set_default_rx_indir_table(struct efx_nic *efx,
				    struct efx_rss_context *ctx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
		ctx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_sibling_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}
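
/*
 * Worked example (editorial): on a 4-core/8-thread machine with no rss_cpus
 * override, the sibling-mask walk above counts each physical core once --
 * the first hyperthread of a core increments count and marks both siblings
 * as visited -- so efx_wanted_parallelism() returns 4, not 8.  Hyperthread
 * siblings share caches, so one RX queue per physical core is the more
 * useful default.
 */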

static int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		n_channels = efx_wanted_parallelism(efx);
		if (efx_separate_tx_channels)
			n_channels *= 2;
		n_channels += extra_channels;
		n_channels = min(n_channels, efx->max_channels);

		for (i = 0; i < n_channels; i++)
			xentries[i].entry = i;
		rc = pci_enable_msix_range(efx->pci_dev,
					   xentries, 1, n_channels);
		if (rc < 0) {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			efx->n_channels = n_channels;
			if (n_channels > extra_channels)
				n_channels -= extra_channels;
			if (efx_separate_tx_channels) {
				efx->n_tx_channels = min(max(n_channels / 2,
							     1U),
							 efx->max_tx_channels);
				efx->n_rx_channels = max(n_channels -
							 efx->n_tx_channels,
							 1U);
			} else {
				efx->n_tx_channels = min(n_channels,
							 efx->max_tx_channels);
				efx->n_rx_channels = n_channels;
			}
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	efx->n_extra_tx_channels = 0;
	j = efx->n_channels;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
		    efx->n_channels <= extra_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((efx->n_rx_channels > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   efx->n_rx_channels : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = efx->n_rx_channels;

	return 0;
}
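
/*
 * Editorial note: interrupt probing is a fallback ladder.  MSI-X is tried
 * first with one vector per wanted channel; if the allocation fails
 * outright the driver drops to MSI (a single vector), and if MSI also fails
 * it drops to the legacy INTx line -- in each case only as far as the NIC
 * type's min_interrupt_mode allows.  A partial MSI-X grant is accepted and
 * simply shrinks the channel count.
 */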

#if defined(CONFIG_SMP)
static void efx_set_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int cpu;

	efx_for_each_channel(channel, efx) {
		cpu = cpumask_local_spread(channel->channel,
					   pcibus_to_node(efx->pci_dev->bus));
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

static void efx_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
static void
efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}

static void
efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}
#endif

static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx_mcdi_flush_async(efx);
}

static int efx_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

static void efx_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}

static void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	efx->legacy_irq = 0;
}

static void efx_set_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_tx_queue *tx_queue;

	efx->tx_channel_offset =
		efx_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		efx_for_each_channel_tx_queue(tx_queue, channel)
			tx_queue->queue -= (efx->tx_channel_offset *
					    EFX_TXQ_TYPES);
	}
}

static int efx_probe_nic(struct efx_nic *efx)
{
	int rc;

	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");

	rc = efx->type->probe(efx);
	if (rc)
		return rc;

	do {
		if (!efx->max_channels || !efx->max_tx_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "Insufficient resources to allocate"
				  " any channels\n");
			rc = -ENOSPC;
			goto fail1;
		}

		rc = efx_probe_interrupts(efx);
		if (rc)
			goto fail1;

		efx_set_channels(efx);

		rc = efx->type->dimension_resources(efx);
		if (rc != 0 && rc != -EAGAIN)
			goto fail2;

		if (rc == -EAGAIN)
			efx_remove_interrupts(efx);

	} while (rc == -EAGAIN);

	if (efx->n_channels > 1)
		netdev_rss_key_fill(efx->rss_context.rx_hash_key,
				    sizeof(efx->rss_context.rx_hash_key));
	efx_set_default_rx_indir_table(efx, &efx->rss_context);

	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);

	efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
				true);

	return 0;

fail2:
	efx_remove_interrupts(efx);
fail1:
	efx->type->remove(efx);
	return rc;
}

static void efx_remove_nic(struct efx_nic *efx)
{
	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");

	efx_remove_interrupts(efx);
	efx->type->remove(efx);
}

static int efx_probe_filters(struct efx_nic *efx)
{
	int rc;

	init_rwsem(&efx->filter_sem);
	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
		}

		if (!success) {
			efx_for_each_channel(channel, efx)
				kfree(channel->rps_flow_id);
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}

		efx->rps_expire_index = efx->rps_expire_channel = 0;
	}
#endif
out_unlock:
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		kfree(channel->rps_flow_id);
#endif
	down_write(&efx->filter_sem);
	efx->type->filter_table_remove(efx);
	up_write(&efx->filter_sem);
}

static int efx_probe_all(struct efx_nic *efx)
{
	int rc;

	rc = efx_probe_nic(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
		goto fail1;
	}

	rc = efx_probe_port(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
		goto fail2;
	}

	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
		rc = -EINVAL;
		goto fail3;
	}
	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;

#ifdef CONFIG_SFC_SRIOV
	rc = efx->type->vswitching_probe(efx);
	if (rc)
		netif_warn(efx, probe, efx->net_dev,
			   "failed to setup vswitching rc=%d;"
			   " VFs may not function\n", rc);
#endif

	rc = efx_probe_filters(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "failed to create filter tables\n");
		goto fail4;
	}

	rc = efx_probe_channels(efx);
	if (rc)
		goto fail5;

	return 0;

fail5:
	efx_remove_filters(efx);
fail4:
#ifdef CONFIG_SFC_SRIOV
	efx->type->vswitching_remove(efx);
#endif
fail3:
	efx_remove_port(efx);
fail2:
	efx_remove_nic(efx);
fail1:
	return rc;
}

static void efx_start_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);
	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->port_enabled || !netif_running(efx->net_dev) ||
	    efx->reset_pending)
		return;

	efx_start_port(efx);
	efx_start_datapath(efx);

	if (efx->type->monitor != NULL)
		queue_delayed_work(efx->workqueue, &efx->monitor_work,
				   efx_monitor_interval);

	mutex_lock(&efx->mac_lock);
	if (efx->phy_op->poll(efx))
		efx_link_status_changed(efx);
	mutex_unlock(&efx->mac_lock);

	efx->type->start_stats(efx);
	efx->type->pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);
}

static void efx_stop_all(struct efx_nic *efx)
{
	EFX_ASSERT_RESET_SERIALISED(efx);

	if (!efx->port_enabled)
		return;

	efx->type->pull_stats(efx);
	spin_lock_bh(&efx->stats_lock);
	efx->type->update_stats(efx, NULL, NULL);
	spin_unlock_bh(&efx->stats_lock);
	efx->type->stop_stats(efx);
	efx_stop_port(efx);

	WARN_ON(netif_running(efx->net_dev) &&
		netif_device_present(efx->net_dev));
	netif_tx_disable(efx->net_dev);

	efx_stop_datapath(efx);
}

static void efx_remove_all(struct efx_nic *efx)
{
	efx_remove_channels(efx);
	efx_remove_filters(efx);
#ifdef CONFIG_SFC_SRIOV
	efx->type->vswitching_remove(efx);
#endif
	efx_remove_port(efx);
	efx_remove_nic(efx);
}

unsigned int efx_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs)
{
	if (usecs == 0)
		return 0;
	if (usecs * 1000 < efx->timer_quantum_ns)
		return 1;
	return usecs * 1000 / efx->timer_quantum_ns;
}

unsigned int efx_ticks_to_usecs(struct efx_nic *efx, unsigned int ticks)
{
	return DIV_ROUND_UP(ticks * efx->timer_quantum_ns, 1000);
}
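
/*
 * Worked example (editorial, using an illustrative quantum not taken from
 * this file): with timer_quantum_ns = 6144, a request for 60 us becomes
 * 60000 / 6144 = 9 ticks (rounded down, but never below 1 for a non-zero
 * request), and converting 9 ticks back gives DIV_ROUND_UP(9 * 6144, 1000)
 * = 56 us -- round-tripping through ticks can shorten the interval
 * slightly.
 */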

int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
			    unsigned int rx_usecs, bool rx_adaptive,
			    bool rx_may_override_tx)
{
	struct efx_channel *channel;
	unsigned int timer_max_us;

	EFX_ASSERT_RESET_SERIALISED(efx);

	timer_max_us = efx->timer_max_ns / 1000;

	if (tx_usecs > timer_max_us || rx_usecs > timer_max_us)
		return -EINVAL;

	if (tx_usecs != rx_usecs && efx->tx_channel_offset == 0 &&
	    !rx_may_override_tx) {
		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
			  "RX and TX IRQ moderation must be equal\n");
		return -EINVAL;
	}

	efx->irq_rx_adaptive = rx_adaptive;
	efx->irq_rx_moderation_us = rx_usecs;
	efx_for_each_channel(channel, efx) {
		if (efx_channel_has_rx_queue(channel))
			channel->irq_moderation_us = rx_usecs;
		else if (efx_channel_has_tx_queues(channel))
			channel->irq_moderation_us = tx_usecs;
	}

	return 0;
}

void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
			    unsigned int *rx_usecs, bool *rx_adaptive)
{
	*rx_adaptive = efx->irq_rx_adaptive;
	*rx_usecs = efx->irq_rx_moderation_us;

	if (efx->tx_channel_offset == 0) {
		*tx_usecs = *rx_usecs;
	} else {
		struct efx_channel *tx_channel;

		tx_channel = efx->channel[efx->tx_channel_offset];
		*tx_usecs = tx_channel->irq_moderation_us;
	}
}

static void efx_monitor(struct work_struct *data)
{
	struct efx_nic *efx = container_of(data, struct efx_nic,
					   monitor_work.work);

	netif_vdbg(efx, timer, efx->net_dev,
		   "hardware monitor executing on CPU %d\n",
		   raw_smp_processor_id());
	BUG_ON(efx->type->monitor == NULL);

	if (mutex_trylock(&efx->mac_lock)) {
		if (efx->port_enabled)
			efx->type->monitor(efx);
		mutex_unlock(&efx->mac_lock);
	}

	queue_delayed_work(efx->workqueue, &efx->monitor_work,
			   efx_monitor_interval);
}

static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (cmd == SIOCSHWTSTAMP)
		return efx_ptp_set_ts_config(efx, ifr);
	if (cmd == SIOCGHWTSTAMP)
		return efx_ptp_get_ts_config(efx, ifr);

	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
	    (data->phy_id & 0xfc00) == 0x0400)
		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;

	return mdio_mii_ioctl(&efx->mdio, data, cmd);
}
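
/*
 * Editorial note: the phy_id fix-up above converts a "fake" clause-22 PHY
 * address in the 0x400-0x4ff range (as used by some older MDIO tools) into
 * a proper clause-45 address: the single XOR clears bit 10 (0x0400) and
 * sets MDIO_PHY_ID_C45 at the same time, leaving the device/port bits
 * intact for mdio_mii_ioctl().
 */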

static void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

static void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

static void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

static void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}

int efx_net_open(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	int rc;

	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
		  raw_smp_processor_id());

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;
	if (efx->phy_mode & PHY_MODE_SPECIAL)
		return -EBUSY;
	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
		return -EIO;

	efx_link_status_changed(efx);

	efx_start_all(efx);
	if (efx->state == STATE_DISABLED || efx->reset_pending)
		netif_device_detach(efx->net_dev);
	efx_selftest_async_start(efx);
	return 0;
}

int efx_net_stop(struct net_device *net_dev)
{
	struct efx_nic *efx = netdev_priv(net_dev);

	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
		  raw_smp_processor_id());

	efx_stop_all(efx);

	return 0;
}
2257
2258
2259 static void efx_net_stats(struct net_device *net_dev,
2260 struct rtnl_link_stats64 *stats)
2261 {
2262 struct efx_nic *efx = netdev_priv(net_dev);
2263
2264 spin_lock_bh(&efx->stats_lock);
2265 efx->type->update_stats(efx, NULL, stats);
2266 spin_unlock_bh(&efx->stats_lock);
2267 }
2268
2269
2270 static void efx_watchdog(struct net_device *net_dev)
2271 {
2272 struct efx_nic *efx = netdev_priv(net_dev);
2273
2274 netif_err(efx, tx_err, efx->net_dev,
2275 "TX stuck with port_enabled=%d: resetting channels\n",
2276 efx->port_enabled);
2277
2278 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
2279 }
2280
2281
2282
2283 static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
2284 {
2285 struct efx_nic *efx = netdev_priv(net_dev);
2286 int rc;
2287
2288 rc = efx_check_disabled(efx);
2289 if (rc)
2290 return rc;
2291
2292 netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
2293
2294 efx_device_detach_sync(efx);
2295 efx_stop_all(efx);
2296
2297 mutex_lock(&efx->mac_lock);
2298 net_dev->mtu = new_mtu;
2299 efx_mac_reconfigure(efx);
2300 mutex_unlock(&efx->mac_lock);
2301
2302 efx_start_all(efx);
2303 efx_device_attach_if_not_resetting(efx);
2304 return 0;
2305 }
2306
2307 static int efx_set_mac_address(struct net_device *net_dev, void *data)
2308 {
2309 struct efx_nic *efx = netdev_priv(net_dev);
2310 struct sockaddr *addr = data;
2311 u8 *new_addr = addr->sa_data;
2312 u8 old_addr[6];
2313 int rc;
2314
2315 if (!is_valid_ether_addr(new_addr)) {
2316 netif_err(efx, drv, efx->net_dev,
2317 "invalid ethernet MAC address requested: %pM\n",
2318 new_addr);
2319 return -EADDRNOTAVAIL;
2320 }
2321
2322 	/* Save the old address so it can be restored on failure */
2323 ether_addr_copy(old_addr, net_dev->dev_addr);
2324 ether_addr_copy(net_dev->dev_addr, new_addr);
2325 if (efx->type->set_mac_address) {
2326 rc = efx->type->set_mac_address(efx);
2327 if (rc) {
2328 ether_addr_copy(net_dev->dev_addr, old_addr);
2329 return rc;
2330 }
2331 }
2332
2333 	/* Reconfigure the MAC */
2334 mutex_lock(&efx->mac_lock);
2335 efx_mac_reconfigure(efx);
2336 mutex_unlock(&efx->mac_lock);
2337
2338 return 0;
2339 }
2340
2341 /* Context: netif_addr_lock held, BHs disabled. */
2342 static void efx_set_rx_mode(struct net_device *net_dev)
2343 {
2344 struct efx_nic *efx = netdev_priv(net_dev);
2345
2346 if (efx->port_enabled)
2347 queue_work(efx->workqueue, &efx->mac_work);
2348
2349 }
2350
2351 static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
2352 {
2353 struct efx_nic *efx = netdev_priv(net_dev);
2354 int rc;
2355
2356 	/* If disabling RX n-tuple filtering, clear existing filters */
2357 if (net_dev->features & ~data & NETIF_F_NTUPLE) {
2358 rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
2359 if (rc)
2360 return rc;
2361 }
2362
2363 	/* If the RX VLAN filter or RX FCS settings changed, the MAC needs
2364 	 * reconfiguring so that the new settings take effect.
2365 	 */
2366 if ((net_dev->features ^ data) & (NETIF_F_HW_VLAN_CTAG_FILTER |
2367 NETIF_F_RXFCS)) {
2368 		/* efx_set_rx_mode() will schedule MAC work to update filters
2369 		 * when the new features are finally set in net_dev.
2370 		 */
2371 efx_set_rx_mode(net_dev);
2372 }
2373
2374 return 0;
2375 }
2376
2377 static int efx_get_phys_port_id(struct net_device *net_dev,
2378 struct netdev_phys_item_id *ppid)
2379 {
2380 struct efx_nic *efx = netdev_priv(net_dev);
2381
2382 if (efx->type->get_phys_port_id)
2383 return efx->type->get_phys_port_id(efx, ppid);
2384 else
2385 return -EOPNOTSUPP;
2386 }
2387
2388 static int efx_get_phys_port_name(struct net_device *net_dev,
2389 char *name, size_t len)
2390 {
2391 struct efx_nic *efx = netdev_priv(net_dev);
2392
2393 if (snprintf(name, len, "p%u", efx->port_num) >= len)
2394 return -EINVAL;
2395 return 0;
2396 }
2397
2398 static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
2399 {
2400 struct efx_nic *efx = netdev_priv(net_dev);
2401
2402 if (efx->type->vlan_rx_add_vid)
2403 return efx->type->vlan_rx_add_vid(efx, proto, vid);
2404 else
2405 return -EOPNOTSUPP;
2406 }
2407
2408 static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
2409 {
2410 struct efx_nic *efx = netdev_priv(net_dev);
2411
2412 if (efx->type->vlan_rx_kill_vid)
2413 return efx->type->vlan_rx_kill_vid(efx, proto, vid);
2414 else
2415 return -EOPNOTSUPP;
2416 }
2417
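/* Map the kernel's UDP tunnel type onto the MCDI encoding used by the
 * firmware, or -1 if the NIC has no equivalent.
 */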
2418 static int efx_udp_tunnel_type_map(enum udp_parsable_tunnel_type in)
2419 {
2420 switch (in) {
2421 case UDP_TUNNEL_TYPE_VXLAN:
2422 return TUNNEL_ENCAP_UDP_PORT_ENTRY_VXLAN;
2423 case UDP_TUNNEL_TYPE_GENEVE:
2424 return TUNNEL_ENCAP_UDP_PORT_ENTRY_GENEVE;
2425 default:
2426 return -1;
2427 }
2428 }
2429
2430 static void efx_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti)
2431 {
2432 struct efx_nic *efx = netdev_priv(dev);
2433 struct efx_udp_tunnel tnl;
2434 int efx_tunnel_type;
2435
2436 efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
2437 if (efx_tunnel_type < 0)
2438 return;
2439
2440 tnl.type = (u16)efx_tunnel_type;
2441 tnl.port = ti->port;
2442
2443 if (efx->type->udp_tnl_add_port)
2444 (void)efx->type->udp_tnl_add_port(efx, tnl);
2445 }
2446
2447 static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti)
2448 {
2449 struct efx_nic *efx = netdev_priv(dev);
2450 struct efx_udp_tunnel tnl;
2451 int efx_tunnel_type;
2452
2453 efx_tunnel_type = efx_udp_tunnel_type_map(ti->type);
2454 if (efx_tunnel_type < 0)
2455 return;
2456
2457 tnl.type = (u16)efx_tunnel_type;
2458 tnl.port = ti->port;
2459
2460 if (efx->type->udp_tnl_del_port)
2461 (void)efx->type->udp_tnl_del_port(efx, tnl);
2462 }
2463
2464 static const struct net_device_ops efx_netdev_ops = {
2465 .ndo_open = efx_net_open,
2466 .ndo_stop = efx_net_stop,
2467 .ndo_get_stats64 = efx_net_stats,
2468 .ndo_tx_timeout = efx_watchdog,
2469 .ndo_start_xmit = efx_hard_start_xmit,
2470 .ndo_validate_addr = eth_validate_addr,
2471 .ndo_do_ioctl = efx_ioctl,
2472 .ndo_change_mtu = efx_change_mtu,
2473 .ndo_set_mac_address = efx_set_mac_address,
2474 .ndo_set_rx_mode = efx_set_rx_mode,
2475 .ndo_set_features = efx_set_features,
2476 .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
2477 .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
2478 #ifdef CONFIG_SFC_SRIOV
2479 .ndo_set_vf_mac = efx_sriov_set_vf_mac,
2480 .ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
2481 .ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
2482 .ndo_get_vf_config = efx_sriov_get_vf_config,
2483 .ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
2484 #endif
2485 .ndo_get_phys_port_id = efx_get_phys_port_id,
2486 .ndo_get_phys_port_name = efx_get_phys_port_name,
2487 .ndo_setup_tc = efx_setup_tc,
2488 #ifdef CONFIG_RFS_ACCEL
2489 .ndo_rx_flow_steer = efx_filter_rfs,
2490 #endif
2491 .ndo_udp_tunnel_add = efx_udp_tunnel_add,
2492 .ndo_udp_tunnel_del = efx_udp_tunnel_del,
2493 };
2494
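/* Propagate a netdev rename into the driver's own name, the MTD
 * partition names and the per-channel IRQ names.
 */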
2495 static void efx_update_name(struct efx_nic *efx)
2496 {
2497 strcpy(efx->name, efx->net_dev->name);
2498 efx_mtd_rename(efx);
2499 efx_set_channel_names(efx);
2500 }
2501
2502 static int efx_netdev_event(struct notifier_block *this,
2503 unsigned long event, void *ptr)
2504 {
2505 struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2506
2507 if ((net_dev->netdev_ops == &efx_netdev_ops) &&
2508 event == NETDEV_CHANGENAME)
2509 efx_update_name(netdev_priv(net_dev));
2510
2511 return NOTIFY_DONE;
2512 }
2513
2514 static struct notifier_block efx_netdev_notifier = {
2515 .notifier_call = efx_netdev_event,
2516 };
2517
2518 static ssize_t
2519 show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
2520 {
2521 struct efx_nic *efx = dev_get_drvdata(dev);
2522 return sprintf(buf, "%d\n", efx->phy_type);
2523 }
2524 static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
2525
2526 #ifdef CONFIG_SFC_MCDI_LOGGING
2527 static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
2528 char *buf)
2529 {
2530 struct efx_nic *efx = dev_get_drvdata(dev);
2531 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
2532
2533 return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
2534 }
2535 static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
2536 const char *buf, size_t count)
2537 {
2538 struct efx_nic *efx = dev_get_drvdata(dev);
2539 struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
2540 bool enable = count > 0 && *buf != '0';
2541
2542 mcdi->logging_enabled = enable;
2543 return count;
2544 }
2545 static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
2546 #endif
2547
2548 static int efx_register_netdev(struct efx_nic *efx)
2549 {
2550 struct net_device *net_dev = efx->net_dev;
2551 struct efx_channel *channel;
2552 int rc;
2553
2554 net_dev->watchdog_timeo = 5 * HZ;
2555 net_dev->irq = efx->pci_dev->irq;
2556 net_dev->netdev_ops = &efx_netdev_ops;
2557 if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
2558 net_dev->priv_flags |= IFF_UNICAST_FLT;
2559 net_dev->ethtool_ops = &efx_ethtool_ops;
2560 net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2561 net_dev->min_mtu = EFX_MIN_MTU;
2562 net_dev->max_mtu = EFX_MAX_MTU;
2563
2564 rtnl_lock();
2565
2566 	/* Enable resets to be scheduled and check whether any were
2567 	 * already requested.  If so, the NIC is probably hosed so we
2568 	 * abort.
2569 	 */
2570 efx->state = STATE_READY;
2571 	smp_mb(); /* publish STATE_READY before reading reset_pending; pairs with efx_schedule_reset() */
2572 if (efx->reset_pending) {
2573 netif_err(efx, probe, efx->net_dev,
2574 "aborting probe due to scheduled reset\n");
2575 rc = -EIO;
2576 goto fail_locked;
2577 }
2578
2579 rc = dev_alloc_name(net_dev, net_dev->name);
2580 if (rc < 0)
2581 goto fail_locked;
2582 efx_update_name(efx);
2583
2584 	/* Always start with carrier off; PHY events will detect the link */
2585 netif_carrier_off(net_dev);
2586
2587 rc = register_netdevice(net_dev);
2588 if (rc)
2589 goto fail_locked;
2590
2591 efx_for_each_channel(channel, efx) {
2592 struct efx_tx_queue *tx_queue;
2593 efx_for_each_channel_tx_queue(tx_queue, channel)
2594 efx_init_tx_queue_core_txq(tx_queue);
2595 }
2596
2597 efx_associate(efx);
2598
2599 rtnl_unlock();
2600
2601 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2602 if (rc) {
2603 netif_err(efx, drv, efx->net_dev,
2604 "failed to init net dev attributes\n");
2605 goto fail_registered;
2606 }
2607 #ifdef CONFIG_SFC_MCDI_LOGGING
2608 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
2609 if (rc) {
2610 netif_err(efx, drv, efx->net_dev,
2611 "failed to init net dev attributes\n");
2612 goto fail_attr_mcdi_logging;
2613 }
2614 #endif
2615
2616 return 0;
2617
2618 #ifdef CONFIG_SFC_MCDI_LOGGING
2619 fail_attr_mcdi_logging:
2620 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2621 #endif
2622 fail_registered:
2623 rtnl_lock();
2624 efx_dissociate(efx);
2625 unregister_netdevice(net_dev);
2626 fail_locked:
2627 efx->state = STATE_UNINIT;
2628 rtnl_unlock();
2629 netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2630 return rc;
2631 }
2632
2633 static void efx_unregister_netdev(struct efx_nic *efx)
2634 {
2635 if (!efx->net_dev)
2636 return;
2637
2638 BUG_ON(netdev_priv(efx->net_dev) != efx);
2639
2640 if (efx_dev_registered(efx)) {
2641 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2642 #ifdef CONFIG_SFC_MCDI_LOGGING
2643 device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
2644 #endif
2645 device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2646 unregister_netdev(efx->net_dev);
2647 }
2648 }
2649
2650 /**************************************************************************
2651  *
2652  * Device reset and suspend
2653  *
2654  **************************************************************************/
2655 
2656 /* Tears down the entire software state and most of the hardware state
2657  * before reset.  */
2658 void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2659 {
2660 EFX_ASSERT_RESET_SERIALISED(efx);
2661
2662 if (method == RESET_TYPE_MCDI_TIMEOUT)
2663 efx->type->prepare_flr(efx);
2664
2665 efx_stop_all(efx);
2666 efx_disable_interrupts(efx);
2667
2668 mutex_lock(&efx->mac_lock);
2669 down_write(&efx->filter_sem);
2670 mutex_lock(&efx->rss_lock);
2671 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2672 method != RESET_TYPE_DATAPATH)
2673 efx->phy_op->fini(efx);
2674 efx->type->fini(efx);
2675 }
2676
2677
2678 /* This function will always ensure that the locks acquired in
2679  * efx_reset_down() are released.  The caller must hold the rtnl_lock.
2680  */
2681
2682 int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2683 {
2684 int rc;
2685
2686 EFX_ASSERT_RESET_SERIALISED(efx);
2687
2688 if (method == RESET_TYPE_MCDI_TIMEOUT)
2689 efx->type->finish_flr(efx);
2690
2691 	/* Ensure that SRAM is initialised even if we're disabling the device */
2692 rc = efx->type->init(efx);
2693 if (rc) {
2694 netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
2695 goto fail;
2696 }
2697
2698 if (!ok)
2699 goto fail;
2700
2701 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2702 method != RESET_TYPE_DATAPATH) {
2703 rc = efx->phy_op->init(efx);
2704 if (rc)
2705 goto fail;
2706 rc = efx->phy_op->reconfigure(efx);
2707 if (rc && rc != -EPERM)
2708 netif_err(efx, drv, efx->net_dev,
2709 "could not restore PHY settings\n");
2710 }
2711
2712 rc = efx_enable_interrupts(efx);
2713 if (rc)
2714 goto fail;
2715
2716 #ifdef CONFIG_SFC_SRIOV
2717 rc = efx->type->vswitching_restore(efx);
2718 if (rc)
2719 netif_warn(efx, probe, efx->net_dev,
2720 "failed to restore vswitching rc=%d;"
2721 " VFs may not function\n", rc);
2722 #endif
2723
2724 if (efx->type->rx_restore_rss_contexts)
2725 efx->type->rx_restore_rss_contexts(efx);
2726 mutex_unlock(&efx->rss_lock);
2727 efx->type->filter_table_restore(efx);
2728 up_write(&efx->filter_sem);
2729 if (efx->type->sriov_reset)
2730 efx->type->sriov_reset(efx);
2731
2732 mutex_unlock(&efx->mac_lock);
2733
2734 efx_start_all(efx);
2735
2736 if (efx->type->udp_tnl_push_ports)
2737 efx->type->udp_tnl_push_ports(efx);
2738
2739 return 0;
2740
2741 fail:
2742 efx->port_initialized = false;
2743
2744 mutex_unlock(&efx->rss_lock);
2745 up_write(&efx->filter_sem);
2746 mutex_unlock(&efx->mac_lock);
2747
2748 return rc;
2749 }
2750
2751 /* Reset the NIC using the specified method.  Note that the reset may
2752  * fail, in which case the card will be left in an unusable state.
2753  *
2754  * Caller must hold the rtnl_lock.
2755  */
2756 int efx_reset(struct efx_nic *efx, enum reset_type method)
2757 {
2758 int rc, rc2;
2759 bool disabled;
2760
2761 netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2762 RESET_TYPE(method));
2763
2764 efx_device_detach_sync(efx);
2765 efx_reset_down(efx, method);
2766
2767 rc = efx->type->reset(efx, method);
2768 if (rc) {
2769 netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
2770 goto out;
2771 }
2772
2773 	/* Clear flags for the scopes we covered: -(1 << (method + 1)) keeps
2774 	 * only the bits above 'method', so this reset and all lesser-scoped
2775 	 * pending resets are cleared in one step. */
2776 if (method < RESET_TYPE_MAX_METHOD)
2777 efx->reset_pending &= -(1 << (method + 1));
2778 else
2779 __clear_bit(method, &efx->reset_pending);
2780
2781 	/* Reinitialise bus-mastering, which may have been turned off before
2782 	 * the reset was scheduled.  This is still appropriate even in the
2783 	 * RESET_TYPE_DISABLE case, since the driver assumes the hardware can
2784 	 * recover from reset failures. */
2785 pci_set_master(efx->pci_dev);
2786
2787 out:
2788 	/* Leave the device stopped if necessary */
2789 disabled = rc ||
2790 method == RESET_TYPE_DISABLE ||
2791 method == RESET_TYPE_RECOVER_OR_DISABLE;
2792 rc2 = efx_reset_up(efx, method, !disabled);
2793 if (rc2) {
2794 disabled = true;
2795 if (!rc)
2796 rc = rc2;
2797 }
2798
2799 if (disabled) {
2800 dev_close(efx->net_dev);
2801 netif_err(efx, drv, efx->net_dev, "has been disabled\n");
2802 efx->state = STATE_DISABLED;
2803 } else {
2804 netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2805 efx_device_attach_if_not_resetting(efx);
2806 }
2807 return rc;
2808 }
2809
2810 /* Try recovery mechanisms.
2811  * For now only EEH is supported.
2812  * Returns 0 if the recovery mechanisms are unsuccessful.
2813  * Returns a non-zero value otherwise.
2814  */
2815 int efx_try_recovery(struct efx_nic *efx)
2816 {
2817 #ifdef CONFIG_EEH
2818 	/* A PCI error can occur and not be seen by EEH because nothing
2819 	 * happens on the PCI bus.  In this case the driver may fail and
2820 	 * schedule a 'recover or reset', leading to this recovery handler.
2821 	 * Manually call the eeh failure check function.
2822 	 */
2823 struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
2824 if (eeh_dev_check_failure(eehdev)) {
2825 		/* The EEH mechanisms will handle the error and reset the
2826 		 * device if necessary.
2827 		 */
2828 return 1;
2829 }
2830 #endif
2831 return 0;
2832 }
2833
2834 static void efx_wait_for_bist_end(struct efx_nic *efx)
2835 {
2836 int i;
2837
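	/* Poll for the MC reboot that marks the end of BIST mode */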
2838 for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
2839 if (efx_mcdi_poll_reboot(efx))
2840 goto out;
2841 msleep(BIST_WAIT_DELAY_MS);
2842 }
2843
2844 netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
2845 out:
2846 	/* Either way unset the BIST flag.  If we found no reboot we probably
2847 	 * missed it because of the other function's reset.
2848 	 */
2849 efx->mc_bist_for_other_fn = false;
2850 }
2851
2852 /* The worker thread exists so that code that cannot sleep can
2853  * schedule a reset for later.
2854  */
2855 static void efx_reset_work(struct work_struct *data)
2856 {
2857 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
2858 unsigned long pending;
2859 enum reset_type method;
2860
2861 pending = READ_ONCE(efx->reset_pending);
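	/* Service the most severe (highest-numbered) pending reset first */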
2862 method = fls(pending) - 1;
2863
2864 if (method == RESET_TYPE_MC_BIST)
2865 efx_wait_for_bist_end(efx);
2866
2867 if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2868 method == RESET_TYPE_RECOVER_OR_ALL) &&
2869 efx_try_recovery(efx))
2870 return;
2871
2872 if (!pending)
2873 return;
2874
2875 rtnl_lock();
2876
2877 	/* We checked the state earlier, but it could have changed
2878 	 * meanwhile; only reset while we are still READY.
2879 	 */
2880
2881 if (efx->state == STATE_READY)
2882 (void)efx_reset(efx, method);
2883
2884 rtnl_unlock();
2885 }
2886
2887 void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2888 {
2889 enum reset_type method;
2890
2891 if (efx->state == STATE_RECOVERY) {
2892 netif_dbg(efx, drv, efx->net_dev,
2893 "recovering: skip scheduling %s reset\n",
2894 RESET_TYPE(type));
2895 return;
2896 }
2897
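	/* 'type' is either already a reset method, which we schedule as-is,
	 * or a reset reason that the NIC type must first map to a method.
	 */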
2898 switch (type) {
2899 case RESET_TYPE_INVISIBLE:
2900 case RESET_TYPE_ALL:
2901 case RESET_TYPE_RECOVER_OR_ALL:
2902 case RESET_TYPE_WORLD:
2903 case RESET_TYPE_DISABLE:
2904 case RESET_TYPE_RECOVER_OR_DISABLE:
2905 case RESET_TYPE_DATAPATH:
2906 case RESET_TYPE_MC_BIST:
2907 case RESET_TYPE_MCDI_TIMEOUT:
2908 method = type;
2909 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2910 RESET_TYPE(method));
2911 break;
2912 default:
2913 method = efx->type->map_reset_reason(type);
2914 netif_dbg(efx, drv, efx->net_dev,
2915 "scheduling %s reset for %s\n",
2916 RESET_TYPE(method), RESET_TYPE(type));
2917 break;
2918 }
2919
2920 set_bit(method, &efx->reset_pending);
2921 	smp_mb(); /* ensure we change reset_pending before checking state */
2922 
2923 	/* If we're not READY then just leave the flags set as the cue to
2924 	 * abort probing or reschedule the reset when we become READY.
2925 	 */
2926 if (READ_ONCE(efx->state) != STATE_READY)
2927 return;
2928
2929 	/* efx_process_channel() will no longer read events once a reset
2930 	 * is scheduled, so switch back to polled MCDI completions. */
2931 efx_mcdi_mode_poll(efx);
2932
2933 queue_work(reset_workqueue, &efx->reset_work);
2934 }
2935
2936 /**************************************************************************
2937  *
2938  * List of NICs we support
2939  *
2940  **************************************************************************/
2941 
2942 /* PCI device ID table */
2943 static const struct pci_device_id efx_pci_table[] = {
2944 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),
2945 .driver_data = (unsigned long) &siena_a0_nic_type},
2946 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),
2947 .driver_data = (unsigned long) &siena_a0_nic_type},
2948 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),
2949 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2950 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),
2951 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2952 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),
2953 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2954 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1923),
2955 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2956 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0a03),
2957 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2958 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1a03),
2959 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2960 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0b03),
2961 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2962 {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1b03),
2963 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2964 {0}
2965 };
2966
2967 /**************************************************************************
2968  *
2969  * Dummy PHY/MAC operations
2970  *
2971  * Can be used for some unimplemented operations
2972  * Needed so all function pointers are valid and do not have to be tested
2973  * before use
2974  *
2975  **************************************************************************/
2976 int efx_port_dummy_op_int(struct efx_nic *efx)
2977 {
2978 return 0;
2979 }
2980 void efx_port_dummy_op_void(struct efx_nic *efx) {}
2981
2982 static bool efx_port_dummy_op_poll(struct efx_nic *efx)
2983 {
2984 return false;
2985 }
2986
2987 static const struct efx_phy_operations efx_dummy_phy_operations = {
2988 .init = efx_port_dummy_op_int,
2989 .reconfigure = efx_port_dummy_op_int,
2990 .poll = efx_port_dummy_op_poll,
2991 .fini = efx_port_dummy_op_void,
2992 };
2993
2994 /**************************************************************************
2995  *
2996  * Data housekeeping
2997  *
2998  **************************************************************************/
2999 
3000 /* This fills in the driver-invariant fields of efx_nic but does not
3001  * initialise any device state.
3002  */
3003 static int efx_init_struct(struct efx_nic *efx,
3004 struct pci_dev *pci_dev, struct net_device *net_dev)
3005 {
3006 int rc = -ENOMEM, i;
3007
3008 	/* Initialise common structures */
3009 INIT_LIST_HEAD(&efx->node);
3010 INIT_LIST_HEAD(&efx->secondary_list);
3011 spin_lock_init(&efx->biu_lock);
3012 #ifdef CONFIG_SFC_MTD
3013 INIT_LIST_HEAD(&efx->mtd_list);
3014 #endif
3015 INIT_WORK(&efx->reset_work, efx_reset_work);
3016 INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
3017 INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
3018 efx->pci_dev = pci_dev;
3019 efx->msg_enable = debug;
3020 efx->state = STATE_UNINIT;
3021 strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
3022
3023 efx->net_dev = net_dev;
3024 efx->rx_prefix_size = efx->type->rx_prefix_size;
3025 efx->rx_ip_align =
3026 NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
3027 efx->rx_packet_hash_offset =
3028 efx->type->rx_hash_offset - efx->type->rx_prefix_size;
3029 efx->rx_packet_ts_offset =
3030 efx->type->rx_ts_offset - efx->type->rx_prefix_size;
3031 INIT_LIST_HEAD(&efx->rss_context.list);
3032 mutex_init(&efx->rss_lock);
3033 spin_lock_init(&efx->stats_lock);
3034 efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
3035 efx->num_mac_stats = MC_CMD_MAC_NSTATS;
3036 BUILD_BUG_ON(MC_CMD_MAC_NSTATS - 1 != MC_CMD_MAC_GENERATION_END);
3037 mutex_init(&efx->mac_lock);
3038 #ifdef CONFIG_RFS_ACCEL
3039 mutex_init(&efx->rps_mutex);
3040 spin_lock_init(&efx->rps_hash_lock);
3041 	/* Failure to allocate is not fatal, but may degrade ARFS performance */
3042 efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
3043 sizeof(*efx->rps_hash_table), GFP_KERNEL);
3044 #endif
3045 efx->phy_op = &efx_dummy_phy_operations;
3046 efx->mdio.dev = net_dev;
3047 INIT_WORK(&efx->mac_work, efx_mac_work);
3048 init_waitqueue_head(&efx->flush_wq);
3049
3050 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
3051 efx->channel[i] = efx_alloc_channel(efx, i, NULL);
3052 if (!efx->channel[i])
3053 goto fail;
3054 efx->msi_context[i].efx = efx;
3055 efx->msi_context[i].index = i;
3056 }
3057
3058 	/* Higher numbered interrupt modes are less capable! */
3059 if (WARN_ON_ONCE(efx->type->max_interrupt_mode >
3060 efx->type->min_interrupt_mode)) {
3061 rc = -EIO;
3062 goto fail;
3063 }
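	/* Clamp the interrupt_mode module parameter to the range of modes
	 * this NIC supports (lower-numbered modes are more capable).
	 */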
3064 	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
3065 				  interrupt_mode);
3066 	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
3067 				  efx->interrupt_mode);
3068
3069 	/* Would be good to use the net_dev name, but we're too early */
3070 snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
3071 pci_name(pci_dev));
3072 efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
3073 if (!efx->workqueue)
3074 goto fail;
3075
3076 return 0;
3077
3078 fail:
3079 efx_fini_struct(efx);
3080 return rc;
3081 }
3082
3083 static void efx_fini_struct(struct efx_nic *efx)
3084 {
3085 int i;
3086
3087 #ifdef CONFIG_RFS_ACCEL
3088 kfree(efx->rps_hash_table);
3089 #endif
3090
3091 for (i = 0; i < EFX_MAX_CHANNELS; i++)
3092 kfree(efx->channel[i]);
3093
3094 kfree(efx->vpd_sn);
3095
3096 if (efx->workqueue) {
3097 destroy_workqueue(efx->workqueue);
3098 efx->workqueue = NULL;
3099 }
3100 }
3101
3102 void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
3103 {
3104 u64 n_rx_nodesc_trunc = 0;
3105 struct efx_channel *channel;
3106
3107 efx_for_each_channel(channel, efx)
3108 n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
3109 stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
3110 stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
3111 }
3112
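/* Filter specs are compared and hashed on their match fields only:
 * everything from outer_vid to the end of the struct is match data, so
 * a memcmp()/jhash2() over that tail, plus an explicit check of
 * match_flags and the RX/TX flags, fully identifies a filter.
 */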
3113 bool efx_filter_spec_equal(const struct efx_filter_spec *left,
3114 const struct efx_filter_spec *right)
3115 {
3116 if ((left->match_flags ^ right->match_flags) |
3117 ((left->flags ^ right->flags) &
3118 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
3119 return false;
3120
3121 return memcmp(&left->outer_vid, &right->outer_vid,
3122 sizeof(struct efx_filter_spec) -
3123 offsetof(struct efx_filter_spec, outer_vid)) == 0;
3124 }
3125
3126 u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
3127 {
3128 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
3129 return jhash2((const u32 *)&spec->outer_vid,
3130 (sizeof(struct efx_filter_spec) -
3131 offsetof(struct efx_filter_spec, outer_vid)) / 4,
3132 0);
3133 }
3134
3135 #ifdef CONFIG_RFS_ACCEL
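/* Decide whether an ARFS filter entry can be expired.  Returns false if
 * the rule is still being set up; returns true (and sets *force for
 * stale entries) if the corresponding hardware filter may be removed.
 */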
3136 bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
3137 bool *force)
3138 {
3139 if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
3140 		/* ARFS is currently updating this entry, leave it */
3141 return false;
3142 }
3143 if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
3144 		/* ARFS tried and failed to update this, so it's probably out
3145 		 * of date.  Remove the filter and the ARFS rule entry.
3146 		 */
3147 rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
3148 *force = true;
3149 return true;
3150 } else if (WARN_ON(rule->filter_id != filter_idx)) {
3151 		/* ARFS has moved on, so the old filter is not needed.  Since
3152 		 * we did not mark the rule with EFX_ARFS_FILTER_ID_REMOVING,
3153 		 * it will get a new filter if the flow is still active.
3154 		 */
3155 *force = true;
3156 return true;
3157 }
3158
3159 return true;
3160 }
3161
3162 static
3163 struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
3164 const struct efx_filter_spec *spec)
3165 {
3166 u32 hash = efx_filter_spec_hash(spec);
3167
3168 lockdep_assert_held(&efx->rps_hash_lock);
3169 if (!efx->rps_hash_table)
3170 return NULL;
3171 return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
3172 }
3173
3174 struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx,
3175 const struct efx_filter_spec *spec)
3176 {
3177 struct efx_arfs_rule *rule;
3178 struct hlist_head *head;
3179 struct hlist_node *node;
3180
3181 head = efx_rps_hash_bucket(efx, spec);
3182 if (!head)
3183 return NULL;
3184 hlist_for_each(node, head) {
3185 rule = container_of(node, struct efx_arfs_rule, node);
3186 if (efx_filter_spec_equal(spec, &rule->spec))
3187 return rule;
3188 }
3189 return NULL;
3190 }
3191
3192 struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
3193 const struct efx_filter_spec *spec,
3194 bool *new)
3195 {
3196 struct efx_arfs_rule *rule;
3197 struct hlist_head *head;
3198 struct hlist_node *node;
3199
3200 head = efx_rps_hash_bucket(efx, spec);
3201 if (!head)
3202 return NULL;
3203 hlist_for_each(node, head) {
3204 rule = container_of(node, struct efx_arfs_rule, node);
3205 if (efx_filter_spec_equal(spec, &rule->spec)) {
3206 *new = false;
3207 return rule;
3208 }
3209 }
3210 rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
3211 *new = true;
3212 if (rule) {
3213 memcpy(&rule->spec, spec, sizeof(rule->spec));
3214 hlist_add_head(&rule->node, head);
3215 }
3216 return rule;
3217 }
3218
3219 void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec)
3220 {
3221 struct efx_arfs_rule *rule;
3222 struct hlist_head *head;
3223 struct hlist_node *node;
3224
3225 head = efx_rps_hash_bucket(efx, spec);
3226 if (WARN_ON(!head))
3227 return;
3228 hlist_for_each(node, head) {
3229 rule = container_of(node, struct efx_arfs_rule, node);
3230 if (efx_filter_spec_equal(spec, &rule->spec)) {
3231 			/* If the entry no longer carries our REMOVING mark,
3232 			 * someone else has already reused it for a new filter;
3233 			 * in that case leave it alone.  Only an entry still
3234 			 * marked REMOVING is ours to delete.
3235 			 */
3237 if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
3238 return;
3239 hlist_del(node);
3240 kfree(rule);
3241 return;
3242 }
3243 }
3244 	/* We didn't find it */
3245 WARN_ON(1);
3246 }
3247 #endif
3248
3249 /* RSS contexts.  A linked list with O(n) lookups is fine here because
3250  * this is an infrequent control-plane operation and n is small.
3251  */
3252 struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
3253 {
3254 struct list_head *head = &efx->rss_context.list;
3255 struct efx_rss_context *ctx, *new;
3256 u32 id = 1;
3257
3258 WARN_ON(!mutex_is_locked(&efx->rss_lock));
3259
3260 	/* Search for first gap in the numbering */
3261 list_for_each_entry(ctx, head, list) {
3262 if (ctx->user_id != id)
3263 break;
3264 id++;
3265 		/* Check for wrap.  If this happens, we have nearly 2^32
3266 		 * allocated RSS contexts, which seems unlikely.
3267 		 */
3268 if (WARN_ON_ONCE(!id))
3269 return NULL;
3270 }
3271
3272 	/* Create the new entry */
3273 new = kmalloc(sizeof(struct efx_rss_context), GFP_KERNEL);
3274 if (!new)
3275 return NULL;
3276 new->context_id = EFX_EF10_RSS_CONTEXT_INVALID;
3277 new->rx_hash_udp_4tuple = false;
3278
3279 	/* Insert the new entry into the gap */
3280 new->user_id = id;
3281 list_add_tail(&new->list, &ctx->list);
3282 return new;
3283 }
3284
3285 struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
3286 {
3287 struct list_head *head = &efx->rss_context.list;
3288 struct efx_rss_context *ctx;
3289
3290 WARN_ON(!mutex_is_locked(&efx->rss_lock));
3291
3292 list_for_each_entry(ctx, head, list)
3293 if (ctx->user_id == id)
3294 return ctx;
3295 return NULL;
3296 }
3297
3298 void efx_free_rss_context_entry(struct efx_rss_context *ctx)
3299 {
3300 list_del(&ctx->list);
3301 kfree(ctx);
3302 }
3303
3304 /**************************************************************************
3305  *
3306  * PCI interface
3307  *
3308  **************************************************************************/
3309 
3310 /* Main body of final NIC shutdown code
3311  * This is called only at module unload (or hotplug removal).
3312  */
3313 static void efx_pci_remove_main(struct efx_nic *efx)
3314 {
3315 	/* Flush reset_work.  It can no longer be scheduled since we
3316 	 * are not READY.
3317 	 */
3318 BUG_ON(efx->state == STATE_READY);
3319 cancel_work_sync(&efx->reset_work);
3320
3321 efx_disable_interrupts(efx);
3322 efx_clear_interrupt_affinity(efx);
3323 efx_nic_fini_interrupt(efx);
3324 efx_fini_port(efx);
3325 efx->type->fini(efx);
3326 efx_fini_napi(efx);
3327 efx_remove_all(efx);
3328 }
3329
3330 /* Final NIC shutdown
3331  * This is called only at module unload (or hotplug removal).  A PF can
3332  * call this on its VFs to ensure they are unbound first.
3333  */
3334 static void efx_pci_remove(struct pci_dev *pci_dev)
3335 {
3336 struct efx_nic *efx;
3337
3338 efx = pci_get_drvdata(pci_dev);
3339 if (!efx)
3340 return;
3341
3342 	/* Mark the NIC as fini, then stop the interface */
3343 rtnl_lock();
3344 efx_dissociate(efx);
3345 dev_close(efx->net_dev);
3346 efx_disable_interrupts(efx);
3347 efx->state = STATE_UNINIT;
3348 rtnl_unlock();
3349
3350 if (efx->type->sriov_fini)
3351 efx->type->sriov_fini(efx);
3352
3353 efx_unregister_netdev(efx);
3354
3355 efx_mtd_remove(efx);
3356
3357 efx_pci_remove_main(efx);
3358
3359 efx_fini_io(efx);
3360 netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
3361
3362 efx_fini_struct(efx);
3363 free_netdev(efx->net_dev);
3364
3365 pci_disable_pcie_error_reporting(pci_dev);
3366 }
3367
3368 
3369 /* NIC VPD information
3370  * Called during probe to display the part number of the installed NIC.
3371  */
3373 #define SFC_VPD_LEN 512
3374 static void efx_probe_vpd_strings(struct efx_nic *efx)
3375 {
3376 struct pci_dev *dev = efx->pci_dev;
3377 char vpd_data[SFC_VPD_LEN];
3378 ssize_t vpd_size;
3379 int ro_start, ro_size, i, j;
3380
3381 	/* Get the raw VPD data */
3382 vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
3383 if (vpd_size <= 0) {
3384 netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
3385 return;
3386 }
3387
3388 	/* Get the Read only section */
3389 ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
3390 if (ro_start < 0) {
3391 netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
3392 return;
3393 }
3394
3395 ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
3396 j = ro_size;
3397 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
3398 if (i + j > vpd_size)
3399 j = vpd_size - i;
3400
3401 	/* Get the Part number */
3402 i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
3403 if (i < 0) {
3404 netif_err(efx, drv, efx->net_dev, "Part number not found\n");
3405 return;
3406 }
3407
3408 j = pci_vpd_info_field_size(&vpd_data[i]);
3409 i += PCI_VPD_INFO_FLD_HDR_SIZE;
3410 if (i + j > vpd_size) {
3411 netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
3412 return;
3413 }
3414
3415 netif_info(efx, drv, efx->net_dev,
3416 "Part Number : %.*s\n", j, &vpd_data[i]);
3417
3418 i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
3419 j = ro_size;
3420 i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
3421 if (i < 0) {
3422 netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
3423 return;
3424 }
3425
3426 j = pci_vpd_info_field_size(&vpd_data[i]);
3427 i += PCI_VPD_INFO_FLD_HDR_SIZE;
3428 if (i + j > vpd_size) {
3429 netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
3430 return;
3431 }
3432
3433 efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
3434 if (!efx->vpd_sn)
3435 return;
3436
3437 snprintf(efx->vpd_sn, j + 1, "%s", &vpd_data[i]);
3438 }
3439
3440
3441 /* Main body of NIC initialisation
3442  * This is called at module load (or hotplug insertion, theoretically).
3443  */
3444 static int efx_pci_probe_main(struct efx_nic *efx)
3445 {
3446 int rc;
3447
3448
3449 rc = efx_probe_all(efx);
3450 if (rc)
3451 goto fail1;
3452
3453 efx_init_napi(efx);
3454
3455 down_write(&efx->filter_sem);
3456 rc = efx->type->init(efx);
3457 up_write(&efx->filter_sem);
3458 if (rc) {
3459 netif_err(efx, probe, efx->net_dev,
3460 "failed to initialise NIC\n");
3461 goto fail3;
3462 }
3463
3464 rc = efx_init_port(efx);
3465 if (rc) {
3466 netif_err(efx, probe, efx->net_dev,
3467 "failed to initialise port\n");
3468 goto fail4;
3469 }
3470
3471 rc = efx_nic_init_interrupt(efx);
3472 if (rc)
3473 goto fail5;
3474
3475 efx_set_interrupt_affinity(efx);
3476 rc = efx_enable_interrupts(efx);
3477 if (rc)
3478 goto fail6;
3479
3480 return 0;
3481
3482 fail6:
3483 efx_clear_interrupt_affinity(efx);
3484 efx_nic_fini_interrupt(efx);
3485 fail5:
3486 efx_fini_port(efx);
3487 fail4:
3488 efx->type->fini(efx);
3489 fail3:
3490 efx_fini_napi(efx);
3491 efx_remove_all(efx);
3492 fail1:
3493 return rc;
3494 }
3495
3496 static int efx_pci_probe_post_io(struct efx_nic *efx)
3497 {
3498 struct net_device *net_dev = efx->net_dev;
3499 int rc = efx_pci_probe_main(efx);
3500
3501 if (rc)
3502 return rc;
3503
3504 if (efx->type->sriov_init) {
3505 rc = efx->type->sriov_init(efx);
3506 if (rc)
3507 netif_err(efx, probe, efx->net_dev,
3508 "SR-IOV can't be enabled rc %d\n", rc);
3509 }
3510
3511 	/* Determine netdevice features */
3512 net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
3513 NETIF_F_TSO | NETIF_F_RXCSUM | NETIF_F_RXALL);
3514 if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
3515 net_dev->features |= NETIF_F_TSO6;
3516 	/* Check whether device supports TSO */
3517 if (!efx->type->tso_versions || !efx->type->tso_versions(efx))
3518 net_dev->features &= ~NETIF_F_ALL_TSO;
3519
3520 net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
3521 NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
3522 NETIF_F_RXCSUM);
3523
3524 net_dev->hw_features |= net_dev->features & ~efx->fixed_features;
3525
3526 	/* Disable receiving frames with bad FCS, by default */
3527 net_dev->features &= ~NETIF_F_RXALL;
3528
3529 	/* Disable VLAN filtering by default.  It may be enforced if the
3530 	 * feature is fixed (i.e. VLAN filters are required to receive VLAN
3531 	 * tagged packets due to vPort restrictions).
3532 	 */
3533 net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
3534 net_dev->features |= efx->fixed_features;
3535
3536 rc = efx_register_netdev(efx);
3537 if (!rc)
3538 return 0;
3539
3540 efx_pci_remove_main(efx);
3541 return rc;
3542 }
3543
3544 /* NIC initialisation
3545  *
3546  * This is called at module load (or hotplug insertion, theoretically).
3547  * It sets up PCI mappings, resets the NIC, sets up and registers the
3548  * network device with the kernel and hooks the interrupt service
3549  * routine.  It does not prepare the device for transmission; this is
3550  * left to the first time one of the network interfaces is brought up
3551  * (i.e. efx_net_open).
3552  */
3553 static int efx_pci_probe(struct pci_dev *pci_dev,
3554 const struct pci_device_id *entry)
3555 {
3556 struct net_device *net_dev;
3557 struct efx_nic *efx;
3558 int rc;
3559
3560 	/* Allocate and initialise a struct net_device and struct efx_nic */
3561 net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
3562 EFX_MAX_RX_QUEUES);
3563 if (!net_dev)
3564 return -ENOMEM;
3565 efx = netdev_priv(net_dev);
3566 efx->type = (const struct efx_nic_type *) entry->driver_data;
3567 efx->fixed_features |= NETIF_F_HIGHDMA;
3568
3569 pci_set_drvdata(pci_dev, efx);
3570 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
3571 rc = efx_init_struct(efx, pci_dev, net_dev);
3572 if (rc)
3573 goto fail1;
3574
3575 netif_info(efx, probe, efx->net_dev,
3576 "Solarflare NIC detected\n");
3577
3578 if (!efx->type->is_vf)
3579 efx_probe_vpd_strings(efx);
3580
3581 	/* Set up basic I/O (BAR mappings etc) */
3582 rc = efx_init_io(efx);
3583 if (rc)
3584 goto fail2;
3585
3586 rc = efx_pci_probe_post_io(efx);
3587 if (rc) {
3588 		/* On failure, retry once immediately; the failure may be
3589 		 * transient (e.g. an MC reboot racing with the probe).
3590 		 */
3591 efx->reset_pending = 0;
3592 rc = efx_pci_probe_post_io(efx);
3593 if (rc) {
3594 			/* On another failure, retry once more after a
3595 			 * randomised 50-305ms delay, to avoid several
3596 			 * functions retrying in lockstep. */
3597 unsigned char r;
3598
3599 get_random_bytes(&r, 1);
3600 msleep((unsigned int)r + 50);
3601 efx->reset_pending = 0;
3602 rc = efx_pci_probe_post_io(efx);
3603 }
3604 }
3605 if (rc)
3606 goto fail3;
3607
3608 netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
3609
3610 	/* Try to create MTDs, but allow this to fail */
3611 rtnl_lock();
3612 rc = efx_mtd_probe(efx);
3613 rtnl_unlock();
3614 if (rc && rc != -EPERM)
3615 netif_warn(efx, probe, efx->net_dev,
3616 "failed to create MTDs (%d)\n", rc);
3617
3618 (void)pci_enable_pcie_error_reporting(pci_dev);
3619
3620 if (efx->type->udp_tnl_push_ports)
3621 efx->type->udp_tnl_push_ports(efx);
3622
3623 return 0;
3624
3625 fail3:
3626 efx_fini_io(efx);
3627 fail2:
3628 efx_fini_struct(efx);
3629 fail1:
3630 WARN_ON(rc > 0);
3631 netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
3632 free_netdev(net_dev);
3633 return rc;
3634 }
3635
3636
3637
3638
3639 #ifdef CONFIG_SFC_SRIOV
3640 static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
3641 {
3642 int rc;
3643 struct efx_nic *efx = pci_get_drvdata(dev);
3644
3645 if (efx->type->sriov_configure) {
3646 rc = efx->type->sriov_configure(efx, num_vfs);
3647 if (rc)
3648 return rc;
3649 else
3650 return num_vfs;
3651 } else
3652 return -EOPNOTSUPP;
3653 }
3654 #endif
3655
3656 static int efx_pm_freeze(struct device *dev)
3657 {
3658 struct efx_nic *efx = dev_get_drvdata(dev);
3659
3660 rtnl_lock();
3661
3662 if (efx->state != STATE_DISABLED) {
3663 efx->state = STATE_UNINIT;
3664
3665 efx_device_detach_sync(efx);
3666
3667 efx_stop_all(efx);
3668 efx_disable_interrupts(efx);
3669 }
3670
3671 rtnl_unlock();
3672
3673 return 0;
3674 }
3675
3676 static int efx_pm_thaw(struct device *dev)
3677 {
3678 int rc;
3679 struct efx_nic *efx = dev_get_drvdata(dev);
3680
3681 rtnl_lock();
3682
3683 if (efx->state != STATE_DISABLED) {
3684 rc = efx_enable_interrupts(efx);
3685 if (rc)
3686 goto fail;
3687
3688 mutex_lock(&efx->mac_lock);
3689 efx->phy_op->reconfigure(efx);
3690 mutex_unlock(&efx->mac_lock);
3691
3692 efx_start_all(efx);
3693
3694 efx_device_attach_if_not_resetting(efx);
3695
3696 efx->state = STATE_READY;
3697
3698 efx->type->resume_wol(efx);
3699 }
3700
3701 rtnl_unlock();
3702
3703 	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
3704 queue_work(reset_workqueue, &efx->reset_work);
3705
3706 return 0;
3707
3708 fail:
3709 rtnl_unlock();
3710
3711 return rc;
3712 }
3713
3714 static int efx_pm_poweroff(struct device *dev)
3715 {
3716 struct pci_dev *pci_dev = to_pci_dev(dev);
3717 struct efx_nic *efx = pci_get_drvdata(pci_dev);
3718
3719 efx->type->fini(efx);
3720
3721 efx->reset_pending = 0;
3722
3723 pci_save_state(pci_dev);
3724 return pci_set_power_state(pci_dev, PCI_D3hot);
3725 }
3726
3727 /* Used for both resume and restore */
3728 static int efx_pm_resume(struct device *dev)
3729 {
3730 struct pci_dev *pci_dev = to_pci_dev(dev);
3731 struct efx_nic *efx = pci_get_drvdata(pci_dev);
3732 int rc;
3733
3734 rc = pci_set_power_state(pci_dev, PCI_D0);
3735 if (rc)
3736 return rc;
3737 pci_restore_state(pci_dev);
3738 rc = pci_enable_device(pci_dev);
3739 if (rc)
3740 return rc;
3741 pci_set_master(efx->pci_dev);
3742 rc = efx->type->reset(efx, RESET_TYPE_ALL);
3743 if (rc)
3744 return rc;
3745 down_write(&efx->filter_sem);
3746 rc = efx->type->init(efx);
3747 up_write(&efx->filter_sem);
3748 if (rc)
3749 return rc;
3750 rc = efx_pm_thaw(dev);
3751 return rc;
3752 }
3753
3754 static int efx_pm_suspend(struct device *dev)
3755 {
3756 int rc;
3757
3758 efx_pm_freeze(dev);
3759 rc = efx_pm_poweroff(dev);
3760 if (rc)
3761 efx_pm_resume(dev);
3762 return rc;
3763 }
3764
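/* suspend = freeze + poweroff, and resume undoes both; .restore reuses
 * the full efx_pm_resume path since the device may have lost power.
 */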
3765 static const struct dev_pm_ops efx_pm_ops = {
3766 .suspend = efx_pm_suspend,
3767 .resume = efx_pm_resume,
3768 .freeze = efx_pm_freeze,
3769 .thaw = efx_pm_thaw,
3770 .poweroff = efx_pm_poweroff,
3771 .restore = efx_pm_resume,
3772 };
3773
3774 /* A PCI error affecting this device was detected.
3775  * At this point MMIO and DMA may be disabled.
3776  * Stop the software path and request a slot reset.
3777  */
3778 static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
3779 enum pci_channel_state state)
3780 {
3781 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3782 struct efx_nic *efx = pci_get_drvdata(pdev);
3783
3784 if (state == pci_channel_io_perm_failure)
3785 return PCI_ERS_RESULT_DISCONNECT;
3786
3787 rtnl_lock();
3788
3789 if (efx->state != STATE_DISABLED) {
3790 efx->state = STATE_RECOVERY;
3791 efx->reset_pending = 0;
3792
3793 efx_device_detach_sync(efx);
3794
3795 efx_stop_all(efx);
3796 efx_disable_interrupts(efx);
3797
3798 status = PCI_ERS_RESULT_NEED_RESET;
3799 } else {
3800 		/* If the interface is disabled we don't want to do anything
3801 		 * with it.
3802 		 */
3803 status = PCI_ERS_RESULT_RECOVERED;
3804 }
3805
3806 rtnl_unlock();
3807
3808 pci_disable_device(pdev);
3809
3810 return status;
3811 }
3812
3813 /* Fake a successful reset, which will be performed later in efx_io_resume. */
3814 static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
3815 {
3816 struct efx_nic *efx = pci_get_drvdata(pdev);
3817 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3818
3819 if (pci_enable_device(pdev)) {
3820 netif_err(efx, hw, efx->net_dev,
3821 "Cannot re-enable PCI device after reset.\n");
3822 status = PCI_ERS_RESULT_DISCONNECT;
3823 }
3824
3825 return status;
3826 }
3827
3828 /* Perform the actual reset and resume I/O operations. */
3829 static void efx_io_resume(struct pci_dev *pdev)
3830 {
3831 struct efx_nic *efx = pci_get_drvdata(pdev);
3832 int rc;
3833
3834 rtnl_lock();
3835
3836 if (efx->state == STATE_DISABLED)
3837 goto out;
3838
3839 rc = efx_reset(efx, RESET_TYPE_ALL);
3840 if (rc) {
3841 netif_err(efx, hw, efx->net_dev,
3842 "efx_reset failed after PCI error (%d)\n", rc);
3843 } else {
3844 efx->state = STATE_READY;
3845 netif_dbg(efx, hw, efx->net_dev,
3846 "Done resetting and resuming IO after PCI error.\n");
3847 }
3848
3849 out:
3850 rtnl_unlock();
3851 }
3852
3853 /* For simplicity and reliability, we always require a slot reset and
3854  * try to reset the hardware when a PCI error affecting the device is
3855  * detected.  The link_reset and mmio_enabled callbacks are left
3856  * unimplemented: with a slot reset requested, mmio_enabled will never
3857  * be called, and link_reset is not used by the AER/EEH mechanisms.
3858  */
3859 static const struct pci_error_handlers efx_err_handlers = {
3860 .error_detected = efx_io_error_detected,
3861 .slot_reset = efx_io_slot_reset,
3862 .resume = efx_io_resume,
3863 };
3864
3865 static struct pci_driver efx_pci_driver = {
3866 .name = KBUILD_MODNAME,
3867 .id_table = efx_pci_table,
3868 .probe = efx_pci_probe,
3869 .remove = efx_pci_remove,
3870 .driver.pm = &efx_pm_ops,
3871 .err_handler = &efx_err_handlers,
3872 #ifdef CONFIG_SFC_SRIOV
3873 .sriov_configure = efx_pci_sriov_configure,
3874 #endif
3875 };
3876
3877 /**************************************************************************
3878  *
3879  * Kernel module interface
3880  *
3881  *************************************************************************/
3882 
3883 module_param(interrupt_mode, uint, 0444);
3884 MODULE_PARM_DESC(interrupt_mode,
3885 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
3886
3887 static int __init efx_init_module(void)
3888 {
3889 int rc;
3890
3891 printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
3892
3893 rc = register_netdevice_notifier(&efx_netdev_notifier);
3894 if (rc)
3895 goto err_notifier;
3896
3897 #ifdef CONFIG_SFC_SRIOV
3898 rc = efx_init_sriov();
3899 if (rc)
3900 goto err_sriov;
3901 #endif
3902
3903 reset_workqueue = create_singlethread_workqueue("sfc_reset");
3904 if (!reset_workqueue) {
3905 rc = -ENOMEM;
3906 goto err_reset;
3907 }
3908
3909 rc = pci_register_driver(&efx_pci_driver);
3910 if (rc < 0)
3911 goto err_pci;
3912
3913 return 0;
3914
3915 err_pci:
3916 destroy_workqueue(reset_workqueue);
3917 err_reset:
3918 #ifdef CONFIG_SFC_SRIOV
3919 efx_fini_sriov();
3920 err_sriov:
3921 #endif
3922 unregister_netdevice_notifier(&efx_netdev_notifier);
3923 err_notifier:
3924 return rc;
3925 }
3926
3927 static void __exit efx_exit_module(void)
3928 {
3929 printk(KERN_INFO "Solarflare NET driver unloading\n");
3930
3931 pci_unregister_driver(&efx_pci_driver);
3932 destroy_workqueue(reset_workqueue);
3933 #ifdef CONFIG_SFC_SRIOV
3934 efx_fini_sriov();
3935 #endif
3936 unregister_netdevice_notifier(&efx_netdev_notifier);
3938 }
3939
3940 module_init(efx_init_module);
3941 module_exit(efx_exit_module);
3942
3943 MODULE_AUTHOR("Solarflare Communications and "
3944 "Michael Brown <mbrown@fensystems.co.uk>");
3945 MODULE_DESCRIPTION("Solarflare network driver");
3946 MODULE_LICENSE("GPL");
3947 MODULE_DEVICE_TABLE(pci, efx_pci_table);
3948 MODULE_VERSION(EFX_DRIVER_VERSION);