1/****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2005-2013 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#include <linux/module.h>
12#include <linux/pci.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/delay.h>
16#include <linux/notifier.h>
17#include <linux/ip.h>
18#include <linux/tcp.h>
19#include <linux/in.h>
20#include <linux/ethtool.h>
21#include <linux/topology.h>
22#include <linux/gfp.h>
23#include <linux/aer.h>
24#include <linux/interrupt.h>
25#include "net_driver.h"
26#include "efx.h"
27#include "nic.h"
28#include "selftest.h"
29#include "sriov.h"
30
31#include "mcdi.h"
32#include "workarounds.h"
33
34/**************************************************************************
35 *
36 * Type name strings
37 *
38 **************************************************************************
39 */
40
41/* Loopback mode names (see LOOPBACK_MODE()) */
42const unsigned int efx_loopback_mode_max = LOOPBACK_MAX;
43const char *const efx_loopback_mode_names[] = {
44	[LOOPBACK_NONE]		= "NONE",
45	[LOOPBACK_DATA]		= "DATAPATH",
46	[LOOPBACK_GMAC]		= "GMAC",
47	[LOOPBACK_XGMII]	= "XGMII",
48	[LOOPBACK_XGXS]		= "XGXS",
49	[LOOPBACK_XAUI]		= "XAUI",
50	[LOOPBACK_GMII]		= "GMII",
51	[LOOPBACK_SGMII]	= "SGMII",
52	[LOOPBACK_XGBR]		= "XGBR",
53	[LOOPBACK_XFI]		= "XFI",
54	[LOOPBACK_XAUI_FAR]	= "XAUI_FAR",
55	[LOOPBACK_GMII_FAR]	= "GMII_FAR",
56	[LOOPBACK_SGMII_FAR]	= "SGMII_FAR",
57	[LOOPBACK_XFI_FAR]	= "XFI_FAR",
58	[LOOPBACK_GPHY]		= "GPHY",
59	[LOOPBACK_PHYXS]	= "PHYXS",
60	[LOOPBACK_PCS]		= "PCS",
61	[LOOPBACK_PMAPMD]	= "PMA/PMD",
62	[LOOPBACK_XPORT]	= "XPORT",
63	[LOOPBACK_XGMII_WS]	= "XGMII_WS",
64	[LOOPBACK_XAUI_WS]	= "XAUI_WS",
65	[LOOPBACK_XAUI_WS_FAR]  = "XAUI_WS_FAR",
66	[LOOPBACK_XAUI_WS_NEAR] = "XAUI_WS_NEAR",
67	[LOOPBACK_GMII_WS]	= "GMII_WS",
68	[LOOPBACK_XFI_WS]	= "XFI_WS",
69	[LOOPBACK_XFI_WS_FAR]	= "XFI_WS_FAR",
70	[LOOPBACK_PHYXS_WS]	= "PHYXS_WS",
71};
72
73const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
74const char *const efx_reset_type_names[] = {
75	[RESET_TYPE_INVISIBLE]          = "INVISIBLE",
76	[RESET_TYPE_ALL]                = "ALL",
77	[RESET_TYPE_RECOVER_OR_ALL]     = "RECOVER_OR_ALL",
78	[RESET_TYPE_WORLD]              = "WORLD",
79	[RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
80	[RESET_TYPE_DATAPATH]           = "DATAPATH",
81	[RESET_TYPE_MC_BIST]		= "MC_BIST",
82	[RESET_TYPE_DISABLE]            = "DISABLE",
83	[RESET_TYPE_TX_WATCHDOG]        = "TX_WATCHDOG",
84	[RESET_TYPE_INT_ERROR]          = "INT_ERROR",
85	[RESET_TYPE_RX_RECOVERY]        = "RX_RECOVERY",
86	[RESET_TYPE_DMA_ERROR]          = "DMA_ERROR",
87	[RESET_TYPE_TX_SKIP]            = "TX_SKIP",
88	[RESET_TYPE_MC_FAILURE]         = "MC_FAILURE",
89	[RESET_TYPE_MCDI_TIMEOUT]	= "MCDI_TIMEOUT (FLR)",
90};
91
92/* Reset workqueue. If any NIC has a hardware failure then a reset will be
93 * queued onto this work queue. This is not a per-nic work queue, because
94 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
95 */
96static struct workqueue_struct *reset_workqueue;
97
98/* How often and how many times to poll for a reset while waiting for a
99 * BIST that another function started to complete.
100 */
101#define BIST_WAIT_DELAY_MS	100
102#define BIST_WAIT_DELAY_COUNT	100
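/* Together these give a maximum wait of roughly ten seconds
 * (100 polls at 100 ms intervals) for the other function's BIST to finish.
 */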
103
104/**************************************************************************
105 *
106 * Configurable values
107 *
108 *************************************************************************/
109
110/*
111 * Use separate channels for TX and RX events
112 *
113 * Set this to 1 to use separate channels for TX and RX. It allows us
114 * to control interrupt affinity separately for TX and RX.
115 *
116 * This is only used in MSI-X interrupt mode
117 */
118bool efx_separate_tx_channels;
119module_param(efx_separate_tx_channels, bool, 0444);
120MODULE_PARM_DESC(efx_separate_tx_channels,
121		 "Use separate channels for TX and RX");
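/* For example, assuming the driver is loaded as the usual "sfc" module:
 *   modprobe sfc efx_separate_tx_channels=1
 */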
122
123/* This is the weight assigned to each of the (per-channel) virtual
124 * NAPI devices.
125 */
126static int napi_weight = 64;
127
128/* This is the time (in jiffies) between invocations of the hardware
129 * monitor.
130 * On Falcon-based NICs, this will:
131 * - Check the on-board hardware monitor;
132 * - Poll the link state and reconfigure the hardware as necessary.
 * On Siena-based NICs for POWER (pSeries) systems with EEH support, this will
 * give EEH a chance to start.
135 */
136static unsigned int efx_monitor_interval = 1 * HZ;
137
138/* Initial interrupt moderation settings.  They can be modified after
139 * module load with ethtool.
140 *
141 * The default for RX should strike a balance between increasing the
142 * round-trip latency and reducing overhead.
143 */
144static unsigned int rx_irq_mod_usec = 60;
145
146/* Initial interrupt moderation settings.  They can be modified after
147 * module load with ethtool.
148 *
 * This default is chosen to ensure that a 10G link does not go idle
 * while a TX queue is stopped after it has become full.  A queue is
 * restarted when it drops below half full.  The time this takes (assuming
 * worst case 3 descriptors per packet, 1024 descriptors, and roughly
 * 1.2 usec to transmit a full-sized packet at 10Gb/s) is
 *   512 / 3 * 1.2 = 205 usec.
154 */
155static unsigned int tx_irq_mod_usec = 150;
156
157/* This is the first interrupt mode to try out of:
158 * 0 => MSI-X
159 * 1 => MSI
160 * 2 => legacy
161 */
162static unsigned int interrupt_mode;
163
164/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
165 * i.e. the number of CPUs among which we may distribute simultaneous
166 * interrupt handling.
167 *
168 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
169 * The default (0) means to assign an interrupt to each core.
170 */
171static unsigned int rss_cpus;
172module_param(rss_cpus, uint, 0444);
173MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
174
175static bool phy_flash_cfg;
176module_param(phy_flash_cfg, bool, 0644);
177MODULE_PARM_DESC(phy_flash_cfg, "Set PHYs into reflash mode initially");
178
179static unsigned irq_adapt_low_thresh = 8000;
180module_param(irq_adapt_low_thresh, uint, 0644);
181MODULE_PARM_DESC(irq_adapt_low_thresh,
182		 "Threshold score for reducing IRQ moderation");
183
184static unsigned irq_adapt_high_thresh = 16000;
185module_param(irq_adapt_high_thresh, uint, 0644);
186MODULE_PARM_DESC(irq_adapt_high_thresh,
187		 "Threshold score for increasing IRQ moderation");
188
189static unsigned debug = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
190			 NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
191			 NETIF_MSG_IFUP | NETIF_MSG_RX_ERR |
192			 NETIF_MSG_TX_ERR | NETIF_MSG_HW);
193module_param(debug, uint, 0);
194MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
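/* The individual bits are the NETIF_MSG_* values defined in
 * <linux/netdevice.h>; the default above enables the categories most
 * useful for diagnosing probe, link and hardware errors.
 */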
195
196/**************************************************************************
197 *
198 * Utility functions and prototypes
199 *
200 *************************************************************************/
201
202static int efx_soft_enable_interrupts(struct efx_nic *efx);
203static void efx_soft_disable_interrupts(struct efx_nic *efx);
204static void efx_remove_channel(struct efx_channel *channel);
205static void efx_remove_channels(struct efx_nic *efx);
206static const struct efx_channel_type efx_default_channel_type;
207static void efx_remove_port(struct efx_nic *efx);
208static void efx_init_napi_channel(struct efx_channel *channel);
209static void efx_fini_napi(struct efx_nic *efx);
210static void efx_fini_napi_channel(struct efx_channel *channel);
211static void efx_fini_struct(struct efx_nic *efx);
212static void efx_start_all(struct efx_nic *efx);
213static void efx_stop_all(struct efx_nic *efx);
214
215#define EFX_ASSERT_RESET_SERIALISED(efx)		\
216	do {						\
217		if ((efx->state == STATE_READY) ||	\
218		    (efx->state == STATE_RECOVERY) ||	\
219		    (efx->state == STATE_DISABLED))	\
220			ASSERT_RTNL();			\
221	} while (0)
222
223static int efx_check_disabled(struct efx_nic *efx)
224{
225	if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
226		netif_err(efx, drv, efx->net_dev,
227			  "device is disabled due to earlier errors\n");
228		return -EIO;
229	}
230	return 0;
231}
232
233/**************************************************************************
234 *
235 * Event queue processing
236 *
237 *************************************************************************/
238
239/* Process channel's event queue
240 *
241 * This function is responsible for processing the event queue of a
242 * single channel.  The caller must guarantee that this function will
 * never be called concurrently more than once on the same channel,
 * though different channels may be processed concurrently.
245 */
246static int efx_process_channel(struct efx_channel *channel, int budget)
247{
248	struct efx_tx_queue *tx_queue;
249	int spent;
250
251	if (unlikely(!channel->enabled))
252		return 0;
253
254	efx_for_each_channel_tx_queue(tx_queue, channel) {
255		tx_queue->pkts_compl = 0;
256		tx_queue->bytes_compl = 0;
257	}
258
259	spent = efx_nic_process_eventq(channel, budget);
260	if (spent && efx_channel_has_rx_queue(channel)) {
261		struct efx_rx_queue *rx_queue =
262			efx_channel_get_rx_queue(channel);
263
264		efx_rx_flush_packet(channel);
265		efx_fast_push_rx_descriptors(rx_queue, true);
266	}
267
268	/* Update BQL */
269	efx_for_each_channel_tx_queue(tx_queue, channel) {
270		if (tx_queue->bytes_compl) {
271			netdev_tx_completed_queue(tx_queue->core_txq,
272				tx_queue->pkts_compl, tx_queue->bytes_compl);
273		}
274	}
275
276	return spent;
277}
278
279/* NAPI poll handler
280 *
281 * NAPI guarantees serialisation of polls of the same device, which
282 * provides the guarantee required by efx_process_channel().
283 */
284static int efx_poll(struct napi_struct *napi, int budget)
285{
286	struct efx_channel *channel =
287		container_of(napi, struct efx_channel, napi_str);
288	struct efx_nic *efx = channel->efx;
289	int spent;
290
291	if (!efx_channel_lock_napi(channel))
292		return budget;
293
294	netif_vdbg(efx, intr, efx->net_dev,
295		   "channel %d NAPI poll executing on CPU %d\n",
296		   channel->channel, raw_smp_processor_id());
297
298	spent = efx_process_channel(channel, budget);
299
300	if (spent < budget) {
301		if (efx_channel_has_rx_queue(channel) &&
302		    efx->irq_rx_adaptive &&
303		    unlikely(++channel->irq_count == 1000)) {
304			if (unlikely(channel->irq_mod_score <
305				     irq_adapt_low_thresh)) {
306				if (channel->irq_moderation > 1) {
307					channel->irq_moderation -= 1;
308					efx->type->push_irq_moderation(channel);
309				}
310			} else if (unlikely(channel->irq_mod_score >
311					    irq_adapt_high_thresh)) {
312				if (channel->irq_moderation <
313				    efx->irq_rx_moderation) {
314					channel->irq_moderation += 1;
315					efx->type->push_irq_moderation(channel);
316				}
317			}
318			channel->irq_count = 0;
319			channel->irq_mod_score = 0;
320		}
321
322		efx_filter_rfs_expire(channel);
323
324		/* There is no race here; although napi_disable() will
325		 * only wait for napi_complete(), this isn't a problem
326		 * since efx_nic_eventq_read_ack() will have no effect if
327		 * interrupts have already been disabled.
328		 */
329		napi_complete(napi);
330		efx_nic_eventq_read_ack(channel);
331	}
332
333	efx_channel_unlock_napi(channel);
334	return spent;
335}
336
337/* Create event queue
338 * Event queue memory allocations are done only once.  If the channel
339 * is reset, the memory buffer will be reused; this guards against
340 * errors during channel reset and also simplifies interrupt handling.
341 */
342static int efx_probe_eventq(struct efx_channel *channel)
343{
344	struct efx_nic *efx = channel->efx;
345	unsigned long entries;
346
347	netif_dbg(efx, probe, efx->net_dev,
348		  "chan %d create event queue\n", channel->channel);
349
350	/* Build an event queue with room for one event per tx and rx buffer,
351	 * plus some extra for link state events and MCDI completions. */
352	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
353	EFX_BUG_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
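	/* The rounded-up queue size is a power of two, so (size - 1) can
	 * be used directly as the ring index mask.
	 */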
354	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;
355
356	return efx_nic_probe_eventq(channel);
357}
358
359/* Prepare channel's event queue */
360static int efx_init_eventq(struct efx_channel *channel)
361{
362	struct efx_nic *efx = channel->efx;
363	int rc;
364
365	EFX_WARN_ON_PARANOID(channel->eventq_init);
366
367	netif_dbg(efx, drv, efx->net_dev,
368		  "chan %d init event queue\n", channel->channel);
369
370	rc = efx_nic_init_eventq(channel);
371	if (rc == 0) {
372		efx->type->push_irq_moderation(channel);
373		channel->eventq_read_ptr = 0;
374		channel->eventq_init = true;
375	}
376	return rc;
377}
378
379/* Enable event queue processing and NAPI */
380void efx_start_eventq(struct efx_channel *channel)
381{
382	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
383		  "chan %d start event queue\n", channel->channel);
384
385	/* Make sure the NAPI handler sees the enabled flag set */
386	channel->enabled = true;
387	smp_wmb();
388
389	efx_channel_enable(channel);
390	napi_enable(&channel->napi_str);
391	efx_nic_eventq_read_ack(channel);
392}
393
394/* Disable event queue processing and NAPI */
395void efx_stop_eventq(struct efx_channel *channel)
396{
397	if (!channel->enabled)
398		return;
399
400	napi_disable(&channel->napi_str);
401	while (!efx_channel_disable(channel))
402		usleep_range(1000, 20000);
403	channel->enabled = false;
404}
405
406static void efx_fini_eventq(struct efx_channel *channel)
407{
408	if (!channel->eventq_init)
409		return;
410
411	netif_dbg(channel->efx, drv, channel->efx->net_dev,
412		  "chan %d fini event queue\n", channel->channel);
413
414	efx_nic_fini_eventq(channel);
415	channel->eventq_init = false;
416}
417
418static void efx_remove_eventq(struct efx_channel *channel)
419{
420	netif_dbg(channel->efx, drv, channel->efx->net_dev,
421		  "chan %d remove event queue\n", channel->channel);
422
423	efx_nic_remove_eventq(channel);
424}
425
426/**************************************************************************
427 *
428 * Channel handling
429 *
430 *************************************************************************/
431
432/* Allocate and initialise a channel structure. */
433static struct efx_channel *
434efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
435{
436	struct efx_channel *channel;
437	struct efx_rx_queue *rx_queue;
438	struct efx_tx_queue *tx_queue;
439	int j;
440
441	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
442	if (!channel)
443		return NULL;
444
445	channel->efx = efx;
446	channel->channel = i;
447	channel->type = &efx_default_channel_type;
448
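	/* Each channel owns EFX_TXQ_TYPES hardware TX queues, numbered
	 * consecutively from channel 0 upwards.
	 */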
449	for (j = 0; j < EFX_TXQ_TYPES; j++) {
450		tx_queue = &channel->tx_queue[j];
451		tx_queue->efx = efx;
452		tx_queue->queue = i * EFX_TXQ_TYPES + j;
453		tx_queue->channel = channel;
454	}
455
456	rx_queue = &channel->rx_queue;
457	rx_queue->efx = efx;
458	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
459		    (unsigned long)rx_queue);
460
461	return channel;
462}
463
464/* Allocate and initialise a channel structure, copying parameters
465 * (but not resources) from an old channel structure.
466 */
467static struct efx_channel *
468efx_copy_channel(const struct efx_channel *old_channel)
469{
470	struct efx_channel *channel;
471	struct efx_rx_queue *rx_queue;
472	struct efx_tx_queue *tx_queue;
473	int j;
474
475	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
476	if (!channel)
477		return NULL;
478
479	*channel = *old_channel;
480
481	channel->napi_dev = NULL;
482	memset(&channel->eventq, 0, sizeof(channel->eventq));
483
484	for (j = 0; j < EFX_TXQ_TYPES; j++) {
485		tx_queue = &channel->tx_queue[j];
486		if (tx_queue->channel)
487			tx_queue->channel = channel;
488		tx_queue->buffer = NULL;
489		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
490	}
491
492	rx_queue = &channel->rx_queue;
493	rx_queue->buffer = NULL;
494	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
495	setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
496		    (unsigned long)rx_queue);
497
498	return channel;
499}
500
501static int efx_probe_channel(struct efx_channel *channel)
502{
503	struct efx_tx_queue *tx_queue;
504	struct efx_rx_queue *rx_queue;
505	int rc;
506
507	netif_dbg(channel->efx, probe, channel->efx->net_dev,
508		  "creating channel %d\n", channel->channel);
509
510	rc = channel->type->pre_probe(channel);
511	if (rc)
512		goto fail;
513
514	rc = efx_probe_eventq(channel);
515	if (rc)
516		goto fail;
517
518	efx_for_each_channel_tx_queue(tx_queue, channel) {
519		rc = efx_probe_tx_queue(tx_queue);
520		if (rc)
521			goto fail;
522	}
523
524	efx_for_each_channel_rx_queue(rx_queue, channel) {
525		rc = efx_probe_rx_queue(rx_queue);
526		if (rc)
527			goto fail;
528	}
529
530	return 0;
531
532fail:
533	efx_remove_channel(channel);
534	return rc;
535}
536
537static void
538efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
539{
540	struct efx_nic *efx = channel->efx;
541	const char *type;
542	int number;
543
544	number = channel->channel;
545	if (efx->tx_channel_offset == 0) {
546		type = "";
547	} else if (channel->channel < efx->tx_channel_offset) {
548		type = "-rx";
549	} else {
550		type = "-tx";
551		number -= efx->tx_channel_offset;
552	}
553	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
554}
555
556static void efx_set_channel_names(struct efx_nic *efx)
557{
558	struct efx_channel *channel;
559
560	efx_for_each_channel(channel, efx)
561		channel->type->get_name(channel,
562					efx->msi_context[channel->channel].name,
563					sizeof(efx->msi_context[0].name));
564}
565
566static int efx_probe_channels(struct efx_nic *efx)
567{
568	struct efx_channel *channel;
569	int rc;
570
571	/* Restart special buffer allocation */
572	efx->next_buffer_table = 0;
573
574	/* Probe channels in reverse, so that any 'extra' channels
575	 * use the start of the buffer table. This allows the traffic
576	 * channels to be resized without moving them or wasting the
577	 * entries before them.
578	 */
579	efx_for_each_channel_rev(channel, efx) {
580		rc = efx_probe_channel(channel);
581		if (rc) {
582			netif_err(efx, probe, efx->net_dev,
583				  "failed to create channel %d\n",
584				  channel->channel);
585			goto fail;
586		}
587	}
588	efx_set_channel_names(efx);
589
590	return 0;
591
592fail:
593	efx_remove_channels(efx);
594	return rc;
595}
596
/* Channels are shut down and reinitialised while the NIC is running
 * to propagate configuration changes (MTU, checksum offload), or
 * to clear hardware error conditions.
 */
601static void efx_start_datapath(struct efx_nic *efx)
602{
603	bool old_rx_scatter = efx->rx_scatter;
604	struct efx_tx_queue *tx_queue;
605	struct efx_rx_queue *rx_queue;
606	struct efx_channel *channel;
607	size_t rx_buf_len;
608
609	/* Calculate the rx buffer allocation parameters required to
610	 * support the current MTU, including padding for header
611	 * alignment and overruns.
612	 */
613	efx->rx_dma_len = (efx->rx_prefix_size +
614			   EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
615			   efx->type->rx_buffer_padding);
616	rx_buf_len = (sizeof(struct efx_rx_page_state) +
617		      efx->rx_ip_align + efx->rx_dma_len);
618	if (rx_buf_len <= PAGE_SIZE) {
619		efx->rx_scatter = efx->type->always_rx_scatter;
620		efx->rx_buffer_order = 0;
621	} else if (efx->type->can_rx_scatter) {
622		BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
623		BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
624			     2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
625				       EFX_RX_BUF_ALIGNMENT) >
626			     PAGE_SIZE);
627		efx->rx_scatter = true;
628		efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
629		efx->rx_buffer_order = 0;
630	} else {
631		efx->rx_scatter = false;
632		efx->rx_buffer_order = get_order(rx_buf_len);
633	}
634
635	efx_rx_config_page_split(efx);
636	if (efx->rx_buffer_order)
637		netif_dbg(efx, drv, efx->net_dev,
638			  "RX buf len=%u; page order=%u batch=%u\n",
639			  efx->rx_dma_len, efx->rx_buffer_order,
640			  efx->rx_pages_per_batch);
641	else
642		netif_dbg(efx, drv, efx->net_dev,
643			  "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
644			  efx->rx_dma_len, efx->rx_page_buf_step,
645			  efx->rx_bufs_per_page, efx->rx_pages_per_batch);
646
647	/* RX filters may also have scatter-enabled flags */
648	if (efx->rx_scatter != old_rx_scatter)
649		efx->type->filter_update_rx_scatter(efx);
650
651	/* We must keep at least one descriptor in a TX ring empty.
652	 * We could avoid this when the queue size does not exactly
653	 * match the hardware ring size, but it's not that important.
654	 * Therefore we stop the queue when one more skb might fill
655	 * the ring completely.  We wake it when half way back to
656	 * empty.
657	 */
658	efx->txq_stop_thresh = efx->txq_entries - efx_tx_max_skb_descs(efx);
659	efx->txq_wake_thresh = efx->txq_stop_thresh / 2;
660
661	/* Initialise the channels */
662	efx_for_each_channel(channel, efx) {
663		efx_for_each_channel_tx_queue(tx_queue, channel) {
664			efx_init_tx_queue(tx_queue);
665			atomic_inc(&efx->active_queues);
666		}
667
668		efx_for_each_channel_rx_queue(rx_queue, channel) {
669			efx_init_rx_queue(rx_queue);
670			atomic_inc(&efx->active_queues);
671			efx_stop_eventq(channel);
672			efx_fast_push_rx_descriptors(rx_queue, false);
673			efx_start_eventq(channel);
674		}
675
676		WARN_ON(channel->rx_pkt_n_frags);
677	}
678
679	efx_ptp_start_datapath(efx);
680
681	if (netif_device_present(efx->net_dev))
682		netif_tx_wake_all_queues(efx->net_dev);
683}
684
685static void efx_stop_datapath(struct efx_nic *efx)
686{
687	struct efx_channel *channel;
688	struct efx_tx_queue *tx_queue;
689	struct efx_rx_queue *rx_queue;
690	int rc;
691
692	EFX_ASSERT_RESET_SERIALISED(efx);
693	BUG_ON(efx->port_enabled);
694
695	efx_ptp_stop_datapath(efx);
696
697	/* Stop RX refill */
698	efx_for_each_channel(channel, efx) {
699		efx_for_each_channel_rx_queue(rx_queue, channel)
700			rx_queue->refill_enabled = false;
701	}
702
703	efx_for_each_channel(channel, efx) {
704		/* RX packet processing is pipelined, so wait for the
705		 * NAPI handler to complete.  At least event queue 0
706		 * might be kept active by non-data events, so don't
707		 * use napi_synchronize() but actually disable NAPI
708		 * temporarily.
709		 */
710		if (efx_channel_has_rx_queue(channel)) {
711			efx_stop_eventq(channel);
712			efx_start_eventq(channel);
713		}
714	}
715
716	rc = efx->type->fini_dmaq(efx);
717	if (rc && EFX_WORKAROUND_7803(efx)) {
718		/* Schedule a reset to recover from the flush failure. The
719		 * descriptor caches reference memory we're about to free,
720		 * but falcon_reconfigure_mac_wrapper() won't reconnect
721		 * the MACs because of the pending reset.
722		 */
723		netif_err(efx, drv, efx->net_dev,
724			  "Resetting to recover from flush failure\n");
725		efx_schedule_reset(efx, RESET_TYPE_ALL);
726	} else if (rc) {
727		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
728	} else {
729		netif_dbg(efx, drv, efx->net_dev,
730			  "successfully flushed all queues\n");
731	}
732
733	efx_for_each_channel(channel, efx) {
734		efx_for_each_channel_rx_queue(rx_queue, channel)
735			efx_fini_rx_queue(rx_queue);
736		efx_for_each_possible_channel_tx_queue(tx_queue, channel)
737			efx_fini_tx_queue(tx_queue);
738	}
739}
740
741static void efx_remove_channel(struct efx_channel *channel)
742{
743	struct efx_tx_queue *tx_queue;
744	struct efx_rx_queue *rx_queue;
745
746	netif_dbg(channel->efx, drv, channel->efx->net_dev,
747		  "destroy chan %d\n", channel->channel);
748
749	efx_for_each_channel_rx_queue(rx_queue, channel)
750		efx_remove_rx_queue(rx_queue);
751	efx_for_each_possible_channel_tx_queue(tx_queue, channel)
752		efx_remove_tx_queue(tx_queue);
753	efx_remove_eventq(channel);
754	channel->type->post_remove(channel);
755}
756
757static void efx_remove_channels(struct efx_nic *efx)
758{
759	struct efx_channel *channel;
760
761	efx_for_each_channel(channel, efx)
762		efx_remove_channel(channel);
763}
764
765int
766efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
767{
768	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
769	u32 old_rxq_entries, old_txq_entries;
770	unsigned i, next_buffer_table = 0;
771	int rc, rc2;
772
773	rc = efx_check_disabled(efx);
774	if (rc)
775		return rc;
776
777	/* Not all channels should be reallocated. We must avoid
778	 * reallocating their buffer table entries.
779	 */
780	efx_for_each_channel(channel, efx) {
781		struct efx_rx_queue *rx_queue;
782		struct efx_tx_queue *tx_queue;
783
784		if (channel->type->copy)
785			continue;
786		next_buffer_table = max(next_buffer_table,
787					channel->eventq.index +
788					channel->eventq.entries);
789		efx_for_each_channel_rx_queue(rx_queue, channel)
790			next_buffer_table = max(next_buffer_table,
791						rx_queue->rxd.index +
792						rx_queue->rxd.entries);
793		efx_for_each_channel_tx_queue(tx_queue, channel)
794			next_buffer_table = max(next_buffer_table,
795						tx_queue->txd.index +
796						tx_queue->txd.entries);
797	}
798
799	efx_device_detach_sync(efx);
800	efx_stop_all(efx);
801	efx_soft_disable_interrupts(efx);
802
803	/* Clone channels (where possible) */
804	memset(other_channel, 0, sizeof(other_channel));
805	for (i = 0; i < efx->n_channels; i++) {
806		channel = efx->channel[i];
807		if (channel->type->copy)
808			channel = channel->type->copy(channel);
809		if (!channel) {
810			rc = -ENOMEM;
811			goto out;
812		}
813		other_channel[i] = channel;
814	}
815
816	/* Swap entry counts and channel pointers */
817	old_rxq_entries = efx->rxq_entries;
818	old_txq_entries = efx->txq_entries;
819	efx->rxq_entries = rxq_entries;
820	efx->txq_entries = txq_entries;
821	for (i = 0; i < efx->n_channels; i++) {
822		channel = efx->channel[i];
823		efx->channel[i] = other_channel[i];
824		other_channel[i] = channel;
825	}
826
827	/* Restart buffer table allocation */
828	efx->next_buffer_table = next_buffer_table;
829
830	for (i = 0; i < efx->n_channels; i++) {
831		channel = efx->channel[i];
832		if (!channel->type->copy)
833			continue;
834		rc = efx_probe_channel(channel);
835		if (rc)
836			goto rollback;
837		efx_init_napi_channel(efx->channel[i]);
838	}
839
840out:
841	/* Destroy unused channel structures */
842	for (i = 0; i < efx->n_channels; i++) {
843		channel = other_channel[i];
844		if (channel && channel->type->copy) {
845			efx_fini_napi_channel(channel);
846			efx_remove_channel(channel);
847			kfree(channel);
848		}
849	}
850
851	rc2 = efx_soft_enable_interrupts(efx);
852	if (rc2) {
853		rc = rc ? rc : rc2;
854		netif_err(efx, drv, efx->net_dev,
855			  "unable to restart interrupts on channel reallocation\n");
856		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
857	} else {
858		efx_start_all(efx);
859		netif_device_attach(efx->net_dev);
860	}
861	return rc;
862
863rollback:
864	/* Swap back */
865	efx->rxq_entries = old_rxq_entries;
866	efx->txq_entries = old_txq_entries;
867	for (i = 0; i < efx->n_channels; i++) {
868		channel = efx->channel[i];
869		efx->channel[i] = other_channel[i];
870		other_channel[i] = channel;
871	}
872	goto out;
873}
874
875void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
876{
877	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
878}
879
880static const struct efx_channel_type efx_default_channel_type = {
881	.pre_probe		= efx_channel_dummy_op_int,
882	.post_remove		= efx_channel_dummy_op_void,
883	.get_name		= efx_get_channel_name,
884	.copy			= efx_copy_channel,
885	.keep_eventq		= false,
886};
887
888int efx_channel_dummy_op_int(struct efx_channel *channel)
889{
890	return 0;
891}
892
893void efx_channel_dummy_op_void(struct efx_channel *channel)
894{
895}
896
897/**************************************************************************
898 *
899 * Port handling
900 *
901 **************************************************************************/
902
/* This ensures that the kernel is kept informed (via
 * netif_carrier_on/off) of the link status, which in turn determines
 * whether the networking core treats the port's TX queues as usable.
 */
907void efx_link_status_changed(struct efx_nic *efx)
908{
909	struct efx_link_state *link_state = &efx->link_state;
910
911	/* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
912	 * that no events are triggered between unregister_netdev() and the
913	 * driver unloading. A more general condition is that NETDEV_CHANGE
914	 * can only be generated between NETDEV_UP and NETDEV_DOWN */
915	if (!netif_running(efx->net_dev))
916		return;
917
918	if (link_state->up != netif_carrier_ok(efx->net_dev)) {
919		efx->n_link_state_changes++;
920
921		if (link_state->up)
922			netif_carrier_on(efx->net_dev);
923		else
924			netif_carrier_off(efx->net_dev);
925	}
926
927	/* Status message for kernel log */
928	if (link_state->up)
929		netif_info(efx, link, efx->net_dev,
930			   "link up at %uMbps %s-duplex (MTU %d)\n",
931			   link_state->speed, link_state->fd ? "full" : "half",
932			   efx->net_dev->mtu);
933	else
934		netif_info(efx, link, efx->net_dev, "link down\n");
935}
936
937void efx_link_set_advertising(struct efx_nic *efx, u32 advertising)
938{
939	efx->link_advertising = advertising;
940	if (advertising) {
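		/* Pause and Asym_Pause together encode the pause mode:
		 * Pause alone means symmetric (RX and TX) pause,
		 * Pause + Asym_Pause means RX pause only, and
		 * Asym_Pause alone means TX pause only.
		 */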
941		if (advertising & ADVERTISED_Pause)
942			efx->wanted_fc |= (EFX_FC_TX | EFX_FC_RX);
943		else
944			efx->wanted_fc &= ~(EFX_FC_TX | EFX_FC_RX);
945		if (advertising & ADVERTISED_Asym_Pause)
946			efx->wanted_fc ^= EFX_FC_TX;
947	}
948}
949
950void efx_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc)
951{
952	efx->wanted_fc = wanted_fc;
953	if (efx->link_advertising) {
954		if (wanted_fc & EFX_FC_RX)
955			efx->link_advertising |= (ADVERTISED_Pause |
956						  ADVERTISED_Asym_Pause);
957		else
958			efx->link_advertising &= ~(ADVERTISED_Pause |
959						   ADVERTISED_Asym_Pause);
960		if (wanted_fc & EFX_FC_TX)
961			efx->link_advertising ^= ADVERTISED_Asym_Pause;
962	}
963}
964
965static void efx_fini_port(struct efx_nic *efx);
966
967/* We assume that efx->type->reconfigure_mac will always try to sync RX
968 * filters and therefore needs to read-lock the filter table against freeing
969 */
970void efx_mac_reconfigure(struct efx_nic *efx)
971{
972	down_read(&efx->filter_sem);
973	efx->type->reconfigure_mac(efx);
974	up_read(&efx->filter_sem);
975}
976
977/* Push loopback/power/transmit disable settings to the PHY, and reconfigure
978 * the MAC appropriately. All other PHY configuration changes are pushed
979 * through phy_op->set_settings(), and pushed asynchronously to the MAC
980 * through efx_monitor().
981 *
982 * Callers must hold the mac_lock
983 */
984int __efx_reconfigure_port(struct efx_nic *efx)
985{
986	enum efx_phy_mode phy_mode;
987	int rc;
988
989	WARN_ON(!mutex_is_locked(&efx->mac_lock));
990
991	/* Disable PHY transmit in mac level loopbacks */
992	phy_mode = efx->phy_mode;
993	if (LOOPBACK_INTERNAL(efx))
994		efx->phy_mode |= PHY_MODE_TX_DISABLED;
995	else
996		efx->phy_mode &= ~PHY_MODE_TX_DISABLED;
997
998	rc = efx->type->reconfigure_port(efx);
999
1000	if (rc)
1001		efx->phy_mode = phy_mode;
1002
1003	return rc;
1004}
1005
1006/* Reinitialise the MAC to pick up new PHY settings, even if the port is
1007 * disabled. */
1008int efx_reconfigure_port(struct efx_nic *efx)
1009{
1010	int rc;
1011
1012	EFX_ASSERT_RESET_SERIALISED(efx);
1013
1014	mutex_lock(&efx->mac_lock);
1015	rc = __efx_reconfigure_port(efx);
1016	mutex_unlock(&efx->mac_lock);
1017
1018	return rc;
1019}
1020
1021/* Asynchronous work item for changing MAC promiscuity and multicast
1022 * hash.  Avoid a drain/rx_ingress enable by reconfiguring the current
1023 * MAC directly. */
1024static void efx_mac_work(struct work_struct *data)
1025{
1026	struct efx_nic *efx = container_of(data, struct efx_nic, mac_work);
1027
1028	mutex_lock(&efx->mac_lock);
1029	if (efx->port_enabled)
1030		efx_mac_reconfigure(efx);
1031	mutex_unlock(&efx->mac_lock);
1032}
1033
1034static int efx_probe_port(struct efx_nic *efx)
1035{
1036	int rc;
1037
1038	netif_dbg(efx, probe, efx->net_dev, "create port\n");
1039
1040	if (phy_flash_cfg)
1041		efx->phy_mode = PHY_MODE_SPECIAL;
1042
1043	/* Connect up MAC/PHY operations table */
1044	rc = efx->type->probe_port(efx);
1045	if (rc)
1046		return rc;
1047
1048	/* Initialise MAC address to permanent address */
1049	ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
1050
1051	return 0;
1052}
1053
1054static int efx_init_port(struct efx_nic *efx)
1055{
1056	int rc;
1057
1058	netif_dbg(efx, drv, efx->net_dev, "init port\n");
1059
1060	mutex_lock(&efx->mac_lock);
1061
1062	rc = efx->phy_op->init(efx);
1063	if (rc)
1064		goto fail1;
1065
1066	efx->port_initialized = true;
1067
1068	/* Reconfigure the MAC before creating dma queues (required for
1069	 * Falcon/A1 where RX_INGR_EN/TX_DRAIN_EN isn't supported) */
1070	efx_mac_reconfigure(efx);
1071
1072	/* Ensure the PHY advertises the correct flow control settings */
1073	rc = efx->phy_op->reconfigure(efx);
1074	if (rc && rc != -EPERM)
1075		goto fail2;
1076
1077	mutex_unlock(&efx->mac_lock);
1078	return 0;
1079
1080fail2:
1081	efx->phy_op->fini(efx);
1082fail1:
1083	mutex_unlock(&efx->mac_lock);
1084	return rc;
1085}
1086
1087static void efx_start_port(struct efx_nic *efx)
1088{
1089	netif_dbg(efx, ifup, efx->net_dev, "start port\n");
1090	BUG_ON(efx->port_enabled);
1091
1092	mutex_lock(&efx->mac_lock);
1093	efx->port_enabled = true;
1094
1095	/* Ensure MAC ingress/egress is enabled */
1096	efx_mac_reconfigure(efx);
1097
1098	mutex_unlock(&efx->mac_lock);
1099}
1100
1101/* Cancel work for MAC reconfiguration, periodic hardware monitoring
1102 * and the async self-test, wait for them to finish and prevent them
1103 * being scheduled again.  This doesn't cover online resets, which
1104 * should only be cancelled when removing the device.
1105 */
1106static void efx_stop_port(struct efx_nic *efx)
1107{
1108	netif_dbg(efx, ifdown, efx->net_dev, "stop port\n");
1109
1110	EFX_ASSERT_RESET_SERIALISED(efx);
1111
1112	mutex_lock(&efx->mac_lock);
1113	efx->port_enabled = false;
1114	mutex_unlock(&efx->mac_lock);
1115
1116	/* Serialise against efx_set_multicast_list() */
1117	netif_addr_lock_bh(efx->net_dev);
1118	netif_addr_unlock_bh(efx->net_dev);
1119
1120	cancel_delayed_work_sync(&efx->monitor_work);
1121	efx_selftest_async_cancel(efx);
1122	cancel_work_sync(&efx->mac_work);
1123}
1124
1125static void efx_fini_port(struct efx_nic *efx)
1126{
1127	netif_dbg(efx, drv, efx->net_dev, "shut down port\n");
1128
1129	if (!efx->port_initialized)
1130		return;
1131
1132	efx->phy_op->fini(efx);
1133	efx->port_initialized = false;
1134
1135	efx->link_state.up = false;
1136	efx_link_status_changed(efx);
1137}
1138
1139static void efx_remove_port(struct efx_nic *efx)
1140{
1141	netif_dbg(efx, drv, efx->net_dev, "destroying port\n");
1142
1143	efx->type->remove_port(efx);
1144}
1145
1146/**************************************************************************
1147 *
1148 * NIC handling
1149 *
1150 **************************************************************************/
1151
1152static LIST_HEAD(efx_primary_list);
1153static LIST_HEAD(efx_unassociated_list);
1154
1155static bool efx_same_controller(struct efx_nic *left, struct efx_nic *right)
1156{
1157	return left->type == right->type &&
1158		left->vpd_sn && right->vpd_sn &&
1159		!strcmp(left->vpd_sn, right->vpd_sn);
1160}
1161
1162static void efx_associate(struct efx_nic *efx)
1163{
1164	struct efx_nic *other, *next;
1165
1166	if (efx->primary == efx) {
1167		/* Adding primary function; look for secondaries */
1168
1169		netif_dbg(efx, probe, efx->net_dev, "adding to primary list\n");
1170		list_add_tail(&efx->node, &efx_primary_list);
1171
1172		list_for_each_entry_safe(other, next, &efx_unassociated_list,
1173					 node) {
1174			if (efx_same_controller(efx, other)) {
1175				list_del(&other->node);
1176				netif_dbg(other, probe, other->net_dev,
1177					  "moving to secondary list of %s %s\n",
1178					  pci_name(efx->pci_dev),
1179					  efx->net_dev->name);
1180				list_add_tail(&other->node,
1181					      &efx->secondary_list);
1182				other->primary = efx;
1183			}
1184		}
1185	} else {
1186		/* Adding secondary function; look for primary */
1187
1188		list_for_each_entry(other, &efx_primary_list, node) {
1189			if (efx_same_controller(efx, other)) {
1190				netif_dbg(efx, probe, efx->net_dev,
1191					  "adding to secondary list of %s %s\n",
1192					  pci_name(other->pci_dev),
1193					  other->net_dev->name);
1194				list_add_tail(&efx->node,
1195					      &other->secondary_list);
1196				efx->primary = other;
1197				return;
1198			}
1199		}
1200
1201		netif_dbg(efx, probe, efx->net_dev,
1202			  "adding to unassociated list\n");
1203		list_add_tail(&efx->node, &efx_unassociated_list);
1204	}
1205}
1206
1207static void efx_dissociate(struct efx_nic *efx)
1208{
1209	struct efx_nic *other, *next;
1210
1211	list_del(&efx->node);
1212	efx->primary = NULL;
1213
1214	list_for_each_entry_safe(other, next, &efx->secondary_list, node) {
1215		list_del(&other->node);
1216		netif_dbg(other, probe, other->net_dev,
1217			  "moving to unassociated list\n");
1218		list_add_tail(&other->node, &efx_unassociated_list);
1219		other->primary = NULL;
1220	}
1221}
1222
1223/* This configures the PCI device to enable I/O and DMA. */
1224static int efx_init_io(struct efx_nic *efx)
1225{
1226	struct pci_dev *pci_dev = efx->pci_dev;
1227	dma_addr_t dma_mask = efx->type->max_dma_mask;
1228	unsigned int mem_map_size = efx->type->mem_map_size(efx);
1229	int rc, bar;
1230
1231	netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
1232
1233	bar = efx->type->mem_bar;
1234
1235	rc = pci_enable_device(pci_dev);
1236	if (rc) {
1237		netif_err(efx, probe, efx->net_dev,
1238			  "failed to enable PCI device\n");
1239		goto fail1;
1240	}
1241
1242	pci_set_master(pci_dev);
1243
1244	/* Set the PCI DMA mask.  Try all possibilities from our
1245	 * genuine mask down to 32 bits, because some architectures
1246	 * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
	 * masks even though they reject 46 bit masks.
1248	 */
1249	while (dma_mask > 0x7fffffffUL) {
1250		rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
1251		if (rc == 0)
1252			break;
1253		dma_mask >>= 1;
1254	}
1255	if (rc) {
1256		netif_err(efx, probe, efx->net_dev,
1257			  "could not find a suitable DMA mask\n");
1258		goto fail2;
1259	}
1260	netif_dbg(efx, probe, efx->net_dev,
1261		  "using DMA mask %llx\n", (unsigned long long) dma_mask);
1262
1263	efx->membase_phys = pci_resource_start(efx->pci_dev, bar);
1264	rc = pci_request_region(pci_dev, bar, "sfc");
1265	if (rc) {
1266		netif_err(efx, probe, efx->net_dev,
1267			  "request for memory BAR failed\n");
1268		rc = -EIO;
1269		goto fail3;
1270	}
1271	efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
1272	if (!efx->membase) {
1273		netif_err(efx, probe, efx->net_dev,
1274			  "could not map memory BAR at %llx+%x\n",
1275			  (unsigned long long)efx->membase_phys, mem_map_size);
1276		rc = -ENOMEM;
1277		goto fail4;
1278	}
1279	netif_dbg(efx, probe, efx->net_dev,
1280		  "memory BAR at %llx+%x (virtual %p)\n",
1281		  (unsigned long long)efx->membase_phys, mem_map_size,
1282		  efx->membase);
1283
1284	return 0;
1285
1286 fail4:
1287	pci_release_region(efx->pci_dev, bar);
1288 fail3:
1289	efx->membase_phys = 0;
1290 fail2:
1291	pci_disable_device(efx->pci_dev);
1292 fail1:
1293	return rc;
1294}
1295
1296static void efx_fini_io(struct efx_nic *efx)
1297{
1298	int bar;
1299
1300	netif_dbg(efx, drv, efx->net_dev, "shutting down I/O\n");
1301
1302	if (efx->membase) {
1303		iounmap(efx->membase);
1304		efx->membase = NULL;
1305	}
1306
1307	if (efx->membase_phys) {
1308		bar = efx->type->mem_bar;
1309		pci_release_region(efx->pci_dev, bar);
1310		efx->membase_phys = 0;
1311	}
1312
1313	/* Don't disable bus-mastering if VFs are assigned */
1314	if (!pci_vfs_assigned(efx->pci_dev))
1315		pci_disable_device(efx->pci_dev);
1316}
1317
1318void efx_set_default_rx_indir_table(struct efx_nic *efx)
1319{
1320	size_t i;
1321
1322	for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
1323		efx->rx_indir_table[i] =
1324			ethtool_rxfh_indir_default(i, efx->rss_spread);
1325}
1326
1327static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
1328{
1329	cpumask_var_t thread_mask;
1330	unsigned int count;
1331	int cpu;
1332
1333	if (rss_cpus) {
1334		count = rss_cpus;
1335	} else {
1336		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
1337			netif_warn(efx, probe, efx->net_dev,
1338				   "RSS disabled due to allocation failure\n");
1339			return 1;
1340		}
1341
1342		count = 0;
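		/* Count each physical core once: hyperthread siblings of
		 * a CPU that has already been counted are skipped.
		 */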
1343		for_each_online_cpu(cpu) {
1344			if (!cpumask_test_cpu(cpu, thread_mask)) {
1345				++count;
1346				cpumask_or(thread_mask, thread_mask,
1347					   topology_sibling_cpumask(cpu));
1348			}
1349		}
1350
1351		free_cpumask_var(thread_mask);
1352	}
1353
1354	/* If RSS is requested for the PF *and* VFs then we can't write RSS
1355	 * table entries that are inaccessible to VFs
1356	 */
1357#ifdef CONFIG_SFC_SRIOV
1358	if (efx->type->sriov_wanted) {
1359		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
1360		    count > efx_vf_size(efx)) {
1361			netif_warn(efx, probe, efx->net_dev,
1362				   "Reducing number of RSS channels from %u to %u for "
1363				   "VF support. Increase vf-msix-limit to use more "
1364				   "channels on the PF.\n",
1365				   count, efx_vf_size(efx));
1366			count = efx_vf_size(efx);
1367		}
1368	}
1369#endif
1370
1371	return count;
1372}
1373
1374/* Probe the number and type of interrupts we are able to obtain, and
1375 * the resulting numbers of channels and RX queues.
1376 */
1377static int efx_probe_interrupts(struct efx_nic *efx)
1378{
1379	unsigned int extra_channels = 0;
1380	unsigned int i, j;
1381	int rc;
1382
1383	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
1384		if (efx->extra_channel_type[i])
1385			++extra_channels;
1386
1387	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
1388		struct msix_entry xentries[EFX_MAX_CHANNELS];
1389		unsigned int n_channels;
1390
1391		n_channels = efx_wanted_parallelism(efx);
1392		if (efx_separate_tx_channels)
1393			n_channels *= 2;
1394		n_channels += extra_channels;
1395		n_channels = min(n_channels, efx->max_channels);
1396
1397		for (i = 0; i < n_channels; i++)
1398			xentries[i].entry = i;
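		/* pci_enable_msix_range() returns the number of vectors
		 * actually allocated (at least 1 here) or a negative errno.
		 */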
1399		rc = pci_enable_msix_range(efx->pci_dev,
1400					   xentries, 1, n_channels);
1401		if (rc < 0) {
1402			/* Fall back to single channel MSI */
1403			efx->interrupt_mode = EFX_INT_MODE_MSI;
1404			netif_err(efx, drv, efx->net_dev,
1405				  "could not enable MSI-X\n");
1406		} else if (rc < n_channels) {
1407			netif_err(efx, drv, efx->net_dev,
1408				  "WARNING: Insufficient MSI-X vectors"
1409				  " available (%d < %u).\n", rc, n_channels);
1410			netif_err(efx, drv, efx->net_dev,
1411				  "WARNING: Performance may be reduced.\n");
1412			n_channels = rc;
1413		}
1414
1415		if (rc > 0) {
1416			efx->n_channels = n_channels;
1417			if (n_channels > extra_channels)
1418				n_channels -= extra_channels;
1419			if (efx_separate_tx_channels) {
1420				efx->n_tx_channels = min(max(n_channels / 2,
1421							     1U),
1422							 efx->max_tx_channels);
1423				efx->n_rx_channels = max(n_channels -
1424							 efx->n_tx_channels,
1425							 1U);
1426			} else {
1427				efx->n_tx_channels = min(n_channels,
1428							 efx->max_tx_channels);
1429				efx->n_rx_channels = n_channels;
1430			}
1431			for (i = 0; i < efx->n_channels; i++)
1432				efx_get_channel(efx, i)->irq =
1433					xentries[i].vector;
1434		}
1435	}
1436
1437	/* Try single interrupt MSI */
1438	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
1439		efx->n_channels = 1;
1440		efx->n_rx_channels = 1;
1441		efx->n_tx_channels = 1;
1442		rc = pci_enable_msi(efx->pci_dev);
1443		if (rc == 0) {
1444			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
1445		} else {
1446			netif_err(efx, drv, efx->net_dev,
1447				  "could not enable MSI\n");
1448			efx->interrupt_mode = EFX_INT_MODE_LEGACY;
1449		}
1450	}
1451
1452	/* Assume legacy interrupts */
1453	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
1454		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
1455		efx->n_rx_channels = 1;
1456		efx->n_tx_channels = 1;
1457		efx->legacy_irq = efx->pci_dev->irq;
1458	}
1459
1460	/* Assign extra channels if possible */
1461	j = efx->n_channels;
1462	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
1463		if (!efx->extra_channel_type[i])
1464			continue;
1465		if (efx->interrupt_mode != EFX_INT_MODE_MSIX ||
1466		    efx->n_channels <= extra_channels) {
1467			efx->extra_channel_type[i]->handle_no_channel(efx);
1468		} else {
1469			--j;
1470			efx_get_channel(efx, j)->type =
1471				efx->extra_channel_type[i];
1472		}
1473	}
1474
1475	/* RSS might be usable on VFs even if it is disabled on the PF */
1476#ifdef CONFIG_SFC_SRIOV
1477	if (efx->type->sriov_wanted) {
1478		efx->rss_spread = ((efx->n_rx_channels > 1 ||
1479				    !efx->type->sriov_wanted(efx)) ?
1480				   efx->n_rx_channels : efx_vf_size(efx));
1481		return 0;
1482	}
1483#endif
1484	efx->rss_spread = efx->n_rx_channels;
1485
1486	return 0;
1487}
1488
1489static int efx_soft_enable_interrupts(struct efx_nic *efx)
1490{
1491	struct efx_channel *channel, *end_channel;
1492	int rc;
1493
1494	BUG_ON(efx->state == STATE_DISABLED);
1495
1496	efx->irq_soft_enabled = true;
1497	smp_wmb();
1498
1499	efx_for_each_channel(channel, efx) {
1500		if (!channel->type->keep_eventq) {
1501			rc = efx_init_eventq(channel);
1502			if (rc)
1503				goto fail;
1504		}
1505		efx_start_eventq(channel);
1506	}
1507
1508	efx_mcdi_mode_event(efx);
1509
1510	return 0;
1511fail:
1512	end_channel = channel;
1513	efx_for_each_channel(channel, efx) {
1514		if (channel == end_channel)
1515			break;
1516		efx_stop_eventq(channel);
1517		if (!channel->type->keep_eventq)
1518			efx_fini_eventq(channel);
1519	}
1520
1521	return rc;
1522}
1523
1524static void efx_soft_disable_interrupts(struct efx_nic *efx)
1525{
1526	struct efx_channel *channel;
1527
1528	if (efx->state == STATE_DISABLED)
1529		return;
1530
1531	efx_mcdi_mode_poll(efx);
1532
1533	efx->irq_soft_enabled = false;
1534	smp_wmb();
1535
1536	if (efx->legacy_irq)
1537		synchronize_irq(efx->legacy_irq);
1538
1539	efx_for_each_channel(channel, efx) {
1540		if (channel->irq)
1541			synchronize_irq(channel->irq);
1542
1543		efx_stop_eventq(channel);
1544		if (!channel->type->keep_eventq)
1545			efx_fini_eventq(channel);
1546	}
1547
1548	/* Flush the asynchronous MCDI request queue */
1549	efx_mcdi_flush_async(efx);
1550}
1551
1552static int efx_enable_interrupts(struct efx_nic *efx)
1553{
1554	struct efx_channel *channel, *end_channel;
1555	int rc;
1556
1557	BUG_ON(efx->state == STATE_DISABLED);
1558
1559	if (efx->eeh_disabled_legacy_irq) {
1560		enable_irq(efx->legacy_irq);
1561		efx->eeh_disabled_legacy_irq = false;
1562	}
1563
1564	efx->type->irq_enable_master(efx);
1565
1566	efx_for_each_channel(channel, efx) {
1567		if (channel->type->keep_eventq) {
1568			rc = efx_init_eventq(channel);
1569			if (rc)
1570				goto fail;
1571		}
1572	}
1573
1574	rc = efx_soft_enable_interrupts(efx);
1575	if (rc)
1576		goto fail;
1577
1578	return 0;
1579
1580fail:
1581	end_channel = channel;
1582	efx_for_each_channel(channel, efx) {
1583		if (channel == end_channel)
1584			break;
1585		if (channel->type->keep_eventq)
1586			efx_fini_eventq(channel);
1587	}
1588
1589	efx->type->irq_disable_non_ev(efx);
1590
1591	return rc;
1592}
1593
1594static void efx_disable_interrupts(struct efx_nic *efx)
1595{
1596	struct efx_channel *channel;
1597
1598	efx_soft_disable_interrupts(efx);
1599
1600	efx_for_each_channel(channel, efx) {
1601		if (channel->type->keep_eventq)
1602			efx_fini_eventq(channel);
1603	}
1604
1605	efx->type->irq_disable_non_ev(efx);
1606}
1607
1608static void efx_remove_interrupts(struct efx_nic *efx)
1609{
1610	struct efx_channel *channel;
1611
1612	/* Remove MSI/MSI-X interrupts */
1613	efx_for_each_channel(channel, efx)
1614		channel->irq = 0;
1615	pci_disable_msi(efx->pci_dev);
1616	pci_disable_msix(efx->pci_dev);
1617
1618	/* Remove legacy interrupt */
1619	efx->legacy_irq = 0;
1620}
1621
1622static void efx_set_channels(struct efx_nic *efx)
1623{
1624	struct efx_channel *channel;
1625	struct efx_tx_queue *tx_queue;
1626
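	/* With separate TX channels the TX channels occupy the top of the
	 * channel range; otherwise every channel carries both RX and TX
	 * and the offset is zero.
	 */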
1627	efx->tx_channel_offset =
1628		efx_separate_tx_channels ?
1629		efx->n_channels - efx->n_tx_channels : 0;
1630
1631	/* We need to mark which channels really have RX and TX
1632	 * queues, and adjust the TX queue numbers if we have separate
1633	 * RX-only and TX-only channels.
1634	 */
1635	efx_for_each_channel(channel, efx) {
1636		if (channel->channel < efx->n_rx_channels)
1637			channel->rx_queue.core_index = channel->channel;
1638		else
1639			channel->rx_queue.core_index = -1;
1640
1641		efx_for_each_channel_tx_queue(tx_queue, channel)
1642			tx_queue->queue -= (efx->tx_channel_offset *
1643					    EFX_TXQ_TYPES);
1644	}
1645}
1646
1647static int efx_probe_nic(struct efx_nic *efx)
1648{
1649	int rc;
1650
1651	netif_dbg(efx, probe, efx->net_dev, "creating NIC\n");
1652
1653	/* Carry out hardware-type specific initialisation */
1654	rc = efx->type->probe(efx);
1655	if (rc)
1656		return rc;
1657
1658	do {
1659		if (!efx->max_channels || !efx->max_tx_channels) {
1660			netif_err(efx, drv, efx->net_dev,
1661				  "Insufficient resources to allocate"
1662				  " any channels\n");
1663			rc = -ENOSPC;
1664			goto fail1;
1665		}
1666
1667		/* Determine the number of channels and queues by trying
1668		 * to hook in MSI-X interrupts.
1669		 */
1670		rc = efx_probe_interrupts(efx);
1671		if (rc)
1672			goto fail1;
1673
1674		efx_set_channels(efx);
1675
1676		/* dimension_resources can fail with EAGAIN */
1677		rc = efx->type->dimension_resources(efx);
1678		if (rc != 0 && rc != -EAGAIN)
1679			goto fail2;
1680
1681		if (rc == -EAGAIN)
1682			/* try again with new max_channels */
1683			efx_remove_interrupts(efx);
1684
1685	} while (rc == -EAGAIN);
1686
1687	if (efx->n_channels > 1)
1688		netdev_rss_key_fill(&efx->rx_hash_key,
1689				    sizeof(efx->rx_hash_key));
1690	efx_set_default_rx_indir_table(efx);
1691
1692	netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
1693	netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
1694
1695	/* Initialise the interrupt moderation settings */
1696	efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true,
1697				true);
1698
1699	return 0;
1700
1701fail2:
1702	efx_remove_interrupts(efx);
1703fail1:
1704	efx->type->remove(efx);
1705	return rc;
1706}
1707
1708static void efx_remove_nic(struct efx_nic *efx)
1709{
1710	netif_dbg(efx, drv, efx->net_dev, "destroying NIC\n");
1711
1712	efx_remove_interrupts(efx);
1713	efx->type->remove(efx);
1714}
1715
1716static int efx_probe_filters(struct efx_nic *efx)
1717{
1718	int rc;
1719
1720	spin_lock_init(&efx->filter_lock);
1721	init_rwsem(&efx->filter_sem);
1722	down_write(&efx->filter_sem);
1723	rc = efx->type->filter_table_probe(efx);
1724	if (rc)
1725		goto out_unlock;
1726
1727#ifdef CONFIG_RFS_ACCEL
1728	if (efx->type->offload_features & NETIF_F_NTUPLE) {
1729		efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
1730					   sizeof(*efx->rps_flow_id),
1731					   GFP_KERNEL);
1732		if (!efx->rps_flow_id) {
1733			efx->type->filter_table_remove(efx);
1734			rc = -ENOMEM;
1735			goto out_unlock;
1736		}
1737	}
1738#endif
1739out_unlock:
1740	up_write(&efx->filter_sem);
1741	return rc;
1742}
1743
1744static void efx_remove_filters(struct efx_nic *efx)
1745{
1746#ifdef CONFIG_RFS_ACCEL
1747	kfree(efx->rps_flow_id);
1748#endif
1749	down_write(&efx->filter_sem);
1750	efx->type->filter_table_remove(efx);
1751	up_write(&efx->filter_sem);
1752}
1753
1754static void efx_restore_filters(struct efx_nic *efx)
1755{
1756	down_read(&efx->filter_sem);
1757	efx->type->filter_table_restore(efx);
1758	up_read(&efx->filter_sem);
1759}
1760
1761/**************************************************************************
1762 *
1763 * NIC startup/shutdown
1764 *
1765 *************************************************************************/
1766
1767static int efx_probe_all(struct efx_nic *efx)
1768{
1769	int rc;
1770
1771	rc = efx_probe_nic(efx);
1772	if (rc) {
1773		netif_err(efx, probe, efx->net_dev, "failed to create NIC\n");
1774		goto fail1;
1775	}
1776
1777	rc = efx_probe_port(efx);
1778	if (rc) {
1779		netif_err(efx, probe, efx->net_dev, "failed to create port\n");
1780		goto fail2;
1781	}
1782
1783	BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT);
1784	if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) {
1785		rc = -EINVAL;
1786		goto fail3;
1787	}
1788	efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE;
1789
1790#ifdef CONFIG_SFC_SRIOV
1791	rc = efx->type->vswitching_probe(efx);
1792	if (rc) /* not fatal; the PF will still work fine */
1793		netif_warn(efx, probe, efx->net_dev,
1794			   "failed to setup vswitching rc=%d;"
1795			   " VFs may not function\n", rc);
1796#endif
1797
1798	rc = efx_probe_filters(efx);
1799	if (rc) {
1800		netif_err(efx, probe, efx->net_dev,
1801			  "failed to create filter tables\n");
1802		goto fail4;
1803	}
1804
1805	rc = efx_probe_channels(efx);
1806	if (rc)
1807		goto fail5;
1808
1809	return 0;
1810
1811 fail5:
1812	efx_remove_filters(efx);
1813 fail4:
1814#ifdef CONFIG_SFC_SRIOV
1815	efx->type->vswitching_remove(efx);
1816#endif
1817 fail3:
1818	efx_remove_port(efx);
1819 fail2:
1820	efx_remove_nic(efx);
1821 fail1:
1822	return rc;
1823}
1824
1825/* If the interface is supposed to be running but is not, start
1826 * the hardware and software data path, regular activity for the port
1827 * (MAC statistics, link polling, etc.) and schedule the port to be
1828 * reconfigured.  Interrupts must already be enabled.  This function
1829 * is safe to call multiple times, so long as the NIC is not disabled.
1830 * Requires the RTNL lock.
1831 */
1832static void efx_start_all(struct efx_nic *efx)
1833{
1834	EFX_ASSERT_RESET_SERIALISED(efx);
1835	BUG_ON(efx->state == STATE_DISABLED);
1836
1837	/* Check that it is appropriate to restart the interface. All
1838	 * of these flags are safe to read under just the rtnl lock */
1839	if (efx->port_enabled || !netif_running(efx->net_dev) ||
1840	    efx->reset_pending)
1841		return;
1842
1843	efx_start_port(efx);
1844	efx_start_datapath(efx);
1845
1846	/* Start the hardware monitor if there is one */
1847	if (efx->type->monitor != NULL)
1848		queue_delayed_work(efx->workqueue, &efx->monitor_work,
1849				   efx_monitor_interval);
1850
1851	/* If link state detection is normally event-driven, we have
1852	 * to poll now because we could have missed a change
1853	 */
1854	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1855		mutex_lock(&efx->mac_lock);
1856		if (efx->phy_op->poll(efx))
1857			efx_link_status_changed(efx);
1858		mutex_unlock(&efx->mac_lock);
1859	}
1860
1861	efx->type->start_stats(efx);
1862	efx->type->pull_stats(efx);
1863	spin_lock_bh(&efx->stats_lock);
1864	efx->type->update_stats(efx, NULL, NULL);
1865	spin_unlock_bh(&efx->stats_lock);
1866}
1867
1868/* Quiesce the hardware and software data path, and regular activity
1869 * for the port without bringing the link down.  Safe to call multiple
1870 * times with the NIC in almost any state, but interrupts should be
1871 * enabled.  Requires the RTNL lock.
1872 */
1873static void efx_stop_all(struct efx_nic *efx)
1874{
1875	EFX_ASSERT_RESET_SERIALISED(efx);
1876
1877	/* port_enabled can be read safely under the rtnl lock */
1878	if (!efx->port_enabled)
1879		return;
1880
1881	/* update stats before we go down so we can accurately count
1882	 * rx_nodesc_drops
1883	 */
1884	efx->type->pull_stats(efx);
1885	spin_lock_bh(&efx->stats_lock);
1886	efx->type->update_stats(efx, NULL, NULL);
1887	spin_unlock_bh(&efx->stats_lock);
1888	efx->type->stop_stats(efx);
1889	efx_stop_port(efx);
1890
1891	/* Stop the kernel transmit interface.  This is only valid if
1892	 * the device is stopped or detached; otherwise the watchdog
1893	 * may fire immediately.
1894	 */
1895	WARN_ON(netif_running(efx->net_dev) &&
1896		netif_device_present(efx->net_dev));
1897	netif_tx_disable(efx->net_dev);
1898
1899	efx_stop_datapath(efx);
1900}
1901
1902static void efx_remove_all(struct efx_nic *efx)
1903{
1904	efx_remove_channels(efx);
1905	efx_remove_filters(efx);
1906#ifdef CONFIG_SFC_SRIOV
1907	efx->type->vswitching_remove(efx);
1908#endif
1909	efx_remove_port(efx);
1910	efx_remove_nic(efx);
1911}
1912
1913/**************************************************************************
1914 *
1915 * Interrupt moderation
1916 *
1917 **************************************************************************/
1918
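/* Convert an interrupt moderation interval in microseconds to hardware
 * timer ticks of quantum_ns nanoseconds each.  The result is rounded
 * down, but a non-zero request is never rounded down to zero ticks
 * (for example, with a 5000 ns quantum, 20 usecs becomes 4 ticks and
 * 1 usec still becomes 1 tick).
 */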
1919static unsigned int irq_mod_ticks(unsigned int usecs, unsigned int quantum_ns)
1920{
1921	if (usecs == 0)
1922		return 0;
1923	if (usecs * 1000 < quantum_ns)
1924		return 1; /* never round down to 0 */
1925	return usecs * 1000 / quantum_ns;
1926}
1927
1928/* Set interrupt moderation parameters */
1929int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
1930			    unsigned int rx_usecs, bool rx_adaptive,
1931			    bool rx_may_override_tx)
1932{
1933	struct efx_channel *channel;
1934	unsigned int irq_mod_max = DIV_ROUND_UP(efx->type->timer_period_max *
1935						efx->timer_quantum_ns,
1936						1000);
1937	unsigned int tx_ticks;
1938	unsigned int rx_ticks;
1939
1940	EFX_ASSERT_RESET_SERIALISED(efx);
1941
1942	if (tx_usecs > irq_mod_max || rx_usecs > irq_mod_max)
1943		return -EINVAL;
1944
1945	tx_ticks = irq_mod_ticks(tx_usecs, efx->timer_quantum_ns);
1946	rx_ticks = irq_mod_ticks(rx_usecs, efx->timer_quantum_ns);
1947
1948	if (tx_ticks != rx_ticks && efx->tx_channel_offset == 0 &&
1949	    !rx_may_override_tx) {
1950		netif_err(efx, drv, efx->net_dev, "Channels are shared. "
1951			  "RX and TX IRQ moderation must be equal\n");
1952		return -EINVAL;
1953	}
1954
1955	efx->irq_rx_adaptive = rx_adaptive;
1956	efx->irq_rx_moderation = rx_ticks;
1957	efx_for_each_channel(channel, efx) {
1958		if (efx_channel_has_rx_queue(channel))
1959			channel->irq_moderation = rx_ticks;
1960		else if (efx_channel_has_tx_queues(channel))
1961			channel->irq_moderation = tx_ticks;
1962	}
1963
1964	return 0;
1965}
1966
1967void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
1968			    unsigned int *rx_usecs, bool *rx_adaptive)
1969{
1970	/* We must round up when converting ticks to microseconds
1971	 * because we round down when converting the other way.
1972	 */
1973
1974	*rx_adaptive = efx->irq_rx_adaptive;
1975	*rx_usecs = DIV_ROUND_UP(efx->irq_rx_moderation *
1976				 efx->timer_quantum_ns,
1977				 1000);
1978
1979	/* If channels are shared between RX and TX, so is IRQ
1980	 * moderation.  Otherwise, IRQ moderation is the same for all
1981	 * TX channels and is not adaptive.
1982	 */
1983	if (efx->tx_channel_offset == 0)
1984		*tx_usecs = *rx_usecs;
1985	else
1986		*tx_usecs = DIV_ROUND_UP(
1987			efx->channel[efx->tx_channel_offset]->irq_moderation *
1988			efx->timer_quantum_ns,
1989			1000);
1990}
1991
1992/**************************************************************************
1993 *
1994 * Hardware monitor
1995 *
1996 **************************************************************************/
1997
1998/* Run periodically off the general workqueue */
1999static void efx_monitor(struct work_struct *data)
2000{
2001	struct efx_nic *efx = container_of(data, struct efx_nic,
2002					   monitor_work.work);
2003
2004	netif_vdbg(efx, timer, efx->net_dev,
2005		   "hardware monitor executing on CPU %d\n",
2006		   raw_smp_processor_id());
2007	BUG_ON(efx->type->monitor == NULL);
2008
2009	/* If the mac_lock is already held then a port reconfiguration is
2010	 * probably already in progress and will do most of the work of
2011	 * monitor() anyway. */
2012	if (mutex_trylock(&efx->mac_lock)) {
2013		if (efx->port_enabled)
2014			efx->type->monitor(efx);
2015		mutex_unlock(&efx->mac_lock);
2016	}
2017
2018	queue_delayed_work(efx->workqueue, &efx->monitor_work,
2019			   efx_monitor_interval);
2020}
2021
2022/**************************************************************************
2023 *
2024 * ioctls
2025 *
2026 *************************************************************************/
2027
2028/* Net device ioctl
2029 * Context: process, rtnl_lock() held.
2030 */
2031static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
2032{
2033	struct efx_nic *efx = netdev_priv(net_dev);
2034	struct mii_ioctl_data *data = if_mii(ifr);
2035
2036	if (cmd == SIOCSHWTSTAMP)
2037		return efx_ptp_set_ts_config(efx, ifr);
2038	if (cmd == SIOCGHWTSTAMP)
2039		return efx_ptp_get_ts_config(efx, ifr);
2040
2041	/* Convert phy_id from older PRTAD/DEVAD format */
2042	if ((cmd == SIOCGMIIREG || cmd == SIOCSMIIREG) &&
2043	    (data->phy_id & 0xfc00) == 0x0400)
2044		data->phy_id ^= MDIO_PHY_ID_C45 | 0x0400;
2045
2046	return mdio_mii_ioctl(&efx->mdio, data, cmd);
2047}
2048
2049/**************************************************************************
2050 *
2051 * NAPI interface
2052 *
2053 **************************************************************************/
2054
2055static void efx_init_napi_channel(struct efx_channel *channel)
2056{
2057	struct efx_nic *efx = channel->efx;
2058
2059	channel->napi_dev = efx->net_dev;
2060	netif_napi_add(channel->napi_dev, &channel->napi_str,
2061		       efx_poll, napi_weight);
2062	napi_hash_add(&channel->napi_str);
2063	efx_channel_busy_poll_init(channel);
2064}
2065
2066static void efx_init_napi(struct efx_nic *efx)
2067{
2068	struct efx_channel *channel;
2069
2070	efx_for_each_channel(channel, efx)
2071		efx_init_napi_channel(channel);
2072}
2073
2074static void efx_fini_napi_channel(struct efx_channel *channel)
2075{
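	/* napi_dev is only set by efx_init_napi_channel(), so this is safe
	 * to call for a channel whose NAPI instance was never registered.
	 */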
2076	if (channel->napi_dev) {
2077		netif_napi_del(&channel->napi_str);
2078		napi_hash_del(&channel->napi_str);
2079	}
2080	channel->napi_dev = NULL;
2081}
2082
2083static void efx_fini_napi(struct efx_nic *efx)
2084{
2085	struct efx_channel *channel;
2086
2087	efx_for_each_channel(channel, efx)
2088		efx_fini_napi_channel(channel);
2089}
2090
2091/**************************************************************************
2092 *
2093 * Kernel netpoll interface
2094 *
2095 *************************************************************************/
2096
2097#ifdef CONFIG_NET_POLL_CONTROLLER
2098
2099/* Although in the common case interrupts will be disabled, this is not
2100 * guaranteed. However, all our work happens inside the NAPI callback,
2101 * so no locking is required.
2102 */
2103static void efx_netpoll(struct net_device *net_dev)
2104{
2105	struct efx_nic *efx = netdev_priv(net_dev);
2106	struct efx_channel *channel;
2107
2108	efx_for_each_channel(channel, efx)
2109		efx_schedule_channel(channel);
2110}
2111
2112#endif
2113
2114#ifdef CONFIG_NET_RX_BUSY_POLL
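/* ndo_busy_poll handler: process a small batch of events for this channel
 * directly in the caller's context and return the number of RX packets
 * delivered, or an LL_FLUSH_* code if polling is not possible right now.
 */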
2115static int efx_busy_poll(struct napi_struct *napi)
2116{
2117	struct efx_channel *channel =
2118		container_of(napi, struct efx_channel, napi_str);
2119	struct efx_nic *efx = channel->efx;
2120	int budget = 4;
2121	int old_rx_packets, rx_packets;
2122
2123	if (!netif_running(efx->net_dev))
2124		return LL_FLUSH_FAILED;
2125
2126	if (!efx_channel_try_lock_poll(channel))
2127		return LL_FLUSH_BUSY;
2128
2129	old_rx_packets = channel->rx_queue.rx_packets;
2130	efx_process_channel(channel, budget);
2131
2132	rx_packets = channel->rx_queue.rx_packets - old_rx_packets;
2133
2134	/* There is no race condition with NAPI here.
2135	 * NAPI will automatically be rescheduled if it yielded during busy
2136	 * polling, because it was not able to take the lock and thus returned
2137	 * the full budget.
2138	 */
2139	efx_channel_unlock_poll(channel);
2140
2141	return rx_packets;
2142}
2143#endif
2144
2145/**************************************************************************
2146 *
2147 * Kernel net device interface
2148 *
2149 *************************************************************************/
2150
2151/* Context: process, rtnl_lock() held. */
2152int efx_net_open(struct net_device *net_dev)
2153{
2154	struct efx_nic *efx = netdev_priv(net_dev);
2155	int rc;
2156
2157	netif_dbg(efx, ifup, efx->net_dev, "opening device on CPU %d\n",
2158		  raw_smp_processor_id());
2159
2160	rc = efx_check_disabled(efx);
2161	if (rc)
2162		return rc;
2163	if (efx->phy_mode & PHY_MODE_SPECIAL)
2164		return -EBUSY;
2165	if (efx_mcdi_poll_reboot(efx) && efx_reset(efx, RESET_TYPE_ALL))
2166		return -EIO;
2167
2168	/* Notify the kernel of the link state polled during driver load,
2169	 * before the monitor starts running */
2170	efx_link_status_changed(efx);
2171
2172	efx_start_all(efx);
2173	efx_selftest_async_start(efx);
2174	return 0;
2175}
2176
2177/* Context: process, rtnl_lock() held.
2178 * Note that the kernel will ignore our return code; this method
2179 * should really return void.
2180 */
2181int efx_net_stop(struct net_device *net_dev)
2182{
2183	struct efx_nic *efx = netdev_priv(net_dev);
2184
2185	netif_dbg(efx, ifdown, efx->net_dev, "closing on CPU %d\n",
2186		  raw_smp_processor_id());
2187
2188	/* Stop the device and flush all the channels */
2189	efx_stop_all(efx);
2190
2191	return 0;
2192}
2193
2194/* Context: process, dev_base_lock or RTNL held, non-blocking. */
2195static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
2196					       struct rtnl_link_stats64 *stats)
2197{
2198	struct efx_nic *efx = netdev_priv(net_dev);
2199
2200	spin_lock_bh(&efx->stats_lock);
2201	efx->type->update_stats(efx, NULL, stats);
2202	spin_unlock_bh(&efx->stats_lock);
2203
2204	return stats;
2205}
2206
2207/* Context: netif_tx_lock held, BHs disabled. */
2208static void efx_watchdog(struct net_device *net_dev)
2209{
2210	struct efx_nic *efx = netdev_priv(net_dev);
2211
2212	netif_err(efx, tx_err, efx->net_dev,
2213		  "TX stuck with port_enabled=%d: resetting channels\n",
2214		  efx->port_enabled);
2215
2216	efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
2217}
2218
2219
2220/* Context: process, rtnl_lock() held. */
2221static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
2222{
2223	struct efx_nic *efx = netdev_priv(net_dev);
2224	int rc;
2225
2226	rc = efx_check_disabled(efx);
2227	if (rc)
2228		return rc;
2229	if (new_mtu > EFX_MAX_MTU)
2230		return -EINVAL;
2231
2232	netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
2233
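	/* Quiesce and detach the device while the MAC is reconfigured for
	 * the new MTU; everything is restarted and re-attached below.
	 */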
2234	efx_device_detach_sync(efx);
2235	efx_stop_all(efx);
2236
2237	mutex_lock(&efx->mac_lock);
2238	net_dev->mtu = new_mtu;
2239	efx_mac_reconfigure(efx);
2240	mutex_unlock(&efx->mac_lock);
2241
2242	efx_start_all(efx);
2243	netif_device_attach(efx->net_dev);
2244	return 0;
2245}
2246
2247static int efx_set_mac_address(struct net_device *net_dev, void *data)
2248{
2249	struct efx_nic *efx = netdev_priv(net_dev);
2250	struct sockaddr *addr = data;
2251	u8 *new_addr = addr->sa_data;
2252	u8 old_addr[ETH_ALEN];
2253	int rc;
2254
2255	if (!is_valid_ether_addr(new_addr)) {
2256		netif_err(efx, drv, efx->net_dev,
2257			  "invalid ethernet MAC address requested: %pM\n",
2258			  new_addr);
2259		return -EADDRNOTAVAIL;
2260	}
2261
2262	/* save old address */
2263	ether_addr_copy(old_addr, net_dev->dev_addr);
2264	ether_addr_copy(net_dev->dev_addr, new_addr);
2265	if (efx->type->set_mac_address) {
2266		rc = efx->type->set_mac_address(efx);
2267		if (rc) {
2268			ether_addr_copy(net_dev->dev_addr, old_addr);
2269			return rc;
2270		}
2271	}
2272
2273	/* Reconfigure the MAC */
2274	mutex_lock(&efx->mac_lock);
2275	efx_mac_reconfigure(efx);
2276	mutex_unlock(&efx->mac_lock);
2277
2278	return 0;
2279}
2280
2281/* Context: netif_addr_lock held, BHs disabled. */
2282static void efx_set_rx_mode(struct net_device *net_dev)
2283{
2284	struct efx_nic *efx = netdev_priv(net_dev);
2285
2286	if (efx->port_enabled)
2287		queue_work(efx->workqueue, &efx->mac_work);
2288	/* Otherwise efx_start_port() will do this */
2289}
2290
2291static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
2292{
2293	struct efx_nic *efx = netdev_priv(net_dev);
2294
2295	/* If disabling RX n-tuple filtering, clear existing filters */
2296	if (net_dev->features & ~data & NETIF_F_NTUPLE)
2297		return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
2298
2299	return 0;
2300}
2301
2302static const struct net_device_ops efx_netdev_ops = {
2303	.ndo_open		= efx_net_open,
2304	.ndo_stop		= efx_net_stop,
2305	.ndo_get_stats64	= efx_net_stats,
2306	.ndo_tx_timeout		= efx_watchdog,
2307	.ndo_start_xmit		= efx_hard_start_xmit,
2308	.ndo_validate_addr	= eth_validate_addr,
2309	.ndo_do_ioctl		= efx_ioctl,
2310	.ndo_change_mtu		= efx_change_mtu,
2311	.ndo_set_mac_address	= efx_set_mac_address,
2312	.ndo_set_rx_mode	= efx_set_rx_mode,
2313	.ndo_set_features	= efx_set_features,
2314#ifdef CONFIG_SFC_SRIOV
2315	.ndo_set_vf_mac		= efx_sriov_set_vf_mac,
2316	.ndo_set_vf_vlan	= efx_sriov_set_vf_vlan,
2317	.ndo_set_vf_spoofchk	= efx_sriov_set_vf_spoofchk,
2318	.ndo_get_vf_config	= efx_sriov_get_vf_config,
2319	.ndo_set_vf_link_state  = efx_sriov_set_vf_link_state,
2320	.ndo_get_phys_port_id   = efx_sriov_get_phys_port_id,
2321#endif
2322#ifdef CONFIG_NET_POLL_CONTROLLER
2323	.ndo_poll_controller = efx_netpoll,
2324#endif
2325	.ndo_setup_tc		= efx_setup_tc,
2326#ifdef CONFIG_NET_RX_BUSY_POLL
2327	.ndo_busy_poll		= efx_busy_poll,
2328#endif
2329#ifdef CONFIG_RFS_ACCEL
2330	.ndo_rx_flow_steer	= efx_filter_rfs,
2331#endif
2332};
2333
2334static void efx_update_name(struct efx_nic *efx)
2335{
2336	strcpy(efx->name, efx->net_dev->name);
2337	efx_mtd_rename(efx);
2338	efx_set_channel_names(efx);
2339}
2340
2341static int efx_netdev_event(struct notifier_block *this,
2342			    unsigned long event, void *ptr)
2343{
2344	struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
2345
2346	if ((net_dev->netdev_ops == &efx_netdev_ops) &&
2347	    event == NETDEV_CHANGENAME)
2348		efx_update_name(netdev_priv(net_dev));
2349
2350	return NOTIFY_DONE;
2351}
2352
2353static struct notifier_block efx_netdev_notifier = {
2354	.notifier_call = efx_netdev_event,
2355};
2356
2357static ssize_t
2358show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
2359{
2360	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2361	return sprintf(buf, "%d\n", efx->phy_type);
2362}
2363static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
2364
2365#ifdef CONFIG_SFC_MCDI_LOGGING
2366static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
2367			     char *buf)
2368{
2369	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2370	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
2371
2372	return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
2373}
2374static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
2375			    const char *buf, size_t count)
2376{
2377	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
2378	struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
2379	bool enable = count > 0 && *buf != '0';
2380
2381	mcdi->logging_enabled = enable;
2382	return count;
2383}
2384static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
2385#endif
2386
2387static int efx_register_netdev(struct efx_nic *efx)
2388{
2389	struct net_device *net_dev = efx->net_dev;
2390	struct efx_channel *channel;
2391	int rc;
2392
2393	net_dev->watchdog_timeo = 5 * HZ;
2394	net_dev->irq = efx->pci_dev->irq;
2395	net_dev->netdev_ops = &efx_netdev_ops;
2396	if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
2397		net_dev->priv_flags |= IFF_UNICAST_FLT;
2398	net_dev->ethtool_ops = &efx_ethtool_ops;
2399	net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
2400
2401	rtnl_lock();
2402
2403	/* Enable resets to be scheduled and check whether any were
2404	 * already requested.  If so, the NIC is probably hosed so we
2405	 * abort.
2406	 */
2407	efx->state = STATE_READY;
2408	smp_mb(); /* ensure we change state before checking reset_pending */
2409	if (efx->reset_pending) {
2410		netif_err(efx, probe, efx->net_dev,
2411			  "aborting probe due to scheduled reset\n");
2412		rc = -EIO;
2413		goto fail_locked;
2414	}
2415
2416	rc = dev_alloc_name(net_dev, net_dev->name);
2417	if (rc < 0)
2418		goto fail_locked;
2419	efx_update_name(efx);
2420
2421	/* Always start with carrier off; PHY events will detect the link */
2422	netif_carrier_off(net_dev);
2423
2424	rc = register_netdevice(net_dev);
2425	if (rc)
2426		goto fail_locked;
2427
2428	efx_for_each_channel(channel, efx) {
2429		struct efx_tx_queue *tx_queue;
2430		efx_for_each_channel_tx_queue(tx_queue, channel)
2431			efx_init_tx_queue_core_txq(tx_queue);
2432	}
2433
2434	efx_associate(efx);
2435
2436	rtnl_unlock();
2437
2438	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2439	if (rc) {
2440		netif_err(efx, drv, efx->net_dev,
2441			  "failed to init net dev attributes\n");
2442		goto fail_registered;
2443	}
2444#ifdef CONFIG_SFC_MCDI_LOGGING
2445	rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
2446	if (rc) {
2447		netif_err(efx, drv, efx->net_dev,
2448			  "failed to init mcdi_logging attribute\n");
2449		goto fail_attr_mcdi_logging;
2450	}
2451#endif
2452
2453	return 0;
2454
2455#ifdef CONFIG_SFC_MCDI_LOGGING
2456fail_attr_mcdi_logging:
2457	device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2458#endif
2459fail_registered:
2460	rtnl_lock();
2461	efx_dissociate(efx);
2462	unregister_netdevice(net_dev);
2463fail_locked:
2464	efx->state = STATE_UNINIT;
2465	rtnl_unlock();
2466	netif_err(efx, drv, efx->net_dev, "could not register net dev\n");
2467	return rc;
2468}
2469
2470static void efx_unregister_netdev(struct efx_nic *efx)
2471{
2472	if (!efx->net_dev)
2473		return;
2474
2475	BUG_ON(netdev_priv(efx->net_dev) != efx);
2476
2477	if (efx_dev_registered(efx)) {
2478		strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
2479#ifdef CONFIG_SFC_MCDI_LOGGING
2480		device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
2481#endif
2482		device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
2483		unregister_netdev(efx->net_dev);
2484	}
2485}
2486
2487/**************************************************************************
2488 *
2489 * Device reset and suspend
2490 *
2491 **************************************************************************/
2492
2493/* Tears down the entire software state and most of the hardware state
2494 * before reset.  */
2495void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2496{
2497	EFX_ASSERT_RESET_SERIALISED(efx);
2498
2499	if (method == RESET_TYPE_MCDI_TIMEOUT)
2500		efx->type->prepare_flr(efx);
2501
2502	efx_stop_all(efx);
2503	efx_disable_interrupts(efx);
2504
2505	mutex_lock(&efx->mac_lock);
2506	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2507	    method != RESET_TYPE_DATAPATH)
2508		efx->phy_op->fini(efx);
2509	efx->type->fini(efx);
2510}
2511
2512/* This function will always ensure that the locks acquired in
2513 * efx_reset_down() are released. A failure return code indicates
2514 * that we were unable to reinitialise the hardware, and the
2515 * driver should be disabled. If ok is false, then the rx and tx
2516 * engines are not restarted, pending a RESET_TYPE_DISABLE. */
2517int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2518{
2519	int rc;
2520
2521	EFX_ASSERT_RESET_SERIALISED(efx);
2522
2523	if (method == RESET_TYPE_MCDI_TIMEOUT)
2524		efx->type->finish_flr(efx);
2525
2526	/* Ensure that SRAM is initialised even if we're disabling the device */
2527	rc = efx->type->init(efx);
2528	if (rc) {
2529		netif_err(efx, drv, efx->net_dev, "failed to initialise NIC\n");
2530		goto fail;
2531	}
2532
2533	if (!ok)
2534		goto fail;
2535
2536	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2537	    method != RESET_TYPE_DATAPATH) {
2538		rc = efx->phy_op->init(efx);
2539		if (rc)
2540			goto fail;
2541		rc = efx->phy_op->reconfigure(efx);
2542		if (rc && rc != -EPERM)
2543			netif_err(efx, drv, efx->net_dev,
2544				  "could not restore PHY settings\n");
2545	}
2546
2547	rc = efx_enable_interrupts(efx);
2548	if (rc)
2549		goto fail;
2550
2551#ifdef CONFIG_SFC_SRIOV
2552	rc = efx->type->vswitching_restore(efx);
2553	if (rc) /* not fatal; the PF will still work fine */
2554		netif_warn(efx, probe, efx->net_dev,
2555			   "failed to restore vswitching rc=%d;"
2556			   " VFs may not function\n", rc);
2557#endif
2558
2559	down_read(&efx->filter_sem);
2560	efx_restore_filters(efx);
2561	up_read(&efx->filter_sem);
2562	if (efx->type->sriov_reset)
2563		efx->type->sriov_reset(efx);
2564
2565	mutex_unlock(&efx->mac_lock);
2566
2567	efx_start_all(efx);
2568
2569	return 0;
2570
2571fail:
2572	efx->port_initialized = false;
2573
2574	mutex_unlock(&efx->mac_lock);
2575
2576	return rc;
2577}
2578
2579/* Reset the NIC using the specified method.  Note that the reset may
2580 * fail, in which case the card will be left in an unusable state.
2581 *
2582 * Caller must hold the rtnl_lock.
2583 */
2584int efx_reset(struct efx_nic *efx, enum reset_type method)
2585{
2586	int rc, rc2;
2587	bool disabled;
2588
2589	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
2590		   RESET_TYPE(method));
2591
2592	efx_device_detach_sync(efx);
2593	efx_reset_down(efx, method);
2594
2595	rc = efx->type->reset(efx, method);
2596	if (rc) {
2597		netif_err(efx, drv, efx->net_dev, "failed to reset hardware\n");
2598		goto out;
2599	}
2600
2601	/* Clear flags for the scopes we covered.  We assume the NIC and
2602	 * driver are now quiescent so that there is no race here.
2603	 */
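	/* -(1 << (method + 1)) is all-ones with bits 0..method cleared, so
	 * the AND below drops the flag for this reset scope together with
	 * every lesser scope that it covered.
	 */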
2604	if (method < RESET_TYPE_MAX_METHOD)
2605		efx->reset_pending &= -(1 << (method + 1));
2606	else /* it doesn't fit into the well-ordered scope hierarchy */
2607		__clear_bit(method, &efx->reset_pending);
2608
2609	/* Reinitialise bus-mastering, which may have been turned off before
2610	 * the reset was scheduled. This is still appropriate, even in the
2611	 * RESET_TYPE_DISABLE since this driver generally assumes the hardware
2612	 * can respond to requests. */
2613	pci_set_master(efx->pci_dev);
2614
2615out:
2616	/* Leave device stopped if necessary */
2617	disabled = rc ||
2618		method == RESET_TYPE_DISABLE ||
2619		method == RESET_TYPE_RECOVER_OR_DISABLE;
2620	rc2 = efx_reset_up(efx, method, !disabled);
2621	if (rc2) {
2622		disabled = true;
2623		if (!rc)
2624			rc = rc2;
2625	}
2626
2627	if (disabled) {
2628		dev_close(efx->net_dev);
2629		netif_err(efx, drv, efx->net_dev, "has been disabled\n");
2630		efx->state = STATE_DISABLED;
2631	} else {
2632		netif_dbg(efx, drv, efx->net_dev, "reset complete\n");
2633		netif_device_attach(efx->net_dev);
2634	}
2635	return rc;
2636}
2637
2638/* Try recovery mechanisms.
2639 * For now only EEH is supported.
2640 * Returns 0 if the recovery mechanisms are unsuccessful.
2641 * Returns a non-zero value otherwise.
2642 */
2643int efx_try_recovery(struct efx_nic *efx)
2644{
2645#ifdef CONFIG_EEH
2646	/* A PCI error can occur and not be seen by EEH because nothing
2647	 * happens on the PCI bus. In this case the driver may fail and
2648	 * schedule a 'recover or reset', leading to this recovery handler.
2649	 * Manually call the eeh failure check function.
2650	 */
2651	struct eeh_dev *eehdev = pci_dev_to_eeh_dev(efx->pci_dev);
2652	if (eeh_dev_check_failure(eehdev)) {
2653		/* The EEH mechanisms will handle the error and reset the
2654		 * device if necessary.
2655		 */
2656		return 1;
2657	}
2658#endif
2659	return 0;
2660}
2661
2662static void efx_wait_for_bist_end(struct efx_nic *efx)
2663{
2664	int i;
2665
2666	for (i = 0; i < BIST_WAIT_DELAY_COUNT; ++i) {
2667		if (efx_mcdi_poll_reboot(efx))
2668			goto out;
2669		msleep(BIST_WAIT_DELAY_MS);
2670	}
2671
2672	netif_err(efx, drv, efx->net_dev, "Warning: No MC reboot after BIST mode\n");
2673out:
2674	/* Either way unset the BIST flag. If we found no reboot we probably
2675	 * won't recover, but we should try.
2676	 */
2677	efx->mc_bist_for_other_fn = false;
2678}
2679
2680/* The worker thread exists so that code that cannot sleep can
2681 * schedule a reset for later.
2682 */
2683static void efx_reset_work(struct work_struct *data)
2684{
2685	struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
2686	unsigned long pending;
2687	enum reset_type method;
2688
2689	pending = ACCESS_ONCE(efx->reset_pending);
2690	method = fls(pending) - 1;
2691
2692	if (method == RESET_TYPE_MC_BIST)
2693		efx_wait_for_bist_end(efx);
2694
2695	if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2696	     method == RESET_TYPE_RECOVER_OR_ALL) &&
2697	    efx_try_recovery(efx))
2698		return;
2699
2700	if (!pending)
2701		return;
2702
2703	rtnl_lock();
2704
2705	/* We checked the state in efx_schedule_reset() but it may
2706	 * have changed by now.  Now that we have the RTNL lock,
2707	 * it cannot change again.
2708	 */
2709	if (efx->state == STATE_READY)
2710		(void)efx_reset(efx, method);
2711
2712	rtnl_unlock();
2713}
2714
2715void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2716{
2717	enum reset_type method;
2718
2719	if (efx->state == STATE_RECOVERY) {
2720		netif_dbg(efx, drv, efx->net_dev,
2721			  "recovering: skip scheduling %s reset\n",
2722			  RESET_TYPE(type));
2723		return;
2724	}
2725
2726	switch (type) {
2727	case RESET_TYPE_INVISIBLE:
2728	case RESET_TYPE_ALL:
2729	case RESET_TYPE_RECOVER_OR_ALL:
2730	case RESET_TYPE_WORLD:
2731	case RESET_TYPE_DISABLE:
2732	case RESET_TYPE_RECOVER_OR_DISABLE:
2733	case RESET_TYPE_DATAPATH:
2734	case RESET_TYPE_MC_BIST:
2735	case RESET_TYPE_MCDI_TIMEOUT:
2736		method = type;
2737		netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2738			  RESET_TYPE(method));
2739		break;
2740	default:
2741		method = efx->type->map_reset_reason(type);
2742		netif_dbg(efx, drv, efx->net_dev,
2743			  "scheduling %s reset for %s\n",
2744			  RESET_TYPE(method), RESET_TYPE(type));
2745		break;
2746	}
2747
2748	set_bit(method, &efx->reset_pending);
2749	smp_mb(); /* ensure we change reset_pending before checking state */
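	/* This barrier pairs with the one in efx_register_netdev(): between
	 * the two, either that path sees reset_pending and aborts the probe,
	 * or this path sees STATE_READY and queues the reset, so a reset
	 * requested around probe time cannot be lost.
	 */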
2750
2751	/* If we're not READY then just leave the flags set as the cue
2752	 * to abort probing or reschedule the reset later.
2753	 */
2754	if (ACCESS_ONCE(efx->state) != STATE_READY)
2755		return;
2756
2757	/* efx_process_channel() will no longer read events once a
2758	 * reset is scheduled, so switch back to polled MCDI completions. */
2759	efx_mcdi_mode_poll(efx);
2760
2761	queue_work(reset_workqueue, &efx->reset_work);
2762}
2763
2764/**************************************************************************
2765 *
2766 * List of NICs we support
2767 *
2768 **************************************************************************/
2769
2770/* PCI device ID table */
2771static const struct pci_device_id efx_pci_table[] = {
2772	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2773		    PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0),
2774	 .driver_data = (unsigned long) &falcon_a1_nic_type},
2775	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
2776		    PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
2777	 .driver_data = (unsigned long) &falcon_b0_nic_type},
2778	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803),	/* SFC9020 */
2779	 .driver_data = (unsigned long) &siena_a0_nic_type},
2780	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813),	/* SFL9021 */
2781	 .driver_data = (unsigned long) &siena_a0_nic_type},
2782	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),  /* SFC9120 PF */
2783	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2784	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x1903),  /* SFC9120 VF */
2785	 .driver_data = (unsigned long) &efx_hunt_a0_vf_nic_type},
2786	{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),  /* SFC9140 PF */
2787	 .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
2788	{0}			/* end of list */
2789};
2790
2791/**************************************************************************
2792 *
2793 * Dummy PHY/MAC operations
2794 *
2795 * Can be used for some unimplemented operations
2796 * Needed so all function pointers are valid and do not have to be tested
2797 * before use
2798 *
2799 **************************************************************************/
2800int efx_port_dummy_op_int(struct efx_nic *efx)
2801{
2802	return 0;
2803}
2804void efx_port_dummy_op_void(struct efx_nic *efx) {}
2805
2806static bool efx_port_dummy_op_poll(struct efx_nic *efx)
2807{
2808	return false;
2809}
2810
2811static const struct efx_phy_operations efx_dummy_phy_operations = {
2812	.init		 = efx_port_dummy_op_int,
2813	.reconfigure	 = efx_port_dummy_op_int,
2814	.poll		 = efx_port_dummy_op_poll,
2815	.fini		 = efx_port_dummy_op_void,
2816};
2817
2818/**************************************************************************
2819 *
2820 * Data housekeeping
2821 *
2822 **************************************************************************/
2823
2824/* This zeroes out and then fills in the invariants in a struct
2825 * efx_nic (including all sub-structures).
2826 */
2827static int efx_init_struct(struct efx_nic *efx,
2828			   struct pci_dev *pci_dev, struct net_device *net_dev)
2829{
2830	int i;
2831
2832	/* Initialise common structures */
2833	INIT_LIST_HEAD(&efx->node);
2834	INIT_LIST_HEAD(&efx->secondary_list);
2835	spin_lock_init(&efx->biu_lock);
2836#ifdef CONFIG_SFC_MTD
2837	INIT_LIST_HEAD(&efx->mtd_list);
2838#endif
2839	INIT_WORK(&efx->reset_work, efx_reset_work);
2840	INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
2841	INIT_DELAYED_WORK(&efx->selftest_work, efx_selftest_async_work);
2842	efx->pci_dev = pci_dev;
2843	efx->msg_enable = debug;
2844	efx->state = STATE_UNINIT;
2845	strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
2846
2847	efx->net_dev = net_dev;
2848	efx->rx_prefix_size = efx->type->rx_prefix_size;
2849	efx->rx_ip_align =
2850		NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
2851	efx->rx_packet_hash_offset =
2852		efx->type->rx_hash_offset - efx->type->rx_prefix_size;
2853	efx->rx_packet_ts_offset =
2854		efx->type->rx_ts_offset - efx->type->rx_prefix_size;
2855	spin_lock_init(&efx->stats_lock);
2856	mutex_init(&efx->mac_lock);
2857	efx->phy_op = &efx_dummy_phy_operations;
2858	efx->mdio.dev = net_dev;
2859	INIT_WORK(&efx->mac_work, efx_mac_work);
2860	init_waitqueue_head(&efx->flush_wq);
2861
2862	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
2863		efx->channel[i] = efx_alloc_channel(efx, i, NULL);
2864		if (!efx->channel[i])
2865			goto fail;
2866		efx->msi_context[i].efx = efx;
2867		efx->msi_context[i].index = i;
2868	}
2869
2870	/* Higher numbered interrupt modes are less capable! */
2871	efx->interrupt_mode = max(efx->type->max_interrupt_mode,
2872				  interrupt_mode);
2873
2874	/* Would be good to use the net_dev name, but we're too early */
2875	snprintf(efx->workqueue_name, sizeof(efx->workqueue_name), "sfc%s",
2876		 pci_name(pci_dev));
2877	efx->workqueue = create_singlethread_workqueue(efx->workqueue_name);
2878	if (!efx->workqueue)
2879		goto fail;
2880
2881	return 0;
2882
2883fail:
2884	efx_fini_struct(efx);
2885	return -ENOMEM;
2886}
2887
2888static void efx_fini_struct(struct efx_nic *efx)
2889{
2890	int i;
2891
2892	for (i = 0; i < EFX_MAX_CHANNELS; i++)
2893		kfree(efx->channel[i]);
2894
2895	kfree(efx->vpd_sn);
2896
2897	if (efx->workqueue) {
2898		destroy_workqueue(efx->workqueue);
2899		efx->workqueue = NULL;
2900	}
2901}
2902
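/* Fill in the statistics that are maintained purely in software: the
 * no-descriptor truncation counts summed across channels, and the count
 * of packets dropped because no skb could be allocated.
 */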
2903void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
2904{
2905	u64 n_rx_nodesc_trunc = 0;
2906	struct efx_channel *channel;
2907
2908	efx_for_each_channel(channel, efx)
2909		n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
2910	stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
2911	stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
2912}
2913
2914/**************************************************************************
2915 *
2916 * PCI interface
2917 *
2918 **************************************************************************/
2919
2920/* Main body of final NIC shutdown code.  This is called at module
2921 * unload (or hotplug removal) and from the probe error path.
2922 */
2923static void efx_pci_remove_main(struct efx_nic *efx)
2924{
2925	/* Flush reset_work. It can no longer be scheduled since we
2926	 * are not READY.
2927	 */
2928	BUG_ON(efx->state == STATE_READY);
2929	cancel_work_sync(&efx->reset_work);
2930
2931	efx_disable_interrupts(efx);
2932	efx_nic_fini_interrupt(efx);
2933	efx_fini_port(efx);
2934	efx->type->fini(efx);
2935	efx_fini_napi(efx);
2936	efx_remove_all(efx);
2937}
2938
2939/* Final NIC shutdown
2940 * This is called only at module unload (or hotplug removal).  A PF can call
2941 * this on its VFs to ensure they are unbound first.
2942 */
2943static void efx_pci_remove(struct pci_dev *pci_dev)
2944{
2945	struct efx_nic *efx;
2946
2947	efx = pci_get_drvdata(pci_dev);
2948	if (!efx)
2949		return;
2950
2951	/* Mark the NIC as fini, then stop the interface */
2952	rtnl_lock();
2953	efx_dissociate(efx);
2954	dev_close(efx->net_dev);
2955	efx_disable_interrupts(efx);
2956	efx->state = STATE_UNINIT;
2957	rtnl_unlock();
2958
2959	if (efx->type->sriov_fini)
2960		efx->type->sriov_fini(efx);
2961
2962	efx_unregister_netdev(efx);
2963
2964	efx_mtd_remove(efx);
2965
2966	efx_pci_remove_main(efx);
2967
2968	efx_fini_io(efx);
2969	netif_dbg(efx, drv, efx->net_dev, "shutdown successful\n");
2970
2971	efx_fini_struct(efx);
2972	free_netdev(efx->net_dev);
2973
2974	pci_disable_pcie_error_reporting(pci_dev);
2975}
2976
2977/* NIC VPD information
2978 * Called during probe to display the part number of the
2979 * installed NIC.  VPD is potentially very large but this should
2980 * always appear within the first 512 bytes.
2981 */
2982#define SFC_VPD_LEN 512
2983static void efx_probe_vpd_strings(struct efx_nic *efx)
2984{
2985	struct pci_dev *dev = efx->pci_dev;
2986	char vpd_data[SFC_VPD_LEN];
2987	ssize_t vpd_size;
2988	int ro_start, ro_size, i, j;
2989
2990	/* Get the vpd data from the device */
2991	vpd_size = pci_read_vpd(dev, 0, sizeof(vpd_data), vpd_data);
2992	if (vpd_size <= 0) {
2993		netif_err(efx, drv, efx->net_dev, "Unable to read VPD\n");
2994		return;
2995	}
2996
2997	/* Get the Read only section */
2998	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size, PCI_VPD_LRDT_RO_DATA);
2999	if (ro_start < 0) {
3000		netif_err(efx, drv, efx->net_dev, "VPD Read-only not found\n");
3001		return;
3002	}
3003
3004	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
3005	j = ro_size;
3006	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
3007	if (i + j > vpd_size)
3008		j = vpd_size - i;
3009
3010	/* Get the Part number */
3011	i = pci_vpd_find_info_keyword(vpd_data, i, j, "PN");
3012	if (i < 0) {
3013		netif_err(efx, drv, efx->net_dev, "Part number not found\n");
3014		return;
3015	}
3016
3017	j = pci_vpd_info_field_size(&vpd_data[i]);
3018	i += PCI_VPD_INFO_FLD_HDR_SIZE;
3019	if (i + j > vpd_size) {
3020		netif_err(efx, drv, efx->net_dev, "Incomplete part number\n");
3021		return;
3022	}
3023
3024	netif_info(efx, drv, efx->net_dev,
3025		   "Part Number : %.*s\n", j, &vpd_data[i]);
3026
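	/* Rewind to the start of the read-only section and search it again,
	 * this time for the serial number ("SN") keyword.
	 */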
3027	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
3028	j = ro_size;
3029	i = pci_vpd_find_info_keyword(vpd_data, i, j, "SN");
3030	if (i < 0) {
3031		netif_err(efx, drv, efx->net_dev, "Serial number not found\n");
3032		return;
3033	}
3034
3035	j = pci_vpd_info_field_size(&vpd_data[i]);
3036	i += PCI_VPD_INFO_FLD_HDR_SIZE;
3037	if (i + j > vpd_size) {
3038		netif_err(efx, drv, efx->net_dev, "Incomplete serial number\n");
3039		return;
3040	}
3041
3042	efx->vpd_sn = kmalloc(j + 1, GFP_KERNEL);
3043	if (!efx->vpd_sn)
3044		return;
3045
3046	snprintf(efx->vpd_sn, j + 1, "%.*s", j, &vpd_data[i]);
3047}
3048
3049
3050/* Main body of NIC initialisation
3051 * This is called at module load (or hotplug insertion, theoretically).
3052 */
3053static int efx_pci_probe_main(struct efx_nic *efx)
3054{
3055	int rc;
3056
3057	/* Do start-of-day initialisation */
3058	rc = efx_probe_all(efx);
3059	if (rc)
3060		goto fail1;
3061
3062	efx_init_napi(efx);
3063
3064	rc = efx->type->init(efx);
3065	if (rc) {
3066		netif_err(efx, probe, efx->net_dev,
3067			  "failed to initialise NIC\n");
3068		goto fail3;
3069	}
3070
3071	rc = efx_init_port(efx);
3072	if (rc) {
3073		netif_err(efx, probe, efx->net_dev,
3074			  "failed to initialise port\n");
3075		goto fail4;
3076	}
3077
3078	rc = efx_nic_init_interrupt(efx);
3079	if (rc)
3080		goto fail5;
3081	rc = efx_enable_interrupts(efx);
3082	if (rc)
3083		goto fail6;
3084
3085	return 0;
3086
3087 fail6:
3088	efx_nic_fini_interrupt(efx);
3089 fail5:
3090	efx_fini_port(efx);
3091 fail4:
3092	efx->type->fini(efx);
3093 fail3:
3094	efx_fini_napi(efx);
3095	efx_remove_all(efx);
3096 fail1:
3097	return rc;
3098}
3099
3100/* NIC initialisation
3101 *
3102 * This is called at module load (or hotplug insertion,
3103 * theoretically).  It sets up PCI mappings, resets the NIC,
3104 * sets up and registers the network device with the kernel and hooks
3105 * the interrupt service routine.  It does not prepare the device for
3106 * transmission; this is left to the first time one of the network
3107 * interfaces is brought up (i.e. efx_net_open).
3108 */
3109static int efx_pci_probe(struct pci_dev *pci_dev,
3110			 const struct pci_device_id *entry)
3111{
3112	struct net_device *net_dev;
3113	struct efx_nic *efx;
3114	int rc;
3115
3116	/* Allocate and initialise a struct net_device and struct efx_nic */
3117	net_dev = alloc_etherdev_mqs(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES,
3118				     EFX_MAX_RX_QUEUES);
3119	if (!net_dev)
3120		return -ENOMEM;
3121	efx = netdev_priv(net_dev);
3122	efx->type = (const struct efx_nic_type *) entry->driver_data;
3123	net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
3124			      NETIF_F_HIGHDMA | NETIF_F_TSO |
3125			      NETIF_F_RXCSUM);
3126	if (efx->type->offload_features & NETIF_F_V6_CSUM)
3127		net_dev->features |= NETIF_F_TSO6;
3128	/* Mask for features that also apply to VLAN devices */
3129	net_dev->vlan_features |= (NETIF_F_ALL_CSUM | NETIF_F_SG |
3130				   NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
3131				   NETIF_F_RXCSUM);
3132	/* All offloads except NETIF_F_HIGHDMA can be toggled by the user */
3133	net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
3134	pci_set_drvdata(pci_dev, efx);
3135	SET_NETDEV_DEV(net_dev, &pci_dev->dev);
3136	rc = efx_init_struct(efx, pci_dev, net_dev);
3137	if (rc)
3138		goto fail1;
3139
3140	netif_info(efx, probe, efx->net_dev,
3141		   "Solarflare NIC detected\n");
3142
3143	if (!efx->type->is_vf)
3144		efx_probe_vpd_strings(efx);
3145
3146	/* Set up basic I/O (BAR mappings etc) */
3147	rc = efx_init_io(efx);
3148	if (rc)
3149		goto fail2;
3150
3151	rc = efx_pci_probe_main(efx);
3152	if (rc)
3153		goto fail3;
3154
3155	rc = efx_register_netdev(efx);
3156	if (rc)
3157		goto fail4;
3158
3159	if (efx->type->sriov_init) {
3160		rc = efx->type->sriov_init(efx);
3161		if (rc)
3162			netif_err(efx, probe, efx->net_dev,
3163				  "SR-IOV can't be enabled rc %d\n", rc);
3164	}
3165
3166	netif_dbg(efx, probe, efx->net_dev, "initialisation successful\n");
3167
3168	/* Try to create MTDs, but allow this to fail */
3169	rtnl_lock();
3170	rc = efx_mtd_probe(efx);
3171	rtnl_unlock();
3172	if (rc)
3173		netif_warn(efx, probe, efx->net_dev,
3174			   "failed to create MTDs (%d)\n", rc);
3175
3176	rc = pci_enable_pcie_error_reporting(pci_dev);
3177	if (rc && rc != -EINVAL)
3178		netif_warn(efx, probe, efx->net_dev,
3179			   "pci_enable_pcie_error_reporting failed (%d)\n", rc);
3180
3181	return 0;
3182
3183 fail4:
3184	efx_pci_remove_main(efx);
3185 fail3:
3186	efx_fini_io(efx);
3187 fail2:
3188	efx_fini_struct(efx);
3189 fail1:
3190	WARN_ON(rc > 0);
3191	netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc);
3192	free_netdev(net_dev);
3193	return rc;
3194}
3195
3196/* efx_pci_sriov_configure returns the actual number of Virtual Functions
3197 * enabled on success
3198 */
3199#ifdef CONFIG_SFC_SRIOV
3200static int efx_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
3201{
3202	int rc;
3203	struct efx_nic *efx = pci_get_drvdata(dev);
3204
3205	if (efx->type->sriov_configure) {
3206		rc = efx->type->sriov_configure(efx, num_vfs);
3207		if (rc)
3208			return rc;
3209		else
3210			return num_vfs;
3211	} else
3212		return -EOPNOTSUPP;
3213}
3214#endif
3215
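/* Quiesce the device for a PM freeze: mark it uninitialised, detach the
 * net device, stop the data path and disable interrupts.  efx_pm_thaw()
 * undoes all of this.
 */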
3216static int efx_pm_freeze(struct device *dev)
3217{
3218	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
3219
3220	rtnl_lock();
3221
3222	if (efx->state != STATE_DISABLED) {
3223		efx->state = STATE_UNINIT;
3224
3225		efx_device_detach_sync(efx);
3226
3227		efx_stop_all(efx);
3228		efx_disable_interrupts(efx);
3229	}
3230
3231	rtnl_unlock();
3232
3233	return 0;
3234}
3235
3236static int efx_pm_thaw(struct device *dev)
3237{
3238	int rc;
3239	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
3240
3241	rtnl_lock();
3242
3243	if (efx->state != STATE_DISABLED) {
3244		rc = efx_enable_interrupts(efx);
3245		if (rc)
3246			goto fail;
3247
3248		mutex_lock(&efx->mac_lock);
3249		efx->phy_op->reconfigure(efx);
3250		mutex_unlock(&efx->mac_lock);
3251
3252		efx_start_all(efx);
3253
3254		netif_device_attach(efx->net_dev);
3255
3256		efx->state = STATE_READY;
3257
3258		efx->type->resume_wol(efx);
3259	}
3260
3261	rtnl_unlock();
3262
3263	/* Reschedule any quenched resets scheduled during efx_pm_freeze() */
3264	queue_work(reset_workqueue, &efx->reset_work);
3265
3266	return 0;
3267
3268fail:
3269	rtnl_unlock();
3270
3271	return rc;
3272}
3273
3274static int efx_pm_poweroff(struct device *dev)
3275{
3276	struct pci_dev *pci_dev = to_pci_dev(dev);
3277	struct efx_nic *efx = pci_get_drvdata(pci_dev);
3278
3279	efx->type->fini(efx);
3280
3281	efx->reset_pending = 0;
3282
3283	pci_save_state(pci_dev);
3284	return pci_set_power_state(pci_dev, PCI_D3hot);
3285}
3286
3287/* Used for both resume and restore */
3288static int efx_pm_resume(struct device *dev)
3289{
3290	struct pci_dev *pci_dev = to_pci_dev(dev);
3291	struct efx_nic *efx = pci_get_drvdata(pci_dev);
3292	int rc;
3293
3294	rc = pci_set_power_state(pci_dev, PCI_D0);
3295	if (rc)
3296		return rc;
3297	pci_restore_state(pci_dev);
3298	rc = pci_enable_device(pci_dev);
3299	if (rc)
3300		return rc;
3301	pci_set_master(efx->pci_dev);
3302	rc = efx->type->reset(efx, RESET_TYPE_ALL);
3303	if (rc)
3304		return rc;
3305	rc = efx->type->init(efx);
3306	if (rc)
3307		return rc;
3308	rc = efx_pm_thaw(dev);
3309	return rc;
3310}
3311
3312static int efx_pm_suspend(struct device *dev)
3313{
3314	int rc;
3315
3316	efx_pm_freeze(dev);
3317	rc = efx_pm_poweroff(dev);
3318	if (rc)
3319		efx_pm_resume(dev);
3320	return rc;
3321}
3322
3323static const struct dev_pm_ops efx_pm_ops = {
3324	.suspend	= efx_pm_suspend,
3325	.resume		= efx_pm_resume,
3326	.freeze		= efx_pm_freeze,
3327	.thaw		= efx_pm_thaw,
3328	.poweroff	= efx_pm_poweroff,
3329	.restore	= efx_pm_resume,
3330};
3331
3332/* A PCI error affecting this device was detected.
3333 * At this point MMIO and DMA may be disabled.
3334 * Stop the software path and request a slot reset.
3335 */
3336static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
3337					      enum pci_channel_state state)
3338{
3339	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3340	struct efx_nic *efx = pci_get_drvdata(pdev);
3341
3342	if (state == pci_channel_io_perm_failure)
3343		return PCI_ERS_RESULT_DISCONNECT;
3344
3345	rtnl_lock();
3346
3347	if (efx->state != STATE_DISABLED) {
3348		efx->state = STATE_RECOVERY;
3349		efx->reset_pending = 0;
3350
3351		efx_device_detach_sync(efx);
3352
3353		efx_stop_all(efx);
3354		efx_disable_interrupts(efx);
3355
3356		status = PCI_ERS_RESULT_NEED_RESET;
3357	} else {
3358		/* If the interface is disabled we don't want to do anything
3359		 * with it.
3360		 */
3361		status = PCI_ERS_RESULT_RECOVERED;
3362	}
3363
3364	rtnl_unlock();
3365
3366	pci_disable_device(pdev);
3367
3368	return status;
3369}
3370
3371/* Fake a successful reset, which will be performed later in efx_io_resume. */
3372static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
3373{
3374	struct efx_nic *efx = pci_get_drvdata(pdev);
3375	pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
3376	int rc;
3377
3378	if (pci_enable_device(pdev)) {
3379		netif_err(efx, hw, efx->net_dev,
3380			  "Cannot re-enable PCI device after reset.\n");
3381		status =  PCI_ERS_RESULT_DISCONNECT;
3382	}
3383
3384	rc = pci_cleanup_aer_uncorrect_error_status(pdev);
3385	if (rc) {
3386		netif_err(efx, hw, efx->net_dev,
3387		"pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
3388		/* Non-fatal error. Continue. */
3389	}
3390
3391	return status;
3392}
3393
3394/* Perform the actual reset and resume I/O operations. */
3395static void efx_io_resume(struct pci_dev *pdev)
3396{
3397	struct efx_nic *efx = pci_get_drvdata(pdev);
3398	int rc;
3399
3400	rtnl_lock();
3401
3402	if (efx->state == STATE_DISABLED)
3403		goto out;
3404
3405	rc = efx_reset(efx, RESET_TYPE_ALL);
3406	if (rc) {
3407		netif_err(efx, hw, efx->net_dev,
3408			  "efx_reset failed after PCI error (%d)\n", rc);
3409	} else {
3410		efx->state = STATE_READY;
3411		netif_dbg(efx, hw, efx->net_dev,
3412			  "Done resetting and resuming IO after PCI error.\n");
3413	}
3414
3415out:
3416	rtnl_unlock();
3417}
3418
3419/* For simplicity and reliability, we always require a slot reset and try to
3420 * reset the hardware when a pci error affecting the device is detected.
3421 * We leave both the link_reset and mmio_enabled callback unimplemented:
3422 * with our request for slot reset the mmio_enabled callback will never be
3423 * called, and the link_reset callback is not used by AER or EEH mechanisms.
3424 */
3425static const struct pci_error_handlers efx_err_handlers = {
3426	.error_detected = efx_io_error_detected,
3427	.slot_reset	= efx_io_slot_reset,
3428	.resume		= efx_io_resume,
3429};
3430
3431static struct pci_driver efx_pci_driver = {
3432	.name		= KBUILD_MODNAME,
3433	.id_table	= efx_pci_table,
3434	.probe		= efx_pci_probe,
3435	.remove		= efx_pci_remove,
3436	.driver.pm	= &efx_pm_ops,
3437	.err_handler	= &efx_err_handlers,
3438#ifdef CONFIG_SFC_SRIOV
3439	.sriov_configure = efx_pci_sriov_configure,
3440#endif
3441};
3442
3443/**************************************************************************
3444 *
3445 * Kernel module interface
3446 *
3447 *************************************************************************/
3448
3449module_param(interrupt_mode, uint, 0444);
3450MODULE_PARM_DESC(interrupt_mode,
3451		 "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
3452
3453static int __init efx_init_module(void)
3454{
3455	int rc;
3456
3457	printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
3458
3459	rc = register_netdevice_notifier(&efx_netdev_notifier);
3460	if (rc)
3461		goto err_notifier;
3462
3463#ifdef CONFIG_SFC_SRIOV
3464	rc = efx_init_sriov();
3465	if (rc)
3466		goto err_sriov;
3467#endif
3468
3469	reset_workqueue = create_singlethread_workqueue("sfc_reset");
3470	if (!reset_workqueue) {
3471		rc = -ENOMEM;
3472		goto err_reset;
3473	}
3474
3475	rc = pci_register_driver(&efx_pci_driver);
3476	if (rc < 0)
3477		goto err_pci;
3478
3479	return 0;
3480
3481 err_pci:
3482	destroy_workqueue(reset_workqueue);
3483 err_reset:
3484#ifdef CONFIG_SFC_SRIOV
3485	efx_fini_sriov();
3486 err_sriov:
3487#endif
3488	unregister_netdevice_notifier(&efx_netdev_notifier);
3489 err_notifier:
3490	return rc;
3491}
3492
3493static void __exit efx_exit_module(void)
3494{
3495	printk(KERN_INFO "Solarflare NET driver unloading\n");
3496
3497	pci_unregister_driver(&efx_pci_driver);
3498	destroy_workqueue(reset_workqueue);
3499#ifdef CONFIG_SFC_SRIOV
3500	efx_fini_sriov();
3501#endif
3502	unregister_netdevice_notifier(&efx_netdev_notifier);
3504}
3505
3506module_init(efx_init_module);
3507module_exit(efx_exit_module);
3508
3509MODULE_AUTHOR("Solarflare Communications and "
3510	      "Michael Brown <mbrown@fensystems.co.uk>");
3511MODULE_DESCRIPTION("Solarflare network driver");
3512MODULE_LICENSE("GPL");
3513MODULE_DEVICE_TABLE(pci, efx_pci_table);
3514