1/* Broadcom NetXtreme-C/E network driver.
2 *
3 * Copyright (c) 2014-2015 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 */
9
10#include <linux/module.h>
11
12#include <linux/stringify.h>
13#include <linux/kernel.h>
14#include <linux/timer.h>
15#include <linux/errno.h>
16#include <linux/ioport.h>
17#include <linux/slab.h>
18#include <linux/vmalloc.h>
19#include <linux/interrupt.h>
20#include <linux/pci.h>
21#include <linux/netdevice.h>
22#include <linux/etherdevice.h>
23#include <linux/skbuff.h>
24#include <linux/dma-mapping.h>
25#include <linux/bitops.h>
26#include <linux/io.h>
27#include <linux/irq.h>
28#include <linux/delay.h>
29#include <asm/byteorder.h>
30#include <asm/page.h>
31#include <linux/time.h>
32#include <linux/mii.h>
33#include <linux/if.h>
34#include <linux/if_vlan.h>
35#include <net/ip.h>
36#include <net/tcp.h>
37#include <net/udp.h>
38#include <net/checksum.h>
39#include <net/ip6_checksum.h>
40#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
41#include <net/vxlan.h>
42#endif
43#ifdef CONFIG_NET_RX_BUSY_POLL
44#include <net/busy_poll.h>
45#endif
46#include <linux/workqueue.h>
47#include <linux/prefetch.h>
48#include <linux/cache.h>
49#include <linux/log2.h>
50#include <linux/aer.h>
51#include <linux/bitmap.h>
52#include <linux/cpu_rmap.h>
53
54#include "bnxt_hsi.h"
55#include "bnxt.h"
56#include "bnxt_sriov.h"
57#include "bnxt_ethtool.h"
58
59#define BNXT_TX_TIMEOUT		(5 * HZ)
60
61static const char version[] =
62	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";
63
64MODULE_LICENSE("GPL");
65MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
66MODULE_VERSION(DRV_MODULE_VERSION);
67
68#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
69#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
70#define BNXT_RX_COPY_THRESH 256
71
72#define BNXT_TX_PUSH_THRESH 92
73
74enum board_idx {
75	BCM57302,
76	BCM57304,
77	BCM57404,
78	BCM57406,
79	BCM57304_VF,
80	BCM57404_VF,
81};
82
83/* indexed by enum above */
84static const struct {
85	char *name;
86} board_info[] = {
87	{ "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
88	{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
89	{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
90	{ "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" },
91	{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
92	{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
93};
94
95static const struct pci_device_id bnxt_pci_tbl[] = {
96	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
97	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
98	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
99	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
100#ifdef CONFIG_BNXT_SRIOV
101	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
102	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
103#endif
104	{ 0 }
105};
106
107MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);
108
109static const u16 bnxt_vf_req_snif[] = {
110	HWRM_FUNC_CFG,
111	HWRM_PORT_PHY_QCFG,
112	HWRM_CFA_L2_FILTER_ALLOC,
113};
114
115static bool bnxt_vf_pciid(enum board_idx idx)
116{
117	return (idx == BCM57304_VF || idx == BCM57404_VF);
118}
119
120#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
121#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
122#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)
123
124#define BNXT_CP_DB_REARM(db, raw_cons)					\
125		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)
126
127#define BNXT_CP_DB(db, raw_cons)					\
128		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)
129
130#define BNXT_CP_DB_IRQ_DIS(db)						\
131		writel(DB_CP_IRQ_DIS_FLAGS, db)
132
133static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
134{
135	/* Tell compiler to fetch tx indices from memory. */
136	barrier();
137
138	return bp->tx_ring_size -
139		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
140}
141
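/* Indexed by (packet length >> 9) to select the TX BD length hint flags. */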
142static const u16 bnxt_lhint_arr[] = {
143	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
144	TX_BD_FLAGS_LHINT_512_TO_1023,
145	TX_BD_FLAGS_LHINT_1024_TO_2047,
146	TX_BD_FLAGS_LHINT_1024_TO_2047,
147	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
148	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
149	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
150	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
151	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
152	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
153	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
154	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
155	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
156	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
157	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
158	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
159	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
160	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
161	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
162};
163
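/* Main transmit routine.  Small packets that fit within the push threshold
 * are written inline through the TX push doorbell buffer when the ring is
 * empty; all other packets are DMA-mapped and posted as long TX BDs.
 */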
164static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
165{
166	struct bnxt *bp = netdev_priv(dev);
167	struct tx_bd *txbd;
168	struct tx_bd_ext *txbd1;
169	struct netdev_queue *txq;
170	int i;
171	dma_addr_t mapping;
172	unsigned int length, pad = 0;
173	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
174	u16 prod, last_frag;
175	struct pci_dev *pdev = bp->pdev;
176	struct bnxt_napi *bnapi;
177	struct bnxt_tx_ring_info *txr;
178	struct bnxt_sw_tx_bd *tx_buf;
179
180	i = skb_get_queue_mapping(skb);
181	if (unlikely(i >= bp->tx_nr_rings)) {
182		dev_kfree_skb_any(skb);
183		return NETDEV_TX_OK;
184	}
185
186	bnapi = bp->bnapi[i];
187	txr = &bnapi->tx_ring;
188	txq = netdev_get_tx_queue(dev, i);
189	prod = txr->tx_prod;
190
191	free_size = bnxt_tx_avail(bp, txr);
192	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
193		netif_tx_stop_queue(txq);
194		return NETDEV_TX_BUSY;
195	}
196
197	length = skb->len;
198	len = skb_headlen(skb);
199	last_frag = skb_shinfo(skb)->nr_frags;
200
201	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
202
203	txbd->tx_bd_opaque = prod;
204
205	tx_buf = &txr->tx_buf_ring[prod];
206	tx_buf->skb = skb;
207	tx_buf->nr_frags = last_frag;
208
209	vlan_tag_flags = 0;
210	cfa_action = 0;
211	if (skb_vlan_tag_present(skb)) {
212		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
213				 skb_vlan_tag_get(skb);
		/* Currently supports 802.1Q and 802.1ad VLAN offloads.
		 * QINQ1, QINQ2 and QINQ3 VLAN headers are deprecated.
		 */
217		if (skb->vlan_proto == htons(ETH_P_8021Q))
218			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
219	}
220
221	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
222		struct tx_push_bd *push = txr->tx_push;
223		struct tx_bd *tx_push = &push->txbd1;
224		struct tx_bd_ext *tx_push1 = &push->txbd2;
225		void *pdata = tx_push1 + 1;
226		int j;
227
228		/* Set COAL_NOW to be ready quickly for the next push */
229		tx_push->tx_bd_len_flags_type =
230			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
231					TX_BD_TYPE_LONG_TX_BD |
232					TX_BD_FLAGS_LHINT_512_AND_SMALLER |
233					TX_BD_FLAGS_COAL_NOW |
234					TX_BD_FLAGS_PACKET_END |
235					(2 << TX_BD_FLAGS_BD_CNT_SHIFT));
236
237		if (skb->ip_summed == CHECKSUM_PARTIAL)
238			tx_push1->tx_bd_hsize_lflags =
239					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
240		else
241			tx_push1->tx_bd_hsize_lflags = 0;
242
243		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
244		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
245
246		skb_copy_from_linear_data(skb, pdata, len);
247		pdata += len;
248		for (j = 0; j < last_frag; j++) {
249			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
250			void *fptr;
251
252			fptr = skb_frag_address_safe(frag);
253			if (!fptr)
254				goto normal_tx;
255
256			memcpy(pdata, fptr, skb_frag_size(frag));
257			pdata += skb_frag_size(frag);
258		}
259
260		memcpy(txbd, tx_push, sizeof(*txbd));
261		prod = NEXT_TX(prod);
262		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
263		memcpy(txbd, tx_push1, sizeof(*txbd));
264		prod = NEXT_TX(prod);
265		push->doorbell =
266			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
267		txr->tx_prod = prod;
268
269		netdev_tx_sent_queue(txq, skb->len);
270
271		__iowrite64_copy(txr->tx_doorbell, push,
272				 (length + sizeof(*push) + 8) / 8);
273
274		tx_buf->is_push = 1;
275
276		goto tx_done;
277	}
278
279normal_tx:
280	if (length < BNXT_MIN_PKT_SIZE) {
281		pad = BNXT_MIN_PKT_SIZE - length;
282		if (skb_pad(skb, pad)) {
283			/* SKB already freed. */
284			tx_buf->skb = NULL;
285			return NETDEV_TX_OK;
286		}
287		length = BNXT_MIN_PKT_SIZE;
288	}
289
290	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
291
292	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
293		dev_kfree_skb_any(skb);
294		tx_buf->skb = NULL;
295		return NETDEV_TX_OK;
296	}
297
298	dma_unmap_addr_set(tx_buf, mapping, mapping);
299	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
300		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);
301
302	txbd->tx_bd_haddr = cpu_to_le64(mapping);
303
304	prod = NEXT_TX(prod);
305	txbd1 = (struct tx_bd_ext *)
306		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
307
308	txbd1->tx_bd_hsize_lflags = 0;
309	if (skb_is_gso(skb)) {
310		u32 hdr_len;
311
312		if (skb->encapsulation)
313			hdr_len = skb_inner_network_offset(skb) +
314				skb_inner_network_header_len(skb) +
315				inner_tcp_hdrlen(skb);
316		else
317			hdr_len = skb_transport_offset(skb) +
318				tcp_hdrlen(skb);
319
320		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
321					TX_BD_FLAGS_T_IPID |
322					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
323		length = skb_shinfo(skb)->gso_size;
324		txbd1->tx_bd_mss = cpu_to_le32(length);
325		length += hdr_len;
326	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
327		txbd1->tx_bd_hsize_lflags =
328			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
329		txbd1->tx_bd_mss = 0;
330	}
331
332	length >>= 9;
333	flags |= bnxt_lhint_arr[length];
334	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
335
336	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
337	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
338	for (i = 0; i < last_frag; i++) {
339		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
340
341		prod = NEXT_TX(prod);
342		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
343
344		len = skb_frag_size(frag);
345		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
346					   DMA_TO_DEVICE);
347
348		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
349			goto tx_dma_error;
350
351		tx_buf = &txr->tx_buf_ring[prod];
352		dma_unmap_addr_set(tx_buf, mapping, mapping);
353
354		txbd->tx_bd_haddr = cpu_to_le64(mapping);
355
356		flags = len << TX_BD_LEN_SHIFT;
357		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
358	}
359
360	flags &= ~TX_BD_LEN;
361	txbd->tx_bd_len_flags_type =
362		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
363			    TX_BD_FLAGS_PACKET_END);
364
365	netdev_tx_sent_queue(txq, skb->len);
366
367	/* Sync BD data before updating doorbell */
368	wmb();
369
370	prod = NEXT_TX(prod);
371	txr->tx_prod = prod;
372
373	writel(DB_KEY_TX | prod, txr->tx_doorbell);
374	writel(DB_KEY_TX | prod, txr->tx_doorbell);
375
376tx_done:
377
378	mmiowb();
379
380	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
381		netif_tx_stop_queue(txq);
382
383		/* netif_tx_stop_queue() must be done before checking
384		 * tx index in bnxt_tx_avail() below, because in
385		 * bnxt_tx_int(), we update tx index before checking for
386		 * netif_tx_queue_stopped().
387		 */
388		smp_mb();
389		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
390			netif_tx_wake_queue(txq);
391	}
392	return NETDEV_TX_OK;
393
394tx_dma_error:
395	last_frag = i;
396
	/* Start back at the beginning and unmap the skb. */
398	prod = txr->tx_prod;
399	tx_buf = &txr->tx_buf_ring[prod];
400	tx_buf->skb = NULL;
401	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
402			 skb_headlen(skb), PCI_DMA_TODEVICE);
403	prod = NEXT_TX(prod);
404
405	/* unmap remaining mapped pages */
406	for (i = 0; i < last_frag; i++) {
407		prod = NEXT_TX(prod);
408		tx_buf = &txr->tx_buf_ring[prod];
409		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
410			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
411			       PCI_DMA_TODEVICE);
412	}
413
414	dev_kfree_skb_any(skb);
415	return NETDEV_TX_OK;
416}
417
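/* Reclaim completed TX descriptors: unmap the DMA buffers, free the skbs,
 * advance the consumer index and wake the TX queue if it was stopped and
 * enough descriptors are available again.
 */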
418static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
419{
420	struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
421	int index = bnapi->index;
422	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
423	u16 cons = txr->tx_cons;
424	struct pci_dev *pdev = bp->pdev;
425	int i;
426	unsigned int tx_bytes = 0;
427
428	for (i = 0; i < nr_pkts; i++) {
429		struct bnxt_sw_tx_bd *tx_buf;
430		struct sk_buff *skb;
431		int j, last;
432
433		tx_buf = &txr->tx_buf_ring[cons];
434		cons = NEXT_TX(cons);
435		skb = tx_buf->skb;
436		tx_buf->skb = NULL;
437
438		if (tx_buf->is_push) {
439			tx_buf->is_push = 0;
440			goto next_tx_int;
441		}
442
443		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
444				 skb_headlen(skb), PCI_DMA_TODEVICE);
445		last = tx_buf->nr_frags;
446
447		for (j = 0; j < last; j++) {
448			cons = NEXT_TX(cons);
449			tx_buf = &txr->tx_buf_ring[cons];
450			dma_unmap_page(
451				&pdev->dev,
452				dma_unmap_addr(tx_buf, mapping),
453				skb_frag_size(&skb_shinfo(skb)->frags[j]),
454				PCI_DMA_TODEVICE);
455		}
456
457next_tx_int:
458		cons = NEXT_TX(cons);
459
460		tx_bytes += skb->len;
461		dev_kfree_skb_any(skb);
462	}
463
464	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
465	txr->tx_cons = cons;
466
467	/* Need to make the tx_cons update visible to bnxt_start_xmit()
468	 * before checking for netif_tx_queue_stopped().  Without the
469	 * memory barrier, there is a small possibility that bnxt_start_xmit()
470	 * will miss it and cause the queue to be stopped forever.
471	 */
472	smp_mb();
473
474	if (unlikely(netif_tx_queue_stopped(txq)) &&
475	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
476		__netif_tx_lock(txq, smp_processor_id());
477		if (netif_tx_queue_stopped(txq) &&
478		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
479		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
480			netif_tx_wake_queue(txq);
481		__netif_tx_unlock(txq);
482	}
483}
484
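/* Allocate a receive data buffer and DMA-map it at the RX DMA offset. */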
485static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
486				       gfp_t gfp)
487{
488	u8 *data;
489	struct pci_dev *pdev = bp->pdev;
490
491	data = kmalloc(bp->rx_buf_size, gfp);
492	if (!data)
493		return NULL;
494
495	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
496				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
497
498	if (dma_mapping_error(&pdev->dev, *mapping)) {
499		kfree(data);
500		data = NULL;
501	}
502	return data;
503}
504
505static inline int bnxt_alloc_rx_data(struct bnxt *bp,
506				     struct bnxt_rx_ring_info *rxr,
507				     u16 prod, gfp_t gfp)
508{
509	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
510	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
511	u8 *data;
512	dma_addr_t mapping;
513
514	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
515	if (!data)
516		return -ENOMEM;
517
518	rx_buf->data = data;
519	dma_unmap_addr_set(rx_buf, mapping, mapping);
520
521	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
522
523	return 0;
524}
525
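/* Recycle an RX buffer from the consumer slot back to the current producer
 * slot, e.g. when a replacement buffer cannot be allocated.
 */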
526static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
527			       u8 *data)
528{
529	u16 prod = rxr->rx_prod;
530	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
531	struct rx_bd *cons_bd, *prod_bd;
532
533	prod_rx_buf = &rxr->rx_buf_ring[prod];
534	cons_rx_buf = &rxr->rx_buf_ring[cons];
535
536	prod_rx_buf->data = data;
537
538	dma_unmap_addr_set(prod_rx_buf, mapping,
539			   dma_unmap_addr(cons_rx_buf, mapping));
540
541	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
542	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
543
544	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
545}
546
547static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
548{
549	u16 next, max = rxr->rx_agg_bmap_size;
550
551	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
552	if (next >= max)
553		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
554	return next;
555}
556
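/* Allocate and DMA-map a full page for the RX aggregation ring. */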
557static inline int bnxt_alloc_rx_page(struct bnxt *bp,
558				     struct bnxt_rx_ring_info *rxr,
559				     u16 prod, gfp_t gfp)
560{
561	struct rx_bd *rxbd =
562		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
563	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
564	struct pci_dev *pdev = bp->pdev;
565	struct page *page;
566	dma_addr_t mapping;
567	u16 sw_prod = rxr->rx_sw_agg_prod;
568
569	page = alloc_page(gfp);
570	if (!page)
571		return -ENOMEM;
572
573	mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
574			       PCI_DMA_FROMDEVICE);
575	if (dma_mapping_error(&pdev->dev, mapping)) {
576		__free_page(page);
577		return -EIO;
578	}
579
580	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
581		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
582
583	__set_bit(sw_prod, rxr->rx_agg_bmap);
584	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
585	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
586
587	rx_agg_buf->page = page;
588	rx_agg_buf->mapping = mapping;
589	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
590	rxbd->rx_bd_opaque = sw_prod;
591	return 0;
592}
593
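/* Recycle the aggregation buffers referenced by the completion entries back
 * onto the aggregation ring, e.g. when a packet is dropped or aborted.
 */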
594static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
595				   u32 agg_bufs)
596{
597	struct bnxt *bp = bnapi->bp;
598	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
599	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
600	u16 prod = rxr->rx_agg_prod;
601	u16 sw_prod = rxr->rx_sw_agg_prod;
602	u32 i;
603
604	for (i = 0; i < agg_bufs; i++) {
605		u16 cons;
606		struct rx_agg_cmp *agg;
607		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
608		struct rx_bd *prod_bd;
609		struct page *page;
610
611		agg = (struct rx_agg_cmp *)
612			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
613		cons = agg->rx_agg_cmp_opaque;
614		__clear_bit(cons, rxr->rx_agg_bmap);
615
616		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
617			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
618
619		__set_bit(sw_prod, rxr->rx_agg_bmap);
620		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
621		cons_rx_buf = &rxr->rx_agg_ring[cons];
622
623		/* It is possible for sw_prod to be equal to cons, so
624		 * set cons_rx_buf->page to NULL first.
625		 */
626		page = cons_rx_buf->page;
627		cons_rx_buf->page = NULL;
628		prod_rx_buf->page = page;
629
630		prod_rx_buf->mapping = cons_rx_buf->mapping;
631
632		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
633
634		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
635		prod_bd->rx_bd_opaque = sw_prod;
636
637		prod = NEXT_RX_AGG(prod);
638		sw_prod = NEXT_RX_AGG(sw_prod);
639		cp_cons = NEXT_CMP(cp_cons);
640	}
641	rxr->rx_agg_prod = prod;
642	rxr->rx_sw_agg_prod = sw_prod;
643}
644
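/* Hand the received data buffer off to a new skb after replenishing the RX
 * slot with a freshly allocated buffer; if replenishment fails, the buffer
 * is recycled and the packet is dropped.
 */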
645static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
646				   struct bnxt_rx_ring_info *rxr, u16 cons,
647				   u16 prod, u8 *data, dma_addr_t dma_addr,
648				   unsigned int len)
649{
650	int err;
651	struct sk_buff *skb;
652
653	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
654	if (unlikely(err)) {
655		bnxt_reuse_rx_data(rxr, cons, data);
656		return NULL;
657	}
658
659	skb = build_skb(data, 0);
660	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
661			 PCI_DMA_FROMDEVICE);
662	if (!skb) {
663		kfree(data);
664		return NULL;
665	}
666
667	skb_reserve(skb, BNXT_RX_OFFSET);
668	skb_put(skb, len);
669	return skb;
670}
671
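/* Attach the aggregation pages belonging to this packet to the skb as page
 * fragments, allocating a replacement page for each one.  On allocation
 * failure the skb is freed and the remaining buffers are recycled.
 */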
672static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
673				     struct sk_buff *skb, u16 cp_cons,
674				     u32 agg_bufs)
675{
676	struct pci_dev *pdev = bp->pdev;
677	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
678	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
679	u16 prod = rxr->rx_agg_prod;
680	u32 i;
681
682	for (i = 0; i < agg_bufs; i++) {
683		u16 cons, frag_len;
684		struct rx_agg_cmp *agg;
685		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
686		struct page *page;
687		dma_addr_t mapping;
688
689		agg = (struct rx_agg_cmp *)
690			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
691		cons = agg->rx_agg_cmp_opaque;
692		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
693			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
694
695		cons_rx_buf = &rxr->rx_agg_ring[cons];
696		skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
697		__clear_bit(cons, rxr->rx_agg_bmap);
698
699		/* It is possible for bnxt_alloc_rx_page() to allocate
700		 * a sw_prod index that equals the cons index, so we
701		 * need to clear the cons entry now.
702		 */
703		mapping = dma_unmap_addr(cons_rx_buf, mapping);
704		page = cons_rx_buf->page;
705		cons_rx_buf->page = NULL;
706
707		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
708			struct skb_shared_info *shinfo;
709			unsigned int nr_frags;
710
711			shinfo = skb_shinfo(skb);
712			nr_frags = --shinfo->nr_frags;
713			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
714
715			dev_kfree_skb(skb);
716
717			cons_rx_buf->page = page;
718
719			/* Update prod since possibly some pages have been
720			 * allocated already.
721			 */
722			rxr->rx_agg_prod = prod;
723			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
724			return NULL;
725		}
726
727		dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
728			       PCI_DMA_FROMDEVICE);
729
730		skb->data_len += frag_len;
731		skb->len += frag_len;
732		skb->truesize += PAGE_SIZE;
733
734		prod = NEXT_RX_AGG(prod);
735		cp_cons = NEXT_CMP(cp_cons);
736	}
737	rxr->rx_agg_prod = prod;
738	return skb;
739}
740
741static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
742			       u8 agg_bufs, u32 *raw_cons)
743{
744	u16 last;
745	struct rx_agg_cmp *agg;
746
747	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
748	last = RING_CMP(*raw_cons);
749	agg = (struct rx_agg_cmp *)
750		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
751	return RX_AGG_CMP_VALID(agg, *raw_cons);
752}
753
754static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
755					    unsigned int len,
756					    dma_addr_t mapping)
757{
758	struct bnxt *bp = bnapi->bp;
759	struct pci_dev *pdev = bp->pdev;
760	struct sk_buff *skb;
761
762	skb = napi_alloc_skb(&bnapi->napi, len);
763	if (!skb)
764		return NULL;
765
766	dma_sync_single_for_cpu(&pdev->dev, mapping,
767				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);
768
769	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);
770
771	dma_sync_single_for_device(&pdev->dev, mapping,
772				   bp->rx_copy_thresh,
773				   PCI_DMA_FROMDEVICE);
774
775	skb_put(skb, len);
776	return skb;
777}
778
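/* Handle a TPA_START completion: move the current RX buffer into the TPA
 * slot for this aggregation ID, return the slot's old buffer to the ring,
 * and record the length, hash, VLAN metadata and flags for use at TPA_END.
 */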
779static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
780			   struct rx_tpa_start_cmp *tpa_start,
781			   struct rx_tpa_start_cmp_ext *tpa_start1)
782{
783	u8 agg_id = TPA_START_AGG_ID(tpa_start);
784	u16 cons, prod;
785	struct bnxt_tpa_info *tpa_info;
786	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
787	struct rx_bd *prod_bd;
788	dma_addr_t mapping;
789
790	cons = tpa_start->rx_tpa_start_cmp_opaque;
791	prod = rxr->rx_prod;
792	cons_rx_buf = &rxr->rx_buf_ring[cons];
793	prod_rx_buf = &rxr->rx_buf_ring[prod];
794	tpa_info = &rxr->rx_tpa[agg_id];
795
796	prod_rx_buf->data = tpa_info->data;
797
798	mapping = tpa_info->mapping;
799	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
800
801	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
802
803	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
804
805	tpa_info->data = cons_rx_buf->data;
806	cons_rx_buf->data = NULL;
807	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);
808
809	tpa_info->len =
810		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
811				RX_TPA_START_CMP_LEN_SHIFT;
812	if (likely(TPA_START_HASH_VALID(tpa_start))) {
813		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
814
815		tpa_info->hash_type = PKT_HASH_TYPE_L4;
816		tpa_info->gso_type = SKB_GSO_TCPV4;
817		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
818		if (hash_type == 3)
819			tpa_info->gso_type = SKB_GSO_TCPV6;
820		tpa_info->rss_hash =
821			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
822	} else {
823		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
824		tpa_info->gso_type = 0;
825		if (netif_msg_rx_err(bp))
826			netdev_warn(bp->dev, "TPA packet without valid hash\n");
827	}
828	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
829	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
830
831	rxr->rx_prod = NEXT_RX(prod);
832	cons = NEXT_RX(cons);
833	cons_rx_buf = &rxr->rx_buf_ring[cons];
834
835	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
836	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
837	cons_rx_buf->data = NULL;
838}
839
840static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
841			   u16 cp_cons, u32 agg_bufs)
842{
843	if (agg_bufs)
844		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
845}
846
847#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
848#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
849
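/* Finish a GRO-aggregated TCP packet: set gso_size/gso_type, locate the TCP
 * header from the payload offset, rebuild the pseudo-header checksum, and
 * mark the UDP tunnel GSO type when a tunnel header is present.
 */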
850static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
851					   struct rx_tpa_end_cmp *tpa_end,
852					   struct rx_tpa_end_cmp_ext *tpa_end1,
853					   struct sk_buff *skb)
854{
855#ifdef CONFIG_INET
856	struct tcphdr *th;
857	int payload_off, tcp_opt_len = 0;
858	int len, nw_off;
859
860	NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end);
861	skb_shinfo(skb)->gso_size =
862		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
863	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
864	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
865		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
866		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
867	if (TPA_END_GRO_TS(tpa_end))
868		tcp_opt_len = 12;
869
870	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
871		struct iphdr *iph;
872
873		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
874			 ETH_HLEN;
875		skb_set_network_header(skb, nw_off);
876		iph = ip_hdr(skb);
877		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
878		len = skb->len - skb_transport_offset(skb);
879		th = tcp_hdr(skb);
880		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
881	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
882		struct ipv6hdr *iph;
883
884		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
885			 ETH_HLEN;
886		skb_set_network_header(skb, nw_off);
887		iph = ipv6_hdr(skb);
888		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
889		len = skb->len - skb_transport_offset(skb);
890		th = tcp_hdr(skb);
891		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
892	} else {
893		dev_kfree_skb_any(skb);
894		return NULL;
895	}
896	tcp_gro_complete(skb);
897
898	if (nw_off) { /* tunnel */
899		struct udphdr *uh = NULL;
900
901		if (skb->protocol == htons(ETH_P_IP)) {
902			struct iphdr *iph = (struct iphdr *)skb->data;
903
904			if (iph->protocol == IPPROTO_UDP)
905				uh = (struct udphdr *)(iph + 1);
906		} else {
907			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
908
909			if (iph->nexthdr == IPPROTO_UDP)
910				uh = (struct udphdr *)(iph + 1);
911		}
912		if (uh) {
913			if (uh->check)
914				skb_shinfo(skb)->gso_type |=
915					SKB_GSO_UDP_TUNNEL_CSUM;
916			else
917				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
918		}
919	}
920#endif
921	return skb;
922}
923
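/* Handle a TPA_END completion: build the skb for the aggregated packet,
 * attach any aggregation pages, apply RSS hash, VLAN and checksum
 * information, and finish GRO processing when applicable.
 */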
924static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
925					   struct bnxt_napi *bnapi,
926					   u32 *raw_cons,
927					   struct rx_tpa_end_cmp *tpa_end,
928					   struct rx_tpa_end_cmp_ext *tpa_end1,
929					   bool *agg_event)
930{
931	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
932	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
933	u8 agg_id = TPA_END_AGG_ID(tpa_end);
934	u8 *data, agg_bufs;
935	u16 cp_cons = RING_CMP(*raw_cons);
936	unsigned int len;
937	struct bnxt_tpa_info *tpa_info;
938	dma_addr_t mapping;
939	struct sk_buff *skb;
940
941	tpa_info = &rxr->rx_tpa[agg_id];
942	data = tpa_info->data;
943	prefetch(data);
944	len = tpa_info->len;
945	mapping = tpa_info->mapping;
946
947	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
948		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
949
950	if (agg_bufs) {
951		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
952			return ERR_PTR(-EBUSY);
953
954		*agg_event = true;
955		cp_cons = NEXT_CMP(cp_cons);
956	}
957
958	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
959		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
960		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
961			    agg_bufs, (int)MAX_SKB_FRAGS);
962		return NULL;
963	}
964
965	if (len <= bp->rx_copy_thresh) {
966		skb = bnxt_copy_skb(bnapi, data, len, mapping);
967		if (!skb) {
968			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
969			return NULL;
970		}
971	} else {
972		u8 *new_data;
973		dma_addr_t new_mapping;
974
975		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
976		if (!new_data) {
977			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
978			return NULL;
979		}
980
981		tpa_info->data = new_data;
982		tpa_info->mapping = new_mapping;
983
984		skb = build_skb(data, 0);
985		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
986				 PCI_DMA_FROMDEVICE);
987
988		if (!skb) {
989			kfree(data);
990			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
991			return NULL;
992		}
993		skb_reserve(skb, BNXT_RX_OFFSET);
994		skb_put(skb, len);
995	}
996
997	if (agg_bufs) {
998		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
999		if (!skb) {
1000			/* Page reuse already handled by bnxt_rx_pages(). */
1001			return NULL;
1002		}
1003	}
1004	skb->protocol = eth_type_trans(skb, bp->dev);
1005
1006	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
1007		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
1008
1009	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
1010		netdev_features_t features = skb->dev->features;
1011		u16 vlan_proto = tpa_info->metadata >>
1012			RX_CMP_FLAGS2_METADATA_TPID_SFT;
1013
1014		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
1015		     vlan_proto == ETH_P_8021Q) ||
1016		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
1017		     vlan_proto == ETH_P_8021AD)) {
1018			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
1019					       tpa_info->metadata &
1020					       RX_CMP_FLAGS2_METADATA_VID_MASK);
1021		}
1022	}
1023
1024	skb_checksum_none_assert(skb);
1025	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
1026		skb->ip_summed = CHECKSUM_UNNECESSARY;
1027		skb->csum_level =
1028			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
1029	}
1030
1031	if (TPA_END_GRO(tpa_end))
1032		skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
1033
1034	return skb;
1035}
1036
1037/* returns the following:
1038 * 1       - 1 packet successfully received
1039 * 0       - successful TPA_START, packet not completed yet
1040 * -EBUSY  - completion ring does not have all the agg buffers yet
1041 * -ENOMEM - packet aborted due to out of memory
1042 * -EIO    - packet aborted due to hw error indicated in BD
1043 */
1044static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1045		       bool *agg_event)
1046{
1047	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1048	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
1049	struct net_device *dev = bp->dev;
1050	struct rx_cmp *rxcmp;
1051	struct rx_cmp_ext *rxcmp1;
1052	u32 tmp_raw_cons = *raw_cons;
1053	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
1054	struct bnxt_sw_rx_bd *rx_buf;
1055	unsigned int len;
1056	u8 *data, agg_bufs, cmp_type;
1057	dma_addr_t dma_addr;
1058	struct sk_buff *skb;
1059	int rc = 0;
1060
1061	rxcmp = (struct rx_cmp *)
1062			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1063
1064	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
1065	cp_cons = RING_CMP(tmp_raw_cons);
1066	rxcmp1 = (struct rx_cmp_ext *)
1067			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
1068
1069	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
1070		return -EBUSY;
1071
1072	cmp_type = RX_CMP_TYPE(rxcmp);
1073
1074	prod = rxr->rx_prod;
1075
1076	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
1077		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
1078			       (struct rx_tpa_start_cmp_ext *)rxcmp1);
1079
1080		goto next_rx_no_prod;
1081
1082	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
1083		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
1084				   (struct rx_tpa_end_cmp *)rxcmp,
1085				   (struct rx_tpa_end_cmp_ext *)rxcmp1,
1086				   agg_event);
1087
1088		if (unlikely(IS_ERR(skb)))
1089			return -EBUSY;
1090
1091		rc = -ENOMEM;
1092		if (likely(skb)) {
1093			skb_record_rx_queue(skb, bnapi->index);
1094			skb_mark_napi_id(skb, &bnapi->napi);
1095			if (bnxt_busy_polling(bnapi))
1096				netif_receive_skb(skb);
1097			else
1098				napi_gro_receive(&bnapi->napi, skb);
1099			rc = 1;
1100		}
1101		goto next_rx_no_prod;
1102	}
1103
1104	cons = rxcmp->rx_cmp_opaque;
1105	rx_buf = &rxr->rx_buf_ring[cons];
1106	data = rx_buf->data;
1107	prefetch(data);
1108
1109	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
1110				RX_CMP_AGG_BUFS_SHIFT;
1111
1112	if (agg_bufs) {
1113		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
1114			return -EBUSY;
1115
1116		cp_cons = NEXT_CMP(cp_cons);
1117		*agg_event = true;
1118	}
1119
1120	rx_buf->data = NULL;
1121	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
1122		bnxt_reuse_rx_data(rxr, cons, data);
1123		if (agg_bufs)
1124			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
1125
1126		rc = -EIO;
1127		goto next_rx;
1128	}
1129
1130	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
1131	dma_addr = dma_unmap_addr(rx_buf, mapping);
1132
1133	if (len <= bp->rx_copy_thresh) {
1134		skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
1135		bnxt_reuse_rx_data(rxr, cons, data);
1136		if (!skb) {
1137			rc = -ENOMEM;
1138			goto next_rx;
1139		}
1140	} else {
1141		skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
1142		if (!skb) {
1143			rc = -ENOMEM;
1144			goto next_rx;
1145		}
1146	}
1147
1148	if (agg_bufs) {
1149		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
1150		if (!skb) {
1151			rc = -ENOMEM;
1152			goto next_rx;
1153		}
1154	}
1155
1156	if (RX_CMP_HASH_VALID(rxcmp)) {
1157		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
1158		enum pkt_hash_types type = PKT_HASH_TYPE_L4;
1159
1160		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
1161		if (hash_type != 1 && hash_type != 3)
1162			type = PKT_HASH_TYPE_L3;
1163		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
1164	}
1165
1166	skb->protocol = eth_type_trans(skb, dev);
1167
1168	if (rxcmp1->rx_cmp_flags2 &
1169	    cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
1170		netdev_features_t features = skb->dev->features;
1171		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1172		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1173
1174		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
1175		     vlan_proto == ETH_P_8021Q) ||
1176		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
1177		     vlan_proto == ETH_P_8021AD))
1178			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
1179					       meta_data &
1180					       RX_CMP_FLAGS2_METADATA_VID_MASK);
1181	}
1182
1183	skb_checksum_none_assert(skb);
1184	if (RX_CMP_L4_CS_OK(rxcmp1)) {
1185		if (dev->features & NETIF_F_RXCSUM) {
1186			skb->ip_summed = CHECKSUM_UNNECESSARY;
1187			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
1188		}
1189	} else {
1190		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)
1191			cpr->rx_l4_csum_errors++;
1192	}
1193
1194	skb_record_rx_queue(skb, bnapi->index);
1195	skb_mark_napi_id(skb, &bnapi->napi);
1196	if (bnxt_busy_polling(bnapi))
1197		netif_receive_skb(skb);
1198	else
1199		napi_gro_receive(&bnapi->napi, skb);
1200	rc = 1;
1201
1202next_rx:
1203	rxr->rx_prod = NEXT_RX(prod);
1204
1205next_rx_no_prod:
1206	*raw_cons = tmp_raw_cons;
1207
1208	return rc;
1209}
1210
1211static int bnxt_async_event_process(struct bnxt *bp,
1212				    struct hwrm_async_event_cmpl *cmpl)
1213{
1214	u16 event_id = le16_to_cpu(cmpl->event_id);
1215
	/* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
1217	switch (event_id) {
1218	case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
1219		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
1220		schedule_work(&bp->sp_task);
1221		break;
1222	default:
1223		netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
1224			   event_id);
1225		break;
1226	}
1227	return 0;
1228}
1229
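/* Dispatch firmware (HWRM) completions: DONE acknowledgements, forwarded VF
 * requests and asynchronous events.
 */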
1230static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
1231{
1232	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
1233	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
1234	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
1235				(struct hwrm_fwd_req_cmpl *)txcmp;
1236
1237	switch (cmpl_type) {
1238	case CMPL_BASE_TYPE_HWRM_DONE:
1239		seq_id = le16_to_cpu(h_cmpl->sequence_id);
1240		if (seq_id == bp->hwrm_intr_seq_id)
1241			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
1242		else
1243			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
1244		break;
1245
1246	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
1247		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
1248
1249		if ((vf_id < bp->pf.first_vf_id) ||
1250		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
1251			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
1252				   vf_id);
1253			return -EINVAL;
1254		}
1255
1256		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
1257		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
1258		schedule_work(&bp->sp_task);
1259		break;
1260
	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);
		break;

1265	default:
1266		break;
1267	}
1268
1269	return 0;
1270}
1271
1272static irqreturn_t bnxt_msix(int irq, void *dev_instance)
1273{
1274	struct bnxt_napi *bnapi = dev_instance;
1275	struct bnxt *bp = bnapi->bp;
1276	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1277	u32 cons = RING_CMP(cpr->cp_raw_cons);
1278
1279	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1280	napi_schedule(&bnapi->napi);
1281	return IRQ_HANDLED;
1282}
1283
1284static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
1285{
1286	u32 raw_cons = cpr->cp_raw_cons;
1287	u16 cons = RING_CMP(raw_cons);
1288	struct tx_cmp *txcmp;
1289
1290	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1291
1292	return TX_CMP_VALID(txcmp, raw_cons);
1293}
1294
1295static irqreturn_t bnxt_inta(int irq, void *dev_instance)
1296{
1297	struct bnxt_napi *bnapi = dev_instance;
1298	struct bnxt *bp = bnapi->bp;
1299	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1300	u32 cons = RING_CMP(cpr->cp_raw_cons);
1301	u32 int_status;
1302
1303	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
1304
1305	if (!bnxt_has_work(bp, cpr)) {
1306		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
		/* Return if this is a spurious interrupt */
1308		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
1309			return IRQ_NONE;
1310	}
1311
1312	/* disable ring IRQ */
1313	BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
1314
1315	/* Return here if interrupt is shared and is disabled. */
1316	if (unlikely(atomic_read(&bp->intr_sem) != 0))
1317		return IRQ_HANDLED;
1318
1319	napi_schedule(&bnapi->napi);
1320	return IRQ_HANDLED;
1321}
1322
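/* Process completion ring entries, up to the NAPI budget for RX packets:
 * count TX completions, receive RX/TPA packets and handle HWRM completions,
 * then ACK the completion ring and ring the RX/aggregation doorbells.
 */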
1323static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
1324{
1325	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1326	u32 raw_cons = cpr->cp_raw_cons;
1327	u32 cons;
1328	int tx_pkts = 0;
1329	int rx_pkts = 0;
1330	bool rx_event = false;
1331	bool agg_event = false;
1332	struct tx_cmp *txcmp;
1333
1334	while (1) {
1335		int rc;
1336
1337		cons = RING_CMP(raw_cons);
1338		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
1339
1340		if (!TX_CMP_VALID(txcmp, raw_cons))
1341			break;
1342
1343		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
1344			tx_pkts++;
			/* Return the full budget so NAPI will poll again and
			 * process the remaining TX completions next time.
			 */
1346			if (unlikely(tx_pkts > bp->tx_wake_thresh))
1347				rx_pkts = budget;
1348		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
1349			rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
1350			if (likely(rc >= 0))
1351				rx_pkts += rc;
1352			else if (rc == -EBUSY)	/* partial completion */
1353				break;
1354			rx_event = true;
1355		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
1356				     CMPL_BASE_TYPE_HWRM_DONE) ||
1357				    (TX_CMP_TYPE(txcmp) ==
1358				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
1359				    (TX_CMP_TYPE(txcmp) ==
1360				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
1361			bnxt_hwrm_handler(bp, txcmp);
1362		}
1363		raw_cons = NEXT_RAW_CMP(raw_cons);
1364
1365		if (rx_pkts == budget)
1366			break;
1367	}
1368
1369	cpr->cp_raw_cons = raw_cons;
1370	/* ACK completion ring before freeing tx ring and producing new
1371	 * buffers in rx/agg rings to prevent overflowing the completion
1372	 * ring.
1373	 */
1374	BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
1375
1376	if (tx_pkts)
1377		bnxt_tx_int(bp, bnapi, tx_pkts);
1378
1379	if (rx_event) {
1380		struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
1381
1382		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1383		writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
1384		if (agg_event) {
1385			writel(DB_KEY_RX | rxr->rx_agg_prod,
1386			       rxr->rx_agg_doorbell);
1387			writel(DB_KEY_RX | rxr->rx_agg_prod,
1388			       rxr->rx_agg_doorbell);
1389		}
1390	}
1391	return rx_pkts;
1392}
1393
1394static int bnxt_poll(struct napi_struct *napi, int budget)
1395{
1396	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1397	struct bnxt *bp = bnapi->bp;
1398	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1399	int work_done = 0;
1400
1401	if (!bnxt_lock_napi(bnapi))
1402		return budget;
1403
1404	while (1) {
1405		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
1406
1407		if (work_done >= budget)
1408			break;
1409
1410		if (!bnxt_has_work(bp, cpr)) {
1411			napi_complete(napi);
1412			BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1413			break;
1414		}
1415	}
1416	mmiowb();
1417	bnxt_unlock_napi(bnapi);
1418	return work_done;
1419}
1420
1421#ifdef CONFIG_NET_RX_BUSY_POLL
1422static int bnxt_busy_poll(struct napi_struct *napi)
1423{
1424	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
1425	struct bnxt *bp = bnapi->bp;
1426	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
1427	int rx_work, budget = 4;
1428
1429	if (atomic_read(&bp->intr_sem) != 0)
1430		return LL_FLUSH_FAILED;
1431
1432	if (!bnxt_lock_poll(bnapi))
1433		return LL_FLUSH_BUSY;
1434
1435	rx_work = bnxt_poll_work(bp, bnapi, budget);
1436
1437	BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
1438
1439	bnxt_unlock_poll(bnapi);
1440	return rx_work;
1441}
1442#endif
1443
1444static void bnxt_free_tx_skbs(struct bnxt *bp)
1445{
1446	int i, max_idx;
1447	struct pci_dev *pdev = bp->pdev;
1448
1449	if (!bp->bnapi)
1450		return;
1451
1452	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
1453	for (i = 0; i < bp->tx_nr_rings; i++) {
1454		struct bnxt_napi *bnapi = bp->bnapi[i];
1455		struct bnxt_tx_ring_info *txr;
1456		int j;
1457
1458		if (!bnapi)
1459			continue;
1460
1461		txr = &bnapi->tx_ring;
1462		for (j = 0; j < max_idx;) {
1463			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
1464			struct sk_buff *skb = tx_buf->skb;
1465			int k, last;
1466
1467			if (!skb) {
1468				j++;
1469				continue;
1470			}
1471
1472			tx_buf->skb = NULL;
1473
1474			if (tx_buf->is_push) {
1475				dev_kfree_skb(skb);
1476				j += 2;
1477				continue;
1478			}
1479
1480			dma_unmap_single(&pdev->dev,
1481					 dma_unmap_addr(tx_buf, mapping),
1482					 skb_headlen(skb),
1483					 PCI_DMA_TODEVICE);
1484
1485			last = tx_buf->nr_frags;
1486			j += 2;
1487			for (k = 0; k < last; k++, j = NEXT_TX(j)) {
1488				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
1489
1490				tx_buf = &txr->tx_buf_ring[j];
1491				dma_unmap_page(
1492					&pdev->dev,
1493					dma_unmap_addr(tx_buf, mapping),
1494					skb_frag_size(frag), PCI_DMA_TODEVICE);
1495			}
1496			dev_kfree_skb(skb);
1497		}
1498		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
1499	}
1500}
1501
1502static void bnxt_free_rx_skbs(struct bnxt *bp)
1503{
1504	int i, max_idx, max_agg_idx;
1505	struct pci_dev *pdev = bp->pdev;
1506
1507	if (!bp->bnapi)
1508		return;
1509
1510	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
1511	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
1512	for (i = 0; i < bp->rx_nr_rings; i++) {
1513		struct bnxt_napi *bnapi = bp->bnapi[i];
1514		struct bnxt_rx_ring_info *rxr;
1515		int j;
1516
1517		if (!bnapi)
1518			continue;
1519
1520		rxr = &bnapi->rx_ring;
1521
1522		if (rxr->rx_tpa) {
1523			for (j = 0; j < MAX_TPA; j++) {
1524				struct bnxt_tpa_info *tpa_info =
1525							&rxr->rx_tpa[j];
1526				u8 *data = tpa_info->data;
1527
1528				if (!data)
1529					continue;
1530
1531				dma_unmap_single(
1532					&pdev->dev,
1533					dma_unmap_addr(tpa_info, mapping),
1534					bp->rx_buf_use_size,
1535					PCI_DMA_FROMDEVICE);
1536
1537				tpa_info->data = NULL;
1538
1539				kfree(data);
1540			}
1541		}
1542
1543		for (j = 0; j < max_idx; j++) {
1544			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
1545			u8 *data = rx_buf->data;
1546
1547			if (!data)
1548				continue;
1549
1550			dma_unmap_single(&pdev->dev,
1551					 dma_unmap_addr(rx_buf, mapping),
1552					 bp->rx_buf_use_size,
1553					 PCI_DMA_FROMDEVICE);
1554
1555			rx_buf->data = NULL;
1556
1557			kfree(data);
1558		}
1559
1560		for (j = 0; j < max_agg_idx; j++) {
1561			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
1562				&rxr->rx_agg_ring[j];
1563			struct page *page = rx_agg_buf->page;
1564
1565			if (!page)
1566				continue;
1567
1568			dma_unmap_page(&pdev->dev,
1569				       dma_unmap_addr(rx_agg_buf, mapping),
1570				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
1571
1572			rx_agg_buf->page = NULL;
1573			__clear_bit(j, rxr->rx_agg_bmap);
1574
1575			__free_page(page);
1576		}
1577	}
1578}
1579
1580static void bnxt_free_skbs(struct bnxt *bp)
1581{
1582	bnxt_free_tx_skbs(bp);
1583	bnxt_free_rx_skbs(bp);
1584}
1585
1586static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1587{
1588	struct pci_dev *pdev = bp->pdev;
1589	int i;
1590
1591	for (i = 0; i < ring->nr_pages; i++) {
1592		if (!ring->pg_arr[i])
1593			continue;
1594
1595		dma_free_coherent(&pdev->dev, ring->page_size,
1596				  ring->pg_arr[i], ring->dma_arr[i]);
1597
1598		ring->pg_arr[i] = NULL;
1599	}
1600	if (ring->pg_tbl) {
1601		dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
1602				  ring->pg_tbl, ring->pg_tbl_map);
1603		ring->pg_tbl = NULL;
1604	}
1605	if (ring->vmem_size && *ring->vmem) {
1606		vfree(*ring->vmem);
1607		*ring->vmem = NULL;
1608	}
1609}
1610
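/* Allocate the descriptor pages for a ring, an indirection page table when
 * more than one page is needed, and the software ring area (vmem).
 */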
1611static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
1612{
1613	int i;
1614	struct pci_dev *pdev = bp->pdev;
1615
1616	if (ring->nr_pages > 1) {
1617		ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
1618						  ring->nr_pages * 8,
1619						  &ring->pg_tbl_map,
1620						  GFP_KERNEL);
1621		if (!ring->pg_tbl)
1622			return -ENOMEM;
1623	}
1624
1625	for (i = 0; i < ring->nr_pages; i++) {
1626		ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
1627						     ring->page_size,
1628						     &ring->dma_arr[i],
1629						     GFP_KERNEL);
1630		if (!ring->pg_arr[i])
1631			return -ENOMEM;
1632
1633		if (ring->nr_pages > 1)
1634			ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
1635	}
1636
1637	if (ring->vmem_size) {
1638		*ring->vmem = vzalloc(ring->vmem_size);
1639		if (!(*ring->vmem))
1640			return -ENOMEM;
1641	}
1642	return 0;
1643}
1644
1645static void bnxt_free_rx_rings(struct bnxt *bp)
1646{
1647	int i;
1648
1649	if (!bp->bnapi)
1650		return;
1651
1652	for (i = 0; i < bp->rx_nr_rings; i++) {
1653		struct bnxt_napi *bnapi = bp->bnapi[i];
1654		struct bnxt_rx_ring_info *rxr;
1655		struct bnxt_ring_struct *ring;
1656
1657		if (!bnapi)
1658			continue;
1659
1660		rxr = &bnapi->rx_ring;
1661
1662		kfree(rxr->rx_tpa);
1663		rxr->rx_tpa = NULL;
1664
1665		kfree(rxr->rx_agg_bmap);
1666		rxr->rx_agg_bmap = NULL;
1667
1668		ring = &rxr->rx_ring_struct;
1669		bnxt_free_ring(bp, ring);
1670
1671		ring = &rxr->rx_agg_ring_struct;
1672		bnxt_free_ring(bp, ring);
1673	}
1674}
1675
1676static int bnxt_alloc_rx_rings(struct bnxt *bp)
1677{
1678	int i, rc, agg_rings = 0, tpa_rings = 0;
1679
1680	if (bp->flags & BNXT_FLAG_AGG_RINGS)
1681		agg_rings = 1;
1682
1683	if (bp->flags & BNXT_FLAG_TPA)
1684		tpa_rings = 1;
1685
1686	for (i = 0; i < bp->rx_nr_rings; i++) {
1687		struct bnxt_napi *bnapi = bp->bnapi[i];
1688		struct bnxt_rx_ring_info *rxr;
1689		struct bnxt_ring_struct *ring;
1690
1691		if (!bnapi)
1692			continue;
1693
1694		rxr = &bnapi->rx_ring;
1695		ring = &rxr->rx_ring_struct;
1696
1697		rc = bnxt_alloc_ring(bp, ring);
1698		if (rc)
1699			return rc;
1700
1701		if (agg_rings) {
1702			u16 mem_size;
1703
1704			ring = &rxr->rx_agg_ring_struct;
1705			rc = bnxt_alloc_ring(bp, ring);
1706			if (rc)
1707				return rc;
1708
1709			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
1710			mem_size = rxr->rx_agg_bmap_size / 8;
1711			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
1712			if (!rxr->rx_agg_bmap)
1713				return -ENOMEM;
1714
1715			if (tpa_rings) {
1716				rxr->rx_tpa = kcalloc(MAX_TPA,
1717						sizeof(struct bnxt_tpa_info),
1718						GFP_KERNEL);
1719				if (!rxr->rx_tpa)
1720					return -ENOMEM;
1721			}
1722		}
1723	}
1724	return 0;
1725}
1726
1727static void bnxt_free_tx_rings(struct bnxt *bp)
1728{
1729	int i;
1730	struct pci_dev *pdev = bp->pdev;
1731
1732	if (!bp->bnapi)
1733		return;
1734
1735	for (i = 0; i < bp->tx_nr_rings; i++) {
1736		struct bnxt_napi *bnapi = bp->bnapi[i];
1737		struct bnxt_tx_ring_info *txr;
1738		struct bnxt_ring_struct *ring;
1739
1740		if (!bnapi)
1741			continue;
1742
1743		txr = &bnapi->tx_ring;
1744
1745		if (txr->tx_push) {
1746			dma_free_coherent(&pdev->dev, bp->tx_push_size,
1747					  txr->tx_push, txr->tx_push_mapping);
1748			txr->tx_push = NULL;
1749		}
1750
1751		ring = &txr->tx_ring_struct;
1752
1753		bnxt_free_ring(bp, ring);
1754	}
1755}
1756
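/* Allocate the TX rings and, if TX push is enabled, a small coherent buffer
 * per ring used to build the push BDs and inline packet data.
 */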
1757static int bnxt_alloc_tx_rings(struct bnxt *bp)
1758{
1759	int i, j, rc;
1760	struct pci_dev *pdev = bp->pdev;
1761
1762	bp->tx_push_size = 0;
1763	if (bp->tx_push_thresh) {
1764		int push_size;
1765
1766		push_size  = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
1767					bp->tx_push_thresh);
1768
1769		if (push_size > 128) {
1770			push_size = 0;
1771			bp->tx_push_thresh = 0;
1772		}
1773
1774		bp->tx_push_size = push_size;
1775	}
1776
1777	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
1778		struct bnxt_napi *bnapi = bp->bnapi[i];
1779		struct bnxt_tx_ring_info *txr;
1780		struct bnxt_ring_struct *ring;
1781
1782		if (!bnapi)
1783			continue;
1784
1785		txr = &bnapi->tx_ring;
1786		ring = &txr->tx_ring_struct;
1787
1788		rc = bnxt_alloc_ring(bp, ring);
1789		if (rc)
1790			return rc;
1791
1792		if (bp->tx_push_size) {
1793			struct tx_bd *txbd;
1794			dma_addr_t mapping;
1795
			/* One pre-allocated DMA buffer to back up
			 * the TX push operation
			 */
1799			txr->tx_push = dma_alloc_coherent(&pdev->dev,
1800						bp->tx_push_size,
1801						&txr->tx_push_mapping,
1802						GFP_KERNEL);
1803
1804			if (!txr->tx_push)
1805				return -ENOMEM;
1806
1807			txbd = &txr->tx_push->txbd1;
1808
1809			mapping = txr->tx_push_mapping +
1810				sizeof(struct tx_push_bd);
1811			txbd->tx_bd_haddr = cpu_to_le64(mapping);
1812
1813			memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
1814		}
1815		ring->queue_id = bp->q_info[j].queue_id;
1816		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
1817			j++;
1818	}
1819	return 0;
1820}
1821
1822static void bnxt_free_cp_rings(struct bnxt *bp)
1823{
1824	int i;
1825
1826	if (!bp->bnapi)
1827		return;
1828
1829	for (i = 0; i < bp->cp_nr_rings; i++) {
1830		struct bnxt_napi *bnapi = bp->bnapi[i];
1831		struct bnxt_cp_ring_info *cpr;
1832		struct bnxt_ring_struct *ring;
1833
1834		if (!bnapi)
1835			continue;
1836
1837		cpr = &bnapi->cp_ring;
1838		ring = &cpr->cp_ring_struct;
1839
1840		bnxt_free_ring(bp, ring);
1841	}
1842}
1843
1844static int bnxt_alloc_cp_rings(struct bnxt *bp)
1845{
1846	int i, rc;
1847
1848	for (i = 0; i < bp->cp_nr_rings; i++) {
1849		struct bnxt_napi *bnapi = bp->bnapi[i];
1850		struct bnxt_cp_ring_info *cpr;
1851		struct bnxt_ring_struct *ring;
1852
1853		if (!bnapi)
1854			continue;
1855
1856		cpr = &bnapi->cp_ring;
1857		ring = &cpr->cp_ring_struct;
1858
1859		rc = bnxt_alloc_ring(bp, ring);
1860		if (rc)
1861			return rc;
1862	}
1863	return 0;
1864}
1865
1866static void bnxt_init_ring_struct(struct bnxt *bp)
1867{
1868	int i;
1869
1870	for (i = 0; i < bp->cp_nr_rings; i++) {
1871		struct bnxt_napi *bnapi = bp->bnapi[i];
1872		struct bnxt_cp_ring_info *cpr;
1873		struct bnxt_rx_ring_info *rxr;
1874		struct bnxt_tx_ring_info *txr;
1875		struct bnxt_ring_struct *ring;
1876
1877		if (!bnapi)
1878			continue;
1879
1880		cpr = &bnapi->cp_ring;
1881		ring = &cpr->cp_ring_struct;
1882		ring->nr_pages = bp->cp_nr_pages;
1883		ring->page_size = HW_CMPD_RING_SIZE;
1884		ring->pg_arr = (void **)cpr->cp_desc_ring;
1885		ring->dma_arr = cpr->cp_desc_mapping;
1886		ring->vmem_size = 0;
1887
1888		rxr = &bnapi->rx_ring;
1889		ring = &rxr->rx_ring_struct;
1890		ring->nr_pages = bp->rx_nr_pages;
1891		ring->page_size = HW_RXBD_RING_SIZE;
1892		ring->pg_arr = (void **)rxr->rx_desc_ring;
1893		ring->dma_arr = rxr->rx_desc_mapping;
1894		ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
1895		ring->vmem = (void **)&rxr->rx_buf_ring;
1896
1897		ring = &rxr->rx_agg_ring_struct;
1898		ring->nr_pages = bp->rx_agg_nr_pages;
1899		ring->page_size = HW_RXBD_RING_SIZE;
1900		ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
1901		ring->dma_arr = rxr->rx_agg_desc_mapping;
1902		ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
1903		ring->vmem = (void **)&rxr->rx_agg_ring;
1904
1905		txr = &bnapi->tx_ring;
1906		ring = &txr->tx_ring_struct;
1907		ring->nr_pages = bp->tx_nr_pages;
1908		ring->page_size = HW_RXBD_RING_SIZE;
1909		ring->pg_arr = (void **)txr->tx_desc_ring;
1910		ring->dma_arr = txr->tx_desc_mapping;
1911		ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
1912		ring->vmem = (void **)&txr->tx_buf_ring;
1913	}
1914}
1915
1916static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
1917{
1918	int i;
1919	u32 prod;
1920	struct rx_bd **rx_buf_ring;
1921
1922	rx_buf_ring = (struct rx_bd **)ring->pg_arr;
1923	for (i = 0, prod = 0; i < ring->nr_pages; i++) {
1924		int j;
1925		struct rx_bd *rxbd;
1926
1927		rxbd = rx_buf_ring[i];
1928		if (!rxbd)
1929			continue;
1930
1931		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
1932			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
1933			rxbd->rx_bd_opaque = prod;
1934		}
1935	}
1936}
1937
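/* Initialize one RX ring: program the BD type into each descriptor, fill the
 * RX and aggregation rings with buffers, and pre-allocate the TPA buffers.
 */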
1938static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
1939{
1940	struct net_device *dev = bp->dev;
1941	struct bnxt_napi *bnapi = bp->bnapi[ring_nr];
1942	struct bnxt_rx_ring_info *rxr;
1943	struct bnxt_ring_struct *ring;
1944	u32 prod, type;
1945	int i;
1946
1947	if (!bnapi)
1948		return -EINVAL;
1949
1950	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
1951		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
1952
1953	if (NET_IP_ALIGN == 2)
1954		type |= RX_BD_FLAGS_SOP;
1955
1956	rxr = &bnapi->rx_ring;
1957	ring = &rxr->rx_ring_struct;
1958	bnxt_init_rxbd_pages(ring, type);
1959
1960	prod = rxr->rx_prod;
1961	for (i = 0; i < bp->rx_ring_size; i++) {
1962		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
1963			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
1964				    ring_nr, i, bp->rx_ring_size);
1965			break;
1966		}
1967		prod = NEXT_RX(prod);
1968	}
1969	rxr->rx_prod = prod;
1970	ring->fw_ring_id = INVALID_HW_RING_ID;
1971
1972	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
1973		return 0;
1974
1975	ring = &rxr->rx_agg_ring_struct;
1976
1977	type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
1978		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
1979
1980	bnxt_init_rxbd_pages(ring, type);
1981
1982	prod = rxr->rx_agg_prod;
1983	for (i = 0; i < bp->rx_agg_ring_size; i++) {
1984		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
1985			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_agg_ring_size);
1987			break;
1988		}
1989		prod = NEXT_RX_AGG(prod);
1990	}
1991	rxr->rx_agg_prod = prod;
1992	ring->fw_ring_id = INVALID_HW_RING_ID;
1993
1994	if (bp->flags & BNXT_FLAG_TPA) {
1995		if (rxr->rx_tpa) {
1996			u8 *data;
1997			dma_addr_t mapping;
1998
1999			for (i = 0; i < MAX_TPA; i++) {
2000				data = __bnxt_alloc_rx_data(bp, &mapping,
2001							    GFP_KERNEL);
2002				if (!data)
2003					return -ENOMEM;
2004
2005				rxr->rx_tpa[i].data = data;
2006				rxr->rx_tpa[i].mapping = mapping;
2007			}
2008		} else {
2009			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
2010			return -ENOMEM;
2011		}
2012	}
2013
2014	return 0;
2015}
2016
2017static int bnxt_init_rx_rings(struct bnxt *bp)
2018{
2019	int i, rc = 0;
2020
2021	for (i = 0; i < bp->rx_nr_rings; i++) {
2022		rc = bnxt_init_one_rx_ring(bp, i);
2023		if (rc)
2024			break;
2025	}
2026
2027	return rc;
2028}
2029
2030static int bnxt_init_tx_rings(struct bnxt *bp)
2031{
2032	u16 i;
2033
2034	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
2035				   MAX_SKB_FRAGS + 1);
2036
2037	for (i = 0; i < bp->tx_nr_rings; i++) {
2038		struct bnxt_napi *bnapi = bp->bnapi[i];
2039		struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
2040		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
2041
2042		ring->fw_ring_id = INVALID_HW_RING_ID;
2043	}
2044
2045	return 0;
2046}
2047
2048static void bnxt_free_ring_grps(struct bnxt *bp)
2049{
2050	kfree(bp->grp_info);
2051	bp->grp_info = NULL;
2052}
2053
2054static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
2055{
2056	int i;
2057
2058	if (irq_re_init) {
2059		bp->grp_info = kcalloc(bp->cp_nr_rings,
2060				       sizeof(struct bnxt_ring_grp_info),
2061				       GFP_KERNEL);
2062		if (!bp->grp_info)
2063			return -ENOMEM;
2064	}
2065	for (i = 0; i < bp->cp_nr_rings; i++) {
2066		if (irq_re_init)
2067			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
2068		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
2069		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
2070		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
2071		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
2072	}
2073	return 0;
2074}
2075
2076static void bnxt_free_vnics(struct bnxt *bp)
2077{
2078	kfree(bp->vnic_info);
2079	bp->vnic_info = NULL;
2080	bp->nr_vnics = 0;
2081}
2082
2083static int bnxt_alloc_vnics(struct bnxt *bp)
2084{
2085	int num_vnics = 1;
2086
2087#ifdef CONFIG_RFS_ACCEL
2088	if (bp->flags & BNXT_FLAG_RFS)
2089		num_vnics += bp->rx_nr_rings;
2090#endif
2091
2092	bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2093				GFP_KERNEL);
2094	if (!bp->vnic_info)
2095		return -ENOMEM;
2096
2097	bp->nr_vnics = num_vnics;
2098	return 0;
2099}
2100
2101static void bnxt_init_vnics(struct bnxt *bp)
2102{
2103	int i;
2104
2105	for (i = 0; i < bp->nr_vnics; i++) {
2106		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2107
2108		vnic->fw_vnic_id = INVALID_HW_RING_ID;
2109		vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
2110		vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
2111
2112		if (bp->vnic_info[i].rss_hash_key) {
2113			if (i == 0)
2114				prandom_bytes(vnic->rss_hash_key,
2115					      HW_HASH_KEY_SIZE);
2116			else
2117				memcpy(vnic->rss_hash_key,
2118				       bp->vnic_info[0].rss_hash_key,
2119				       HW_HASH_KEY_SIZE);
2120		}
2121	}
2122}
2123
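/* Convert a ring size in descriptors to a number of HW ring pages:
 * divide by the descriptors per page, add one, then round up to the
 * next power of two.  For example, assuming 256 descriptors per page
 * (typical with 4K pages and 16-byte BDs), a ring_size of 600 gives
 * 600 / 256 = 2, incremented to 3, then rounded up to 4 pages.
 */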
2124static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
2125{
2126	int pages;
2127
2128	pages = ring_size / desc_per_pg;
2129
2130	if (!pages)
2131		return 1;
2132
2133	pages++;
2134
2135	while (pages & (pages - 1))
2136		pages++;
2137
2138	return pages;
2139}
2140
2141static void bnxt_set_tpa_flags(struct bnxt *bp)
2142{
2143	bp->flags &= ~BNXT_FLAG_TPA;
2144	if (bp->dev->features & NETIF_F_LRO)
2145		bp->flags |= BNXT_FLAG_LRO;
2146	if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
2147		bp->flags |= BNXT_FLAG_GRO;
2148}
2149
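/* Size the RX, aggregation, TX and completion rings.  The RX buffer
 * size is derived from the MTU.  When TPA (LRO/GRO) or a jumbo MTU is
 * in use, the aggregation ring is sized as a multiple (agg_factor) of
 * the RX ring, and the completion ring is sized as
 * rx_ring_size * (2 + agg_factor) + tx_ring_size.
 */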
2150/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
2151 * be set on entry.
2152 */
2153void bnxt_set_ring_params(struct bnxt *bp)
2154{
2155	u32 ring_size, rx_size, rx_space;
2156	u32 agg_factor = 0, agg_ring_size = 0;
2157
2158	/* 8 for CRC and VLAN */
2159	rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
2160
2161	rx_space = rx_size + NET_SKB_PAD +
2162		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2163
2164	bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
2165	ring_size = bp->rx_ring_size;
2166	bp->rx_agg_ring_size = 0;
2167	bp->rx_agg_nr_pages = 0;
2168
2169	if (bp->flags & BNXT_FLAG_TPA)
2170		agg_factor = 4;
2171
2172	bp->flags &= ~BNXT_FLAG_JUMBO;
2173	if (rx_space > PAGE_SIZE) {
2174		u32 jumbo_factor;
2175
2176		bp->flags |= BNXT_FLAG_JUMBO;
2177		jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
2178		if (jumbo_factor > agg_factor)
2179			agg_factor = jumbo_factor;
2180	}
2181	agg_ring_size = ring_size * agg_factor;
2182
2183	if (agg_ring_size) {
2184		bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
2185							RX_DESC_CNT);
2186		if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
2187			u32 tmp = agg_ring_size;
2188
2189			bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
2190			agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
2191			netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
2192				    tmp, agg_ring_size);
2193		}
2194		bp->rx_agg_ring_size = agg_ring_size;
2195		bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
2196		rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
2197		rx_space = rx_size + NET_SKB_PAD +
2198			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2199	}
2200
2201	bp->rx_buf_use_size = rx_size;
2202	bp->rx_buf_size = rx_space;
2203
2204	bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
2205	bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
2206
2207	ring_size = bp->tx_ring_size;
2208	bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
2209	bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
2210
2211	ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
2212	bp->cp_ring_size = ring_size;
2213
2214	bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
2215	if (bp->cp_nr_pages > MAX_CP_PAGES) {
2216		bp->cp_nr_pages = MAX_CP_PAGES;
2217		bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
2218		netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
2219			    ring_size, bp->cp_ring_size);
2220	}
2221	bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
2222	bp->cp_ring_mask = bp->cp_bit - 1;
2223}
2224
2225static void bnxt_free_vnic_attributes(struct bnxt *bp)
2226{
2227	int i;
2228	struct bnxt_vnic_info *vnic;
2229	struct pci_dev *pdev = bp->pdev;
2230
2231	if (!bp->vnic_info)
2232		return;
2233
2234	for (i = 0; i < bp->nr_vnics; i++) {
2235		vnic = &bp->vnic_info[i];
2236
2237		kfree(vnic->fw_grp_ids);
2238		vnic->fw_grp_ids = NULL;
2239
2240		kfree(vnic->uc_list);
2241		vnic->uc_list = NULL;
2242
2243		if (vnic->mc_list) {
2244			dma_free_coherent(&pdev->dev, vnic->mc_list_size,
2245					  vnic->mc_list, vnic->mc_list_mapping);
2246			vnic->mc_list = NULL;
2247		}
2248
2249		if (vnic->rss_table) {
2250			dma_free_coherent(&pdev->dev, PAGE_SIZE,
2251					  vnic->rss_table,
2252					  vnic->rss_table_dma_addr);
2253			vnic->rss_table = NULL;
2254		}
2255
2256		vnic->rss_hash_key = NULL;
2257		vnic->flags = 0;
2258	}
2259}
2260
2261static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
2262{
2263	int i, rc = 0, size;
2264	struct bnxt_vnic_info *vnic;
2265	struct pci_dev *pdev = bp->pdev;
2266	int max_rings;
2267
2268	for (i = 0; i < bp->nr_vnics; i++) {
2269		vnic = &bp->vnic_info[i];
2270
2271		if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
2272			int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
2273
2274			if (mem_size > 0) {
2275				vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
2276				if (!vnic->uc_list) {
2277					rc = -ENOMEM;
2278					goto out;
2279				}
2280			}
2281		}
2282
2283		if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
2284			vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
2285			vnic->mc_list =
2286				dma_alloc_coherent(&pdev->dev,
2287						   vnic->mc_list_size,
2288						   &vnic->mc_list_mapping,
2289						   GFP_KERNEL);
2290			if (!vnic->mc_list) {
2291				rc = -ENOMEM;
2292				goto out;
2293			}
2294		}
2295
2296		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2297			max_rings = bp->rx_nr_rings;
2298		else
2299			max_rings = 1;
2300
2301		vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
2302		if (!vnic->fw_grp_ids) {
2303			rc = -ENOMEM;
2304			goto out;
2305		}
2306
2307		/* Allocate rss table and hash key */
2308		vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2309						     &vnic->rss_table_dma_addr,
2310						     GFP_KERNEL);
2311		if (!vnic->rss_table) {
2312			rc = -ENOMEM;
2313			goto out;
2314		}
2315
2316		size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
2317
2318		vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
2319		vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
2320	}
2321	return 0;
2322
2323out:
2324	return rc;
2325}
2326
2327static void bnxt_free_hwrm_resources(struct bnxt *bp)
2328{
2329	struct pci_dev *pdev = bp->pdev;
2330
2331	dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
2332			  bp->hwrm_cmd_resp_dma_addr);
2333
2334	bp->hwrm_cmd_resp_addr = NULL;
2335	if (bp->hwrm_dbg_resp_addr) {
2336		dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
2337				  bp->hwrm_dbg_resp_addr,
2338				  bp->hwrm_dbg_resp_dma_addr);
2339
2340		bp->hwrm_dbg_resp_addr = NULL;
2341	}
2342}
2343
2344static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
2345{
2346	struct pci_dev *pdev = bp->pdev;
2347
2348	bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
2349						   &bp->hwrm_cmd_resp_dma_addr,
2350						   GFP_KERNEL);
2351	if (!bp->hwrm_cmd_resp_addr)
2352		return -ENOMEM;
2353	bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
2354						    HWRM_DBG_REG_BUF_SIZE,
2355						    &bp->hwrm_dbg_resp_dma_addr,
2356						    GFP_KERNEL);
2357	if (!bp->hwrm_dbg_resp_addr)
2358		netdev_warn(bp->dev, "failed to alloc debug register dma mem\n");
2359
2360	return 0;
2361}
2362
2363static void bnxt_free_stats(struct bnxt *bp)
2364{
2365	u32 size, i;
2366	struct pci_dev *pdev = bp->pdev;
2367
2368	if (!bp->bnapi)
2369		return;
2370
2371	size = sizeof(struct ctx_hw_stats);
2372
2373	for (i = 0; i < bp->cp_nr_rings; i++) {
2374		struct bnxt_napi *bnapi = bp->bnapi[i];
2375		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2376
2377		if (cpr->hw_stats) {
2378			dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
2379					  cpr->hw_stats_map);
2380			cpr->hw_stats = NULL;
2381		}
2382	}
2383}
2384
2385static int bnxt_alloc_stats(struct bnxt *bp)
2386{
2387	u32 size, i;
2388	struct pci_dev *pdev = bp->pdev;
2389
2390	size = sizeof(struct ctx_hw_stats);
2391
2392	for (i = 0; i < bp->cp_nr_rings; i++) {
2393		struct bnxt_napi *bnapi = bp->bnapi[i];
2394		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
2395
2396		cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
2397						   &cpr->hw_stats_map,
2398						   GFP_KERNEL);
2399		if (!cpr->hw_stats)
2400			return -ENOMEM;
2401
2402		cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
2403	}
2404	return 0;
2405}
2406
2407static void bnxt_clear_ring_indices(struct bnxt *bp)
2408{
2409	int i;
2410
2411	if (!bp->bnapi)
2412		return;
2413
2414	for (i = 0; i < bp->cp_nr_rings; i++) {
2415		struct bnxt_napi *bnapi = bp->bnapi[i];
2416		struct bnxt_cp_ring_info *cpr;
2417		struct bnxt_rx_ring_info *rxr;
2418		struct bnxt_tx_ring_info *txr;
2419
2420		if (!bnapi)
2421			continue;
2422
2423		cpr = &bnapi->cp_ring;
2424		cpr->cp_raw_cons = 0;
2425
2426		txr = &bnapi->tx_ring;
2427		txr->tx_prod = 0;
2428		txr->tx_cons = 0;
2429
2430		rxr = &bnapi->rx_ring;
2431		rxr->rx_prod = 0;
2432		rxr->rx_agg_prod = 0;
2433		rxr->rx_sw_agg_prod = 0;
2434	}
2435}
2436
2437static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
2438{
2439#ifdef CONFIG_RFS_ACCEL
2440	int i;
2441
2442	/* We are under rtnl_lock and all our NAPIs have been disabled,
2443	 * so it is safe to delete the hash table.
2444	 */
2445	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
2446		struct hlist_head *head;
2447		struct hlist_node *tmp;
2448		struct bnxt_ntuple_filter *fltr;
2449
2450		head = &bp->ntp_fltr_hash_tbl[i];
2451		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
2452			hlist_del(&fltr->hash);
2453			kfree(fltr);
2454		}
2455	}
2456	if (irq_reinit) {
2457		kfree(bp->ntp_fltr_bmap);
2458		bp->ntp_fltr_bmap = NULL;
2459	}
2460	bp->ntp_fltr_count = 0;
2461#endif
2462}
2463
2464static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
2465{
2466#ifdef CONFIG_RFS_ACCEL
2467	int i, rc = 0;
2468
2469	if (!(bp->flags & BNXT_FLAG_RFS))
2470		return 0;
2471
2472	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
2473		INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
2474
2475	bp->ntp_fltr_count = 0;
2476	bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
2477				    sizeof(long), GFP_KERNEL);
2478
2479	if (!bp->ntp_fltr_bmap)
2480		rc = -ENOMEM;
2481
2482	return rc;
2483#else
2484	return 0;
2485#endif
2486}
2487
2488static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
2489{
2490	bnxt_free_vnic_attributes(bp);
2491	bnxt_free_tx_rings(bp);
2492	bnxt_free_rx_rings(bp);
2493	bnxt_free_cp_rings(bp);
2494	bnxt_free_ntp_fltrs(bp, irq_re_init);
2495	if (irq_re_init) {
2496		bnxt_free_stats(bp);
2497		bnxt_free_ring_grps(bp);
2498		bnxt_free_vnics(bp);
2499		kfree(bp->bnapi);
2500		bp->bnapi = NULL;
2501	} else {
2502		bnxt_clear_ring_indices(bp);
2503	}
2504}
2505
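/* Allocate all host memory for the device.  When irq_re_init is set,
 * the bnxt_napi pointer array and the per-ring bnxt_napi structs are
 * carved out of a single allocation, and stats, ntuple filter and vnic
 * state are allocated as well.  Ring descriptor memory and vnic
 * attributes are (re)allocated in all cases.  On failure, everything
 * allocated so far is released via bnxt_free_mem().
 */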
2506static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
2507{
2508	int i, rc, size, arr_size;
2509	void *bnapi;
2510
2511	if (irq_re_init) {
2512		/* Allocate bnapi mem pointer array and mem block for
2513		 * all queues
2514		 */
2515		arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
2516				bp->cp_nr_rings);
2517		size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
2518		bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
2519		if (!bnapi)
2520			return -ENOMEM;
2521
2522		bp->bnapi = bnapi;
2523		bnapi += arr_size;
2524		for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
2525			bp->bnapi[i] = bnapi;
2526			bp->bnapi[i]->index = i;
2527			bp->bnapi[i]->bp = bp;
2528		}
2529
2530		rc = bnxt_alloc_stats(bp);
2531		if (rc)
2532			goto alloc_mem_err;
2533
2534		rc = bnxt_alloc_ntp_fltrs(bp);
2535		if (rc)
2536			goto alloc_mem_err;
2537
2538		rc = bnxt_alloc_vnics(bp);
2539		if (rc)
2540			goto alloc_mem_err;
2541	}
2542
2543	bnxt_init_ring_struct(bp);
2544
2545	rc = bnxt_alloc_rx_rings(bp);
2546	if (rc)
2547		goto alloc_mem_err;
2548
2549	rc = bnxt_alloc_tx_rings(bp);
2550	if (rc)
2551		goto alloc_mem_err;
2552
2553	rc = bnxt_alloc_cp_rings(bp);
2554	if (rc)
2555		goto alloc_mem_err;
2556
2557	bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
2558				  BNXT_VNIC_UCAST_FLAG;
2559	rc = bnxt_alloc_vnic_attributes(bp);
2560	if (rc)
2561		goto alloc_mem_err;
2562	return 0;
2563
2564alloc_mem_err:
2565	bnxt_free_mem(bp, true);
2566	return rc;
2567}
2568
2569void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
2570			    u16 cmpl_ring, u16 target_id)
2571{
2572	struct hwrm_cmd_req_hdr *req = request;
2573
2574	req->cmpl_ring_req_type =
2575		cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
2576	req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
2577	req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
2578}
2579
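/* Send one HWRM request to firmware.  The request is copied into BAR0
 * and the command doorbell at offset 0x100 is rung.  If the request
 * names a completion ring, the function waits for the interrupt path
 * to reset hwrm_intr_seq_id; otherwise it polls the response buffer
 * for a non-zero length and then for the valid bit in the last word.
 * The caller must hold hwrm_cmd_lock; only one request may be
 * outstanding at a time.
 */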
2580int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2581{
2582	int i, intr_process, rc;
2583	struct hwrm_cmd_req_hdr *req = msg;
2584	u32 *data = msg;
2585	__le32 *resp_len, *valid;
2586	u16 cp_ring_id, len = 0;
2587	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
2588
2589	req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
2590	memset(resp, 0, PAGE_SIZE);
2591	cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
2592		      HWRM_CMPL_RING_MASK) >>
2593		     HWRM_CMPL_RING_SFT;
2594	intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
2595
2596	/* Write request msg to hwrm channel */
2597	__iowrite32_copy(bp->bar0, data, msg_len / 4);
2598
2599	/* currently supports only one outstanding message */
2600	if (intr_process)
2601		bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
2602				       HWRM_SEQ_ID_MASK;
2603
2604	/* Ring channel doorbell */
2605	writel(1, bp->bar0 + 0x100);
2606
2607	i = 0;
2608	if (intr_process) {
2609		/* Wait until hwrm response cmpl interrupt is processed */
2610		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
2611		       i++ < timeout) {
2612			usleep_range(600, 800);
2613		}
2614
2615		if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
2616			netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
2617				   req->cmpl_ring_req_type);
2618			return -1;
2619		}
2620	} else {
2621		/* Check if response len is updated */
2622		resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
2623		for (i = 0; i < timeout; i++) {
2624			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
2625			      HWRM_RESP_LEN_SFT;
2626			if (len)
2627				break;
2628			usleep_range(600, 800);
2629		}
2630
2631		if (i >= timeout) {
2632			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
2633				   timeout, req->cmpl_ring_req_type,
2634				   req->target_id_seq_id, *resp_len);
2635			return -1;
2636		}
2637
2638		/* Last word of resp contains valid bit */
2639		valid = bp->hwrm_cmd_resp_addr + len - 4;
2640		for (i = 0; i < timeout; i++) {
2641			if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
2642				break;
2643			usleep_range(600, 800);
2644		}
2645
2646		if (i >= timeout) {
2647			netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
2648				   timeout, req->cmpl_ring_req_type,
2649				   req->target_id_seq_id, len, *valid);
2650			return -1;
2651		}
2652	}
2653
2654	rc = le16_to_cpu(resp->error_code);
2655	if (rc) {
2656		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
2657			   le16_to_cpu(resp->req_type),
2658			   le16_to_cpu(resp->seq_id), rc);
2659		return rc;
2660	}
2661	return 0;
2662}
2663
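/* Locked wrapper around _hwrm_send_message().  Callers that need to
 * read fields from the shared response buffer must instead take
 * hwrm_cmd_lock themselves, call _hwrm_send_message(), copy out the
 * response, and then drop the lock.
 */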
2664int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
2665{
2666	int rc;
2667
2668	mutex_lock(&bp->hwrm_cmd_lock);
2669	rc = _hwrm_send_message(bp, msg, msg_len, timeout);
2670	mutex_unlock(&bp->hwrm_cmd_lock);
2671	return rc;
2672}
2673
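/* Register the driver with firmware: OS type, driver version and the
 * async event forwarding mask.  On the PF, also pass a bitmap of the
 * HWRM commands (bnxt_vf_req_snif) that firmware should forward to the
 * PF driver when they are issued by a VF.
 */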
2674static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
2675{
2676	struct hwrm_func_drv_rgtr_input req = {0};
2677	int i;
2678
2679	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
2680
2681	req.enables =
2682		cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
2683			    FUNC_DRV_RGTR_REQ_ENABLES_VER |
2684			    FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
2685
2686	/* TODO: the async event fwd bits are not yet defined; the firmware
2687	 * only checks for a non-zero value to enable async event forwarding
2688	 */
2689	req.async_event_fwd[0] |= cpu_to_le32(1);
2690	req.os_type = cpu_to_le16(1);
2691	req.ver_maj = DRV_VER_MAJ;
2692	req.ver_min = DRV_VER_MIN;
2693	req.ver_upd = DRV_VER_UPD;
2694
2695	if (BNXT_PF(bp)) {
2696		DECLARE_BITMAP(vf_req_snif_bmap, 256);
2697		u32 *data = (u32 *)vf_req_snif_bmap;
2698
2699		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
2700		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
2701			__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
2702
2703		for (i = 0; i < 8; i++)
2704			req.vf_req_fwd[i] = cpu_to_le32(data[i]);
2705
2706		req.enables |=
2707			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
2708	}
2709
2710	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2711}
2712
2713static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
2714{
2715	u32 rc = 0;
2716	struct hwrm_tunnel_dst_port_free_input req = {0};
2717
2718	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
2719	req.tunnel_type = tunnel_type;
2720
2721	switch (tunnel_type) {
2722	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
2723		req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
2724		break;
2725	case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
2726		req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
2727		break;
2728	default:
2729		break;
2730	}
2731
2732	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2733	if (rc)
2734		netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
2735			   rc);
2736	return rc;
2737}
2738
2739static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
2740					   u8 tunnel_type)
2741{
2742	u32 rc = 0;
2743	struct hwrm_tunnel_dst_port_alloc_input req = {0};
2744	struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2745
2746	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
2747
2748	req.tunnel_type = tunnel_type;
2749	req.tunnel_dst_port_val = port;
2750
2751	mutex_lock(&bp->hwrm_cmd_lock);
2752	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2753	if (rc) {
2754		netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
2755			   rc);
2756		goto err_out;
2757	}
2758
2759	if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
2760		bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2761
2762	else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
2763		bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
2764err_out:
2765	mutex_unlock(&bp->hwrm_cmd_lock);
2766	return rc;
2767}
2768
2769static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
2770{
2771	struct hwrm_cfa_l2_set_rx_mask_input req = {0};
2772	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2773
2774	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
2775	req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id);
2776
2777	req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
2778	req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
2779	req.mask = cpu_to_le32(vnic->rx_mask);
2780	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2781}
2782
2783#ifdef CONFIG_RFS_ACCEL
2784static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
2785					    struct bnxt_ntuple_filter *fltr)
2786{
2787	struct hwrm_cfa_ntuple_filter_free_input req = {0};
2788
2789	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
2790	req.ntuple_filter_id = fltr->filter_id;
2791	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2792}
2793
2794#define BNXT_NTP_FLTR_FLAGS					\
2795	(CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID |	\
2796	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE |	\
2797	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR |	\
2798	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE |	\
2799	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |	\
2800	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK |	\
2801	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |	\
2802	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK |	\
2803	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL |	\
2804	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT |		\
2805	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK |	\
2806	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT |		\
2807	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK |	\
2808	 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID)
2809
2810static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
2811					     struct bnxt_ntuple_filter *fltr)
2812{
2813	int rc = 0;
2814	struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
2815	struct hwrm_cfa_ntuple_filter_alloc_output *resp =
2816		bp->hwrm_cmd_resp_addr;
2817	struct flow_keys *keys = &fltr->fkeys;
2818	struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
2819
2820	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
2821	req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
2822
2823	req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
2824
2825	req.ethertype = htons(ETH_P_IP);
2826	memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
2827	req.ipaddr_type = 4;
2828	req.ip_protocol = keys->basic.ip_proto;
2829
2830	req.src_ipaddr[0] = keys->addrs.v4addrs.src;
2831	req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
2832	req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
2833	req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
2834
2835	req.src_port = keys->ports.src;
2836	req.src_port_mask = cpu_to_be16(0xffff);
2837	req.dst_port = keys->ports.dst;
2838	req.dst_port_mask = cpu_to_be16(0xffff);
2839
2840	req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id);
2841	mutex_lock(&bp->hwrm_cmd_lock);
2842	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2843	if (!rc)
2844		fltr->filter_id = resp->ntuple_filter_id;
2845	mutex_unlock(&bp->hwrm_cmd_lock);
2846	return rc;
2847}
2848#endif
2849
2850static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
2851				     u8 *mac_addr)
2852{
2853	u32 rc = 0;
2854	struct hwrm_cfa_l2_filter_alloc_input req = {0};
2855	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2856
2857	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
2858	req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
2859				CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
2860	req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
2861	req.enables =
2862		cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
2863			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID |
2864			    CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
2865	memcpy(req.l2_addr, mac_addr, ETH_ALEN);
2866	req.l2_addr_mask[0] = 0xff;
2867	req.l2_addr_mask[1] = 0xff;
2868	req.l2_addr_mask[2] = 0xff;
2869	req.l2_addr_mask[3] = 0xff;
2870	req.l2_addr_mask[4] = 0xff;
2871	req.l2_addr_mask[5] = 0xff;
2872
2873	mutex_lock(&bp->hwrm_cmd_lock);
2874	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2875	if (!rc)
2876		bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
2877							resp->l2_filter_id;
2878	mutex_unlock(&bp->hwrm_cmd_lock);
2879	return rc;
2880}
2881
2882static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
2883{
2884	u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
2885	int rc = 0;
2886
2887	/* Any associated ntuple filters will also be cleared by firmware. */
2888	mutex_lock(&bp->hwrm_cmd_lock);
2889	for (i = 0; i < num_of_vnics; i++) {
2890		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
2891
2892		for (j = 0; j < vnic->uc_filter_count; j++) {
2893			struct hwrm_cfa_l2_filter_free_input req = {0};
2894
2895			bnxt_hwrm_cmd_hdr_init(bp, &req,
2896					       HWRM_CFA_L2_FILTER_FREE, -1, -1);
2897
2898			req.l2_filter_id = vnic->fw_l2_filter_id[j];
2899
2900			rc = _hwrm_send_message(bp, &req, sizeof(req),
2901						HWRM_CMD_TIMEOUT);
2902		}
2903		vnic->uc_filter_count = 0;
2904	}
2905	mutex_unlock(&bp->hwrm_cmd_lock);
2906
2907	return rc;
2908}
2909
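/* Configure TPA (hardware LRO/GRO aggregation) for a vnic.
 * max_agg_segs is in log2 units and excludes the first packet.  For
 * example, assuming a 1500-byte MTU (mss = 1460), 4K pages and the
 * typical MAX_SKB_FRAGS of 17: n = 4096 / 1460 = 2,
 * nsegs = (17 - 1) * 2 = 32, and max_agg_segs = ilog2(32) = 5.
 */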
2910static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
2911{
2912	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2913	struct hwrm_vnic_tpa_cfg_input req = {0};
2914
2915	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
2916
2917	if (tpa_flags) {
2918		u16 mss = bp->dev->mtu - 40;
2919		u32 nsegs, n, segs = 0, flags;
2920
2921		flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
2922			VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
2923			VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
2924			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
2925			VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
2926		if (tpa_flags & BNXT_FLAG_GRO)
2927			flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
2928
2929		req.flags = cpu_to_le32(flags);
2930
2931		req.enables =
2932			cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
2933				    VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
2934
2935		/* Number of segs is in log2 units, and the first packet is
2936		 * not included in this count.
2937		 */
2938		if (mss <= PAGE_SIZE) {
2939			n = PAGE_SIZE / mss;
2940			nsegs = (MAX_SKB_FRAGS - 1) * n;
2941		} else {
2942			n = mss / PAGE_SIZE;
2943			if (mss & (PAGE_SIZE - 1))
2944				n++;
2945			nsegs = (MAX_SKB_FRAGS - n) / n;
2946		}
2947
2948		segs = ilog2(nsegs);
2949		req.max_agg_segs = cpu_to_le16(segs);
2950		req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
2951	}
2952	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
2953
2954	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2955}
2956
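/* Program RSS for a vnic: the IPv4/IPv6 TCP hash types, the hash key,
 * and a HW_HASH_INDEX_SIZE entry indirection table filled round-robin
 * with the vnic's ring group ids.  With set_rss false only the RSS
 * context id is sent, clearing the configuration for that context.
 */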
2957static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
2958{
2959	u32 i, j, max_rings;
2960	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2961	struct hwrm_vnic_rss_cfg_input req = {0};
2962
2963	if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
2964		return 0;
2965
2966	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
2967	if (set_rss) {
2968		vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
2969				 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
2970				 BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
2971				 BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
2972
2973		req.hash_type = cpu_to_le32(vnic->hash_type);
2974
2975		if (vnic->flags & BNXT_VNIC_RSS_FLAG)
2976			max_rings = bp->rx_nr_rings;
2977		else
2978			max_rings = 1;
2979
2980		/* Fill the RSS indirection table with ring group ids */
2981		for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
2982			if (j == max_rings)
2983				j = 0;
2984			vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
2985		}
2986
2987		req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
2988		req.hash_key_tbl_addr =
2989			cpu_to_le64(vnic->rss_hash_key_dma_addr);
2990	}
2991	req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
2992	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2993}
2994
2995static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
2996{
2997	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
2998	struct hwrm_vnic_plcmodes_cfg_input req = {0};
2999
3000	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
3001	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
3002				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
3003				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
3004	req.enables =
3005		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
3006			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
3007	/* thresholds not implemented in firmware yet */
3008	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
3009	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
3010	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
3011	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3012}
3013
3014static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
3015{
3016	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
3017
3018	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
3019	req.rss_cos_lb_ctx_id =
3020		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
3021
3022	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3023	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3024}
3025
3026static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
3027{
3028	int i;
3029
3030	for (i = 0; i < bp->nr_vnics; i++) {
3031		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
3032
3033		if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
3034			bnxt_hwrm_vnic_ctx_free_one(bp, i);
3035	}
3036	bp->rsscos_nr_ctxs = 0;
3037}
3038
3039static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
3040{
3041	int rc;
3042	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
3043	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
3044						bp->hwrm_cmd_resp_addr;
3045
3046	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
3047			       -1);
3048
3049	mutex_lock(&bp->hwrm_cmd_lock);
3050	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3051	if (!rc)
3052		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
3053			le16_to_cpu(resp->rss_cos_lb_ctx_id);
3054	mutex_unlock(&bp->hwrm_cmd_lock);
3055
3056	return rc;
3057}
3058
3059static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
3060{
3061	int grp_idx = 0;
3062	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3063	struct hwrm_vnic_cfg_input req = {0};
3064
3065	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
3066	/* Only RSS is supported for now; TBD: COS & LB */
3067	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
3068				  VNIC_CFG_REQ_ENABLES_RSS_RULE);
3069	req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
3070	req.cos_rule = cpu_to_le16(0xffff);
3071	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3072		grp_idx = 0;
3073	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
3074		grp_idx = vnic_id - 1;
3075
3076	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
3077	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
3078
3079	req.lb_rule = cpu_to_le16(0xffff);
3080	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
3081			      VLAN_HLEN);
3082
3083	if (bp->flags & BNXT_FLAG_STRIP_VLAN)
3084		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
3085
3086	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3087}
3088
3089static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
3090{
3091	u32 rc = 0;
3092
3093	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
3094		struct hwrm_vnic_free_input req = {0};
3095
3096		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
3097		req.vnic_id =
3098			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
3099
3100		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3101		if (rc)
3102			return rc;
3103		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
3104	}
3105	return rc;
3106}
3107
3108static void bnxt_hwrm_vnic_free(struct bnxt *bp)
3109{
3110	u16 i;
3111
3112	for (i = 0; i < bp->nr_vnics; i++)
3113		bnxt_hwrm_vnic_free_one(bp, i);
3114}
3115
3116static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id,
3117				u16 end_grp_id)
3118{
3119	u32 rc = 0, i, j;
3120	struct hwrm_vnic_alloc_input req = {0};
3121	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3122
3123	/* map ring groups to this vnic */
3124	for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) {
3125		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) {
3126			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
3127				   j, (end_grp_id - start_grp_id));
3128			break;
3129		}
3130		bp->vnic_info[vnic_id].fw_grp_ids[j] =
3131					bp->grp_info[i].fw_grp_id;
3132	}
3133
3134	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
3135	if (vnic_id == 0)
3136		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
3137
3138	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
3139
3140	mutex_lock(&bp->hwrm_cmd_lock);
3141	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3142	if (!rc)
3143		bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
3144	mutex_unlock(&bp->hwrm_cmd_lock);
3145	return rc;
3146}
3147
3148static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
3149{
3150	u16 i;
3151	u32 rc = 0;
3152
3153	mutex_lock(&bp->hwrm_cmd_lock);
3154	for (i = 0; i < bp->rx_nr_rings; i++) {
3155		struct hwrm_ring_grp_alloc_input req = {0};
3156		struct hwrm_ring_grp_alloc_output *resp =
3157					bp->hwrm_cmd_resp_addr;
3158
3159		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
3160
3161		req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
3162		req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id);
3163		req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id);
3164		req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx);
3165
3166		rc = _hwrm_send_message(bp, &req, sizeof(req),
3167					HWRM_CMD_TIMEOUT);
3168		if (rc)
3169			break;
3170
3171		bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id);
3172	}
3173	mutex_unlock(&bp->hwrm_cmd_lock);
3174	return rc;
3175}
3176
3177static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
3178{
3179	u16 i;
3180	u32 rc = 0;
3181	struct hwrm_ring_grp_free_input req = {0};
3182
3183	if (!bp->grp_info)
3184		return 0;
3185
3186	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
3187
3188	mutex_lock(&bp->hwrm_cmd_lock);
3189	for (i = 0; i < bp->cp_nr_rings; i++) {
3190		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
3191			continue;
3192		req.ring_group_id =
3193			cpu_to_le32(bp->grp_info[i].fw_grp_id);
3194
3195		rc = _hwrm_send_message(bp, &req, sizeof(req),
3196					HWRM_CMD_TIMEOUT);
3197		if (rc)
3198			break;
3199		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
3200	}
3201	mutex_unlock(&bp->hwrm_cmd_lock);
3202	return rc;
3203}
3204
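/* Allocate one firmware ring of the given type (TX, RX, RX aggregation
 * or completion).  Multi-page rings pass a page table; single-page
 * rings pass the page address directly.  map_index associates the ring
 * with its doorbell/MSI-X slot, and TX rings are additionally bound to
 * their completion ring and stats context.
 */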
3205static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
3206				    struct bnxt_ring_struct *ring,
3207				    u32 ring_type, u32 map_index,
3208				    u32 stats_ctx_id)
3209{
3210	int rc = 0, err = 0;
3211	struct hwrm_ring_alloc_input req = {0};
3212	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3213	u16 ring_id;
3214
3215	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
3216
3217	req.enables = 0;
3218	if (ring->nr_pages > 1) {
3219		req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
3220		/* Page size is in log2 units */
3221		req.page_size = BNXT_PAGE_SHIFT;
3222		req.page_tbl_depth = 1;
3223	} else {
3224		req.page_tbl_addr =  cpu_to_le64(ring->dma_arr[0]);
3225	}
3226	req.fbo = 0;
3227	/* Association of ring index with doorbell index and MSIX number */
3228	req.logical_id = cpu_to_le16(map_index);
3229
3230	switch (ring_type) {
3231	case HWRM_RING_ALLOC_TX:
3232		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
3233		/* Association of transmit ring with completion ring */
3234		req.cmpl_ring_id =
3235			cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
3236		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
3237		req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
3238		req.queue_id = cpu_to_le16(ring->queue_id);
3239		break;
3240	case HWRM_RING_ALLOC_RX:
3241		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3242		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
3243		break;
3244	case HWRM_RING_ALLOC_AGG:
3245		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
3246		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
3247		break;
3248	case HWRM_RING_ALLOC_CMPL:
3249		req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
3250		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
3251		if (bp->flags & BNXT_FLAG_USING_MSIX)
3252			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
3253		break;
3254	default:
3255		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
3256			   ring_type);
3257		return -1;
3258	}
3259
3260	mutex_lock(&bp->hwrm_cmd_lock);
3261	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3262	err = le16_to_cpu(resp->error_code);
3263	ring_id = le16_to_cpu(resp->ring_id);
3264	mutex_unlock(&bp->hwrm_cmd_lock);
3265
3266	if (rc || err) {
3267		switch (ring_type) {
3268		case RING_FREE_REQ_RING_TYPE_CMPL:
3269			netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
3270				   rc, err);
3271			return -1;
3272
3273		case RING_FREE_REQ_RING_TYPE_RX:
3274			netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
3275				   rc, err);
3276			return -1;
3277
3278		case RING_FREE_REQ_RING_TYPE_TX:
3279			netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
3280				   rc, err);
3281			return -1;
3282
3283		default:
3284			netdev_err(bp->dev, "Invalid ring\n");
3285			return -1;
3286		}
3287	}
3288	ring->fw_ring_id = ring_id;
3289	return rc;
3290}
3291
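/* Allocate all firmware rings in dependency order: completion rings
 * first (TX/RX rings reference cp_fw_ring_id), then TX, RX and RX
 * aggregation rings.  Doorbells live in BAR1 at 0x80-byte strides,
 * with the aggregation ring doorbells following the RX doorbells.
 */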
3292static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
3293{
3294	int i, rc = 0;
3295
3296	if (bp->cp_nr_rings) {
3297		for (i = 0; i < bp->cp_nr_rings; i++) {
3298			struct bnxt_napi *bnapi = bp->bnapi[i];
3299			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3300			struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3301
3302			rc = hwrm_ring_alloc_send_msg(bp, ring,
3303						      HWRM_RING_ALLOC_CMPL, i,
3304						      INVALID_STATS_CTX_ID);
3305			if (rc)
3306				goto err_out;
3307			cpr->cp_doorbell = bp->bar1 + i * 0x80;
3308			BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3309			bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
3310		}
3311	}
3312
3313	if (bp->tx_nr_rings) {
3314		for (i = 0; i < bp->tx_nr_rings; i++) {
3315			struct bnxt_napi *bnapi = bp->bnapi[i];
3316			struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
3317			struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3318			u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
3319
3320			rc = hwrm_ring_alloc_send_msg(bp, ring,
3321						      HWRM_RING_ALLOC_TX, i,
3322						      fw_stats_ctx);
3323			if (rc)
3324				goto err_out;
3325			txr->tx_doorbell = bp->bar1 + i * 0x80;
3326		}
3327	}
3328
3329	if (bp->rx_nr_rings) {
3330		for (i = 0; i < bp->rx_nr_rings; i++) {
3331			struct bnxt_napi *bnapi = bp->bnapi[i];
3332			struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
3333			struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3334
3335			rc = hwrm_ring_alloc_send_msg(bp, ring,
3336						      HWRM_RING_ALLOC_RX, i,
3337						      INVALID_STATS_CTX_ID);
3338			if (rc)
3339				goto err_out;
3340			rxr->rx_doorbell = bp->bar1 + i * 0x80;
3341			writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
3342			bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
3343		}
3344	}
3345
3346	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3347		for (i = 0; i < bp->rx_nr_rings; i++) {
3348			struct bnxt_napi *bnapi = bp->bnapi[i];
3349			struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
3350			struct bnxt_ring_struct *ring =
3351						&rxr->rx_agg_ring_struct;
3352
3353			rc = hwrm_ring_alloc_send_msg(bp, ring,
3354						      HWRM_RING_ALLOC_AGG,
3355						      bp->rx_nr_rings + i,
3356						      INVALID_STATS_CTX_ID);
3357			if (rc)
3358				goto err_out;
3359
3360			rxr->rx_agg_doorbell =
3361				bp->bar1 + (bp->rx_nr_rings + i) * 0x80;
3362			writel(DB_KEY_RX | rxr->rx_agg_prod,
3363			       rxr->rx_agg_doorbell);
3364			bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id;
3365		}
3366	}
3367err_out:
3368	return rc;
3369}
3370
3371static int hwrm_ring_free_send_msg(struct bnxt *bp,
3372				   struct bnxt_ring_struct *ring,
3373				   u32 ring_type, int cmpl_ring_id)
3374{
3375	int rc;
3376	struct hwrm_ring_free_input req = {0};
3377	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
3378	u16 error_code;
3379
3380	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
3381	req.ring_type = ring_type;
3382	req.ring_id = cpu_to_le16(ring->fw_ring_id);
3383
3384	mutex_lock(&bp->hwrm_cmd_lock);
3385	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3386	error_code = le16_to_cpu(resp->error_code);
3387	mutex_unlock(&bp->hwrm_cmd_lock);
3388
3389	if (rc || error_code) {
3390		switch (ring_type) {
3391		case RING_FREE_REQ_RING_TYPE_CMPL:
3392			netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
3393				   rc);
3394			return rc;
3395		case RING_FREE_REQ_RING_TYPE_RX:
3396			netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
3397				   rc);
3398			return rc;
3399		case RING_FREE_REQ_RING_TYPE_TX:
3400			netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
3401				   rc);
3402			return rc;
3403		default:
3404			netdev_err(bp->dev, "Invalid ring\n");
3405			return -1;
3406		}
3407	}
3408	return 0;
3409}
3410
3411static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
3412{
3413	int i, rc = 0;
3414
3415	if (!bp->bnapi)
3416		return 0;
3417
3418	if (bp->tx_nr_rings) {
3419		for (i = 0; i < bp->tx_nr_rings; i++) {
3420			struct bnxt_napi *bnapi = bp->bnapi[i];
3421			struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
3422			struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
3423			u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
3424
3425			if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3426				hwrm_ring_free_send_msg(
3427					bp, ring,
3428					RING_FREE_REQ_RING_TYPE_TX,
3429					close_path ? cmpl_ring_id :
3430					INVALID_HW_RING_ID);
3431				ring->fw_ring_id = INVALID_HW_RING_ID;
3432			}
3433		}
3434	}
3435
3436	if (bp->rx_nr_rings) {
3437		for (i = 0; i < bp->rx_nr_rings; i++) {
3438			struct bnxt_napi *bnapi = bp->bnapi[i];
3439			struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
3440			struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
3441			u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
3442
3443			if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3444				hwrm_ring_free_send_msg(
3445					bp, ring,
3446					RING_FREE_REQ_RING_TYPE_RX,
3447					close_path ? cmpl_ring_id :
3448					INVALID_HW_RING_ID);
3449				ring->fw_ring_id = INVALID_HW_RING_ID;
3450				bp->grp_info[i].rx_fw_ring_id =
3451					INVALID_HW_RING_ID;
3452			}
3453		}
3454	}
3455
3456	if (bp->rx_agg_nr_pages) {
3457		for (i = 0; i < bp->rx_nr_rings; i++) {
3458			struct bnxt_napi *bnapi = bp->bnapi[i];
3459			struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
3460			struct bnxt_ring_struct *ring =
3461						&rxr->rx_agg_ring_struct;
3462			u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
3463
3464			if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3465				hwrm_ring_free_send_msg(
3466					bp, ring,
3467					RING_FREE_REQ_RING_TYPE_RX,
3468					close_path ? cmpl_ring_id :
3469					INVALID_HW_RING_ID);
3470				ring->fw_ring_id = INVALID_HW_RING_ID;
3471				bp->grp_info[i].agg_fw_ring_id =
3472					INVALID_HW_RING_ID;
3473			}
3474		}
3475	}
3476
3477	if (bp->cp_nr_rings) {
3478		for (i = 0; i < bp->cp_nr_rings; i++) {
3479			struct bnxt_napi *bnapi = bp->bnapi[i];
3480			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3481			struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
3482
3483			if (ring->fw_ring_id != INVALID_HW_RING_ID) {
3484				hwrm_ring_free_send_msg(
3485					bp, ring,
3486					RING_FREE_REQ_RING_TYPE_CMPL,
3487					INVALID_HW_RING_ID);
3488				ring->fw_ring_id = INVALID_HW_RING_ID;
3489				bp->grp_info[i].cp_fw_ring_id =
3490							INVALID_HW_RING_ID;
3491			}
3492		}
3493	}
3494
3495	return rc;
3496}
3497
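/* Program interrupt coalescing parameters on every completion ring.
 * Buffer counts are clamped to the 1..63 range, and RING_IDLE mode is
 * enabled when the coalescing time is below 25 us to reduce latency.
 */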
3498int bnxt_hwrm_set_coal(struct bnxt *bp)
3499{
3500	int i, rc = 0;
3501	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
3502	u16 max_buf, max_buf_irq;
3503	u16 buf_tmr, buf_tmr_irq;
3504	u32 flags;
3505
3506	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
3507			       -1, -1);
3508
3509	/* Each rx completion (2 records) should be DMAed immediately */
3510	max_buf = min_t(u16, bp->coal_bufs / 4, 2);
3511	/* max_buf must not be zero */
3512	max_buf = clamp_t(u16, max_buf, 1, 63);
3513	max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
3514	buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
3515	buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
3516
3517	flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
3518
3519	/* RING_IDLE generates more IRQs for lower latency.  Enable it only
3520	 * if coal_ticks is less than 25 us.
3521	 */
3522	if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
3523		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
3524
3525	req.flags = cpu_to_le16(flags);
3526	req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
3527	req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
3528	req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
3529	req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
3530	req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
3531	req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
3532	req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
3533
3534	mutex_lock(&bp->hwrm_cmd_lock);
3535	for (i = 0; i < bp->cp_nr_rings; i++) {
3536		req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
3537
3538		rc = _hwrm_send_message(bp, &req, sizeof(req),
3539					HWRM_CMD_TIMEOUT);
3540		if (rc)
3541			break;
3542	}
3543	mutex_unlock(&bp->hwrm_cmd_lock);
3544	return rc;
3545}
3546
3547static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
3548{
3549	int rc = 0, i;
3550	struct hwrm_stat_ctx_free_input req = {0};
3551
3552	if (!bp->bnapi)
3553		return 0;
3554
3555	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
3556
3557	mutex_lock(&bp->hwrm_cmd_lock);
3558	for (i = 0; i < bp->cp_nr_rings; i++) {
3559		struct bnxt_napi *bnapi = bp->bnapi[i];
3560		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3561
3562		if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
3563			req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
3564
3565			rc = _hwrm_send_message(bp, &req, sizeof(req),
3566						HWRM_CMD_TIMEOUT);
3567			if (rc)
3568				break;
3569
3570			cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3571		}
3572	}
3573	mutex_unlock(&bp->hwrm_cmd_lock);
3574	return rc;
3575}
3576
3577static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
3578{
3579	int rc = 0, i;
3580	struct hwrm_stat_ctx_alloc_input req = {0};
3581	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3582
3583	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
3584
3585	req.update_period_ms = cpu_to_le32(1000);
3586
3587	mutex_lock(&bp->hwrm_cmd_lock);
3588	for (i = 0; i < bp->cp_nr_rings; i++) {
3589		struct bnxt_napi *bnapi = bp->bnapi[i];
3590		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3591
3592		req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
3593
3594		rc = _hwrm_send_message(bp, &req, sizeof(req),
3595					HWRM_CMD_TIMEOUT);
3596		if (rc)
3597			break;
3598
3599		cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
3600
3601		bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
3602	}
3603	mutex_unlock(&bp->hwrm_cmd_lock);
3604	return rc;
3605}
3606
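/* Query function capabilities and resource limits from firmware.  For
 * the PF this fills bnxt_pf_info (MAC address, max rings, vnics, stats
 * contexts, VF range and flow limits); for a VF it fills bnxt_vf_info
 * and falls back to a random MAC if firmware did not assign one.  Also
 * records whether TX push mode is supported.
 */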
3607static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
3608{
3609	int rc = 0;
3610	struct hwrm_func_qcaps_input req = {0};
3611	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3612
3613	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
3614	req.fid = cpu_to_le16(0xffff);
3615
3616	mutex_lock(&bp->hwrm_cmd_lock);
3617	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3618	if (rc)
3619		goto hwrm_func_qcaps_exit;
3620
3621	if (BNXT_PF(bp)) {
3622		struct bnxt_pf_info *pf = &bp->pf;
3623
3624		pf->fw_fid = le16_to_cpu(resp->fid);
3625		pf->port_id = le16_to_cpu(resp->port_id);
3626		memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
3627		memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
3628		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3629		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3630		pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
3631		pf->max_pf_tx_rings = pf->max_tx_rings;
3632		pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
3633		pf->max_pf_rx_rings = pf->max_rx_rings;
3634		pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3635		pf->max_vnics = le16_to_cpu(resp->max_vnics);
3636		pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
3637		pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
3638		pf->max_vfs = le16_to_cpu(resp->max_vfs);
3639		pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
3640		pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
3641		pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
3642		pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
3643		pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
3644		pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
3645	} else {
3646#ifdef CONFIG_BNXT_SRIOV
3647		struct bnxt_vf_info *vf = &bp->vf;
3648
3649		vf->fw_fid = le16_to_cpu(resp->fid);
3650		memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
3651		if (is_valid_ether_addr(vf->mac_addr))
3652			/* overwrite netdev dev_addr with admin VF MAC */
3653			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
3654		else
3655			random_ether_addr(bp->dev->dev_addr);
3656
3657		vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
3658		vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
3659		vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
3660		vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
3661		vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
3662		vf->max_vnics = le16_to_cpu(resp->max_vnics);
3663		vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
3664#endif
3665	}
3666
3667	bp->tx_push_thresh = 0;
3668	if (resp->flags &
3669	    cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
3670		bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
3671
3672hwrm_func_qcaps_exit:
3673	mutex_unlock(&bp->hwrm_cmd_lock);
3674	return rc;
3675}
3676
3677static int bnxt_hwrm_func_reset(struct bnxt *bp)
3678{
3679	struct hwrm_func_reset_input req = {0};
3680
3681	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
3682	req.enables = 0;
3683
3684	return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
3685}
3686
3687static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
3688{
3689	int rc = 0;
3690	struct hwrm_queue_qportcfg_input req = {0};
3691	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
3692	u8 i, *qptr;
3693
3694	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
3695
3696	mutex_lock(&bp->hwrm_cmd_lock);
3697	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3698	if (rc)
3699		goto qportcfg_exit;
3700
3701	if (!resp->max_configurable_queues) {
3702		rc = -EINVAL;
3703		goto qportcfg_exit;
3704	}
3705	bp->max_tc = resp->max_configurable_queues;
3706	if (bp->max_tc > BNXT_MAX_QUEUE)
3707		bp->max_tc = BNXT_MAX_QUEUE;
3708
3709	qptr = &resp->queue_id0;
3710	for (i = 0; i < bp->max_tc; i++) {
3711		bp->q_info[i].queue_id = *qptr++;
3712		bp->q_info[i].queue_profile = *qptr++;
3713	}
3714
3715qportcfg_exit:
3716	mutex_unlock(&bp->hwrm_cmd_lock);
3717	return rc;
3718}
3719
3720static int bnxt_hwrm_ver_get(struct bnxt *bp)
3721{
3722	int rc;
3723	struct hwrm_ver_get_input req = {0};
3724	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
3725
3726	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
3727	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
3728	req.hwrm_intf_min = HWRM_VERSION_MINOR;
3729	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
3730	mutex_lock(&bp->hwrm_cmd_lock);
3731	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3732	if (rc)
3733		goto hwrm_ver_get_exit;
3734
3735	memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
3736
3737	if (req.hwrm_intf_maj != resp->hwrm_intf_maj ||
3738	    req.hwrm_intf_min != resp->hwrm_intf_min ||
3739	    req.hwrm_intf_upd != resp->hwrm_intf_upd) {
3740		netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n",
3741			    resp->hwrm_intf_maj, resp->hwrm_intf_min,
3742			    resp->hwrm_intf_upd, req.hwrm_intf_maj,
3743			    req.hwrm_intf_min, req.hwrm_intf_upd);
3744		netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n");
3745	}
3746	snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
3747		 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
3748		 resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
3749
3750hwrm_ver_get_exit:
3751	mutex_unlock(&bp->hwrm_cmd_lock);
3752	return rc;
3753}
3754
3755static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
3756{
3757	if (bp->vxlan_port_cnt) {
3758		bnxt_hwrm_tunnel_dst_port_free(
3759			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
3760	}
3761	bp->vxlan_port_cnt = 0;
3762	if (bp->nge_port_cnt) {
3763		bnxt_hwrm_tunnel_dst_port_free(
3764			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
3765	}
3766	bp->nge_port_cnt = 0;
3767}
3768
3769static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
3770{
3771	int rc, i;
3772	u32 tpa_flags = 0;
3773
3774	if (set_tpa)
3775		tpa_flags = bp->flags & BNXT_FLAG_TPA;
3776	for (i = 0; i < bp->nr_vnics; i++) {
3777		rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
3778		if (rc) {
3779			netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
3780				   i, rc);
3781			return rc;
3782		}
3783	}
3784	return 0;
3785}
3786
3787static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
3788{
3789	int i;
3790
3791	for (i = 0; i < bp->nr_vnics; i++)
3792		bnxt_hwrm_vnic_set_rss(bp, i, false);
3793}
3794
3795static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
3796				    bool irq_re_init)
3797{
3798	if (bp->vnic_info) {
3799		bnxt_hwrm_clear_vnic_filter(bp);
3800		/* clear all RSS settings before freeing the vnic contexts */
3801		bnxt_hwrm_clear_vnic_rss(bp);
3802		bnxt_hwrm_vnic_ctx_free(bp);
3803		/* before freeing the vnic, undo the vnic TPA settings */
3804		if (bp->flags & BNXT_FLAG_TPA)
3805			bnxt_set_tpa(bp, false);
3806		bnxt_hwrm_vnic_free(bp);
3807	}
3808	bnxt_hwrm_ring_free(bp, close_path);
3809	bnxt_hwrm_ring_grp_free(bp);
3810	if (irq_re_init) {
3811		bnxt_hwrm_stat_ctx_free(bp);
3812		bnxt_hwrm_free_tunnel_ports(bp);
3813	}
3814}
3815
3816static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
3817{
3818	int rc;
3819
3820	/* allocate context for vnic */
3821	rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
3822	if (rc) {
3823		netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
3824			   vnic_id, rc);
3825		goto vnic_setup_err;
3826	}
3827	bp->rsscos_nr_ctxs++;
3828
3829	/* configure default vnic, ring grp */
3830	rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
3831	if (rc) {
3832		netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
3833			   vnic_id, rc);
3834		goto vnic_setup_err;
3835	}
3836
3837	/* Enable RSS hashing on vnic */
3838	rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
3839	if (rc) {
3840		netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
3841			   vnic_id, rc);
3842		goto vnic_setup_err;
3843	}
3844
3845	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
3846		rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
3847		if (rc) {
3848			netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
3849				   vnic_id, rc);
3850		}
3851	}
3852
3853vnic_setup_err:
3854	return rc;
3855}
3856
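/* For accelerated RFS, allocate one additional VNIC per RX ring (VNIC 0
 * stays the default catch-all) so ntuple filters can steer flows to
 * individual rings.
 */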
3857static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
3858{
3859#ifdef CONFIG_RFS_ACCEL
3860	int i, rc = 0;
3861
3862	for (i = 0; i < bp->rx_nr_rings; i++) {
3863		u16 vnic_id = i + 1;
3864		u16 ring_id = i;
3865
3866		if (vnic_id >= bp->nr_vnics)
3867			break;
3868
3869		bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
3870		rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1);
3871		if (rc) {
3872			netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
3873				   vnic_id, rc);
3874			break;
3875		}
3876		rc = bnxt_setup_vnic(bp, vnic_id);
3877		if (rc)
3878			break;
3879	}
3880	return rc;
3881#else
3882	return 0;
3883#endif
3884}
3885
3886static int bnxt_cfg_rx_mode(struct bnxt *);
3887
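/* Program the chip for a fresh open: stat contexts (on IRQ re-init), rings,
 * ring groups, the default VNIC plus any RFS VNICs, TPA, the default MAC
 * filter, the RX mask and interrupt coalescing.  On failure, everything
 * allocated so far is freed again.
 */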
3888static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
3889{
3890	int rc = 0;
3891
3892	if (irq_re_init) {
3893		rc = bnxt_hwrm_stat_ctx_alloc(bp);
3894		if (rc) {
3895			netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
3896				   rc);
3897			goto err_out;
3898		}
3899	}
3900
3901	rc = bnxt_hwrm_ring_alloc(bp);
3902	if (rc) {
3903		netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
3904		goto err_out;
3905	}
3906
3907	rc = bnxt_hwrm_ring_grp_alloc(bp);
3908	if (rc) {
3909		netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
3910		goto err_out;
3911	}
3912
3913	/* default vnic 0 */
3914	rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
3915	if (rc) {
3916		netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
3917		goto err_out;
3918	}
3919
3920	rc = bnxt_setup_vnic(bp, 0);
3921	if (rc)
3922		goto err_out;
3923
3924	if (bp->flags & BNXT_FLAG_RFS) {
3925		rc = bnxt_alloc_rfs_vnics(bp);
3926		if (rc)
3927			goto err_out;
3928	}
3929
3930	if (bp->flags & BNXT_FLAG_TPA) {
3931		rc = bnxt_set_tpa(bp, true);
3932		if (rc)
3933			goto err_out;
3934	}
3935
3936	if (BNXT_VF(bp))
3937		bnxt_update_vf_mac(bp);
3938
3939	/* Filter for default vnic 0 */
3940	rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
3941	if (rc) {
3942		netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
3943		goto err_out;
3944	}
3945	bp->vnic_info[0].uc_filter_count = 1;
3946
3947	bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
3948				   CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
3949
3950	if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
3951		bp->vnic_info[0].rx_mask |=
3952				CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
3953
3954	rc = bnxt_cfg_rx_mode(bp);
3955	if (rc)
3956		goto err_out;
3957
3958	rc = bnxt_hwrm_set_coal(bp);
3959	if (rc)
3960		netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
3961			    rc);
3962
3963	return 0;
3964
3965err_out:
3966	bnxt_hwrm_resource_free(bp, 0, true);
3967
3968	return rc;
3969}
3970
3971static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
3972{
3973	bnxt_hwrm_resource_free(bp, 1, irq_re_init);
3974	return 0;
3975}
3976
3977static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
3978{
3979	bnxt_init_rx_rings(bp);
3980	bnxt_init_tx_rings(bp);
3981	bnxt_init_ring_grps(bp, irq_re_init);
3982	bnxt_init_vnics(bp);
3983
3984	return bnxt_init_chip(bp, irq_re_init);
3985}
3986
3987static void bnxt_disable_int(struct bnxt *bp)
3988{
3989	int i;
3990
3991	if (!bp->bnapi)
3992		return;
3993
3994	for (i = 0; i < bp->cp_nr_rings; i++) {
3995		struct bnxt_napi *bnapi = bp->bnapi[i];
3996		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3997
3998		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
3999	}
4000}
4001
4002static void bnxt_enable_int(struct bnxt *bp)
4003{
4004	int i;
4005
4006	atomic_set(&bp->intr_sem, 0);
4007	for (i = 0; i < bp->cp_nr_rings; i++) {
4008		struct bnxt_napi *bnapi = bp->bnapi[i];
4009		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4010
4011		BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
4012	}
4013}
4014
4015static int bnxt_set_real_num_queues(struct bnxt *bp)
4016{
4017	int rc;
4018	struct net_device *dev = bp->dev;
4019
4020	rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
4021	if (rc)
4022		return rc;
4023
4024	rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
4025	if (rc)
4026		return rc;
4027
4028#ifdef CONFIG_RFS_ACCEL
4029	if (bp->rx_nr_rings)
4030		dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
4031	if (!dev->rx_cpu_rmap)
4032		rc = -ENOMEM;
4033#endif
4034
4035	return rc;
4036}
4037
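/* Try to enable one MSI-X vector per completion ring.  If fewer vectors are
 * granted, trim the RX/TX ring counts (and the per-TC TX distribution) to
 * fit, then fill in the IRQ table with per-vector names and handlers.
 */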
4038static int bnxt_setup_msix(struct bnxt *bp)
4039{
4040	struct msix_entry *msix_ent;
4041	struct net_device *dev = bp->dev;
4042	int i, total_vecs, rc = 0;
4043	const int len = sizeof(bp->irq_tbl[0].name);
4044
4045	bp->flags &= ~BNXT_FLAG_USING_MSIX;
4046	total_vecs = bp->cp_nr_rings;
4047
4048	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
4049	if (!msix_ent)
4050		return -ENOMEM;
4051
4052	for (i = 0; i < total_vecs; i++) {
4053		msix_ent[i].entry = i;
4054		msix_ent[i].vector = 0;
4055	}
4056
4057	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs);
4058	if (total_vecs < 0) {
4059		rc = -ENODEV;
4060		goto msix_setup_exit;
4061	}
4062
4063	bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
4064	if (bp->irq_tbl) {
4065		int tcs;
4066
4067		/* Trim rings based upon num of vectors allocated */
4068		bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings);
4069		bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings);
4070		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4071		tcs = netdev_get_num_tc(dev);
4072		if (tcs > 1) {
4073			bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
4074			if (bp->tx_nr_rings_per_tc == 0) {
4075				netdev_reset_tc(dev);
4076				bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4077			} else {
4078				int i, off, count;
4079
4080				bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
4081				for (i = 0; i < tcs; i++) {
4082					count = bp->tx_nr_rings_per_tc;
4083					off = i * count;
4084					netdev_set_tc_queue(dev, i, count, off);
4085				}
4086			}
4087		}
4088		bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
4089
4090		for (i = 0; i < bp->cp_nr_rings; i++) {
4091			bp->irq_tbl[i].vector = msix_ent[i].vector;
4092			snprintf(bp->irq_tbl[i].name, len,
4093				 "%s-%s-%d", dev->name, "TxRx", i);
4094			bp->irq_tbl[i].handler = bnxt_msix;
4095		}
4096		rc = bnxt_set_real_num_queues(bp);
4097		if (rc)
4098			goto msix_setup_exit;
4099	} else {
4100		rc = -ENOMEM;
4101		goto msix_setup_exit;
4102	}
4103	bp->flags |= BNXT_FLAG_USING_MSIX;
4104	kfree(msix_ent);
4105	return 0;
4106
4107msix_setup_exit:
4108	netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
4109	pci_disable_msix(bp->pdev);
4110	kfree(msix_ent);
4111	return rc;
4112}
4113
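/* Legacy interrupt fallback: one shared INTx vector with a single RX ring,
 * TX ring and completion ring.
 */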
4114static int bnxt_setup_inta(struct bnxt *bp)
4115{
4116	int rc;
4117	const int len = sizeof(bp->irq_tbl[0].name);
4118
4119	if (netdev_get_num_tc(bp->dev))
4120		netdev_reset_tc(bp->dev);
4121
4122	bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
4123	if (!bp->irq_tbl) {
4124		rc = -ENOMEM;
4125		return rc;
4126	}
4127	bp->rx_nr_rings = 1;
4128	bp->tx_nr_rings = 1;
4129	bp->cp_nr_rings = 1;
4130	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
4131	bp->irq_tbl[0].vector = bp->pdev->irq;
4132	snprintf(bp->irq_tbl[0].name, len,
4133		 "%s-%s-%d", bp->dev->name, "TxRx", 0);
4134	bp->irq_tbl[0].handler = bnxt_inta;
4135	rc = bnxt_set_real_num_queues(bp);
4136	return rc;
4137}
4138
4139static int bnxt_setup_int_mode(struct bnxt *bp)
4140{
4141	int rc = 0;
4142
4143	if (bp->flags & BNXT_FLAG_MSIX_CAP)
4144		rc = bnxt_setup_msix(bp);
4145
4146	if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
4147		/* fallback to INTA */
4148		rc = bnxt_setup_inta(bp);
4149	}
4150	return rc;
4151}
4152
4153static void bnxt_free_irq(struct bnxt *bp)
4154{
4155	struct bnxt_irq *irq;
4156	int i;
4157
4158#ifdef CONFIG_RFS_ACCEL
4159	free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
4160	bp->dev->rx_cpu_rmap = NULL;
4161#endif
4162	if (!bp->irq_tbl)
4163		return;
4164
4165	for (i = 0; i < bp->cp_nr_rings; i++) {
4166		irq = &bp->irq_tbl[i];
4167		if (irq->requested)
4168			free_irq(irq->vector, bp->bnapi[i]);
4169		irq->requested = 0;
4170	}
4171	if (bp->flags & BNXT_FLAG_USING_MSIX)
4172		pci_disable_msix(bp->pdev);
4173	kfree(bp->irq_tbl);
4174	bp->irq_tbl = NULL;
4175}
4176
4177static int bnxt_request_irq(struct bnxt *bp)
4178{
4179	int i, rc = 0;
4180	unsigned long flags = 0;
4181#ifdef CONFIG_RFS_ACCEL
4182	struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
4183#endif
4184
4185	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
4186		flags = IRQF_SHARED;
4187
4188	for (i = 0; i < bp->cp_nr_rings; i++) {
4189		struct bnxt_irq *irq = &bp->irq_tbl[i];
4190#ifdef CONFIG_RFS_ACCEL
4191		if (rmap && (i < bp->rx_nr_rings)) {
4192			rc = irq_cpu_rmap_add(rmap, irq->vector);
4193			if (rc)
4194				netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
4195					    i);
4196		}
4197#endif
4198		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
4199				 bp->bnapi[i]);
4200		if (rc)
4201			break;
4202
4203		irq->requested = 1;
4204	}
4205	return rc;
4206}
4207
4208static void bnxt_del_napi(struct bnxt *bp)
4209{
4210	int i;
4211
4212	if (!bp->bnapi)
4213		return;
4214
4215	for (i = 0; i < bp->cp_nr_rings; i++) {
4216		struct bnxt_napi *bnapi = bp->bnapi[i];
4217
4218		napi_hash_del(&bnapi->napi);
4219		netif_napi_del(&bnapi->napi);
4220	}
4221}
4222
4223static void bnxt_init_napi(struct bnxt *bp)
4224{
4225	int i;
4226	struct bnxt_napi *bnapi;
4227
4228	if (bp->flags & BNXT_FLAG_USING_MSIX) {
4229		for (i = 0; i < bp->cp_nr_rings; i++) {
4230			bnapi = bp->bnapi[i];
4231			netif_napi_add(bp->dev, &bnapi->napi,
4232				       bnxt_poll, 64);
4233			napi_hash_add(&bnapi->napi);
4234		}
4235	} else {
4236		bnapi = bp->bnapi[0];
4237		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
4238		napi_hash_add(&bnapi->napi);
4239	}
4240}
4241
4242static void bnxt_disable_napi(struct bnxt *bp)
4243{
4244	int i;
4245
4246	if (!bp->bnapi)
4247		return;
4248
4249	for (i = 0; i < bp->cp_nr_rings; i++) {
4250		napi_disable(&bp->bnapi[i]->napi);
4251		bnxt_disable_poll(bp->bnapi[i]);
4252	}
4253}
4254
4255static void bnxt_enable_napi(struct bnxt *bp)
4256{
4257	int i;
4258
4259	for (i = 0; i < bp->cp_nr_rings; i++) {
4260		bnxt_enable_poll(bp->bnapi[i]);
4261		napi_enable(&bp->bnapi[i]->napi);
4262	}
4263}
4264
4265static void bnxt_tx_disable(struct bnxt *bp)
4266{
4267	int i;
4268	struct bnxt_napi *bnapi;
4269	struct bnxt_tx_ring_info *txr;
4270	struct netdev_queue *txq;
4271
4272	if (bp->bnapi) {
4273		for (i = 0; i < bp->tx_nr_rings; i++) {
4274			bnapi = bp->bnapi[i];
4275			txr = &bnapi->tx_ring;
4276			txq = netdev_get_tx_queue(bp->dev, i);
4277			__netif_tx_lock(txq, smp_processor_id());
4278			txr->dev_state = BNXT_DEV_STATE_CLOSING;
4279			__netif_tx_unlock(txq);
4280		}
4281	}
4282	/* Stop all TX queues */
4283	netif_tx_disable(bp->dev);
4284	netif_carrier_off(bp->dev);
4285}
4286
4287static void bnxt_tx_enable(struct bnxt *bp)
4288{
4289	int i;
4290	struct bnxt_napi *bnapi;
4291	struct bnxt_tx_ring_info *txr;
4292	struct netdev_queue *txq;
4293
4294	for (i = 0; i < bp->tx_nr_rings; i++) {
4295		bnapi = bp->bnapi[i];
4296		txr = &bnapi->tx_ring;
4297		txq = netdev_get_tx_queue(bp->dev, i);
4298		txr->dev_state = 0;
4299	}
4300	netif_tx_wake_all_queues(bp->dev);
4301	if (bp->link_info.link_up)
4302		netif_carrier_on(bp->dev);
4303}
4304
4305static void bnxt_report_link(struct bnxt *bp)
4306{
4307	if (bp->link_info.link_up) {
4308		const char *duplex;
4309		const char *flow_ctrl;
4310		u16 speed;
4311
4312		netif_carrier_on(bp->dev);
4313		if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
4314			duplex = "full";
4315		else
4316			duplex = "half";
4317		if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
4318			flow_ctrl = "ON - receive & transmit";
4319		else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
4320			flow_ctrl = "ON - transmit";
4321		else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
4322			flow_ctrl = "ON - receive";
4323		else
4324			flow_ctrl = "none";
4325		speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
4326		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
4327			    speed, duplex, flow_ctrl);
4328	} else {
4329		netif_carrier_off(bp->dev);
4330		netdev_err(bp->dev, "NIC Link is Down\n");
4331	}
4332}
4333
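/* Query the PHY via HWRM_PORT_PHY_QCFG and cache the result in
 * bp->link_info.  When chng_link_state is set, also update link_up and log
 * any transition; otherwise the link is left reported as down.
 */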
4334static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
4335{
4336	int rc = 0;
4337	struct bnxt_link_info *link_info = &bp->link_info;
4338	struct hwrm_port_phy_qcfg_input req = {0};
4339	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
4340	u8 link_up = link_info->link_up;
4341
4342	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
4343
4344	mutex_lock(&bp->hwrm_cmd_lock);
4345	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4346	if (rc) {
4347		mutex_unlock(&bp->hwrm_cmd_lock);
4348		return rc;
4349	}
4350
4351	memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
4352	link_info->phy_link_status = resp->link;
4353	link_info->duplex = resp->duplex;
4354	link_info->pause = resp->pause;
4355	link_info->auto_mode = resp->auto_mode;
4356	link_info->auto_pause_setting = resp->auto_pause;
4357	link_info->force_pause_setting = resp->force_pause;
4358	link_info->duplex_setting = resp->duplex_setting;
4359	if (link_info->phy_link_status == BNXT_LINK_LINK)
4360		link_info->link_speed = le16_to_cpu(resp->link_speed);
4361	else
4362		link_info->link_speed = 0;
4363	link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
4364	link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
4365	link_info->support_speeds = le16_to_cpu(resp->support_speeds);
4366	link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
4367	link_info->preemphasis = le32_to_cpu(resp->preemphasis);
4368	link_info->phy_ver[0] = resp->phy_maj;
4369	link_info->phy_ver[1] = resp->phy_min;
4370	link_info->phy_ver[2] = resp->phy_bld;
4371	link_info->media_type = resp->media_type;
4372	link_info->transceiver = resp->transceiver_type;
4373	link_info->phy_addr = resp->phy_addr;
4374
4375	/* TODO: need to add more logic to report VF link */
4376	if (chng_link_state) {
4377		if (link_info->phy_link_status == BNXT_LINK_LINK)
4378			link_info->link_up = 1;
4379		else
4380			link_info->link_up = 0;
4381		if (link_up != link_info->link_up)
4382			bnxt_report_link(bp);
4383	} else {
4384		/* always report link down if not requested to update link state */
4385		link_info->link_up = 0;
4386	}
4387	mutex_unlock(&bp->hwrm_cmd_lock);
4388	return 0;
4389}
4390
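/* Fill in the pause fields of a PORT_PHY_CFG request: advertise RX/TX pause
 * when flow control is autonegotiated, otherwise force the requested pause
 * settings.
 */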
4391static void
4392bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
4393{
4394	if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
4395		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4396			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
4397		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4398			req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
4399		req->enables |=
4400			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
4401	} else {
4402		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
4403			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
4404		if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
4405			req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
4406		req->enables |=
4407			cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
4408	}
4409}
4410
4411static void bnxt_hwrm_set_link_common(struct bnxt *bp,
4412				      struct hwrm_port_phy_cfg_input *req)
4413{
4414	u8 autoneg = bp->link_info.autoneg;
4415	u16 fw_link_speed = bp->link_info.req_link_speed;
4416	u32 advertising = bp->link_info.advertising;
4417
4418	if (autoneg & BNXT_AUTONEG_SPEED) {
4419		req->auto_mode |=
4420			PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
4421
4422		req->enables |= cpu_to_le32(
4423			PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
4424		req->auto_link_speed_mask = cpu_to_le16(advertising);
4425
4426		req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
4427		req->flags |=
4428			cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
4429	} else {
4430		req->force_link_speed = cpu_to_le16(fw_link_speed);
4431		req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
4432	}
4433
4434	/* currently don't support half duplex */
4435	req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
4436	req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
4437	/* tell chimp that the setting takes effect immediately */
4438	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
4439}
4440
4441int bnxt_hwrm_set_pause(struct bnxt *bp)
4442{
4443	struct hwrm_port_phy_cfg_input req = {0};
4444	int rc;
4445
4446	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4447	bnxt_hwrm_set_pause_common(bp, &req);
4448
4449	if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
4450	    bp->link_info.force_link_chng)
4451		bnxt_hwrm_set_link_common(bp, &req);
4452
4453	mutex_lock(&bp->hwrm_cmd_lock);
4454	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4455	if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
4456		/* since changing the pause setting doesn't trigger any link
4457		 * change event, the driver needs to update the current pause
4458		 * result upon successful return of the phy_cfg command
4459		 */
4460		bp->link_info.pause =
4461		bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
4462		bp->link_info.auto_pause_setting = 0;
4463		if (!bp->link_info.force_link_chng)
4464			bnxt_report_link(bp);
4465	}
4466	bp->link_info.force_link_chng = false;
4467	mutex_unlock(&bp->hwrm_cmd_lock);
4468	return rc;
4469}
4470
4471int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
4472{
4473	struct hwrm_port_phy_cfg_input req = {0};
4474
4475	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
4476	if (set_pause)
4477		bnxt_hwrm_set_pause_common(bp, &req);
4478
4479	bnxt_hwrm_set_link_common(bp, &req);
4480	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4481}
4482
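/* Compare the requested speed, duplex and flow control against what the
 * firmware reports and issue a PORT_PHY_CFG only when something actually
 * needs to change.
 */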
4483static int bnxt_update_phy_setting(struct bnxt *bp)
4484{
4485	int rc;
4486	bool update_link = false;
4487	bool update_pause = false;
4488	struct bnxt_link_info *link_info = &bp->link_info;
4489
4490	rc = bnxt_update_link(bp, true);
4491	if (rc) {
4492		netdev_err(bp->dev, "failed to update link (rc: %x)\n",
4493			   rc);
4494		return rc;
4495	}
4496	if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4497	    link_info->auto_pause_setting != link_info->req_flow_ctrl)
4498		update_pause = true;
4499	if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4500	    link_info->force_pause_setting != link_info->req_flow_ctrl)
4501		update_pause = true;
4502	if (link_info->req_duplex != link_info->duplex_setting)
4503		update_link = true;
4504	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4505		if (BNXT_AUTO_MODE(link_info->auto_mode))
4506			update_link = true;
4507		if (link_info->req_link_speed != link_info->force_link_speed)
4508			update_link = true;
4509	} else {
4510		if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
4511			update_link = true;
4512		if (link_info->advertising != link_info->auto_link_speeds)
4513			update_link = true;
4514		if (link_info->req_link_speed != link_info->auto_link_speed)
4515			update_link = true;
4516	}
4517
4518	if (update_link)
4519		rc = bnxt_hwrm_set_link_setting(bp, update_pause);
4520	else if (update_pause)
4521		rc = bnxt_hwrm_set_pause(bp);
4522	if (rc) {
4523		netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
4524			   rc);
4525		return rc;
4526	}
4527
4528	return rc;
4529}
4530
4531/* Common routine to pre-map certain register blocks to different GRC windows.
4532 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
4533 * in the PF and 3 windows in the VF can be customized to map different
4534 * register blocks.
4535 */
4536static void bnxt_preset_reg_win(struct bnxt *bp)
4537{
4538	if (BNXT_PF(bp)) {
4539		/* CAG registers map to GRC window #4 */
4540		writel(BNXT_CAG_REG_BASE,
4541		       bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
4542	}
4543}
4544
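/* Core of the open path: pick the interrupt mode, allocate host memory, set
 * up NAPI and IRQs, program the chip, sync the PHY settings, re-register
 * tunnel ports, then enable interrupts, TX queues and the periodic timer.
 */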
4545static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4546{
4547	int rc = 0;
4548
4549	bnxt_preset_reg_win(bp);
4550	netif_carrier_off(bp->dev);
4551	if (irq_re_init) {
4552		rc = bnxt_setup_int_mode(bp);
4553		if (rc) {
4554			netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
4555				   rc);
4556			return rc;
4557		}
4558	}
4559	if ((bp->flags & BNXT_FLAG_RFS) &&
4560	    !(bp->flags & BNXT_FLAG_USING_MSIX)) {
4561		/* disable RFS if falling back to INTA */
4562		bp->dev->hw_features &= ~NETIF_F_NTUPLE;
4563		bp->flags &= ~BNXT_FLAG_RFS;
4564	}
4565
4566	rc = bnxt_alloc_mem(bp, irq_re_init);
4567	if (rc) {
4568		netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
4569		goto open_err_free_mem;
4570	}
4571
4572	if (irq_re_init) {
4573		bnxt_init_napi(bp);
4574		rc = bnxt_request_irq(bp);
4575		if (rc) {
4576			netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
4577			goto open_err;
4578		}
4579	}
4580
4581	bnxt_enable_napi(bp);
4582
4583	rc = bnxt_init_nic(bp, irq_re_init);
4584	if (rc) {
4585		netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
4586		goto open_err;
4587	}
4588
4589	if (link_re_init) {
4590		rc = bnxt_update_phy_setting(bp);
4591		if (rc)
4592			goto open_err;
4593	}
4594
4595	if (irq_re_init) {
4596#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
4597		vxlan_get_rx_port(bp->dev);
4598#endif
4599		if (!bnxt_hwrm_tunnel_dst_port_alloc(
4600				bp, htons(0x17c1),
4601				TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
4602			bp->nge_port_cnt = 1;
4603	}
4604
4605	set_bit(BNXT_STATE_OPEN, &bp->state);
4606	bnxt_enable_int(bp);
4607	/* Enable TX queues */
4608	bnxt_tx_enable(bp);
4609	mod_timer(&bp->timer, jiffies + bp->current_interval);
4610
4611	return 0;
4612
4613open_err:
4614	bnxt_disable_napi(bp);
4615	bnxt_del_napi(bp);
4616
4617open_err_free_mem:
4618	bnxt_free_skbs(bp);
4619	bnxt_free_irq(bp);
4620	bnxt_free_mem(bp, true);
4621	return rc;
4622}
4623
4624/* rtnl_lock held */
4625int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4626{
4627	int rc = 0;
4628
4629	rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
4630	if (rc) {
4631		netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
4632		dev_close(bp->dev);
4633	}
4634	return rc;
4635}
4636
4637static int bnxt_open(struct net_device *dev)
4638{
4639	struct bnxt *bp = netdev_priv(dev);
4640	int rc = 0;
4641
4642	rc = bnxt_hwrm_func_reset(bp);
4643	if (rc) {
4644		netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
4645			   rc);
4646		rc = -1;
4647		return rc;
4648	}
4649	return __bnxt_open_nic(bp, true, true);
4650}
4651
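/* Raise intr_sem so the interrupt/NAPI path backs off, mask interrupts on
 * every completion ring, and wait for any handler still running on another
 * CPU to finish.
 */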
4652static void bnxt_disable_int_sync(struct bnxt *bp)
4653{
4654	int i;
4655
4656	atomic_inc(&bp->intr_sem);
4657	if (!netif_running(bp->dev))
4658		return;
4659
4660	bnxt_disable_int(bp);
4661	for (i = 0; i < bp->cp_nr_rings; i++)
4662		synchronize_irq(bp->irq_tbl[i].vector);
4663}
4664
4665int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4666{
4667	int rc = 0;
4668
4669#ifdef CONFIG_BNXT_SRIOV
4670	if (bp->sriov_cfg) {
4671		rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
4672						      !bp->sriov_cfg,
4673						      BNXT_SRIOV_CFG_WAIT_TMO);
4674		if (!rc)
4675			netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
4676	}
4677#endif
4678	/* Change device state to avoid TX queue wake-ups */
4679	bnxt_tx_disable(bp);
4680
4681	clear_bit(BNXT_STATE_OPEN, &bp->state);
4682	smp_mb__after_atomic();
4683	while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
4684		msleep(20);
4685
4686	/* Flush rings before disabling interrupts */
4687	bnxt_shutdown_nic(bp, irq_re_init);
4688
4689	/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
4690
4691	bnxt_disable_napi(bp);
4692	bnxt_disable_int_sync(bp);
4693	del_timer_sync(&bp->timer);
4694	bnxt_free_skbs(bp);
4695
4696	if (irq_re_init) {
4697		bnxt_free_irq(bp);
4698		bnxt_del_napi(bp);
4699	}
4700	bnxt_free_mem(bp, irq_re_init);
4701	return rc;
4702}
4703
4704static int bnxt_close(struct net_device *dev)
4705{
4706	struct bnxt *bp = netdev_priv(dev);
4707
4708	bnxt_close_nic(bp, true, true);
4709	return 0;
4710}
4711
4712/* rtnl_lock held */
4713static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4714{
4715	switch (cmd) {
4716	case SIOCGMIIPHY:
4717		/* fallthru */
4718	case SIOCGMIIREG: {
4719		if (!netif_running(dev))
4720			return -EAGAIN;
4721
4722		return 0;
4723	}
4724
4725	case SIOCSMIIREG:
4726		if (!netif_running(dev))
4727			return -EAGAIN;
4728
4729		return 0;
4730
4731	default:
4732		/* do nothing */
4733		break;
4734	}
4735	return -EOPNOTSUPP;
4736}
4737
4738static struct rtnl_link_stats64 *
4739bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
4740{
4741	u32 i;
4742	struct bnxt *bp = netdev_priv(dev);
4743
4744	memset(stats, 0, sizeof(struct rtnl_link_stats64));
4745
4746	if (!bp->bnapi)
4747		return stats;
4748
4749	/* TODO check if we need to synchronize with bnxt_close path */
4750	for (i = 0; i < bp->cp_nr_rings; i++) {
4751		struct bnxt_napi *bnapi = bp->bnapi[i];
4752		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
4753		struct ctx_hw_stats *hw_stats = cpr->hw_stats;
4754
4755		stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
4756		stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
4757		stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
4758
4759		stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
4760		stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
4761		stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
4762
4763		stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
4764		stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
4765		stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
4766
4767		stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
4768		stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
4769		stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
4770
4771		stats->rx_missed_errors +=
4772			le64_to_cpu(hw_stats->rx_discard_pkts);
4773
4774		stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
4775
4776		stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
4777
4778		stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
4779	}
4780
4781	return stats;
4782}
4783
4784static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
4785{
4786	struct net_device *dev = bp->dev;
4787	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4788	struct netdev_hw_addr *ha;
4789	u8 *haddr;
4790	int mc_count = 0;
4791	bool update = false;
4792	int off = 0;
4793
4794	netdev_for_each_mc_addr(ha, dev) {
4795		if (mc_count >= BNXT_MAX_MC_ADDRS) {
4796			*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4797			vnic->mc_list_count = 0;
4798			return false;
4799		}
4800		haddr = ha->addr;
4801		if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
4802			memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
4803			update = true;
4804		}
4805		off += ETH_ALEN;
4806		mc_count++;
4807	}
4808	if (mc_count)
4809		*rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
4810
4811	if (mc_count != vnic->mc_list_count) {
4812		vnic->mc_list_count = mc_count;
4813		update = true;
4814	}
4815	return update;
4816}
4817
4818static bool bnxt_uc_list_updated(struct bnxt *bp)
4819{
4820	struct net_device *dev = bp->dev;
4821	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4822	struct netdev_hw_addr *ha;
4823	int off = 0;
4824
4825	if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
4826		return true;
4827
4828	netdev_for_each_uc_addr(ha, dev) {
4829		if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
4830			return true;
4831
4832		off += ETH_ALEN;
4833	}
4834	return false;
4835}
4836
4837static void bnxt_set_rx_mode(struct net_device *dev)
4838{
4839	struct bnxt *bp = netdev_priv(dev);
4840	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4841	u32 mask = vnic->rx_mask;
4842	bool mc_update = false;
4843	bool uc_update;
4844
4845	if (!netif_running(dev))
4846		return;
4847
4848	mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
4849		  CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
4850		  CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
4851
4852	/* Only allow PF to be in promiscuous mode */
4853	if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
4854		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4855
4856	uc_update = bnxt_uc_list_updated(bp);
4857
4858	if (dev->flags & IFF_ALLMULTI) {
4859		mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
4860		vnic->mc_list_count = 0;
4861	} else {
4862		mc_update = bnxt_mc_list_updated(bp, &mask);
4863	}
4864
4865	if (mask != vnic->rx_mask || uc_update || mc_update) {
4866		vnic->rx_mask = mask;
4867
4868		set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
4869		schedule_work(&bp->sp_task);
4870	}
4871}
4872
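/* Called from bnxt_init_chip() and the sp_task workqueue.  Re-syncs the
 * unicast filter list with the firmware (falling back to promiscuous mode
 * when the list exceeds BNXT_MAX_UC_ADDRS) and then programs the L2 RX
 * mask.
 */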
4873static int bnxt_cfg_rx_mode(struct bnxt *bp)
4874{
4875	struct net_device *dev = bp->dev;
4876	struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
4877	struct netdev_hw_addr *ha;
4878	int i, off = 0, rc;
4879	bool uc_update;
4880
4881	netif_addr_lock_bh(dev);
4882	uc_update = bnxt_uc_list_updated(bp);
4883	netif_addr_unlock_bh(dev);
4884
4885	if (!uc_update)
4886		goto skip_uc;
4887
4888	mutex_lock(&bp->hwrm_cmd_lock);
4889	for (i = 1; i < vnic->uc_filter_count; i++) {
4890		struct hwrm_cfa_l2_filter_free_input req = {0};
4891
4892		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
4893				       -1);
4894
4895		req.l2_filter_id = vnic->fw_l2_filter_id[i];
4896
4897		rc = _hwrm_send_message(bp, &req, sizeof(req),
4898					HWRM_CMD_TIMEOUT);
4899	}
4900	mutex_unlock(&bp->hwrm_cmd_lock);
4901
4902	vnic->uc_filter_count = 1;
4903
4904	netif_addr_lock_bh(dev);
4905	if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
4906		vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
4907	} else {
4908		netdev_for_each_uc_addr(ha, dev) {
4909			memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
4910			off += ETH_ALEN;
4911			vnic->uc_filter_count++;
4912		}
4913	}
4914	netif_addr_unlock_bh(dev);
4915
4916	for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
4917		rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
4918		if (rc) {
4919			netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
4920				   rc);
4921			vnic->uc_filter_count = i;
4922			return rc;
4923		}
4924	}
4925
4926skip_uc:
4927	rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
4928	if (rc)
4929		netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
4930			   rc);
4931
4932	return rc;
4933}
4934
4935static netdev_features_t bnxt_fix_features(struct net_device *dev,
4936					   netdev_features_t features)
4937{
4938	return features;
4939}
4940
4941static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
4942{
4943	struct bnxt *bp = netdev_priv(dev);
4944	u32 flags = bp->flags;
4945	u32 changes;
4946	int rc = 0;
4947	bool re_init = false;
4948	bool update_tpa = false;
4949
4950	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
4951	if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
4952		flags |= BNXT_FLAG_GRO;
4953	if (features & NETIF_F_LRO)
4954		flags |= BNXT_FLAG_LRO;
4955
4956	if (features & NETIF_F_HW_VLAN_CTAG_RX)
4957		flags |= BNXT_FLAG_STRIP_VLAN;
4958
4959	if (features & NETIF_F_NTUPLE)
4960		flags |= BNXT_FLAG_RFS;
4961
4962	changes = flags ^ bp->flags;
4963	if (changes & BNXT_FLAG_TPA) {
4964		update_tpa = true;
4965		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
4966		    (flags & BNXT_FLAG_TPA) == 0)
4967			re_init = true;
4968	}
4969
4970	if (changes & ~BNXT_FLAG_TPA)
4971		re_init = true;
4972
4973	if (flags != bp->flags) {
4974		u32 old_flags = bp->flags;
4975
4976		bp->flags = flags;
4977
4978		if (!netif_running(dev)) {
4979			if (update_tpa)
4980				bnxt_set_ring_params(bp);
4981			return rc;
4982		}
4983
4984		if (re_init) {
4985			bnxt_close_nic(bp, false, false);
4986			if (update_tpa)
4987				bnxt_set_ring_params(bp);
4988
4989			return bnxt_open_nic(bp, false, false);
4990		}
4991		if (update_tpa) {
4992			rc = bnxt_set_tpa(bp,
4993					  (flags & BNXT_FLAG_TPA) ?
4994					  true : false);
4995			if (rc)
4996				bp->flags = old_flags;
4997		}
4998	}
4999	return rc;
5000}
5001
5002static void bnxt_dbg_dump_states(struct bnxt *bp)
5003{
5004	int i;
5005	struct bnxt_napi *bnapi;
5006	struct bnxt_tx_ring_info *txr;
5007	struct bnxt_rx_ring_info *rxr;
5008	struct bnxt_cp_ring_info *cpr;
5009
5010	for (i = 0; i < bp->cp_nr_rings; i++) {
5011		bnapi = bp->bnapi[i];
5012		txr = &bnapi->tx_ring;
5013		rxr = &bnapi->rx_ring;
5014		cpr = &bnapi->cp_ring;
5015		if (netif_msg_drv(bp)) {
5016			netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
5017				    i, txr->tx_ring_struct.fw_ring_id,
5018				    txr->tx_prod, txr->tx_cons);
5019			netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
5020				    i, rxr->rx_ring_struct.fw_ring_id,
5021				    rxr->rx_prod,
5022				    rxr->rx_agg_ring_struct.fw_ring_id,
5023				    rxr->rx_agg_prod, rxr->rx_sw_agg_prod);
5024			netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
5025				    i, cpr->cp_ring_struct.fw_ring_id,
5026				    cpr->cp_raw_cons);
5027		}
5028	}
5029}
5030
5031static void bnxt_reset_task(struct bnxt *bp)
5032{
5033	bnxt_dbg_dump_states(bp);
5034	if (netif_running(bp->dev)) {
5035		bnxt_close_nic(bp, false, false);
5036		bnxt_open_nic(bp, false, false);
5037	}
5038}
5039
5040static void bnxt_tx_timeout(struct net_device *dev)
5041{
5042	struct bnxt *bp = netdev_priv(dev);
5043
5044	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
5045	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
5046	schedule_work(&bp->sp_task);
5047}
5048
5049#ifdef CONFIG_NET_POLL_CONTROLLER
5050static void bnxt_poll_controller(struct net_device *dev)
5051{
5052	struct bnxt *bp = netdev_priv(dev);
5053	int i;
5054
5055	for (i = 0; i < bp->cp_nr_rings; i++) {
5056		struct bnxt_irq *irq = &bp->irq_tbl[i];
5057
5058		disable_irq(irq->vector);
5059		irq->handler(irq->vector, bp->bnapi[i]);
5060		enable_irq(irq->vector);
5061	}
5062}
5063#endif
5064
5065static void bnxt_timer(unsigned long data)
5066{
5067	struct bnxt *bp = (struct bnxt *)data;
5068	struct net_device *dev = bp->dev;
5069
5070	if (!netif_running(dev))
5071		return;
5072
5073	if (atomic_read(&bp->intr_sem) != 0)
5074		goto bnxt_restart_timer;
5075
5076bnxt_restart_timer:
5077	mod_timer(&bp->timer, jiffies + bp->current_interval);
5078}
5079
5080static void bnxt_cfg_ntp_filters(struct bnxt *);
5081
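/* Deferred work handler.  BNXT_STATE_IN_SP_TASK is set while it runs so
 * bnxt_close_nic() can wait for it; the reset event temporarily clears the
 * bit before taking rtnl_lock to avoid deadlocking with the close path.
 */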
5082static void bnxt_sp_task(struct work_struct *work)
5083{
5084	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
5085	int rc;
5086
5087	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5088	smp_mb__after_atomic();
5089	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
5090		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5091		return;
5092	}
5093
5094	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
5095		bnxt_cfg_rx_mode(bp);
5096
5097	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
5098		bnxt_cfg_ntp_filters(bp);
5099	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
5100		rc = bnxt_update_link(bp, true);
5101		if (rc)
5102			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
5103				   rc);
5104	}
5105	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
5106		bnxt_hwrm_exec_fwd_req(bp);
5107	if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
5108		bnxt_hwrm_tunnel_dst_port_alloc(
5109			bp, bp->vxlan_port,
5110			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5111	}
5112	if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
5113		bnxt_hwrm_tunnel_dst_port_free(
5114			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
5115	}
5116	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
5117		/* bnxt_reset_task() calls bnxt_close_nic() which waits
5118		 * for BNXT_STATE_IN_SP_TASK to clear.
5119		 */
5120		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5121		rtnl_lock();
5122		bnxt_reset_task(bp);
5123		set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5124		rtnl_unlock();
5125	}
5126
5127	smp_mb__before_atomic();
5128	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
5129}
5130
5131static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
5132{
5133	int rc;
5134	struct bnxt *bp = netdev_priv(dev);
5135
5136	SET_NETDEV_DEV(dev, &pdev->dev);
5137
5138	/* enable device (incl. PCI PM wakeup), and bus-mastering */
5139	rc = pci_enable_device(pdev);
5140	if (rc) {
5141		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
5142		goto init_err;
5143	}
5144
5145	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
5146		dev_err(&pdev->dev,
5147			"Cannot find PCI device base address, aborting\n");
5148		rc = -ENODEV;
5149		goto init_err_disable;
5150	}
5151
5152	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5153	if (rc) {
5154		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
5155		goto init_err_disable;
5156	}
5157
5158	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
5159	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
5160		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
5161		rc = -EIO;
		goto init_err_release;
5162	}
5163
5164	pci_set_master(pdev);
5165
5166	bp->dev = dev;
5167	bp->pdev = pdev;
5168
5169	bp->bar0 = pci_ioremap_bar(pdev, 0);
5170	if (!bp->bar0) {
5171		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
5172		rc = -ENOMEM;
5173		goto init_err_release;
5174	}
5175
5176	bp->bar1 = pci_ioremap_bar(pdev, 2);
5177	if (!bp->bar1) {
5178		dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
5179		rc = -ENOMEM;
5180		goto init_err_release;
5181	}
5182
5183	bp->bar2 = pci_ioremap_bar(pdev, 4);
5184	if (!bp->bar2) {
5185		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
5186		rc = -ENOMEM;
5187		goto init_err_release;
5188	}
5189
5190	INIT_WORK(&bp->sp_task, bnxt_sp_task);
5191
5192	spin_lock_init(&bp->ntp_fltr_lock);
5193
5194	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
5195	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
5196
5197	bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
5198	bp->coal_bufs = 20;
5199	bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
5200	bp->coal_bufs_irq = 2;
5201
5202	init_timer(&bp->timer);
5203	bp->timer.data = (unsigned long)bp;
5204	bp->timer.function = bnxt_timer;
5205	bp->current_interval = BNXT_TIMER_INTERVAL;
5206
5207	clear_bit(BNXT_STATE_OPEN, &bp->state);
5208
5209	return 0;
5210
5211init_err_release:
5212	if (bp->bar2) {
5213		pci_iounmap(pdev, bp->bar2);
5214		bp->bar2 = NULL;
5215	}
5216
5217	if (bp->bar1) {
5218		pci_iounmap(pdev, bp->bar1);
5219		bp->bar1 = NULL;
5220	}
5221
5222	if (bp->bar0) {
5223		pci_iounmap(pdev, bp->bar0);
5224		bp->bar0 = NULL;
5225	}
5226
5227	pci_release_regions(pdev);
5228
5229init_err_disable:
5230	pci_disable_device(pdev);
5231
5232init_err:
5233	return rc;
5234}
5235
5236/* rtnl_lock held */
5237static int bnxt_change_mac_addr(struct net_device *dev, void *p)
5238{
5239	struct sockaddr *addr = p;
5240	struct bnxt *bp = netdev_priv(dev);
5241	int rc = 0;
5242
5243	if (!is_valid_ether_addr(addr->sa_data))
5244		return -EADDRNOTAVAIL;
5245
5246#ifdef CONFIG_BNXT_SRIOV
5247	if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
5248		return -EADDRNOTAVAIL;
5249#endif
5250
5251	if (ether_addr_equal(addr->sa_data, dev->dev_addr))
5252		return 0;
5253
5254	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5255	if (netif_running(dev)) {
5256		bnxt_close_nic(bp, false, false);
5257		rc = bnxt_open_nic(bp, false, false);
5258	}
5259
5260	return rc;
5261}
5262
5263/* rtnl_lock held */
5264static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
5265{
5266	struct bnxt *bp = netdev_priv(dev);
5267
5268	if (new_mtu < 60 || new_mtu > 9000)
5269		return -EINVAL;
5270
5271	if (netif_running(dev))
5272		bnxt_close_nic(bp, false, false);
5273
5274	dev->mtu = new_mtu;
5275	bnxt_set_ring_params(bp);
5276
5277	if (netif_running(dev))
5278		return bnxt_open_nic(bp, false, false);
5279
5280	return 0;
5281}
5282
5283static int bnxt_setup_tc(struct net_device *dev, u8 tc)
5284{
5285	struct bnxt *bp = netdev_priv(dev);
5286
5287	if (tc > bp->max_tc) {
5288		netdev_err(dev, "too many traffic classes requested: %d. Max supported is %d\n",
5289			   tc, bp->max_tc);
5290		return -EINVAL;
5291	}
5292
5293	if (netdev_get_num_tc(dev) == tc)
5294		return 0;
5295
5296	if (tc) {
5297		int max_rx_rings, max_tx_rings;
5298
5299		bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
5300		if (bp->tx_nr_rings_per_tc * tc > max_tx_rings)
5301			return -ENOMEM;
5302	}
5303
5304	/* Needs to close the device and do hw resource re-allocations */
5305	if (netif_running(bp->dev))
5306		bnxt_close_nic(bp, true, false);
5307
5308	if (tc) {
5309		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
5310		netdev_set_num_tc(dev, tc);
5311	} else {
5312		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
5313		netdev_reset_tc(dev);
5314	}
5315	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
5316	bp->num_stat_ctxs = bp->cp_nr_rings;
5317
5318	if (netif_running(bp->dev))
5319		return bnxt_open_nic(bp, true, false);
5320
5321	return 0;
5322}
5323
5324#ifdef CONFIG_RFS_ACCEL
5325static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
5326			    struct bnxt_ntuple_filter *f2)
5327{
5328	struct flow_keys *keys1 = &f1->fkeys;
5329	struct flow_keys *keys2 = &f2->fkeys;
5330
5331	if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
5332	    keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
5333	    keys1->ports.ports == keys2->ports.ports &&
5334	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
5335	    keys1->basic.n_proto == keys2->basic.n_proto &&
5336	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
5337		return true;
5338
5339	return false;
5340}
5341
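/* .ndo_rx_flow_steer handler: dissect the flow, drop duplicates already in
 * the hash table, reserve a filter ID from the bitmap, and defer the actual
 * HWRM ntuple filter programming to the sp_task workqueue.
 */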
5342static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
5343			      u16 rxq_index, u32 flow_id)
5344{
5345	struct bnxt *bp = netdev_priv(dev);
5346	struct bnxt_ntuple_filter *fltr, *new_fltr;
5347	struct flow_keys *fkeys;
5348	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
5349	int rc = 0, idx, bit_id;
5350	struct hlist_head *head;
5351
5352	if (skb->encapsulation)
5353		return -EPROTONOSUPPORT;
5354
5355	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
5356	if (!new_fltr)
5357		return -ENOMEM;
5358
5359	fkeys = &new_fltr->fkeys;
5360	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
5361		rc = -EPROTONOSUPPORT;
5362		goto err_free;
5363	}
5364
5365	if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
5366	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
5367	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
5368		rc = -EPROTONOSUPPORT;
5369		goto err_free;
5370	}
5371
5372	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
5373
5374	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
5375	head = &bp->ntp_fltr_hash_tbl[idx];
5376	rcu_read_lock();
5377	hlist_for_each_entry_rcu(fltr, head, hash) {
5378		if (bnxt_fltr_match(fltr, new_fltr)) {
5379			rcu_read_unlock();
5380			rc = 0;
5381			goto err_free;
5382		}
5383	}
5384	rcu_read_unlock();
5385
5386	spin_lock_bh(&bp->ntp_fltr_lock);
5387	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
5388					 BNXT_NTP_FLTR_MAX_FLTR, 0);
5389	if (bit_id < 0) {
5390		spin_unlock_bh(&bp->ntp_fltr_lock);
5391		rc = -ENOMEM;
5392		goto err_free;
5393	}
5394
5395	new_fltr->sw_id = (u16)bit_id;
5396	new_fltr->flow_id = flow_id;
5397	new_fltr->rxq = rxq_index;
5398	hlist_add_head_rcu(&new_fltr->hash, head);
5399	bp->ntp_fltr_count++;
5400	spin_unlock_bh(&bp->ntp_fltr_lock);
5401
5402	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
5403	schedule_work(&bp->sp_task);
5404
5405	return new_fltr->sw_id;
5406
5407err_free:
5408	kfree(new_fltr);
5409	return rc;
5410}
5411
5412static void bnxt_cfg_ntp_filters(struct bnxt *bp)
5413{
5414	int i;
5415
5416	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
5417		struct hlist_head *head;
5418		struct hlist_node *tmp;
5419		struct bnxt_ntuple_filter *fltr;
5420		int rc;
5421
5422		head = &bp->ntp_fltr_hash_tbl[i];
5423		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
5424			bool del = false;
5425
5426			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
5427				if (rps_may_expire_flow(bp->dev, fltr->rxq,
5428							fltr->flow_id,
5429							fltr->sw_id)) {
5430					bnxt_hwrm_cfa_ntuple_filter_free(bp,
5431									 fltr);
5432					del = true;
5433				}
5434			} else {
5435				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
5436								       fltr);
5437				if (rc)
5438					del = true;
5439				else
5440					set_bit(BNXT_FLTR_VALID, &fltr->state);
5441			}
5442
5443			if (del) {
5444				spin_lock_bh(&bp->ntp_fltr_lock);
5445				hlist_del_rcu(&fltr->hash);
5446				bp->ntp_fltr_count--;
5447				spin_unlock_bh(&bp->ntp_fltr_lock);
5448				synchronize_rcu();
5449				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
5450				kfree(fltr);
5451			}
5452		}
5453	}
5454}
5455
5456#else
5457
5458static void bnxt_cfg_ntp_filters(struct bnxt *bp)
5459{
5460}
5461
5462#endif /* CONFIG_RFS_ACCEL */
5463
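/* VXLAN offload notifications.  Only a single VXLAN destination port is
 * supported; programming and freeing of the port are deferred to the
 * sp_task workqueue since HWRM commands can sleep.
 */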
5464static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
5465				__be16 port)
5466{
5467	struct bnxt *bp = netdev_priv(dev);
5468
5469	if (!netif_running(dev))
5470		return;
5471
5472	if (sa_family != AF_INET6 && sa_family != AF_INET)
5473		return;
5474
5475	if (bp->vxlan_port_cnt && bp->vxlan_port != port)
5476		return;
5477
5478	bp->vxlan_port_cnt++;
5479	if (bp->vxlan_port_cnt == 1) {
5480		bp->vxlan_port = port;
5481		set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
5482		schedule_work(&bp->sp_task);
5483	}
5484}
5485
5486static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
5487				__be16 port)
5488{
5489	struct bnxt *bp = netdev_priv(dev);
5490
5491	if (!netif_running(dev))
5492		return;
5493
5494	if (sa_family != AF_INET6 && sa_family != AF_INET)
5495		return;
5496
5497	if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
5498		bp->vxlan_port_cnt--;
5499
5500		if (bp->vxlan_port_cnt == 0) {
5501			set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
5502			schedule_work(&bp->sp_task);
5503		}
5504	}
5505}
5506
5507static const struct net_device_ops bnxt_netdev_ops = {
5508	.ndo_open		= bnxt_open,
5509	.ndo_start_xmit		= bnxt_start_xmit,
5510	.ndo_stop		= bnxt_close,
5511	.ndo_get_stats64	= bnxt_get_stats64,
5512	.ndo_set_rx_mode	= bnxt_set_rx_mode,
5513	.ndo_do_ioctl		= bnxt_ioctl,
5514	.ndo_validate_addr	= eth_validate_addr,
5515	.ndo_set_mac_address	= bnxt_change_mac_addr,
5516	.ndo_change_mtu		= bnxt_change_mtu,
5517	.ndo_fix_features	= bnxt_fix_features,
5518	.ndo_set_features	= bnxt_set_features,
5519	.ndo_tx_timeout		= bnxt_tx_timeout,
5520#ifdef CONFIG_BNXT_SRIOV
5521	.ndo_get_vf_config	= bnxt_get_vf_config,
5522	.ndo_set_vf_mac		= bnxt_set_vf_mac,
5523	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
5524	.ndo_set_vf_rate	= bnxt_set_vf_bw,
5525	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
5526	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
5527#endif
5528#ifdef CONFIG_NET_POLL_CONTROLLER
5529	.ndo_poll_controller	= bnxt_poll_controller,
5530#endif
5531	.ndo_setup_tc           = bnxt_setup_tc,
5532#ifdef CONFIG_RFS_ACCEL
5533	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
5534#endif
5535	.ndo_add_vxlan_port	= bnxt_add_vxlan_port,
5536	.ndo_del_vxlan_port	= bnxt_del_vxlan_port,
5537#ifdef CONFIG_NET_RX_BUSY_POLL
5538	.ndo_busy_poll		= bnxt_busy_poll,
5539#endif
5540};
5541
5542static void bnxt_remove_one(struct pci_dev *pdev)
5543{
5544	struct net_device *dev = pci_get_drvdata(pdev);
5545	struct bnxt *bp = netdev_priv(dev);
5546
5547	if (BNXT_PF(bp))
5548		bnxt_sriov_disable(bp);
5549
5550	unregister_netdev(dev);
5551	cancel_work_sync(&bp->sp_task);
5552	bp->sp_event = 0;
5553
5554	bnxt_free_hwrm_resources(bp);
5555	pci_iounmap(pdev, bp->bar2);
5556	pci_iounmap(pdev, bp->bar1);
5557	pci_iounmap(pdev, bp->bar0);
5558	free_netdev(dev);
5559
5560	pci_release_regions(pdev);
5561	pci_disable_device(pdev);
5562}
5563
5564static int bnxt_probe_phy(struct bnxt *bp)
5565{
5566	int rc = 0;
5567	struct bnxt_link_info *link_info = &bp->link_info;
5568	char phy_ver[PHY_VER_STR_LEN];
5569
5570	rc = bnxt_update_link(bp, false);
5571	if (rc) {
5572		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
5573			   rc);
5574		return rc;
5575	}
5576
5577	/* initialize the ethtool setting copy with NVM settings */
5578	if (BNXT_AUTO_MODE(link_info->auto_mode))
5579		link_info->autoneg |= BNXT_AUTONEG_SPEED;
5580
5581	if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
5582		if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
5583			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
5584		link_info->req_flow_ctrl = link_info->auto_pause_setting;
5585	} else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
5586		link_info->req_flow_ctrl = link_info->force_pause_setting;
5587	}
5588	link_info->req_duplex = link_info->duplex_setting;
5589	if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5590		link_info->req_link_speed = link_info->auto_link_speed;
5591	else
5592		link_info->req_link_speed = link_info->force_link_speed;
5593	link_info->advertising = link_info->auto_link_speeds;
5594	snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
5595		 link_info->phy_ver[0],
5596		 link_info->phy_ver[1],
5597		 link_info->phy_ver[2]);
5598	strcat(bp->fw_ver_str, phy_ver);
5599	return rc;
5600}
5601
5602static int bnxt_get_max_irq(struct pci_dev *pdev)
5603{
5604	u16 ctrl;
5605
5606	if (!pdev->msix_cap)
5607		return 1;
5608
5609	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
5610	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
5611}
5612
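/* Report the maximum usable RX/TX ring counts, bounded by the PF/VF limits
 * on IRQs, completion rings and stat contexts.  With aggregation rings
 * enabled, each RX ring consumes two hardware rings, so the RX maximum is
 * halved.
 */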
5613void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
5614{
5615	int max_rings = 0;
5616
5617	if (BNXT_PF(bp)) {
5618		*max_tx = bp->pf.max_pf_tx_rings;
5619		*max_rx = bp->pf.max_pf_rx_rings;
5620		max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
5621		max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
5622	} else {
5623#ifdef CONFIG_BNXT_SRIOV
5624		*max_tx = bp->vf.max_tx_rings;
5625		*max_rx = bp->vf.max_rx_rings;
5626		max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
5627		max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
5628#endif
5629	}
5630	if (bp->flags & BNXT_FLAG_AGG_RINGS)
5631		*max_rx >>= 1;
5632
5633	*max_rx = min_t(int, *max_rx, max_rings);
5634	*max_tx = min_t(int, *max_tx, max_rings);
5635}
5636
5637static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5638{
5639	static int version_printed;
5640	struct net_device *dev;
5641	struct bnxt *bp;
5642	int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings;
5643
5644	if (version_printed++ == 0)
5645		pr_info("%s", version);
5646
5647	max_irqs = bnxt_get_max_irq(pdev);
5648	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
5649	if (!dev)
5650		return -ENOMEM;
5651
5652	bp = netdev_priv(dev);
5653
5654	if (bnxt_vf_pciid(ent->driver_data))
5655		bp->flags |= BNXT_FLAG_VF;
5656
5657	if (pdev->msix_cap) {
5658		bp->flags |= BNXT_FLAG_MSIX_CAP;
5659		if (BNXT_PF(bp))
5660			bp->flags |= BNXT_FLAG_RFS;
5661	}
5662
5663	rc = bnxt_init_board(pdev, dev);
5664	if (rc < 0)
5665		goto init_err_free;
5666
5667	dev->netdev_ops = &bnxt_netdev_ops;
5668	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
5669	dev->ethtool_ops = &bnxt_ethtool_ops;
5670
5671	pci_set_drvdata(pdev, dev);
5672
5673	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
5674			   NETIF_F_TSO | NETIF_F_TSO6 |
5675			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
5676			   NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
5677			   NETIF_F_RXHASH |
5678			   NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
5679
5680	if (bp->flags & BNXT_FLAG_RFS)
5681		dev->hw_features |= NETIF_F_NTUPLE;
5682
5683	dev->hw_enc_features =
5684			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
5685			NETIF_F_TSO | NETIF_F_TSO6 |
5686			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
5687			NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
5688	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
5689	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
5690			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
5691	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
5692	dev->priv_flags |= IFF_UNICAST_FLT;
5693
5694#ifdef CONFIG_BNXT_SRIOV
5695	init_waitqueue_head(&bp->sriov_cfg_wait);
5696#endif
5697	rc = bnxt_alloc_hwrm_resources(bp);
5698	if (rc)
5699		goto init_err;
5700
5701	mutex_init(&bp->hwrm_cmd_lock);
5702	bnxt_hwrm_ver_get(bp);
5703
5704	rc = bnxt_hwrm_func_drv_rgtr(bp);
5705	if (rc)
5706		goto init_err;
5707
5708	/* Get the MAX capabilities for this function */
5709	rc = bnxt_hwrm_func_qcaps(bp);
5710	if (rc) {
5711		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
5712			   rc);
5713		rc = -1;
5714		goto init_err;
5715	}
5716
5717	rc = bnxt_hwrm_queue_qportcfg(bp);
5718	if (rc) {
5719		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
5720			   rc);
5721		rc = -1;
5722		goto init_err;
5723	}
5724
5725	bnxt_set_tpa_flags(bp);
5726	bnxt_set_ring_params(bp);
5727	dflt_rings = netif_get_num_default_rss_queues();
5728	if (BNXT_PF(bp))
5729		bp->pf.max_irqs = max_irqs;
5730#if defined(CONFIG_BNXT_SRIOV)
5731	else
5732		bp->vf.max_irqs = max_irqs;
5733#endif
5734	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
5735	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
5736	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
5737	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
5738	bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
5739	bp->num_stat_ctxs = bp->cp_nr_rings;
5740
5741	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
5742		bp->flags |= BNXT_FLAG_STRIP_VLAN;
5743
5744	rc = bnxt_probe_phy(bp);
5745	if (rc)
5746		goto init_err;
5747
5748	rc = register_netdev(dev);
5749	if (rc)
5750		goto init_err;
5751
5752	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
5753		    board_info[ent->driver_data].name,
5754		    (long)pci_resource_start(pdev, 0), dev->dev_addr);
5755
5756	return 0;
5757
5758init_err:
5759	pci_iounmap(pdev, bp->bar0);
5760	pci_release_regions(pdev);
5761	pci_disable_device(pdev);
5762
5763init_err_free:
5764	free_netdev(dev);
5765	return rc;
5766}
5767
5768static struct pci_driver bnxt_pci_driver = {
5769	.name		= DRV_MODULE_NAME,
5770	.id_table	= bnxt_pci_tbl,
5771	.probe		= bnxt_init_one,
5772	.remove		= bnxt_remove_one,
5773#if defined(CONFIG_BNXT_SRIOV)
5774	.sriov_configure = bnxt_sriov_configure,
5775#endif
5776};
5777
5778module_pci_driver(bnxt_pci_driver);
5779