1/*
2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
3 *
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses.  You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 *     Redistribution and use in source and binary forms, with or
13 *     without modification, are permitted provided that the following
14 *     conditions are met:
15 *
16 *      - Redistributions of source code must retain the above
17 *        copyright notice, this list of conditions and the following
18 *        disclaimer.
19 *
20 *      - Redistributions in binary form must reproduce the above
21 *        copyright notice, this list of conditions and the following
22 *        disclaimer in the documentation and/or other materials
23 *        provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/etherdevice.h>
38#include <linux/if_vlan.h>
39#include <linux/ip.h>
40#include <linux/dma-mapping.h>
41#include <linux/jiffies.h>
42#include <linux/prefetch.h>
43#include <linux/export.h>
44#include <net/ipv6.h>
45#include <net/tcp.h>
46#ifdef CONFIG_NET_RX_BUSY_POLL
47#include <net/busy_poll.h>
48#endif /* CONFIG_NET_RX_BUSY_POLL */
49#ifdef CONFIG_CHELSIO_T4_FCOE
50#include <scsi/fc/fc_fcoe.h>
51#endif /* CONFIG_CHELSIO_T4_FCOE */
52#include "cxgb4.h"
53#include "t4_regs.h"
54#include "t4_values.h"
55#include "t4_msg.h"
56#include "t4fw_api.h"
57
58/*
59 * Rx buffer size.  We use largish buffers if possible but settle for single
60 * pages under memory shortage.
61 */
62#if PAGE_SHIFT >= 16
63# define FL_PG_ORDER 0
64#else
65# define FL_PG_ORDER (16 - PAGE_SHIFT)
66#endif
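/* For example, with 4KB pages (PAGE_SHIFT == 12) this works out to
 * FL_PG_ORDER == 4, i.e. order-4 (64KB) Free List buffers, while on a
 * 64KB-page system we stay with single (order-0) pages.
 */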
67
68/* RX_PULL_LEN should be <= RX_COPY_THRES */
69#define RX_COPY_THRES    256
70#define RX_PULL_LEN      128
71
72/*
73 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
74 * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
75 */
76#define RX_PKT_SKB_LEN   512
77
78/*
79 * Max number of Tx descriptors we clean up at a time.  Should be modest as
80 * freeing skbs isn't cheap and it happens while holding locks.  We just need
81 * to free packets faster than they arrive, we eventually catch up and keep
82 * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
83 */
84#define MAX_TX_RECLAIM 16
85
86/*
87 * Max number of Rx buffers we replenish at a time.  Again keep this modest,
88 * allocating buffers isn't cheap either.
89 */
90#define MAX_RX_REFILL 16U
91
92/*
93 * Period of the Rx queue check timer.  This timer is infrequent as it has
94 * something to do only when the system experiences severe memory shortage.
95 */
96#define RX_QCHECK_PERIOD (HZ / 2)
97
98/*
99 * Period of the Tx queue check timer.
100 */
101#define TX_QCHECK_PERIOD (HZ / 2)
102
103/* SGE Hung Ingress DMA Threshold Warning time (in Hz) and Warning Repeat Rate
104 * (in RX_QCHECK_PERIOD multiples).  If we find one of the SGE Ingress DMA
105 * State Machines in the same state for this amount of time (in HZ) then we'll
106 * issue a warning about a potential hang.  We'll repeat the warning as the
107 * SGE Ingress DMA Channel appears to be hung every N RX_QCHECK_PERIODs till
108 * the situation clears.  If the situation clears, we'll note that as well.
109 */
110#define SGE_IDMA_WARN_THRESH (1 * HZ)
111#define SGE_IDMA_WARN_REPEAT (20 * RX_QCHECK_PERIOD)
112
113/*
114 * Max number of Tx descriptors to be reclaimed by the Tx timer.
115 */
116#define MAX_TIMER_TX_RECLAIM 100
117
118/*
119 * Timer index used when backing off due to memory shortage.
120 */
121#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
122
123/*
124 * Suspend an Ethernet Tx queue with fewer available descriptors than this.
125 * This is the same as calc_tx_descs() for a TSO packet with
126 * nr_frags == MAX_SKB_FRAGS.
127 */
128#define ETHTXQ_STOP_THRES \
129	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
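/* As a worked example, assuming the common MAX_SKB_FRAGS value of 17 (4KB
 * pages): (3 * 17) / 2 + (17 & 1) = 26, DIV_ROUND_UP(26, 8) = 4, so the
 * queue is suspended once fewer than 1 + 4 = 5 descriptors remain free.
 */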
130
131/*
132 * Suspension threshold for non-Ethernet Tx queues.  We require enough room
133 * for a full sized WR.
134 */
135#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
136
137/*
138 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
139 * into a WR.
140 */
141#define MAX_IMM_TX_PKT_LEN 256
142
143/*
144 * Max size of a WR sent through a control Tx queue.
145 */
146#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
147
148struct tx_sw_desc {                /* SW state per Tx descriptor */
149	struct sk_buff *skb;
150	struct ulptx_sgl *sgl;
151};
152
153struct rx_sw_desc {                /* SW state per Rx descriptor */
154	struct page *page;
155	dma_addr_t dma_addr;
156};
157
/*
 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
 * buffer).  We currently only support two sizes for 1500- and 9000-byte MTUs.
 * We could easily support more but there doesn't seem to be much need for
 * that ...
 */
164#define FL_MTU_SMALL 1500
165#define FL_MTU_LARGE 9000
166
167static inline unsigned int fl_mtu_bufsize(struct adapter *adapter,
168					  unsigned int mtu)
169{
170	struct sge *s = &adapter->sge;
171
172	return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align);
173}
174
175#define FL_MTU_SMALL_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_SMALL)
176#define FL_MTU_LARGE_BUFSIZE(adapter) fl_mtu_bufsize(adapter, FL_MTU_LARGE)
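/* For example, assuming the driver's usual 2-byte packet shift and a 32-byte
 * fl_align, FL_MTU_SMALL_BUFSIZE works out to ALIGN(2 + 14 + 4 + 1500, 32) =
 * 1536 bytes and FL_MTU_LARGE_BUFSIZE to ALIGN(2 + 14 + 4 + 9000, 32) = 9024
 * bytes.
 */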
177
/*
 * The low bits of rx_sw_desc.dma_addr have special meaning.  The hardware uses
 * bits 0..3 to specify the buffer size as an index into the SGE Free List
 * Buffer Size register array.  We also use bit 4, when the buffer has been
 * unmapped for DMA, but this is of course never sent to the hardware and is
 * only used to prevent double unmappings.  All of the above requires that the
 * Free List Buffers which we allocate have the bottom 5 bits free (0) -- i.e.
 * are 32-byte aligned or a power of 2 greater in alignment.  Since the SGE's
 * minimal Free List Buffer alignment is 32 bytes, this works out for us ...
 */
188enum {
189	RX_BUF_FLAGS     = 0x1f,   /* bottom five bits are special */
	RX_BUF_SIZE      = 0x0f,   /* bottom four bits are for buf sizes */
191	RX_UNMAPPED_BUF  = 0x10,   /* buffer is not mapped */
192
193	/*
194	 * XXX We shouldn't depend on being able to use these indices.
195	 * XXX Especially when some other Master PF has initialized the
196	 * XXX adapter or we use the Firmware Configuration File.  We
197	 * XXX should really search through the Host Buffer Size register
198	 * XXX array for the appropriately sized buffer indices.
199	 */
200	RX_SMALL_PG_BUF  = 0x0,   /* small (PAGE_SIZE) page buffer */
	RX_LARGE_PG_BUF  = 0x1,   /* large (FL_PG_ORDER) page buffer */
202
203	RX_SMALL_MTU_BUF = 0x2,   /* small MTU buffer */
204	RX_LARGE_MTU_BUF = 0x3,   /* large MTU buffer */
205};
206
207static int timer_pkt_quota[] = {1, 1, 2, 3, 4, 5};
208#define MIN_NAPI_WORK  1
209
210static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
211{
212	return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS;
213}
214
215static inline bool is_buf_mapped(const struct rx_sw_desc *d)
216{
217	return !(d->dma_addr & RX_UNMAPPED_BUF);
218}
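/* For example, a large-page Free List buffer mapped at the (hypothetical) DMA
 * address 0x12340000 is stored in dma_addr as 0x12340001, i.e. with
 * RX_LARGE_PG_BUF in the low bits; get_buf_addr() masks those flag bits off
 * again before the address is handed back to the DMA API.
 */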
219
220/**
221 *	txq_avail - return the number of available slots in a Tx queue
222 *	@q: the Tx queue
223 *
224 *	Returns the number of descriptors in a Tx queue available to write new
225 *	packets.
226 */
227static inline unsigned int txq_avail(const struct sge_txq *q)
228{
229	return q->size - 1 - q->in_use;
230}
231
232/**
233 *	fl_cap - return the capacity of a free-buffer list
234 *	@fl: the FL
235 *
236 *	Returns the capacity of a free-buffer list.  The capacity is less than
237 *	the size because one descriptor needs to be left unpopulated, otherwise
238 *	HW will think the FL is empty.
239 */
240static inline unsigned int fl_cap(const struct sge_fl *fl)
241{
242	return fl->size - 8;   /* 1 descriptor = 8 buffers */
243}
244
245/**
246 *	fl_starving - return whether a Free List is starving.
247 *	@adapter: pointer to the adapter
248 *	@fl: the Free List
249 *
250 *	Tests specified Free List to see whether the number of buffers
 *	available to the hardware has fallen below our "starvation"
252 *	threshold.
253 */
254static inline bool fl_starving(const struct adapter *adapter,
255			       const struct sge_fl *fl)
256{
257	const struct sge *s = &adapter->sge;
258
259	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
260}
261
262static int map_skb(struct device *dev, const struct sk_buff *skb,
263		   dma_addr_t *addr)
264{
265	const skb_frag_t *fp, *end;
266	const struct skb_shared_info *si;
267
268	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
269	if (dma_mapping_error(dev, *addr))
270		goto out_err;
271
272	si = skb_shinfo(skb);
273	end = &si->frags[si->nr_frags];
274
275	for (fp = si->frags; fp < end; fp++) {
276		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
277					   DMA_TO_DEVICE);
278		if (dma_mapping_error(dev, *addr))
279			goto unwind;
280	}
281	return 0;
282
283unwind:
284	while (fp-- > si->frags)
285		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
286
287	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
288out_err:
289	return -ENOMEM;
290}
291
292#ifdef CONFIG_NEED_DMA_MAP_STATE
293static void unmap_skb(struct device *dev, const struct sk_buff *skb,
294		      const dma_addr_t *addr)
295{
296	const skb_frag_t *fp, *end;
297	const struct skb_shared_info *si;
298
299	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
300
301	si = skb_shinfo(skb);
302	end = &si->frags[si->nr_frags];
303	for (fp = si->frags; fp < end; fp++)
304		dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE);
305}
306
307/**
308 *	deferred_unmap_destructor - unmap a packet when it is freed
309 *	@skb: the packet
310 *
311 *	This is the packet destructor used for Tx packets that need to remain
312 *	mapped until they are freed rather than until their Tx descriptors are
313 *	freed.
314 */
315static void deferred_unmap_destructor(struct sk_buff *skb)
316{
317	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
318}
319#endif
320
321static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
322		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
323{
324	const struct ulptx_sge_pair *p;
325	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
326
327	if (likely(skb_headlen(skb)))
328		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
329				 DMA_TO_DEVICE);
330	else {
331		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
332			       DMA_TO_DEVICE);
333		nfrags--;
334	}
335
336	/*
337	 * the complexity below is because of the possibility of a wrap-around
338	 * in the middle of an SGL
339	 */
340	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
341		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
342unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
343				       ntohl(p->len[0]), DMA_TO_DEVICE);
344			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
345				       ntohl(p->len[1]), DMA_TO_DEVICE);
346			p++;
347		} else if ((u8 *)p == (u8 *)q->stat) {
348			p = (const struct ulptx_sge_pair *)q->desc;
349			goto unmap;
350		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
351			const __be64 *addr = (const __be64 *)q->desc;
352
353			dma_unmap_page(dev, be64_to_cpu(addr[0]),
354				       ntohl(p->len[0]), DMA_TO_DEVICE);
355			dma_unmap_page(dev, be64_to_cpu(addr[1]),
356				       ntohl(p->len[1]), DMA_TO_DEVICE);
357			p = (const struct ulptx_sge_pair *)&addr[2];
358		} else {
359			const __be64 *addr = (const __be64 *)q->desc;
360
361			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
362				       ntohl(p->len[0]), DMA_TO_DEVICE);
363			dma_unmap_page(dev, be64_to_cpu(addr[0]),
364				       ntohl(p->len[1]), DMA_TO_DEVICE);
365			p = (const struct ulptx_sge_pair *)&addr[1];
366		}
367	}
368	if (nfrags) {
369		__be64 addr;
370
371		if ((u8 *)p == (u8 *)q->stat)
372			p = (const struct ulptx_sge_pair *)q->desc;
373		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
374						       *(const __be64 *)q->desc;
375		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
376			       DMA_TO_DEVICE);
377	}
378}
379
380/**
381 *	free_tx_desc - reclaims Tx descriptors and their buffers
 *	@adap: the adapter
383 *	@q: the Tx queue to reclaim descriptors from
384 *	@n: the number of descriptors to reclaim
385 *	@unmap: whether the buffers should be unmapped for DMA
386 *
387 *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
388 *	Tx buffers.  Called with the Tx queue lock held.
389 */
390static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
391			 unsigned int n, bool unmap)
392{
393	struct tx_sw_desc *d;
394	unsigned int cidx = q->cidx;
395	struct device *dev = adap->pdev_dev;
396
397	d = &q->sdesc[cidx];
398	while (n--) {
399		if (d->skb) {                       /* an SGL is present */
400			if (unmap)
401				unmap_sgl(dev, d->skb, d->sgl, q);
402			dev_consume_skb_any(d->skb);
403			d->skb = NULL;
404		}
405		++d;
406		if (++cidx == q->size) {
407			cidx = 0;
408			d = q->sdesc;
409		}
410	}
411	q->cidx = cidx;
412}
413
414/*
415 * Return the number of reclaimable descriptors in a Tx queue.
416 */
417static inline int reclaimable(const struct sge_txq *q)
418{
419	int hw_cidx = ntohs(q->stat->cidx);
420	hw_cidx -= q->cidx;
421	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
422}
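/* For example, with q->size == 1024, q->cidx == 1020 and a hardware cidx of
 * 2, the difference is -1018 and the wrap-around correction yields 6
 * reclaimable descriptors.
 */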
423
424/**
425 *	reclaim_completed_tx - reclaims completed Tx descriptors
426 *	@adap: the adapter
427 *	@q: the Tx queue to reclaim completed descriptors from
428 *	@unmap: whether the buffers should be unmapped for DMA
429 *
430 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
431 *	and frees the associated buffers if possible.  Called with the Tx
432 *	queue locked.
433 */
434static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
435					bool unmap)
436{
437	int avail = reclaimable(q);
438
439	if (avail) {
440		/*
441		 * Limit the amount of clean up work we do at a time to keep
442		 * the Tx lock hold time O(1).
443		 */
444		if (avail > MAX_TX_RECLAIM)
445			avail = MAX_TX_RECLAIM;
446
447		free_tx_desc(adap, q, avail, unmap);
448		q->in_use -= avail;
449	}
450}
451
452static inline int get_buf_size(struct adapter *adapter,
453			       const struct rx_sw_desc *d)
454{
455	struct sge *s = &adapter->sge;
456	unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE;
457	int buf_size;
458
459	switch (rx_buf_size_idx) {
460	case RX_SMALL_PG_BUF:
461		buf_size = PAGE_SIZE;
462		break;
463
464	case RX_LARGE_PG_BUF:
465		buf_size = PAGE_SIZE << s->fl_pg_order;
466		break;
467
468	case RX_SMALL_MTU_BUF:
469		buf_size = FL_MTU_SMALL_BUFSIZE(adapter);
470		break;
471
472	case RX_LARGE_MTU_BUF:
473		buf_size = FL_MTU_LARGE_BUFSIZE(adapter);
474		break;
475
476	default:
477		BUG_ON(1);
478	}
479
480	return buf_size;
481}
482
483/**
484 *	free_rx_bufs - free the Rx buffers on an SGE free list
485 *	@adap: the adapter
486 *	@q: the SGE free list to free buffers from
487 *	@n: how many buffers to free
488 *
489 *	Release the next @n buffers on an SGE free-buffer Rx queue.   The
490 *	buffers must be made inaccessible to HW before calling this function.
491 */
492static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
493{
494	while (n--) {
495		struct rx_sw_desc *d = &q->sdesc[q->cidx];
496
497		if (is_buf_mapped(d))
498			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
499				       get_buf_size(adap, d),
500				       PCI_DMA_FROMDEVICE);
501		put_page(d->page);
502		d->page = NULL;
503		if (++q->cidx == q->size)
504			q->cidx = 0;
505		q->avail--;
506	}
507}
508
509/**
510 *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
511 *	@adap: the adapter
512 *	@q: the SGE free list
513 *
514 *	Unmap the current buffer on an SGE free-buffer Rx queue.   The
515 *	buffer must be made inaccessible to HW before calling this function.
516 *
517 *	This is similar to @free_rx_bufs above but does not free the buffer.
518 *	Do note that the FL still loses any further access to the buffer.
519 */
520static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
521{
522	struct rx_sw_desc *d = &q->sdesc[q->cidx];
523
524	if (is_buf_mapped(d))
525		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
526			       get_buf_size(adap, d), PCI_DMA_FROMDEVICE);
527	d->page = NULL;
528	if (++q->cidx == q->size)
529		q->cidx = 0;
530	q->avail--;
531}
532
533static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
534{
535	u32 val;
536	if (q->pend_cred >= 8) {
537		if (is_t4(adap->params.chip))
538			val = PIDX_V(q->pend_cred / 8);
539		else
540			val = PIDX_T5_V(q->pend_cred / 8) |
541				DBTYPE_F;
542		val |= DBPRIO_F;
543		wmb();
544
545		/* If we don't have access to the new User Doorbell (T5+), use
546		 * the old doorbell mechanism; otherwise use the new BAR2
547		 * mechanism.
548		 */
549		if (unlikely(q->bar2_addr == NULL)) {
550			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
551				     val | QID_V(q->cntxt_id));
552		} else {
553			writel(val | QID_V(q->bar2_qid),
554			       q->bar2_addr + SGE_UDB_KDOORBELL);
555
556			/* This Write memory Barrier will force the write to
557			 * the User Doorbell area to be flushed.
558			 */
559			wmb();
560		}
561		q->pend_cred &= 7;
562	}
563}
564
565static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
566				  dma_addr_t mapping)
567{
568	sd->page = pg;
569	sd->dma_addr = mapping;      /* includes size low bits */
570}
571
572/**
573 *	refill_fl - refill an SGE Rx buffer ring
574 *	@adap: the adapter
575 *	@q: the ring to refill
576 *	@n: the number of new buffers to allocate
577 *	@gfp: the gfp flags for the allocations
578 *
579 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
580 *	allocated with the supplied gfp flags.  The caller must assure that
581 *	@n does not exceed the queue's capacity.  If afterwards the queue is
582 *	found critically low mark it as starving in the bitmap of starving FLs.
583 *
584 *	Returns the number of buffers allocated.
585 */
586static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
587			      gfp_t gfp)
588{
589	struct sge *s = &adap->sge;
590	struct page *pg;
591	dma_addr_t mapping;
592	unsigned int cred = q->avail;
593	__be64 *d = &q->desc[q->pidx];
594	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
595	int node;
596
597	gfp |= __GFP_NOWARN;
598	node = dev_to_node(adap->pdev_dev);
599
600	if (s->fl_pg_order == 0)
601		goto alloc_small_pages;
602
603	/*
604	 * Prefer large buffers
605	 */
606	while (n) {
607		pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order);
608		if (unlikely(!pg)) {
609			q->large_alloc_failed++;
610			break;       /* fall back to single pages */
611		}
612
613		mapping = dma_map_page(adap->pdev_dev, pg, 0,
614				       PAGE_SIZE << s->fl_pg_order,
615				       PCI_DMA_FROMDEVICE);
616		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
617			__free_pages(pg, s->fl_pg_order);
618			goto out;   /* do not try small pages for this error */
619		}
620		mapping |= RX_LARGE_PG_BUF;
621		*d++ = cpu_to_be64(mapping);
622
623		set_rx_sw_desc(sd, pg, mapping);
624		sd++;
625
626		q->avail++;
627		if (++q->pidx == q->size) {
628			q->pidx = 0;
629			sd = q->sdesc;
630			d = q->desc;
631		}
632		n--;
633	}
634
635alloc_small_pages:
636	while (n--) {
637		pg = alloc_pages_node(node, gfp, 0);
638		if (unlikely(!pg)) {
639			q->alloc_failed++;
640			break;
641		}
642
643		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
644				       PCI_DMA_FROMDEVICE);
645		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
646			put_page(pg);
647			goto out;
648		}
649		*d++ = cpu_to_be64(mapping);
650
651		set_rx_sw_desc(sd, pg, mapping);
652		sd++;
653
654		q->avail++;
655		if (++q->pidx == q->size) {
656			q->pidx = 0;
657			sd = q->sdesc;
658			d = q->desc;
659		}
660	}
661
662out:	cred = q->avail - cred;
663	q->pend_cred += cred;
664	ring_fl_db(adap, q);
665
666	if (unlikely(fl_starving(adap, q))) {
667		smp_wmb();
668		set_bit(q->cntxt_id - adap->sge.egr_start,
669			adap->sge.starving_fl);
670	}
671
672	return cred;
673}
674
675static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
676{
677	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
678		  GFP_ATOMIC);
679}
680
681/**
682 *	alloc_ring - allocate resources for an SGE descriptor ring
683 *	@dev: the PCI device's core device
684 *	@nelem: the number of descriptors
685 *	@elem_size: the size of each descriptor
686 *	@sw_size: the size of the SW state associated with each ring element
687 *	@phys: the physical address of the allocated ring
688 *	@metadata: address of the array holding the SW state for the ring
689 *	@stat_size: extra space in HW ring for status information
690 *	@node: preferred node for memory allocations
691 *
692 *	Allocates resources for an SGE descriptor ring, such as Tx queues,
693 *	free buffer lists, or response queues.  Each SGE ring requires
694 *	space for its HW descriptors plus, optionally, space for the SW state
695 *	associated with each HW entry (the metadata).  The function returns
696 *	three values: the virtual address for the HW ring (the return value
697 *	of the function), the bus address of the HW ring, and the address
698 *	of the SW ring.
699 */
700static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
701			size_t sw_size, dma_addr_t *phys, void *metadata,
702			size_t stat_size, int node)
703{
704	size_t len = nelem * elem_size + stat_size;
705	void *s = NULL;
706	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
707
708	if (!p)
709		return NULL;
710	if (sw_size) {
711		s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
712
713		if (!s) {
714			dma_free_coherent(dev, len, p, *phys);
715			return NULL;
716		}
717	}
718	if (metadata)
719		*(void **)metadata = s;
720	memset(p, 0, len);
721	return p;
722}
723
724/**
725 *	sgl_len - calculates the size of an SGL of the given capacity
726 *	@n: the number of SGL entries
727 *
728 *	Calculates the number of flits needed for a scatter/gather list that
729 *	can hold the given number of entries.
730 */
731static inline unsigned int sgl_len(unsigned int n)
732{
733	/* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
734	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
735	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
736	 * repeated sequences of { Length[i], Length[i+1], Address[i],
737	 * Address[i+1] } (this ensures that all addresses are on 64-bit
738	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
739	 * Address[N+1] is omitted.
740	 *
741	 * The following calculation incorporates all of the above.  It's
742	 * somewhat hard to follow but, briefly: the "+2" accounts for the
743	 * first two flits which include the DSGL header, Length0 and
	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
	 * flits for every complete pair of the remaining n-1 entries, plus one
	 * more from the truncating division when (n-1) is odd); and finally
	 * the "+((n-1)&1)" adds the last flit needed when (n-1) is odd ...
748	 */
749	n--;
750	return (3 * n) / 2 + (n & 1) + 2;
751}
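/* For example, for n == 3 SGL entries: after the decrement n == 2, giving
 * (3 * 2) / 2 + (2 & 1) + 2 = 5 flits -- two flits for the ULPTX header,
 * Length0 and Address0, plus three flits for the remaining { Length1,
 * Length2, Address1, Address2 } group.
 */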
752
753/**
754 *	flits_to_desc - returns the num of Tx descriptors for the given flits
755 *	@n: the number of flits
756 *
757 *	Returns the number of Tx descriptors needed for the supplied number
758 *	of flits.
759 */
760static inline unsigned int flits_to_desc(unsigned int n)
761{
762	BUG_ON(n > SGE_MAX_WR_LEN / 8);
763	return DIV_ROUND_UP(n, 8);
764}
765
766/**
767 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
768 *	@skb: the packet
769 *
770 *	Returns whether an Ethernet packet is small enough to fit as
771 *	immediate data. Return value corresponds to headroom required.
772 */
773static inline int is_eth_imm(const struct sk_buff *skb)
774{
775	int hdrlen = skb_shinfo(skb)->gso_size ?
776			sizeof(struct cpl_tx_pkt_lso_core) : 0;
777
778	hdrlen += sizeof(struct cpl_tx_pkt);
779	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
780		return hdrlen;
781	return 0;
782}
783
784/**
785 *	calc_tx_flits - calculate the number of flits for a packet Tx WR
786 *	@skb: the packet
787 *
788 *	Returns the number of flits needed for a Tx WR for the given Ethernet
789 *	packet, including the needed WR and CPL headers.
790 */
791static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
792{
793	unsigned int flits;
794	int hdrlen = is_eth_imm(skb);
795
796	/* If the skb is small enough, we can pump it out as a work request
797	 * with only immediate data.  In that case we just have to have the
798	 * TX Packet header plus the skb data in the Work Request.
799	 */
800
801	if (hdrlen)
802		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
803
804	/* Otherwise, we're going to have to construct a Scatter gather list
805	 * of the skb body and fragments.  We also include the flits necessary
806	 * for the TX Packet Work Request and CPL.  We always have a firmware
807	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
808	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
809	 * message or, if we're doing a Large Send Offload, an LSO CPL message
810	 * with an embedded TX Packet Write CPL message.
811	 */
	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
813	if (skb_shinfo(skb)->gso_size)
814		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
815			  sizeof(struct cpl_tx_pkt_lso_core) +
816			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
817	else
818		flits += (sizeof(struct fw_eth_tx_pkt_wr) +
819			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
820	return flits;
821}
822
823/**
824 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
825 *	@skb: the packet
826 *
827 *	Returns the number of Tx descriptors needed for the given Ethernet
828 *	packet, including the needed WR and CPL headers.
829 */
830static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
831{
832	return flits_to_desc(calc_tx_flits(skb));
833}
834
835/**
836 *	write_sgl - populate a scatter/gather list for a packet
837 *	@skb: the packet
838 *	@q: the Tx queue we are writing into
839 *	@sgl: starting location for writing the SGL
840 *	@end: points right after the end of the SGL
841 *	@start: start offset into skb main-body data to include in the SGL
842 *	@addr: the list of bus addresses for the SGL elements
843 *
844 *	Generates a gather list for the buffers that make up a packet.
845 *	The caller must provide adequate space for the SGL that will be written.
846 *	The SGL includes all of the packet's page fragments and the data in its
847 *	main body except for the first @start bytes.  @sgl must be 16-byte
848 *	aligned and within a Tx descriptor with available space.  @end points
849 *	right after the end of the SGL but does not account for any potential
850 *	wrap around, i.e., @end > @sgl.
851 */
852static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
853		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
854		      const dma_addr_t *addr)
855{
856	unsigned int i, len;
857	struct ulptx_sge_pair *to;
858	const struct skb_shared_info *si = skb_shinfo(skb);
859	unsigned int nfrags = si->nr_frags;
860	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
861
862	len = skb_headlen(skb) - start;
863	if (likely(len)) {
864		sgl->len0 = htonl(len);
865		sgl->addr0 = cpu_to_be64(addr[0] + start);
866		nfrags++;
867	} else {
868		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
869		sgl->addr0 = cpu_to_be64(addr[1]);
870	}
871
872	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
873			      ULPTX_NSGE_V(nfrags));
874	if (likely(--nfrags == 0))
875		return;
876	/*
877	 * Most of the complexity below deals with the possibility we hit the
878	 * end of the queue in the middle of writing the SGL.  For this case
879	 * only we create the SGL in a temporary buffer and then copy it.
880	 */
881	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
882
883	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
884		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
885		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
886		to->addr[0] = cpu_to_be64(addr[i]);
887		to->addr[1] = cpu_to_be64(addr[++i]);
888	}
889	if (nfrags) {
890		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
891		to->len[1] = cpu_to_be32(0);
892		to->addr[0] = cpu_to_be64(addr[i + 1]);
893	}
894	if (unlikely((u8 *)end > (u8 *)q->stat)) {
895		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
896
897		if (likely(part0))
898			memcpy(sgl->sge, buf, part0);
899		part1 = (u8 *)end - (u8 *)q->stat;
900		memcpy(q->desc, (u8 *)buf + part0, part1);
901		end = (void *)q->desc + part1;
902	}
903	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
904		*end = 0;
905}
906
/* This function copies a 64-byte coalesced work request to memory-mapped
 * BAR2 space.  For coalesced WRs the SGE fetches data from the FIFO instead
 * of from the host.
 */
911static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
912{
913	int count = 8;
914
915	while (count) {
916		writeq(*src, dst);
917		src++;
918		dst++;
919		count--;
920	}
921}
922
923/**
924 *	ring_tx_db - check and potentially ring a Tx queue's doorbell
925 *	@adap: the adapter
926 *	@q: the Tx queue
927 *	@n: number of new descriptors to give to HW
928 *
 *	Ring the doorbell for a Tx queue.
930 */
931static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
932{
933	wmb();            /* write descriptors before telling HW */
934
935	/* If we don't have access to the new User Doorbell (T5+), use the old
936	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
937	 */
938	if (unlikely(q->bar2_addr == NULL)) {
939		u32 val = PIDX_V(n);
940		unsigned long flags;
941
942		/* For T4 we need to participate in the Doorbell Recovery
943		 * mechanism.
944		 */
945		spin_lock_irqsave(&q->db_lock, flags);
946		if (!q->db_disabled)
947			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
948				     QID_V(q->cntxt_id) | val);
949		else
950			q->db_pidx_inc += n;
951		q->db_pidx = q->pidx;
952		spin_unlock_irqrestore(&q->db_lock, flags);
953	} else {
954		u32 val = PIDX_T5_V(n);
955
956		/* T4 and later chips share the same PIDX field offset within
957		 * the doorbell, but T5 and later shrank the field in order to
958		 * gain a bit for Doorbell Priority.  The field was absurdly
959		 * large in the first place (14 bits) so we just use the T5
960		 * and later limits and warn if a Queue ID is too large.
961		 */
962		WARN_ON(val & DBPRIO_F);
963
964		/* If we're only writing a single TX Descriptor and we can use
965		 * Inferred QID registers, we can use the Write Combining
966		 * Gather Buffer; otherwise we use the simple doorbell.
967		 */
968		if (n == 1 && q->bar2_qid == 0) {
969			int index = (q->pidx
970				     ? (q->pidx - 1)
971				     : (q->size - 1));
972			u64 *wr = (u64 *)&q->desc[index];
973
974			cxgb_pio_copy((u64 __iomem *)
975				      (q->bar2_addr + SGE_UDB_WCDOORBELL),
976				      wr);
977		} else {
978			writel(val | QID_V(q->bar2_qid),
979			       q->bar2_addr + SGE_UDB_KDOORBELL);
980		}
981
982		/* This Write Memory Barrier will force the write to the User
983		 * Doorbell area to be flushed.  This is needed to prevent
984		 * writes on different CPUs for the same queue from hitting
985		 * the adapter out of order.  This is required when some Work
986		 * Requests take the Write Combine Gather Buffer path (user
987		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
988		 * take the traditional path where we simply increment the
989		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
990		 * hardware DMA read the actual Work Request.
991		 */
992		wmb();
993	}
994}
995
996/**
997 *	inline_tx_skb - inline a packet's data into Tx descriptors
998 *	@skb: the packet
999 *	@q: the Tx queue where the packet will be inlined
1000 *	@pos: starting position in the Tx queue where to inline the packet
1001 *
1002 *	Inline a packet's contents directly into Tx descriptors, starting at
1003 *	the given position within the Tx DMA ring.
1004 *	Most of the complexity of this operation is dealing with wrap arounds
1005 *	in the middle of the packet we want to inline.
1006 */
1007static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
1008			  void *pos)
1009{
1010	u64 *p;
1011	int left = (void *)q->stat - pos;
1012
1013	if (likely(skb->len <= left)) {
1014		if (likely(!skb->data_len))
1015			skb_copy_from_linear_data(skb, pos, skb->len);
1016		else
1017			skb_copy_bits(skb, 0, pos, skb->len);
1018		pos += skb->len;
1019	} else {
1020		skb_copy_bits(skb, 0, pos, left);
1021		skb_copy_bits(skb, left, q->desc, skb->len - left);
1022		pos = (void *)q->desc + (skb->len - left);
1023	}
1024
1025	/* 0-pad to multiple of 16 */
1026	p = PTR_ALIGN(pos, 8);
1027	if ((uintptr_t)p & 8)
1028		*p = 0;
1029}
1030
1031/*
1032 * Figure out what HW csum a packet wants and return the appropriate control
1033 * bits.
1034 */
1035static u64 hwcsum(const struct sk_buff *skb)
1036{
1037	int csum_type;
1038	const struct iphdr *iph = ip_hdr(skb);
1039
1040	if (iph->version == 4) {
1041		if (iph->protocol == IPPROTO_TCP)
1042			csum_type = TX_CSUM_TCPIP;
1043		else if (iph->protocol == IPPROTO_UDP)
1044			csum_type = TX_CSUM_UDPIP;
1045		else {
1046nocsum:			/*
1047			 * unknown protocol, disable HW csum
1048			 * and hope a bad packet is detected
1049			 */
1050			return TXPKT_L4CSUM_DIS;
1051		}
1052	} else {
1053		/*
1054		 * this doesn't work with extension headers
1055		 */
1056		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1057
1058		if (ip6h->nexthdr == IPPROTO_TCP)
1059			csum_type = TX_CSUM_TCPIP6;
1060		else if (ip6h->nexthdr == IPPROTO_UDP)
1061			csum_type = TX_CSUM_UDPIP6;
1062		else
1063			goto nocsum;
1064	}
1065
1066	if (likely(csum_type >= TX_CSUM_TCPIP))
1067		return TXPKT_CSUM_TYPE(csum_type) |
1068			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
1069			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
1070	else {
1071		int start = skb_transport_offset(skb);
1072
1073		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
1074			TXPKT_CSUM_LOC(start + skb->csum_offset);
1075	}
1076}
1077
1078static void eth_txq_stop(struct sge_eth_txq *q)
1079{
1080	netif_tx_stop_queue(q->txq);
1081	q->q.stops++;
1082}
1083
1084static inline void txq_advance(struct sge_txq *q, unsigned int n)
1085{
1086	q->in_use += n;
1087	q->pidx += n;
1088	if (q->pidx >= q->size)
1089		q->pidx -= q->size;
1090}
1091
1092#ifdef CONFIG_CHELSIO_T4_FCOE
1093static inline int
1094cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
1095		  const struct port_info *pi, u64 *cntrl)
1096{
1097	const struct cxgb_fcoe *fcoe = &pi->fcoe;
1098
1099	if (!(fcoe->flags & CXGB_FCOE_ENABLED))
1100		return 0;
1101
1102	if (skb->protocol != htons(ETH_P_FCOE))
1103		return 0;
1104
1105	skb_reset_mac_header(skb);
1106	skb->mac_len = sizeof(struct ethhdr);
1107
1108	skb_set_network_header(skb, skb->mac_len);
1109	skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
1110
1111	if (!cxgb_fcoe_sof_eof_supported(adap, skb))
1112		return -ENOTSUPP;
1113
1114	/* FC CRC offload */
1115	*cntrl = TXPKT_CSUM_TYPE(TX_CSUM_FCOE) |
1116		     TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS |
1117		     TXPKT_CSUM_START(CXGB_FCOE_TXPKT_CSUM_START) |
1118		     TXPKT_CSUM_END(CXGB_FCOE_TXPKT_CSUM_END) |
1119		     TXPKT_CSUM_LOC(CXGB_FCOE_TXPKT_CSUM_END);
1120	return 0;
1121}
1122#endif /* CONFIG_CHELSIO_T4_FCOE */
1123
1124/**
1125 *	t4_eth_xmit - add a packet to an Ethernet Tx queue
1126 *	@skb: the packet
1127 *	@dev: the egress net device
1128 *
1129 *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
1130 */
1131netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1132{
1133	int len;
1134	u32 wr_mid;
1135	u64 cntrl, *end;
1136	int qidx, credits;
1137	unsigned int flits, ndesc;
1138	struct adapter *adap;
1139	struct sge_eth_txq *q;
1140	const struct port_info *pi;
1141	struct fw_eth_tx_pkt_wr *wr;
1142	struct cpl_tx_pkt_core *cpl;
1143	const struct skb_shared_info *ssi;
1144	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1145	bool immediate = false;
1146#ifdef CONFIG_CHELSIO_T4_FCOE
1147	int err;
1148#endif /* CONFIG_CHELSIO_T4_FCOE */
1149
1150	/*
1151	 * The chip min packet length is 10 octets but play safe and reject
1152	 * anything shorter than an Ethernet header.
1153	 */
1154	if (unlikely(skb->len < ETH_HLEN)) {
1155out_free:	dev_kfree_skb_any(skb);
1156		return NETDEV_TX_OK;
1157	}
1158
1159	pi = netdev_priv(dev);
1160	adap = pi->adapter;
1161	qidx = skb_get_queue_mapping(skb);
1162	q = &adap->sge.ethtxq[qidx + pi->first_qset];
1163
1164	reclaim_completed_tx(adap, &q->q, true);
1165	cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
1166
1167#ifdef CONFIG_CHELSIO_T4_FCOE
1168	err = cxgb_fcoe_offload(skb, adap, pi, &cntrl);
1169	if (unlikely(err == -ENOTSUPP))
1170		goto out_free;
1171#endif /* CONFIG_CHELSIO_T4_FCOE */
1172
1173	flits = calc_tx_flits(skb);
1174	ndesc = flits_to_desc(flits);
1175	credits = txq_avail(&q->q) - ndesc;
1176
1177	if (unlikely(credits < 0)) {
1178		eth_txq_stop(q);
1179		dev_err(adap->pdev_dev,
1180			"%s: Tx ring %u full while queue awake!\n",
1181			dev->name, qidx);
1182		return NETDEV_TX_BUSY;
1183	}
1184
1185	if (is_eth_imm(skb))
1186		immediate = true;
1187
1188	if (!immediate &&
1189	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
1190		q->mapping_err++;
1191		goto out_free;
1192	}
1193
1194	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1195	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1196		eth_txq_stop(q);
1197		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1198	}
1199
1200	wr = (void *)&q->q.desc[q->q.pidx];
1201	wr->equiq_to_len16 = htonl(wr_mid);
1202	wr->r3 = cpu_to_be64(0);
1203	end = (u64 *)wr + flits;
1204
1205	len = immediate ? skb->len : 0;
1206	ssi = skb_shinfo(skb);
1207	if (ssi->gso_size) {
1208		struct cpl_tx_pkt_lso *lso = (void *)wr;
1209		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1210		int l3hdr_len = skb_network_header_len(skb);
1211		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1212
1213		len += sizeof(*lso);
1214		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1215				       FW_WR_IMMDLEN_V(len));
1216		lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
1217					LSO_FIRST_SLICE | LSO_LAST_SLICE |
1218					LSO_IPV6(v6) |
1219					LSO_ETHHDR_LEN(eth_xtra_len / 4) |
1220					LSO_IPHDR_LEN(l3hdr_len / 4) |
1221					LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
1222		lso->c.ipid_ofst = htons(0);
1223		lso->c.mss = htons(ssi->gso_size);
1224		lso->c.seqno_offset = htonl(0);
1225		if (is_t4(adap->params.chip))
1226			lso->c.len = htonl(skb->len);
1227		else
1228			lso->c.len = htonl(LSO_T5_XFER_SIZE(skb->len));
1229		cpl = (void *)(lso + 1);
1230		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1231			TXPKT_IPHDR_LEN(l3hdr_len) |
1232			TXPKT_ETHHDR_LEN(eth_xtra_len);
1233		q->tso++;
1234		q->tx_cso += ssi->gso_segs;
1235	} else {
1236		len += sizeof(*cpl);
1237		wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
1238				       FW_WR_IMMDLEN_V(len));
1239		cpl = (void *)(wr + 1);
1240		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1241			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
1242			q->tx_cso++;
1243		}
1244	}
1245
1246	if (skb_vlan_tag_present(skb)) {
1247		q->vlan_ins++;
1248		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
1249#ifdef CONFIG_CHELSIO_T4_FCOE
1250		if (skb->protocol == htons(ETH_P_FCOE))
1251			cntrl |= TXPKT_VLAN(
1252				 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT));
1253#endif /* CONFIG_CHELSIO_T4_FCOE */
1254	}
1255
1256	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
1257			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn));
1258	cpl->pack = htons(0);
1259	cpl->len = htons(skb->len);
1260	cpl->ctrl1 = cpu_to_be64(cntrl);
1261
1262	if (immediate) {
1263		inline_tx_skb(skb, &q->q, cpl + 1);
1264		dev_consume_skb_any(skb);
1265	} else {
1266		int last_desc;
1267
1268		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
1269			  addr);
1270		skb_orphan(skb);
1271
1272		last_desc = q->q.pidx + ndesc - 1;
1273		if (last_desc >= q->q.size)
1274			last_desc -= q->q.size;
1275		q->q.sdesc[last_desc].skb = skb;
1276		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
1277	}
1278
1279	txq_advance(&q->q, ndesc);
1280
1281	ring_tx_db(adap, &q->q, ndesc);
1282	return NETDEV_TX_OK;
1283}
1284
1285/**
1286 *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1287 *	@q: the SGE control Tx queue
1288 *
1289 *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1290 *	that send only immediate data (presently just the control queues) and
1291 *	thus do not have any sk_buffs to release.
1292 */
1293static inline void reclaim_completed_tx_imm(struct sge_txq *q)
1294{
1295	int hw_cidx = ntohs(q->stat->cidx);
1296	int reclaim = hw_cidx - q->cidx;
1297
1298	if (reclaim < 0)
1299		reclaim += q->size;
1300
1301	q->in_use -= reclaim;
1302	q->cidx = hw_cidx;
1303}
1304
1305/**
1306 *	is_imm - check whether a packet can be sent as immediate data
1307 *	@skb: the packet
1308 *
1309 *	Returns true if a packet can be sent as a WR with immediate data.
1310 */
1311static inline int is_imm(const struct sk_buff *skb)
1312{
1313	return skb->len <= MAX_CTRL_WR_LEN;
1314}
1315
1316/**
1317 *	ctrlq_check_stop - check if a control queue is full and should stop
1318 *	@q: the queue
1319 *	@wr: most recent WR written to the queue
1320 *
1321 *	Check if a control queue has become full and should be stopped.
1322 *	We clean up control queue descriptors very lazily, only when we are out.
1323 *	If the queue is still full after reclaiming any completed descriptors
1324 *	we suspend it and have the last WR wake it up.
1325 */
1326static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
1327{
1328	reclaim_completed_tx_imm(&q->q);
1329	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1330		wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1331		q->q.stops++;
1332		q->full = 1;
1333	}
1334}
1335
1336/**
1337 *	ctrl_xmit - send a packet through an SGE control Tx queue
1338 *	@q: the control queue
1339 *	@skb: the packet
1340 *
1341 *	Send a packet through an SGE control Tx queue.  Packets sent through
1342 *	a control queue must fit entirely as immediate data.
1343 */
1344static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
1345{
1346	unsigned int ndesc;
1347	struct fw_wr_hdr *wr;
1348
1349	if (unlikely(!is_imm(skb))) {
1350		WARN_ON(1);
1351		dev_kfree_skb(skb);
1352		return NET_XMIT_DROP;
1353	}
1354
1355	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
1356	spin_lock(&q->sendq.lock);
1357
1358	if (unlikely(q->full)) {
1359		skb->priority = ndesc;                  /* save for restart */
1360		__skb_queue_tail(&q->sendq, skb);
1361		spin_unlock(&q->sendq.lock);
1362		return NET_XMIT_CN;
1363	}
1364
1365	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1366	inline_tx_skb(skb, &q->q, wr);
1367
1368	txq_advance(&q->q, ndesc);
1369	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
1370		ctrlq_check_stop(q, wr);
1371
1372	ring_tx_db(q->adap, &q->q, ndesc);
1373	spin_unlock(&q->sendq.lock);
1374
1375	kfree_skb(skb);
1376	return NET_XMIT_SUCCESS;
1377}
1378
1379/**
1380 *	restart_ctrlq - restart a suspended control queue
1381 *	@data: the control queue to restart
1382 *
1383 *	Resumes transmission on a suspended Tx control queue.
1384 */
1385static void restart_ctrlq(unsigned long data)
1386{
1387	struct sk_buff *skb;
1388	unsigned int written = 0;
1389	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
1390
1391	spin_lock(&q->sendq.lock);
1392	reclaim_completed_tx_imm(&q->q);
1393	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */
1394
1395	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
1396		struct fw_wr_hdr *wr;
1397		unsigned int ndesc = skb->priority;     /* previously saved */
1398
1399		/*
1400		 * Write descriptors and free skbs outside the lock to limit
1401		 * wait times.  q->full is still set so new skbs will be queued.
1402		 */
1403		spin_unlock(&q->sendq.lock);
1404
1405		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
1406		inline_tx_skb(skb, &q->q, wr);
1407		kfree_skb(skb);
1408
1409		written += ndesc;
1410		txq_advance(&q->q, ndesc);
1411		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
1412			unsigned long old = q->q.stops;
1413
1414			ctrlq_check_stop(q, wr);
1415			if (q->q.stops != old) {          /* suspended anew */
1416				spin_lock(&q->sendq.lock);
1417				goto ringdb;
1418			}
1419		}
1420		if (written > 16) {
1421			ring_tx_db(q->adap, &q->q, written);
1422			written = 0;
1423		}
1424		spin_lock(&q->sendq.lock);
1425	}
1426	q->full = 0;
1427ringdb: if (written)
1428		ring_tx_db(q->adap, &q->q, written);
1429	spin_unlock(&q->sendq.lock);
1430}
1431
1432/**
1433 *	t4_mgmt_tx - send a management message
1434 *	@adap: the adapter
1435 *	@skb: the packet containing the management message
1436 *
1437 *	Send a management message through control queue 0.
1438 */
1439int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
1440{
1441	int ret;
1442
1443	local_bh_disable();
1444	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
1445	local_bh_enable();
1446	return ret;
1447}
1448
1449/**
1450 *	is_ofld_imm - check whether a packet can be sent as immediate data
1451 *	@skb: the packet
1452 *
1453 *	Returns true if a packet can be sent as an offload WR with immediate
1454 *	data.  We currently use the same limit as for Ethernet packets.
1455 */
1456static inline int is_ofld_imm(const struct sk_buff *skb)
1457{
1458	return skb->len <= MAX_IMM_TX_PKT_LEN;
1459}
1460
1461/**
1462 *	calc_tx_flits_ofld - calculate # of flits for an offload packet
1463 *	@skb: the packet
1464 *
1465 *	Returns the number of flits needed for the given offload packet.
1466 *	These packets are already fully constructed and no additional headers
1467 *	will be added.
1468 */
1469static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
1470{
1471	unsigned int flits, cnt;
1472
1473	if (is_ofld_imm(skb))
1474		return DIV_ROUND_UP(skb->len, 8);
1475
1476	flits = skb_transport_offset(skb) / 8U;   /* headers */
1477	cnt = skb_shinfo(skb)->nr_frags;
1478	if (skb_tail_pointer(skb) != skb_transport_header(skb))
1479		cnt++;
1480	return flits + sgl_len(cnt);
1481}
1482
1483/**
1484 *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
 *	@q: the queue to stop
1487 *
1488 *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
1489 *	inability to map packets.  A periodic timer attempts to restart
1490 *	queues so marked.
1491 */
1492static void txq_stop_maperr(struct sge_ofld_txq *q)
1493{
1494	q->mapping_err++;
1495	q->q.stops++;
1496	set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
1497		q->adap->sge.txq_maperr);
1498}
1499
1500/**
1501 *	ofldtxq_stop - stop an offload Tx queue that has become full
1502 *	@q: the queue to stop
1503 *	@skb: the packet causing the queue to become full
1504 *
1505 *	Stops an offload Tx queue that has become full and modifies the packet
1506 *	being written to request a wakeup.
1507 */
1508static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
1509{
1510	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
1511
1512	wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F);
1513	q->q.stops++;
1514	q->full = 1;
1515}
1516
1517/**
 *	service_ofldq - service an offload Tx queue
1519 *	@q: the offload queue
1520 *
1521 *	Services an offload Tx queue by moving packets from its packet queue
1522 *	to the HW Tx ring.  The function starts and ends with the queue locked.
1523 */
1524static void service_ofldq(struct sge_ofld_txq *q)
1525{
1526	u64 *pos;
1527	int credits;
1528	struct sk_buff *skb;
1529	unsigned int written = 0;
1530	unsigned int flits, ndesc;
1531
1532	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
1533		/*
1534		 * We drop the lock but leave skb on sendq, thus retaining
1535		 * exclusive access to the state of the queue.
1536		 */
1537		spin_unlock(&q->sendq.lock);
1538
1539		reclaim_completed_tx(q->adap, &q->q, false);
1540
1541		flits = skb->priority;                /* previously saved */
1542		ndesc = flits_to_desc(flits);
1543		credits = txq_avail(&q->q) - ndesc;
1544		BUG_ON(credits < 0);
1545		if (unlikely(credits < TXQ_STOP_THRES))
1546			ofldtxq_stop(q, skb);
1547
1548		pos = (u64 *)&q->q.desc[q->q.pidx];
1549		if (is_ofld_imm(skb))
1550			inline_tx_skb(skb, &q->q, pos);
1551		else if (map_skb(q->adap->pdev_dev, skb,
1552				 (dma_addr_t *)skb->head)) {
1553			txq_stop_maperr(q);
1554			spin_lock(&q->sendq.lock);
1555			break;
1556		} else {
1557			int last_desc, hdr_len = skb_transport_offset(skb);
1558
1559			memcpy(pos, skb->data, hdr_len);
1560			write_sgl(skb, &q->q, (void *)pos + hdr_len,
1561				  pos + flits, hdr_len,
1562				  (dma_addr_t *)skb->head);
1563#ifdef CONFIG_NEED_DMA_MAP_STATE
1564			skb->dev = q->adap->port[0];
1565			skb->destructor = deferred_unmap_destructor;
1566#endif
1567			last_desc = q->q.pidx + ndesc - 1;
1568			if (last_desc >= q->q.size)
1569				last_desc -= q->q.size;
1570			q->q.sdesc[last_desc].skb = skb;
1571		}
1572
1573		txq_advance(&q->q, ndesc);
1574		written += ndesc;
1575		if (unlikely(written > 32)) {
1576			ring_tx_db(q->adap, &q->q, written);
1577			written = 0;
1578		}
1579
1580		spin_lock(&q->sendq.lock);
1581		__skb_unlink(skb, &q->sendq);
1582		if (is_ofld_imm(skb))
1583			kfree_skb(skb);
1584	}
1585	if (likely(written))
1586		ring_tx_db(q->adap, &q->q, written);
1587}
1588
1589/**
1590 *	ofld_xmit - send a packet through an offload queue
1591 *	@q: the Tx offload queue
1592 *	@skb: the packet
1593 *
1594 *	Send an offload packet through an SGE offload queue.
1595 */
1596static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
1597{
1598	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
1599	spin_lock(&q->sendq.lock);
1600	__skb_queue_tail(&q->sendq, skb);
1601	if (q->sendq.qlen == 1)
1602		service_ofldq(q);
1603	spin_unlock(&q->sendq.lock);
1604	return NET_XMIT_SUCCESS;
1605}
1606
1607/**
1608 *	restart_ofldq - restart a suspended offload queue
1609 *	@data: the offload queue to restart
1610 *
1611 *	Resumes transmission on a suspended Tx offload queue.
1612 */
1613static void restart_ofldq(unsigned long data)
1614{
1615	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
1616
1617	spin_lock(&q->sendq.lock);
1618	q->full = 0;            /* the queue actually is completely empty now */
1619	service_ofldq(q);
1620	spin_unlock(&q->sendq.lock);
1621}
1622
1623/**
1624 *	skb_txq - return the Tx queue an offload packet should use
1625 *	@skb: the packet
1626 *
1627 *	Returns the Tx queue an offload packet should use as indicated by bits
1628 *	1-15 in the packet's queue_mapping.
1629 */
1630static inline unsigned int skb_txq(const struct sk_buff *skb)
1631{
1632	return skb->queue_mapping >> 1;
1633}
1634
1635/**
1636 *	is_ctrl_pkt - return whether an offload packet is a control packet
1637 *	@skb: the packet
1638 *
1639 *	Returns whether an offload packet should use an OFLD or a CTRL
1640 *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
1641 */
1642static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
1643{
1644	return skb->queue_mapping & 1;
1645}
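/* For example, a (hypothetical) queue_mapping of 7 encodes a control packet
 * (bit 0 set) with queue index 3 (7 >> 1), while a queue_mapping of 6 encodes
 * an offload packet for Tx queue 3.
 */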
1646
1647static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
1648{
1649	unsigned int idx = skb_txq(skb);
1650
1651	if (unlikely(is_ctrl_pkt(skb))) {
1652		/* Single ctrl queue is a requirement for LE workaround path */
1653		if (adap->tids.nsftids)
1654			idx = 0;
1655		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
1656	}
1657	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
1658}
1659
1660/**
1661 *	t4_ofld_send - send an offload packet
1662 *	@adap: the adapter
1663 *	@skb: the packet
1664 *
1665 *	Sends an offload packet.  We use the packet queue_mapping to select the
1666 *	appropriate Tx queue as follows: bit 0 indicates whether the packet
1667 *	should be sent as regular or control, bits 1-15 select the queue.
1668 */
1669int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
1670{
1671	int ret;
1672
1673	local_bh_disable();
1674	ret = ofld_send(adap, skb);
1675	local_bh_enable();
1676	return ret;
1677}
1678
1679/**
1680 *	cxgb4_ofld_send - send an offload packet
1681 *	@dev: the net device
1682 *	@skb: the packet
1683 *
1684 *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
1685 *	intended for ULDs.
1686 */
1687int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
1688{
1689	return t4_ofld_send(netdev2adap(dev), skb);
1690}
1691EXPORT_SYMBOL(cxgb4_ofld_send);
1692
1693static inline void copy_frags(struct sk_buff *skb,
1694			      const struct pkt_gl *gl, unsigned int offset)
1695{
1696	int i;
1697
1698	/* usually there's just one frag */
1699	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
1700			     gl->frags[0].offset + offset,
1701			     gl->frags[0].size - offset);
1702	skb_shinfo(skb)->nr_frags = gl->nfrags;
1703	for (i = 1; i < gl->nfrags; i++)
1704		__skb_fill_page_desc(skb, i, gl->frags[i].page,
1705				     gl->frags[i].offset,
1706				     gl->frags[i].size);
1707
1708	/* get a reference to the last page, we don't own it */
1709	get_page(gl->frags[gl->nfrags - 1].page);
1710}
1711
1712/**
1713 *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
1714 *	@gl: the gather list
1715 *	@skb_len: size of sk_buff main body if it carries fragments
1716 *	@pull_len: amount of data to move to the sk_buff's main body
1717 *
1718 *	Builds an sk_buff from the given packet gather list.  Returns the
1719 *	sk_buff or %NULL if sk_buff allocation failed.
1720 */
1721struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
1722				   unsigned int skb_len, unsigned int pull_len)
1723{
1724	struct sk_buff *skb;
1725
1726	/*
1727	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
1728	 * size, which is expected since buffers are at least PAGE_SIZEd.
1729	 * In this case packets up to RX_COPY_THRES have only one fragment.
1730	 */
1731	if (gl->tot_len <= RX_COPY_THRES) {
1732		skb = dev_alloc_skb(gl->tot_len);
1733		if (unlikely(!skb))
1734			goto out;
1735		__skb_put(skb, gl->tot_len);
1736		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1737	} else {
1738		skb = dev_alloc_skb(skb_len);
1739		if (unlikely(!skb))
1740			goto out;
1741		__skb_put(skb, pull_len);
1742		skb_copy_to_linear_data(skb, gl->va, pull_len);
1743
1744		copy_frags(skb, gl, pull_len);
1745		skb->len = gl->tot_len;
1746		skb->data_len = skb->len - pull_len;
1747		skb->truesize += skb->data_len;
1748	}
1749out:	return skb;
1750}
1751EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1752
1753/**
1754 *	t4_pktgl_free - free a packet gather list
1755 *	@gl: the gather list
1756 *
1757 *	Releases the pages of a packet gather list.  We do not own the last
1758 *	page on the list and do not free it.
1759 */
1760static void t4_pktgl_free(const struct pkt_gl *gl)
1761{
1762	int n;
1763	const struct page_frag *p;
1764
1765	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
1766		put_page(p->page);
1767}
1768
1769/*
1770 * Process an MPS trace packet.  Give it an unused protocol number so it won't
1771 * be delivered to anyone and send it to the stack for capture.
1772 */
1773static noinline int handle_trace_pkt(struct adapter *adap,
1774				     const struct pkt_gl *gl)
1775{
1776	struct sk_buff *skb;
1777
1778	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1779	if (unlikely(!skb)) {
1780		t4_pktgl_free(gl);
1781		return 0;
1782	}
1783
1784	if (is_t4(adap->params.chip))
1785		__skb_pull(skb, sizeof(struct cpl_trace_pkt));
1786	else
1787		__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
1788
1789	skb_reset_mac_header(skb);
1790	skb->protocol = htons(0xffff);
1791	skb->dev = adap->port[0];
1792	netif_receive_skb(skb);
1793	return 0;
1794}
1795
1796static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1797		   const struct cpl_rx_pkt *pkt)
1798{
1799	struct adapter *adapter = rxq->rspq.adap;
1800	struct sge *s = &adapter->sge;
1801	int ret;
1802	struct sk_buff *skb;
1803
1804	skb = napi_get_frags(&rxq->rspq.napi);
1805	if (unlikely(!skb)) {
1806		t4_pktgl_free(gl);
1807		rxq->stats.rx_drops++;
1808		return;
1809	}
1810
1811	copy_frags(skb, gl, s->pktshift);
1812	skb->len = gl->tot_len - s->pktshift;
1813	skb->data_len = skb->len;
1814	skb->truesize += skb->data_len;
1815	skb->ip_summed = CHECKSUM_UNNECESSARY;
1816	skb_record_rx_queue(skb, rxq->rspq.idx);
1817	skb_mark_napi_id(skb, &rxq->rspq.napi);
1818	if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1819		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
1820			     PKT_HASH_TYPE_L3);
1821
1822	if (unlikely(pkt->vlan_ex)) {
1823		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1824		rxq->stats.vlan_ex++;
1825	}
1826	ret = napi_gro_frags(&rxq->rspq.napi);
1827	if (ret == GRO_HELD)
1828		rxq->stats.lro_pkts++;
1829	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1830		rxq->stats.lro_merged++;
1831	rxq->stats.pkts++;
1832	rxq->stats.rx_cso++;
1833}
1834
1835/**
1836 *	t4_ethrx_handler - process an ingress ethernet packet
1837 *	@q: the response queue that received the packet
1838 *	@rsp: the response queue descriptor holding the RX_PKT message
1839 *	@si: the gather list of packet fragments
1840 *
1841 *	Process an ingress ethernet packet and deliver it to the stack.
1842 */
1843int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1844		     const struct pkt_gl *si)
1845{
1846	bool csum_ok;
1847	struct sk_buff *skb;
1848	const struct cpl_rx_pkt *pkt;
1849	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1850	struct sge *s = &q->adap->sge;
1851	int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
1852			    CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
1853#ifdef CONFIG_CHELSIO_T4_FCOE
1854	struct port_info *pi;
1855#endif
1856
1857	if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
1858		return handle_trace_pkt(q->adap, si);
1859
1860	pkt = (const struct cpl_rx_pkt *)rsp;
1861	csum_ok = pkt->csum_calc && !pkt->err_vec &&
1862		  (q->netdev->features & NETIF_F_RXCSUM);
1863	if ((pkt->l2info & htonl(RXF_TCP_F)) &&
1864	    !(cxgb_poll_busy_polling(q)) &&
1865	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
1866		do_gro(rxq, si, pkt);
1867		return 0;
1868	}
1869
1870	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
1871	if (unlikely(!skb)) {
1872		t4_pktgl_free(si);
1873		rxq->stats.rx_drops++;
1874		return 0;
1875	}
1876
1877	__skb_pull(skb, s->pktshift);      /* remove ethernet header padding */
1878	skb->protocol = eth_type_trans(skb, q->netdev);
1879	skb_record_rx_queue(skb, q->idx);
1880	if (skb->dev->features & NETIF_F_RXHASH)
1881		skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val,
1882			     PKT_HASH_TYPE_L3);
1883
1884	rxq->stats.pkts++;
1885
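	/* Report the level of checksum verification the hardware performed:
	 * CHECKSUM_UNNECESSARY for fully verified, non-fragmented TCP/UDP,
	 * or CHECKSUM_COMPLETE with the hardware checksum for IP fragments.
	 */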
1886	if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) {
1887		if (!pkt->ip_frag) {
1888			skb->ip_summed = CHECKSUM_UNNECESSARY;
1889			rxq->stats.rx_cso++;
1890		} else if (pkt->l2info & htonl(RXF_IP_F)) {
1891			__sum16 c = (__force __sum16)pkt->csum;
1892			skb->csum = csum_unfold(c);
1893			skb->ip_summed = CHECKSUM_COMPLETE;
1894			rxq->stats.rx_cso++;
1895		}
1896	} else {
1897		skb_checksum_none_assert(skb);
1898#ifdef CONFIG_CHELSIO_T4_FCOE
1899#define CPL_RX_PKT_FLAGS (RXF_PSH_F | RXF_SYN_F | RXF_UDP_F | \
1900			  RXF_TCP_F | RXF_IP_F | RXF_IP6_F | RXF_LRO_F)
1901
1902		pi = netdev_priv(skb->dev);
1903		if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) {
1904			if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) &&
1905			    (pi->fcoe.flags & CXGB_FCOE_ENABLED)) {
1906				if (!(pkt->err_vec & cpu_to_be16(RXERR_CSUM_F)))
1907					skb->ip_summed = CHECKSUM_UNNECESSARY;
1908			}
1909		}
1910
1911#undef CPL_RX_PKT_FLAGS
1912#endif /* CONFIG_CHELSIO_T4_FCOE */
1913	}
1914
1915	if (unlikely(pkt->vlan_ex)) {
1916		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan));
1917		rxq->stats.vlan_ex++;
1918	}
1919	skb_mark_napi_id(skb, &q->napi);
1920	netif_receive_skb(skb);
1921	return 0;
1922}
1923
1924/**
1925 *	restore_rx_bufs - put back a packet's Rx buffers
1926 *	@si: the packet gather list
1927 *	@q: the SGE free list
1928 *	@frags: number of FL buffers to restore
1929 *
 *	Puts back on an FL the Rx buffers associated with @si.  The buffers
 *	have already been unmapped and are left unmapped; we mark them as
 *	already unmapped to prevent further unmapping attempts.
 *
 *	This function undoes a series of unmap_rx_buf() calls when we find out
 *	that the current packet can't be processed right away after all and we
 *	need to come back to it later.  This is a very rare event and there's
 *	no effort to make this particularly efficient.
1938 */
1939static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
1940			    int frags)
1941{
1942	struct rx_sw_desc *d;
1943
1944	while (frags--) {
1945		if (q->cidx == 0)
1946			q->cidx = q->size - 1;
1947		else
1948			q->cidx--;
1949		d = &q->sdesc[q->cidx];
1950		d->page = si->frags[frags].page;
1951		d->dma_addr |= RX_UNMAPPED_BUF;
1952		q->avail++;
1953	}
1954}
1955
1956/**
1957 *	is_new_response - check if a response is newly written
1958 *	@r: the response descriptor
1959 *	@q: the response queue
1960 *
 *	Returns true if the response descriptor contains a response that has
 *	not yet been processed.
1963 */
1964static inline bool is_new_response(const struct rsp_ctrl *r,
1965				   const struct sge_rspq *q)
1966{
1967	return RSPD_GEN(r->type_gen) == q->gen;
1968}
1969
1970/**
1971 *	rspq_next - advance to the next entry in a response queue
1972 *	@q: the queue
1973 *
1974 *	Updates the state of a response queue to advance it to the next entry.
1975 */
1976static inline void rspq_next(struct sge_rspq *q)
1977{
1978	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
1979	if (unlikely(++q->cidx == q->size)) {
1980		q->cidx = 0;
1981		q->gen ^= 1;
1982		q->cur_desc = q->desc;
1983	}
1984}
1985
1986/**
1987 *	process_responses - process responses from an SGE response queue
1988 *	@q: the ingress queue to process
1989 *	@budget: how many responses can be processed in this round
1990 *
1991 *	Process responses from an SGE response queue up to the supplied budget.
1992 *	Responses include received packets as well as control messages from FW
1993 *	or HW.
1994 *
1995 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage, use a fairly
1997 *	long delay to help recovery.
1998 */
1999static int process_responses(struct sge_rspq *q, int budget)
2000{
2001	int ret, rsp_type;
2002	int budget_left = budget;
2003	const struct rsp_ctrl *rc;
2004	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
2005	struct adapter *adapter = q->adap;
2006	struct sge *s = &adapter->sge;
2007
2008	while (likely(budget_left)) {
2009		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2010		if (!is_new_response(rc, q))
2011			break;
2012
2013		dma_rmb();
2014		rsp_type = RSPD_TYPE(rc->type_gen);
2015		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
2016			struct page_frag *fp;
2017			struct pkt_gl si;
2018			const struct rx_sw_desc *rsd;
2019			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
2020
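			/* A set NEWBUF flag means this packet starts in a new
			 * Free List buffer, so the previous, partially
			 * consumed buffer can finally be freed.
			 */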
2021			if (len & RSPD_NEWBUF) {
2022				if (likely(q->offset > 0)) {
2023					free_rx_bufs(q->adap, &rxq->fl, 1);
2024					q->offset = 0;
2025				}
2026				len = RSPD_LEN(len);
2027			}
2028			si.tot_len = len;
2029
2030			/* gather packet fragments */
2031			for (frags = 0, fp = si.frags; ; frags++, fp++) {
2032				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
2033				bufsz = get_buf_size(adapter, rsd);
2034				fp->page = rsd->page;
2035				fp->offset = q->offset;
2036				fp->size = min(bufsz, len);
2037				len -= fp->size;
2038				if (!len)
2039					break;
2040				unmap_rx_buf(q->adap, &rxq->fl);
2041			}
2042
2043			/*
2044			 * Last buffer remains mapped so explicitly make it
2045			 * coherent for CPU access.
2046			 */
2047			dma_sync_single_for_cpu(q->adap->pdev_dev,
2048						get_buf_addr(rsd),
2049						fp->size, DMA_FROM_DEVICE);
2050
2051			si.va = page_address(si.frags[0].page) +
2052				si.frags[0].offset;
2053			prefetch(si.va);
2054
2055			si.nfrags = frags + 1;
2056			ret = q->handler(q, q->cur_desc, &si);
2057			if (likely(ret == 0))
2058				q->offset += ALIGN(fp->size, s->fl_align);
2059			else
2060				restore_rx_bufs(&si, &rxq->fl, frags);
2061		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
2062			ret = q->handler(q, q->cur_desc, NULL);
2063		} else {
2064			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
2065		}
2066
2067		if (unlikely(ret)) {
2068			/* couldn't process descriptor, back off for recovery */
2069			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
2070			break;
2071		}
2072
2073		rspq_next(q);
2074		budget_left--;
2075	}
2076
2077	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
2078		__refill_fl(q->adap, &rxq->fl);
2079	return budget - budget_left;
2080}
2081
2082#ifdef CONFIG_NET_RX_BUSY_POLL
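/*
 * cxgb_busy_poll - low-latency (busy-poll) Rx entry point
 *
 * Processes a small batch of responses with the queue's poll lock held and
 * then rearms the queue via the GTS doorbell appropriate for the chip.
 */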
2083int cxgb_busy_poll(struct napi_struct *napi)
2084{
2085	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
2086	unsigned int params, work_done;
2087	u32 val;
2088
2089	if (!cxgb_poll_lock_poll(q))
2090		return LL_FLUSH_BUSY;
2091
2092	work_done = process_responses(q, 4);
2093	params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN;
2094	q->next_intr_params = params;
2095	val = CIDXINC_V(work_done) | SEINTARM_V(params);
2096
2097	/* If we don't have access to the new User GTS (T5+), use the old
2098	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2099	 */
2100	if (unlikely(!q->bar2_addr))
2101		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
2102			     val | INGRESSQID_V((u32)q->cntxt_id));
2103	else {
2104		writel(val | INGRESSQID_V(q->bar2_qid),
2105		       q->bar2_addr + SGE_UDB_GTS);
2106		wmb();
2107	}
2108
2109	cxgb_poll_unlock_poll(q);
2110	return work_done;
2111}
2112#endif /* CONFIG_NET_RX_BUSY_POLL */
2113
2114/**
2115 *	napi_rx_handler - the NAPI handler for Rx processing
2116 *	@napi: the napi instance
2117 *	@budget: how many packets we can process in this round
2118 *
2119 *	Handler for new data events when using NAPI.  This does not need any
2120 *	locking or protection from interrupts as data interrupts are off at
2121 *	this point and other adapter interrupts do not interfere (the latter
 *	is not a concern at all with MSI-X as non-data interrupts then have
2123 *	a separate handler).
2124 */
2125static int napi_rx_handler(struct napi_struct *napi, int budget)
2126{
2127	unsigned int params;
2128	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
2129	int work_done;
2130	u32 val;
2131
2132	if (!cxgb_poll_lock_napi(q))
2133		return budget;
2134
2135	work_done = process_responses(q, budget);
2136	if (likely(work_done < budget)) {
2137		int timer_index;
2138
2139		napi_complete(napi);
2140		timer_index = QINTR_TIMER_IDX_GET(q->next_intr_params);
2141
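		/* Adaptive Rx: step the holdoff timer index up when the queue
		 * is busy and down when it is quiet, clamped to the valid
		 * range of SGE timer registers.
		 */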
2142		if (q->adaptive_rx) {
2143			if (work_done > max(timer_pkt_quota[timer_index],
2144					    MIN_NAPI_WORK))
2145				timer_index = (timer_index + 1);
2146			else
2147				timer_index = timer_index - 1;
2148
2149			timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1);
2150			q->next_intr_params = QINTR_TIMER_IDX(timer_index) |
2151							      V_QINTR_CNT_EN;
2152			params = q->next_intr_params;
2153		} else {
2154			params = q->next_intr_params;
2155			q->next_intr_params = q->intr_params;
2156		}
	} else {
		params = QINTR_TIMER_IDX(7);
	}
2159
2160	val = CIDXINC_V(work_done) | SEINTARM_V(params);
2161
2162	/* If we don't have access to the new User GTS (T5+), use the old
2163	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2164	 */
2165	if (unlikely(q->bar2_addr == NULL)) {
2166		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
2167			     val | INGRESSQID_V((u32)q->cntxt_id));
2168	} else {
2169		writel(val | INGRESSQID_V(q->bar2_qid),
2170		       q->bar2_addr + SGE_UDB_GTS);
2171		wmb();
2172	}
2173	cxgb_poll_unlock_napi(q);
2174	return work_done;
2175}
2176
2177/*
2178 * The MSI-X interrupt handler for an SGE response queue.
2179 */
2180irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
2181{
2182	struct sge_rspq *q = cookie;
2183
2184	napi_schedule(&q->napi);
2185	return IRQ_HANDLED;
2186}
2187
2188/*
2189 * Process the indirect interrupt entries in the interrupt queue and kick off
2190 * NAPI for each queue that has generated an entry.
2191 */
2192static unsigned int process_intrq(struct adapter *adap)
2193{
2194	unsigned int credits;
2195	const struct rsp_ctrl *rc;
2196	struct sge_rspq *q = &adap->sge.intrq;
2197	u32 val;
2198
2199	spin_lock(&adap->sge.intrq_lock);
2200	for (credits = 0; ; credits++) {
2201		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
2202		if (!is_new_response(rc, q))
2203			break;
2204
2205		dma_rmb();
2206		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
2207			unsigned int qid = ntohl(rc->pldbuflen_qid);
2208
2209			qid -= adap->sge.ingr_start;
2210			napi_schedule(&adap->sge.ingr_map[qid]->napi);
2211		}
2212
2213		rspq_next(q);
2214	}
2215
2216	val =  CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
2217
2218	/* If we don't have access to the new User GTS (T5+), use the old
2219	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
2220	 */
2221	if (unlikely(q->bar2_addr == NULL)) {
2222		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
2223			     val | INGRESSQID_V(q->cntxt_id));
2224	} else {
2225		writel(val | INGRESSQID_V(q->bar2_qid),
2226		       q->bar2_addr + SGE_UDB_GTS);
2227		wmb();
2228	}
2229	spin_unlock(&adap->sge.intrq_lock);
2230	return credits;
2231}
2232
2233/*
 * The MSI interrupt handler, which handles data events from SGE response
 * queues as well as error and other async events, since they all share the
 * same MSI vector.
2236 */
2237static irqreturn_t t4_intr_msi(int irq, void *cookie)
2238{
2239	struct adapter *adap = cookie;
2240
2241	if (adap->flags & MASTER_PF)
2242		t4_slow_intr_handler(adap);
2243	process_intrq(adap);
2244	return IRQ_HANDLED;
2245}
2246
2247/*
2248 * Interrupt handler for legacy INTx interrupts.
2249 * Handles data events from SGE response queues as well as error and other
2250 * async events as they all use the same interrupt line.
2251 */
2252static irqreturn_t t4_intr_intx(int irq, void *cookie)
2253{
2254	struct adapter *adap = cookie;
2255
2256	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0);
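	/* Note the non-short-circuiting '|' below: process_intrq() must run
	 * even when the slow interrupt handler already reported work.
	 */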
2257	if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) |
2258	    process_intrq(adap))
2259		return IRQ_HANDLED;
2260	return IRQ_NONE;             /* probably shared interrupt */
2261}
2262
2263/**
2264 *	t4_intr_handler - select the top-level interrupt handler
2265 *	@adap: the adapter
2266 *
2267 *	Selects the top-level interrupt handler based on the type of interrupts
2268 *	(MSI-X, MSI, or INTx).
2269 */
2270irq_handler_t t4_intr_handler(struct adapter *adap)
2271{
2272	if (adap->flags & USING_MSIX)
2273		return t4_sge_intr_msix;
2274	if (adap->flags & USING_MSI)
2275		return t4_intr_msi;
2276	return t4_intr_intx;
2277}
2278
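/*
 * sge_rx_timer_cb - periodic Rx queue maintenance
 *
 * Runs every RX_QCHECK_PERIOD to restart NAPI on queues whose free lists are
 * starving and to detect and report SGE ingress DMA queues that appear stuck.
 */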
2279static void sge_rx_timer_cb(unsigned long data)
2280{
2281	unsigned long m;
2282	unsigned int i, idma_same_state_cnt[2];
2283	struct adapter *adap = (struct adapter *)data;
2284	struct sge *s = &adap->sge;
2285
2286	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2287		for (m = s->starving_fl[i]; m; m &= m - 1) {
2288			struct sge_eth_rxq *rxq;
2289			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2290			struct sge_fl *fl = s->egr_map[id];
2291
2292			clear_bit(id, s->starving_fl);
2293			smp_mb__after_atomic();
2294
2295			if (fl_starving(adap, fl)) {
2296				rxq = container_of(fl, struct sge_eth_rxq, fl);
2297				if (napi_reschedule(&rxq->rspq.napi))
2298					fl->starving++;
2299				else
2300					set_bit(id, s->starving_fl);
2301			}
2302		}
2303
2304	t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13);
2305	idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A);
2306	idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2307
2308	for (i = 0; i < 2; i++) {
2309		u32 debug0, debug11;
2310
2311		/* If the Ingress DMA Same State Counter ("timer") is less
2312		 * than 1s, then we can reset our synthesized Stall Timer and
2313		 * continue.  If we have previously emitted warnings about a
2314		 * potential stalled Ingress Queue, issue a note indicating
2315		 * that the Ingress Queue has resumed forward progress.
2316		 */
2317		if (idma_same_state_cnt[i] < s->idma_1s_thresh) {
2318			if (s->idma_stalled[i] >= SGE_IDMA_WARN_THRESH)
				CH_WARN(adap, "SGE idma%d, queue %u, resumed after %d sec\n",
2320					i, s->idma_qid[i],
2321					s->idma_stalled[i]/HZ);
2322			s->idma_stalled[i] = 0;
2323			continue;
2324		}
2325
2326		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
2327		 * domain.  The first time we get here it'll be because we
2328		 * passed the 1s Threshold; each additional time it'll be
2329		 * because the RX Timer Callback is being fired on its regular
2330		 * schedule.
2331		 *
2332		 * If the stall is below our Potential Hung Ingress Queue
2333		 * Warning Threshold, continue.
2334		 */
2335		if (s->idma_stalled[i] == 0)
2336			s->idma_stalled[i] = HZ;
2337		else
2338			s->idma_stalled[i] += RX_QCHECK_PERIOD;
2339
2340		if (s->idma_stalled[i] < SGE_IDMA_WARN_THRESH)
2341			continue;
2342
2343		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT Hz */
2344		if (((s->idma_stalled[i] - HZ) % SGE_IDMA_WARN_REPEAT) != 0)
2345			continue;
2346
2347		/* Read and save the SGE IDMA State and Queue ID information.
2348		 * We do this every time in case it changes across time ...
2349		 */
2350		t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0);
2351		debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2352		s->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
2353
2354		t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11);
2355		debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A);
2356		s->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
2357
2358		CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state%u %dsecs (debug0=%#x, debug11=%#x)\n",
2359			i, s->idma_qid[i], s->idma_state[i],
2360			s->idma_stalled[i]/HZ, debug0, debug11);
2361		t4_sge_decode_idma_state(adap, s->idma_state[i]);
2362	}
2363
2364	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2365}
2366
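/*
 * sge_tx_timer_cb - periodic Tx queue maintenance
 *
 * Restarts offload Tx queues that were stopped by DMA mapping errors and
 * reclaims up to MAX_TIMER_TX_RECLAIM completed Ethernet Tx descriptors,
 * walking the Ethernet queues round-robin.
 */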
2367static void sge_tx_timer_cb(unsigned long data)
2368{
2369	unsigned long m;
2370	unsigned int i, budget;
2371	struct adapter *adap = (struct adapter *)data;
2372	struct sge *s = &adap->sge;
2373
2374	for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++)
2375		for (m = s->txq_maperr[i]; m; m &= m - 1) {
2376			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
2377			struct sge_ofld_txq *txq = s->egr_map[id];
2378
2379			clear_bit(id, s->txq_maperr);
2380			tasklet_schedule(&txq->qresume_tsk);
2381		}
2382
2383	budget = MAX_TIMER_TX_RECLAIM;
2384	i = s->ethtxq_rover;
2385	do {
2386		struct sge_eth_txq *q = &s->ethtxq[i];
2387
2388		if (q->q.in_use &&
2389		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
2390		    __netif_tx_trylock(q->txq)) {
2391			int avail = reclaimable(&q->q);
2392
2393			if (avail) {
2394				if (avail > budget)
2395					avail = budget;
2396
2397				free_tx_desc(adap, &q->q, avail, true);
2398				q->q.in_use -= avail;
2399				budget -= avail;
2400			}
2401			__netif_tx_unlock(q->txq);
2402		}
2403
2404		if (++i >= s->ethqsets)
2405			i = 0;
2406	} while (budget && i != s->ethtxq_rover);
2407	s->ethtxq_rover = i;
2408	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2409}
2410
2411/**
2412 *	bar2_address - return the BAR2 address for an SGE Queue's Registers
2413 *	@adapter: the adapter
2414 *	@qid: the SGE Queue ID
2415 *	@qtype: the SGE Queue Type (Egress or Ingress)
2416 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2417 *
2418 *	Returns the BAR2 address for the SGE Queue Registers associated with
2419 *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
2420 *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2421 *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2422 *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
2423 */
2424static void __iomem *bar2_address(struct adapter *adapter,
2425				  unsigned int qid,
2426				  enum t4_bar2_qtype qtype,
2427				  unsigned int *pbar2_qid)
2428{
2429	u64 bar2_qoffset;
2430	int ret;
2431
2432	ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype,
2433				&bar2_qoffset, pbar2_qid);
2434	if (ret)
2435		return NULL;
2436
2437	return adapter->bar2 + bar2_qoffset;
2438}
2439
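/*
 * t4_sge_alloc_rxq - allocate an SGE ingress (response) queue
 *
 * Allocates the descriptor ring (and the optional free list @fl), creates the
 * queue in the firmware with FW_IQ_CMD, and initializes the software state,
 * including the NAPI context and the BAR2 doorbell addresses.
 */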
2440int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2441		     struct net_device *dev, int intr_idx,
2442		     struct sge_fl *fl, rspq_handler_t hnd)
2443{
2444	int ret, flsz = 0;
2445	struct fw_iq_cmd c;
2446	struct sge *s = &adap->sge;
2447	struct port_info *pi = netdev_priv(dev);
2448
	/* Size needs to be a multiple of 16, including the status entry. */
2450	iq->size = roundup(iq->size, 16);
2451
2452	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
2453			      &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
2454	if (!iq->desc)
2455		return -ENOMEM;
2456
2457	memset(&c, 0, sizeof(c));
2458	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
2459			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2460			    FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0));
2461	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
2462				 FW_LEN16(c));
2463	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2464		FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) |
2465		FW_IQ_CMD_IQANDST_V(intr_idx < 0) | FW_IQ_CMD_IQANUD_V(1) |
2466		FW_IQ_CMD_IQANDSTINDEX_V(intr_idx >= 0 ? intr_idx :
2467							-intr_idx - 1));
2468	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) |
2469		FW_IQ_CMD_IQGTSMODE_F |
2470		FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) |
2471		FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4));
2472	c.iqsize = htons(iq->size);
2473	c.iqaddr = cpu_to_be64(iq->phys_addr);
2474
2475	if (fl) {
2476		fl->size = roundup(fl->size, 8);
2477		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2478				      sizeof(struct rx_sw_desc), &fl->addr,
2479				      &fl->sdesc, s->stat_len, NUMA_NO_NODE);
2480		if (!fl->desc)
2481			goto fl_nomem;
2482
2483		flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc);
2484		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN_F |
2485					    FW_IQ_CMD_FL0FETCHRO_F |
2486					    FW_IQ_CMD_FL0DATARO_F |
2487					    FW_IQ_CMD_FL0PADEN_F);
2488		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN_V(2) |
2489				FW_IQ_CMD_FL0FBMAX_V(3));
2490		c.fl0size = htons(flsz);
2491		c.fl0addr = cpu_to_be64(fl->addr);
2492	}
2493
2494	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2495	if (ret)
2496		goto err;
2497
2498	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
2499	napi_hash_add(&iq->napi);
2500	iq->cur_desc = iq->desc;
2501	iq->cidx = 0;
2502	iq->gen = 1;
2503	iq->next_intr_params = iq->intr_params;
2504	iq->cntxt_id = ntohs(c.iqid);
2505	iq->abs_id = ntohs(c.physiqid);
2506	iq->bar2_addr = bar2_address(adap,
2507				     iq->cntxt_id,
2508				     T4_BAR2_QTYPE_INGRESS,
2509				     &iq->bar2_qid);
2510	iq->size--;                           /* subtract status entry */
2511	iq->netdev = dev;
2512	iq->handler = hnd;
2513
2514	/* set offset to -1 to distinguish ingress queues without FL */
2515	iq->offset = fl ? 0 : -1;
2516
2517	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;
2518
2519	if (fl) {
2520		fl->cntxt_id = ntohs(c.fl0id);
2521		fl->avail = fl->pend_cred = 0;
2522		fl->pidx = fl->cidx = 0;
2523		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
2524		adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl;
2525
2526		/* Note, we must initialize the BAR2 Free List User Doorbell
2527		 * information before refilling the Free List!
2528		 */
2529		fl->bar2_addr = bar2_address(adap,
2530					     fl->cntxt_id,
2531					     T4_BAR2_QTYPE_EGRESS,
2532					     &fl->bar2_qid);
2533		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
2534	}
2535	return 0;
2536
2537fl_nomem:
2538	ret = -ENOMEM;
2539err:
2540	if (iq->desc) {
2541		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
2542				  iq->desc, iq->phys_addr);
2543		iq->desc = NULL;
2544	}
2545	if (fl && fl->desc) {
2546		kfree(fl->sdesc);
2547		fl->sdesc = NULL;
2548		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
2549				  fl->desc, fl->addr);
2550		fl->desc = NULL;
2551	}
2552	return ret;
2553}
2554
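/*
 * Initialize the software state of an SGE Tx queue: context id, BAR2 doorbell
 * address, indices and statistics, and record it in the egress queue map.
 */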
2555static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2556{
2557	q->cntxt_id = id;
2558	q->bar2_addr = bar2_address(adap,
2559				    q->cntxt_id,
2560				    T4_BAR2_QTYPE_EGRESS,
2561				    &q->bar2_qid);
2562	q->in_use = 0;
2563	q->cidx = q->pidx = 0;
2564	q->stops = q->restarts = 0;
2565	q->stat = (void *)&q->desc[q->size];
2566	spin_lock_init(&q->db_lock);
2567	adap->sge.egr_map[id - adap->sge.egr_start] = q;
2568}
2569
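/*
 * Allocate and initialize an Ethernet Tx queue: set up the descriptor ring,
 * create the egress queue in the firmware with FW_EQ_ETH_CMD, and bind it to
 * the given netdev Tx queue.
 */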
2570int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2571			 struct net_device *dev, struct netdev_queue *netdevq,
2572			 unsigned int iqid)
2573{
2574	int ret, nentries;
2575	struct fw_eq_eth_cmd c;
2576	struct sge *s = &adap->sge;
2577	struct port_info *pi = netdev_priv(dev);
2578
2579	/* Add status entries */
2580	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2581
2582	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2583			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2584			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2585			netdev_queue_numa_node_read(netdevq));
2586	if (!txq->q.desc)
2587		return -ENOMEM;
2588
2589	memset(&c, 0, sizeof(c));
2590	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
2591			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2592			    FW_EQ_ETH_CMD_PFN_V(adap->fn) |
2593			    FW_EQ_ETH_CMD_VFN_V(0));
2594	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
2595				 FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
2596	c.viid_pkd = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2597			   FW_EQ_ETH_CMD_VIID_V(pi->viid));
2598	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(2) |
2599				   FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
2600				   FW_EQ_ETH_CMD_FETCHRO_V(1) |
2601				   FW_EQ_ETH_CMD_IQID_V(iqid));
2602	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN_V(2) |
2603				  FW_EQ_ETH_CMD_FBMAX_V(3) |
2604				  FW_EQ_ETH_CMD_CIDXFTHRESH_V(5) |
2605				  FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2606	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2607
2608	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2609	if (ret) {
2610		kfree(txq->q.sdesc);
2611		txq->q.sdesc = NULL;
2612		dma_free_coherent(adap->pdev_dev,
2613				  nentries * sizeof(struct tx_desc),
2614				  txq->q.desc, txq->q.phys_addr);
2615		txq->q.desc = NULL;
2616		return ret;
2617	}
2618
2619	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
2620	txq->txq = netdevq;
2621	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
2622	txq->mapping_err = 0;
2623	return 0;
2624}
2625
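/*
 * Allocate and initialize an SGE control Tx queue.  The egress queue is
 * created with FW_EQ_CTRL_CMD; the queue's restart tasklet and pending-skb
 * list are also set up.
 */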
2626int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2627			  struct net_device *dev, unsigned int iqid,
2628			  unsigned int cmplqid)
2629{
2630	int ret, nentries;
2631	struct fw_eq_ctrl_cmd c;
2632	struct sge *s = &adap->sge;
2633	struct port_info *pi = netdev_priv(dev);
2634
2635	/* Add status entries */
2636	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2637
2638	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2639				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2640				 NULL, 0, NUMA_NO_NODE);
2641	if (!txq->q.desc)
2642		return -ENOMEM;
2643
2644	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
2645			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2646			    FW_EQ_CTRL_CMD_PFN_V(adap->fn) |
2647			    FW_EQ_CTRL_CMD_VFN_V(0));
2648	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
2649				 FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
2650	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID_V(cmplqid));
2651	c.physeqid_pkd = htonl(0);
2652	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE_V(2) |
2653				   FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) |
2654				   FW_EQ_CTRL_CMD_FETCHRO_F |
2655				   FW_EQ_CTRL_CMD_IQID_V(iqid));
2656	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN_V(2) |
2657				  FW_EQ_CTRL_CMD_FBMAX_V(3) |
2658				  FW_EQ_CTRL_CMD_CIDXFTHRESH_V(5) |
2659				  FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
2660	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2661
2662	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2663	if (ret) {
2664		dma_free_coherent(adap->pdev_dev,
2665				  nentries * sizeof(struct tx_desc),
2666				  txq->q.desc, txq->q.phys_addr);
2667		txq->q.desc = NULL;
2668		return ret;
2669	}
2670
2671	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
2672	txq->adap = adap;
2673	skb_queue_head_init(&txq->sendq);
2674	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
2675	txq->full = 0;
2676	return 0;
2677}
2678
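/*
 * Allocate and initialize an offload Tx queue for ULD traffic.  The egress
 * queue is created with FW_EQ_OFLD_CMD; the queue's restart tasklet and
 * pending-skb list are also set up.
 */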
2679int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2680			  struct net_device *dev, unsigned int iqid)
2681{
2682	int ret, nentries;
2683	struct fw_eq_ofld_cmd c;
2684	struct sge *s = &adap->sge;
2685	struct port_info *pi = netdev_priv(dev);
2686
2687	/* Add status entries */
2688	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2689
2690	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2691			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2692			&txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
2693			NUMA_NO_NODE);
2694	if (!txq->q.desc)
2695		return -ENOMEM;
2696
2697	memset(&c, 0, sizeof(c));
2698	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
2699			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
2700			    FW_EQ_OFLD_CMD_PFN_V(adap->fn) |
2701			    FW_EQ_OFLD_CMD_VFN_V(0));
2702	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
2703				 FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
2704	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE_V(2) |
2705				   FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) |
2706				   FW_EQ_OFLD_CMD_FETCHRO_F |
2707				   FW_EQ_OFLD_CMD_IQID_V(iqid));
2708	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN_V(2) |
2709				  FW_EQ_OFLD_CMD_FBMAX_V(3) |
2710				  FW_EQ_OFLD_CMD_CIDXFTHRESH_V(5) |
2711				  FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
2712	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
2713
2714	ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
2715	if (ret) {
2716		kfree(txq->q.sdesc);
2717		txq->q.sdesc = NULL;
2718		dma_free_coherent(adap->pdev_dev,
2719				  nentries * sizeof(struct tx_desc),
2720				  txq->q.desc, txq->q.phys_addr);
2721		txq->q.desc = NULL;
2722		return ret;
2723	}
2724
2725	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
2726	txq->adap = adap;
2727	skb_queue_head_init(&txq->sendq);
2728	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
2729	txq->full = 0;
2730	txq->mapping_err = 0;
2731	return 0;
2732}
2733
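/*
 * Release the DMA descriptor ring of a Tx queue and clear its software state.
 * Software descriptors and any pending skbs must be freed by the caller.
 */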
2734static void free_txq(struct adapter *adap, struct sge_txq *q)
2735{
2736	struct sge *s = &adap->sge;
2737
2738	dma_free_coherent(adap->pdev_dev,
2739			  q->size * sizeof(struct tx_desc) + s->stat_len,
2740			  q->desc, q->phys_addr);
2741	q->cntxt_id = 0;
2742	q->sdesc = NULL;
2743	q->desc = NULL;
2744}
2745
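/*
 * Free an ingress (response) queue and, if present, its associated free list:
 * tell the firmware to release the queue, free the Rx buffers and DMA rings,
 * and tear down the NAPI context.
 */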
2746static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
2747			 struct sge_fl *fl)
2748{
2749	struct sge *s = &adap->sge;
2750	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
2751
2752	adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
2753	t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
2754		   rq->cntxt_id, fl_id, 0xffff);
2755	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
2756			  rq->desc, rq->phys_addr);
2757	napi_hash_del(&rq->napi);
2758	netif_napi_del(&rq->napi);
2759	rq->netdev = NULL;
2760	rq->cntxt_id = rq->abs_id = 0;
2761	rq->desc = NULL;
2762
2763	if (fl) {
2764		free_rx_bufs(adap, fl, fl->avail);
2765		dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len,
2766				  fl->desc, fl->addr);
2767		kfree(fl->sdesc);
2768		fl->sdesc = NULL;
2769		fl->cntxt_id = 0;
2770		fl->desc = NULL;
2771	}
2772}
2773
2774/**
 *	t4_free_ofld_rxqs - free a block of consecutive Rx queues
 *	@adap: the adapter
 *	@n: number of queues
 *	@q: pointer to first queue
 *
 *	Release the resources of a consecutive block of offload Rx queues.
2781 */
2782void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
2783{
2784	for ( ; n; n--, q++)
2785		if (q->rspq.desc)
2786			free_rspq_fl(adap, &q->rspq,
2787				     q->fl.size ? &q->fl : NULL);
2788}
2789
2790/**
2791 *	t4_free_sge_resources - free SGE resources
2792 *	@adap: the adapter
2793 *
2794 *	Frees resources used by the SGE queue sets.
2795 */
2796void t4_free_sge_resources(struct adapter *adap)
2797{
2798	int i;
2799	struct sge_eth_rxq *eq = adap->sge.ethrxq;
2800	struct sge_eth_txq *etq = adap->sge.ethtxq;
2801
2802	/* clean up Ethernet Tx/Rx queues */
2803	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
2804		if (eq->rspq.desc)
2805			free_rspq_fl(adap, &eq->rspq,
2806				     eq->fl.size ? &eq->fl : NULL);
2807		if (etq->q.desc) {
2808			t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
2809				       etq->q.cntxt_id);
2810			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
2811			kfree(etq->q.sdesc);
2812			free_txq(adap, &etq->q);
2813		}
2814	}
2815
2816	/* clean up RDMA and iSCSI Rx queues */
2817	t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq);
2818	t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
2819	t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
2820
2821	/* clean up offload Tx queues */
2822	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
2823		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
2824
2825		if (q->q.desc) {
2826			tasklet_kill(&q->qresume_tsk);
2827			t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
2828					q->q.cntxt_id);
2829			free_tx_desc(adap, &q->q, q->q.in_use, false);
2830			kfree(q->q.sdesc);
2831			__skb_queue_purge(&q->sendq);
2832			free_txq(adap, &q->q);
2833		}
2834	}
2835
2836	/* clean up control Tx queues */
2837	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
2838		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
2839
2840		if (cq->q.desc) {
2841			tasklet_kill(&cq->qresume_tsk);
2842			t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
2843					cq->q.cntxt_id);
2844			__skb_queue_purge(&cq->sendq);
2845			free_txq(adap, &cq->q);
2846		}
2847	}
2848
2849	if (adap->sge.fw_evtq.desc)
2850		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
2851
2852	if (adap->sge.intrq.desc)
2853		free_rspq_fl(adap, &adap->sge.intrq, NULL);
2854
2855	/* clear the reverse egress queue map */
2856	memset(adap->sge.egr_map, 0,
2857	       adap->sge.egr_sz * sizeof(*adap->sge.egr_map));
2858}
2859
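/*
 * t4_sge_start - (re)start the SGE maintenance timers
 *
 * Resets the Ethernet Tx reclaim rover and arms the Rx and Tx queue check
 * timers.
 */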
2860void t4_sge_start(struct adapter *adap)
2861{
2862	adap->sge.ethtxq_rover = 0;
2863	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2864	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2865}
2866
2867/**
2868 *	t4_sge_stop - disable SGE operation
2869 *	@adap: the adapter
2870 *
2871 *	Stop tasklets and timers associated with the DMA engine.  Note that
2872 *	this is effective only if measures have been taken to disable any HW
2873 *	events that may restart them.
2874 */
2875void t4_sge_stop(struct adapter *adap)
2876{
2877	int i;
2878	struct sge *s = &adap->sge;
2879
2880	if (in_interrupt())  /* actions below require waiting */
2881		return;
2882
2883	if (s->rx_timer.function)
2884		del_timer_sync(&s->rx_timer);
2885	if (s->tx_timer.function)
2886		del_timer_sync(&s->tx_timer);
2887
2888	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
2889		struct sge_ofld_txq *q = &s->ofldtxq[i];
2890
2891		if (q->q.desc)
2892			tasklet_kill(&q->qresume_tsk);
2893	}
2894	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
2895		struct sge_ctrl_txq *cq = &s->ctrlq[i];
2896
2897		if (cq->q.desc)
2898			tasklet_kill(&cq->qresume_tsk);
2899	}
2900}
2901
2902/**
2903 *	t4_sge_init_soft - grab core SGE values needed by SGE code
2904 *	@adap: the adapter
2905 *
 *	Read the SGE operating parameters that we need to do our job and
 *	make sure we can live with them.
 */
2910static int t4_sge_init_soft(struct adapter *adap)
2911{
2912	struct sge *s = &adap->sge;
2913	u32 fl_small_pg, fl_large_pg, fl_small_mtu, fl_large_mtu;
2914	u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
2915	u32 ingress_rx_threshold;
2916
2917	/*
2918	 * Verify that CPL messages are going to the Ingress Queue for
2919	 * process_responses() and that only packet data is going to the
2920	 * Free Lists.
2921	 */
2922	if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) !=
2923	    RXPKTCPLMODE_V(RXPKTCPLMODE_SPLIT_X)) {
2924		dev_err(adap->pdev_dev, "bad SGE CPL MODE\n");
2925		return -EINVAL;
2926	}
2927
2928	/*
2929	 * Validate the Host Buffer Register Array indices that we want to
2930	 * use ...
2931	 *
2932	 * XXX Note that we should really read through the Host Buffer Size
2933	 * XXX register array and find the indices of the Buffer Sizes which
2934	 * XXX meet our needs!
2935	 */
2936	#define READ_FL_BUF(x) \
2937		t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32))
2938
2939	fl_small_pg = READ_FL_BUF(RX_SMALL_PG_BUF);
2940	fl_large_pg = READ_FL_BUF(RX_LARGE_PG_BUF);
2941	fl_small_mtu = READ_FL_BUF(RX_SMALL_MTU_BUF);
2942	fl_large_mtu = READ_FL_BUF(RX_LARGE_MTU_BUF);
2943
2944	/* We only bother using the Large Page logic if the Large Page Buffer
2945	 * is larger than our Page Size Buffer.
2946	 */
2947	if (fl_large_pg <= fl_small_pg)
2948		fl_large_pg = 0;
2949
2950	#undef READ_FL_BUF
2951
2952	/* The Page Size Buffer must be exactly equal to our Page Size and the
2953	 * Large Page Size Buffer should be 0 (per above) or a power of 2.
2954	 */
2955	if (fl_small_pg != PAGE_SIZE ||
2956	    (fl_large_pg & (fl_large_pg-1)) != 0) {
2957		dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
2958			fl_small_pg, fl_large_pg);
2959		return -EINVAL;
2960	}
2961	if (fl_large_pg)
2962		s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT;
2963
2964	if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) ||
2965	    fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) {
2966		dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n",
2967			fl_small_mtu, fl_large_mtu);
2968		return -EINVAL;
2969	}
2970
2971	/*
2972	 * Retrieve our RX interrupt holdoff timer values and counter
2973	 * threshold values from the SGE parameters.
2974	 */
2975	timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A);
2976	timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A);
2977	timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A);
2978	s->timer_val[0] = core_ticks_to_us(adap,
2979		TIMERVALUE0_G(timer_value_0_and_1));
2980	s->timer_val[1] = core_ticks_to_us(adap,
2981		TIMERVALUE1_G(timer_value_0_and_1));
2982	s->timer_val[2] = core_ticks_to_us(adap,
2983		TIMERVALUE2_G(timer_value_2_and_3));
2984	s->timer_val[3] = core_ticks_to_us(adap,
2985		TIMERVALUE3_G(timer_value_2_and_3));
2986	s->timer_val[4] = core_ticks_to_us(adap,
2987		TIMERVALUE4_G(timer_value_4_and_5));
2988	s->timer_val[5] = core_ticks_to_us(adap,
2989		TIMERVALUE5_G(timer_value_4_and_5));
2990
2991	ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A);
2992	s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold);
2993	s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold);
2994	s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold);
2995	s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold);
2996
2997	return 0;
2998}
2999
3000/**
 *	t4_sge_init - initialize SGE
 *	@adap: the adapter
 *
 *	Perform low-level SGE code initialization needed every time after a
 *	chip reset.
3006 */
3007int t4_sge_init(struct adapter *adap)
3008{
3009	struct sge *s = &adap->sge;
3010	u32 sge_control, sge_control2, sge_conm_ctrl;
3011	unsigned int ingpadboundary, ingpackboundary;
3012	int ret, egress_threshold;
3013
3014	/*
3015	 * Ingress Padding Boundary and Egress Status Page Size are set up by
3016	 * t4_fixup_host_params().
3017	 */
3018	sge_control = t4_read_reg(adap, SGE_CONTROL_A);
3019	s->pktshift = PKTSHIFT_G(sge_control);
3020	s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64;
3021
3022	/* T4 uses a single control field to specify both the PCIe Padding and
3023	 * Packing Boundary.  T5 introduced the ability to specify these
3024	 * separately.  The actual Ingress Packet Data alignment boundary
3025	 * within Packed Buffer Mode is the maximum of these two
3026	 * specifications.
3027	 */
3028	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) +
3029			       INGPADBOUNDARY_SHIFT_X);
3030	if (is_t4(adap->params.chip)) {
3031		s->fl_align = ingpadboundary;
3032	} else {
3033		/* T5 has a different interpretation of one of the PCIe Packing
3034		 * Boundary values.
3035		 */
3036		sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
3037		ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
3038		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
3039			ingpackboundary = 16;
3040		else
3041			ingpackboundary = 1 << (ingpackboundary +
3042						INGPACKBOUNDARY_SHIFT_X);
3043
3044		s->fl_align = max(ingpadboundary, ingpackboundary);
3045	}
3046
3047	ret = t4_sge_init_soft(adap);
3048	if (ret < 0)
3049		return ret;
3050
3051	/*
3052	 * A FL with <= fl_starve_thres buffers is starving and a periodic
3053	 * timer will attempt to refill it.  This needs to be larger than the
3054	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
3055	 * stuck waiting for new packets while the SGE is waiting for us to
3056	 * give it more Free List entries.  (Note that the SGE's Egress
3057	 * Congestion Threshold is in units of 2 Free List pointers.) For T4,
3058	 * there was only a single field to control this.  For T5 there's the
3059	 * original field which now only applies to Unpacked Mode Free List
3060	 * buffers and a new field which only applies to Packed Mode Free List
3061	 * buffers.
3062	 */
3063	sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A);
3064	if (is_t4(adap->params.chip))
3065		egress_threshold = EGRTHRESHOLD_G(sge_conm_ctrl);
3066	else
3067		egress_threshold = EGRTHRESHOLDPACKING_G(sge_conm_ctrl);
3068	s->fl_starve_thres = 2*egress_threshold + 1;
3069
3070	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
3071	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
3072	s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
3073	s->idma_stalled[0] = 0;
3074	s->idma_stalled[1] = 0;
3075	spin_lock_init(&s->intrq_lock);
3076
3077	return 0;
3078}
3079