1/*
2 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
3 * driver for Linux.
4 *
5 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses.  You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 *     Redistribution and use in source and binary forms, with or
14 *     without modification, are permitted provided that the following
15 *     conditions are met:
16 *
17 *      - Redistributions of source code must retain the above
18 *        copyright notice, this list of conditions and the following
19 *        disclaimer.
20 *
21 *      - Redistributions in binary form must reproduce the above
22 *        copyright notice, this list of conditions and the following
23 *        disclaimer in the documentation and/or other materials
24 *        provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 */
35
36#include <linux/skbuff.h>
37#include <linux/netdevice.h>
38#include <linux/etherdevice.h>
39#include <linux/if_vlan.h>
40#include <linux/ip.h>
41#include <net/ipv6.h>
42#include <net/tcp.h>
43#include <linux/dma-mapping.h>
44#include <linux/prefetch.h>
45
46#include "t4vf_common.h"
47#include "t4vf_defs.h"
48
49#include "../cxgb4/t4_regs.h"
50#include "../cxgb4/t4_values.h"
51#include "../cxgb4/t4fw_api.h"
52#include "../cxgb4/t4_msg.h"
53
54/*
55 * Constants ...
56 */
57enum {
58	/*
59	 * Egress Queue sizes, producer and consumer indices are all in units
60	 * of Egress Context Units bytes.  Note that as far as the hardware is
61	 * concerned, the free list is an Egress Queue (the host produces free
62	 * buffers which the hardware consumes) and free list entries are
63	 * 64-bit PCI DMA addresses.
64	 */
65	EQ_UNIT = SGE_EQ_IDXSIZE,
66	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
67	TXD_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
68
69	/*
70	 * Max number of TX descriptors we clean up at a time.  Should be
71	 * modest as freeing skbs isn't cheap and it happens while holding
72	 * locks.  We just need to free packets faster than they arrive, we
73	 * eventually catch up and keep the amortized cost reasonable.
74	 */
75	MAX_TX_RECLAIM = 16,
76
77	/*
78	 * Max number of Rx buffers we replenish at a time.  Again keep this
79	 * modest, allocating buffers isn't cheap either.
80	 */
81	MAX_RX_REFILL = 16,
82
83	/*
84	 * Period of the Rx queue check timer.  This timer is infrequent as it
85	 * has something to do only when the system experiences severe memory
86	 * shortage.
87	 */
88	RX_QCHECK_PERIOD = (HZ / 2),
89
90	/*
91	 * Period of the TX queue check timer and the maximum number of TX
92	 * descriptors to be reclaimed by the TX timer.
93	 */
94	TX_QCHECK_PERIOD = (HZ / 2),
95	MAX_TIMER_TX_RECLAIM = 100,
96
97	/*
98	 * Suspend an Ethernet TX queue with fewer available descriptors than
99	 * this.  We always want to have room for a maximum sized packet:
100	 * inline immediate data + MAX_SKB_FRAGS. This is the same as
101	 * calc_tx_flits() for a TSO packet with nr_frags == MAX_SKB_FRAGS
102	 * (see that function and its helpers for a description of the
103	 * calculation).
104	 */
105	ETHTXQ_MAX_FRAGS = MAX_SKB_FRAGS + 1,
106	ETHTXQ_MAX_SGL_LEN = ((3 * (ETHTXQ_MAX_FRAGS-1))/2 +
107				   ((ETHTXQ_MAX_FRAGS-1) & 1) +
108				   2),
109	ETHTXQ_MAX_HDR = (sizeof(struct fw_eth_tx_pkt_vm_wr) +
110			  sizeof(struct cpl_tx_pkt_lso_core) +
111			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64),
112	ETHTXQ_MAX_FLITS = ETHTXQ_MAX_SGL_LEN + ETHTXQ_MAX_HDR,
113
114	ETHTXQ_STOP_THRES = 1 + DIV_ROUND_UP(ETHTXQ_MAX_FLITS, TXD_PER_EQ_UNIT),
115
116	/*
117	 * Max TX descriptor space we allow for an Ethernet packet to be
118	 * inlined into a WR.  This is limited by the maximum value which
119	 * we can specify for immediate data in the firmware Ethernet TX
120	 * Work Request.
121	 */
122	MAX_IMM_TX_PKT_LEN = FW_WR_IMMDLEN_M,
123
124	/*
125	 * Max size of a WR sent through a control TX queue.
126	 */
127	MAX_CTRL_WR_LEN = 256,
128
129	/*
130	 * Maximum amount of data which we'll ever need to inline into a
131	 * TX ring: max(MAX_IMM_TX_PKT_LEN, MAX_CTRL_WR_LEN).
132	 */
133	MAX_IMM_TX_LEN = (MAX_IMM_TX_PKT_LEN > MAX_CTRL_WR_LEN
134			  ? MAX_IMM_TX_PKT_LEN
135			  : MAX_CTRL_WR_LEN),
136
137	/*
138	 * For incoming packets less than RX_COPY_THRES, we copy the data into
139	 * an skb rather than referencing the data.  We allocate enough
140	 * in-line room in skb's to accommodate pulling in RX_PULL_LEN bytes
141	 * of the data (header).
142	 */
143	RX_COPY_THRES = 256,
144	RX_PULL_LEN = 128,
145
146	/*
147	 * Main body length for sk_buffs used for RX Ethernet packets with
148	 * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
149	 * pskb_may_pull() some room.
150	 */
151	RX_SKB_LEN = 512,
152};
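
/*
 * A rough worked example of the arithmetic above (assuming 4KB pages,
 * MAX_SKB_FRAGS == 17 and a 64-byte Egress Queue Index unit, so
 * TXD_PER_EQ_UNIT == 8 -- these are assumptions, not requirements): a packet
 * with linear data plus 17 fragments needs an SGL of
 * (3*17)/2 + (17 & 1) + 2 = 28 flits, plus the WR/CPL header flits, and
 * ETHTXQ_STOP_THRES then works out to a handful of Egress Queue units.  The
 * exact values depend on the platform's MAX_SKB_FRAGS and the firmware
 * header sizes.
 */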
153
154/*
155 * Software state per TX descriptor.
156 */
157struct tx_sw_desc {
158	struct sk_buff *skb;		/* socket buffer of TX data source */
159	struct ulptx_sgl *sgl;		/* scatter/gather list in TX Queue */
160};
161
162/*
163 * Software state per RX Free List descriptor.  We keep track of the allocated
164 * FL page, its size, and its PCI DMA address (if the page is mapped).  The FL
165 * page size and its PCI DMA mapped state are stored in the low bits of the
166 * PCI DMA address as per below.
167 */
168struct rx_sw_desc {
169	struct page *page;		/* Free List page buffer */
170	dma_addr_t dma_addr;		/* PCI DMA address (if mapped) */
171					/*   and flags (see below) */
172};
173
174/*
175 * The low bits of rx_sw_desc.dma_addr have special meaning.  Note that the
176 * SGE also uses the low 4 bits to determine the size of the buffer.  It uses
177 * those bits to index into the SGE_FL_BUFFER_SIZE[index] register array.
178 * Since we only use SGE_FL_BUFFER_SIZE0 and SGE_FL_BUFFER_SIZE1, these low 4
179 * bits can only contain a 0 or a 1 to indicate which size buffer we're giving
180 * to the SGE.  Thus, our software state of "is the buffer mapped for DMA" is
181 * maintained in an inverse sense so the hardware never sees that bit high.
182 */
183enum {
184	RX_LARGE_BUF    = 1 << 0,	/* buffer is SGE_FL_BUFFER_SIZE[1] */
185	RX_UNMAPPED_BUF = 1 << 1,	/* buffer is not mapped */
186};
187
188/**
189 *	get_buf_addr - return DMA buffer address of software descriptor
190 *	@sdesc: pointer to the software buffer descriptor
191 *
192 *	Return the DMA buffer address of a software descriptor (stripping out
193 *	our low-order flag bits).
194 */
195static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *sdesc)
196{
197	return sdesc->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
198}
199
200/**
201 *	is_buf_mapped - is buffer mapped for DMA?
202 *	@sdesc: pointer to the software buffer descriptor
203 *
204 *	Determine whether the buffer associated with a software descriptor is
205 *	mapped for DMA or not.
206 */
207static inline bool is_buf_mapped(const struct rx_sw_desc *sdesc)
208{
209	return !(sdesc->dma_addr & RX_UNMAPPED_BUF);
210}
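
/*
 * Purely illustrative example of the encoding above: a DMA-mapped "large"
 * Free List buffer at bus address 0x12340000 is stored in sdesc->dma_addr as
 * 0x12340001 (RX_LARGE_BUF set, RX_UNMAPPED_BUF clear), so get_buf_addr()
 * masks the flag bits back off and is_buf_mapped() reports true.
 */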
211
212/**
213 *	need_skb_unmap - does the platform need unmapping of sk_buffs?
214 *
215 *	Returns true if the platform needs sk_buff unmapping.  The compiler
216 *	optimizes away the unmapping code when this returns false.
217 */
218static inline int need_skb_unmap(void)
219{
220#ifdef CONFIG_NEED_DMA_MAP_STATE
221	return 1;
222#else
223	return 0;
224#endif
225}
226
227/**
228 *	txq_avail - return the number of available slots in a TX queue
229 *	@tq: the TX queue
230 *
231 *	Returns the number of available descriptors in a TX queue.
232 */
233static inline unsigned int txq_avail(const struct sge_txq *tq)
234{
235	return tq->size - 1 - tq->in_use;
236}
237
238/**
239 *	fl_cap - return the capacity of a Free List
240 *	@fl: the Free List
241 *
242 *	Returns the capacity of a Free List.  The capacity is less than the
243 *	size because an Egress Queue Index Unit worth of descriptors needs to
244 *	be left unpopulated, otherwise the Producer and Consumer indices PIDX
245 *	and CIDX will match and the hardware will think the FL is empty.
246 */
247static inline unsigned int fl_cap(const struct sge_fl *fl)
248{
249	return fl->size - FL_PER_EQ_UNIT;
250}
251
252/**
253 *	fl_starving - return whether a Free List is starving.
254 *	@adapter: pointer to the adapter
255 *	@fl: the Free List
256 *
257 *	Tests specified Free List to see whether the number of buffers
258 *	available to the hardware has fallen below our "starvation"
259 *	threshold.
260 */
261static inline bool fl_starving(const struct adapter *adapter,
262			       const struct sge_fl *fl)
263{
264	const struct sge *s = &adapter->sge;
265
266	return fl->avail - fl->pend_cred <= s->fl_starve_thres;
267}
268
269/**
270 *	map_skb -  map an skb for DMA to the device
271 *	@dev: the egress net device
272 *	@skb: the packet to map
273 *	@addr: a pointer to the base of the DMA mapping array
274 *
275 *	Map an skb for DMA to the device and return an array of DMA addresses.
276 */
277static int map_skb(struct device *dev, const struct sk_buff *skb,
278		   dma_addr_t *addr)
279{
280	const skb_frag_t *fp, *end;
281	const struct skb_shared_info *si;
282
283	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
284	if (dma_mapping_error(dev, *addr))
285		goto out_err;
286
287	si = skb_shinfo(skb);
288	end = &si->frags[si->nr_frags];
289	for (fp = si->frags; fp < end; fp++) {
290		*++addr = skb_frag_dma_map(dev, fp, 0, skb_frag_size(fp),
291					   DMA_TO_DEVICE);
292		if (dma_mapping_error(dev, *addr))
293			goto unwind;
294	}
295	return 0;
296
297unwind:
298	while (fp-- > si->frags)
299		dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE);
300	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
301
302out_err:
303	return -ENOMEM;
304}
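
/*
 * On success the @addr array is left with addr[0] holding the DMA address of
 * the skb's linear data and addr[1..nr_frags] holding the DMA addresses of
 * the page fragments; this is the layout that write_sgl() below assumes.
 */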
305
306static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
307		      const struct ulptx_sgl *sgl, const struct sge_txq *tq)
308{
309	const struct ulptx_sge_pair *p;
310	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
311
312	if (likely(skb_headlen(skb)))
313		dma_unmap_single(dev, be64_to_cpu(sgl->addr0),
314				 be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
315	else {
316		dma_unmap_page(dev, be64_to_cpu(sgl->addr0),
317			       be32_to_cpu(sgl->len0), DMA_TO_DEVICE);
318		nfrags--;
319	}
320
321	/*
322	 * the complexity below is because of the possibility of a wrap-around
323	 * in the middle of an SGL
324	 */
325	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
326		if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) {
327unmap:
328			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
329				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
330			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
331				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
332			p++;
333		} else if ((u8 *)p == (u8 *)tq->stat) {
334			p = (const struct ulptx_sge_pair *)tq->desc;
335			goto unmap;
336		} else if ((u8 *)p + 8 == (u8 *)tq->stat) {
337			const __be64 *addr = (const __be64 *)tq->desc;
338
339			dma_unmap_page(dev, be64_to_cpu(addr[0]),
340				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
341			dma_unmap_page(dev, be64_to_cpu(addr[1]),
342				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
343			p = (const struct ulptx_sge_pair *)&addr[2];
344		} else {
345			const __be64 *addr = (const __be64 *)tq->desc;
346
347			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
348				       be32_to_cpu(p->len[0]), DMA_TO_DEVICE);
349			dma_unmap_page(dev, be64_to_cpu(addr[0]),
350				       be32_to_cpu(p->len[1]), DMA_TO_DEVICE);
351			p = (const struct ulptx_sge_pair *)&addr[1];
352		}
353	}
354	if (nfrags) {
355		__be64 addr;
356
357		if ((u8 *)p == (u8 *)tq->stat)
358			p = (const struct ulptx_sge_pair *)tq->desc;
359		addr = ((u8 *)p + 16 <= (u8 *)tq->stat
360			? p->addr[0]
361			: *(const __be64 *)tq->desc);
362		dma_unmap_page(dev, be64_to_cpu(addr), be32_to_cpu(p->len[0]),
363			       DMA_TO_DEVICE);
364	}
365}
366
367/**
368 *	free_tx_desc - reclaims TX descriptors and their buffers
369 *	@adapter: the adapter
370 *	@tq: the TX queue to reclaim descriptors from
371 *	@n: the number of descriptors to reclaim
372 *	@unmap: whether the buffers should be unmapped for DMA
373 *
374 *	Reclaims TX descriptors from an SGE TX queue and frees the associated
375 *	TX buffers.  Called with the TX queue lock held.
376 */
377static void free_tx_desc(struct adapter *adapter, struct sge_txq *tq,
378			 unsigned int n, bool unmap)
379{
380	struct tx_sw_desc *sdesc;
381	unsigned int cidx = tq->cidx;
382	struct device *dev = adapter->pdev_dev;
383
384	const int need_unmap = need_skb_unmap() && unmap;
385
386	sdesc = &tq->sdesc[cidx];
387	while (n--) {
388		/*
389		 * If we kept a reference to the original TX skb, we need to
390		 * unmap it from PCI DMA space (if required) and free it.
391		 */
392		if (sdesc->skb) {
393			if (need_unmap)
394				unmap_sgl(dev, sdesc->skb, sdesc->sgl, tq);
395			dev_consume_skb_any(sdesc->skb);
396			sdesc->skb = NULL;
397		}
398
399		sdesc++;
400		if (++cidx == tq->size) {
401			cidx = 0;
402			sdesc = tq->sdesc;
403		}
404	}
405	tq->cidx = cidx;
406}
407
408/*
409 * Return the number of reclaimable descriptors in a TX queue.
410 */
411static inline int reclaimable(const struct sge_txq *tq)
412{
413	int hw_cidx = be16_to_cpu(tq->stat->cidx);
414	int reclaimable = hw_cidx - tq->cidx;
415	if (reclaimable < 0)
416		reclaimable += tq->size;
417	return reclaimable;
418}
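
/*
 * Illustrative example: with a 1024-entry queue, a hardware CIDX of 5 and a
 * software CIDX of 1020, reclaimable() returns 5 - 1020 + 1024 = 9
 * descriptors, i.e. the wrap-around of the ring is handled by the
 * correction above.
 */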
419
420/**
421 *	reclaim_completed_tx - reclaims completed TX descriptors
422 *	@adapter: the adapter
423 *	@tq: the TX queue to reclaim completed descriptors from
424 *	@unmap: whether the buffers should be unmapped for DMA
425 *
426 *	Reclaims TX descriptors that the SGE has indicated it has processed,
427 *	and frees the associated buffers if possible.  Called with the TX
428 *	queue locked.
429 */
430static inline void reclaim_completed_tx(struct adapter *adapter,
431					struct sge_txq *tq,
432					bool unmap)
433{
434	int avail = reclaimable(tq);
435
436	if (avail) {
437		/*
438		 * Limit the amount of clean up work we do at a time to keep
439		 * the TX lock hold time O(1).
440		 */
441		if (avail > MAX_TX_RECLAIM)
442			avail = MAX_TX_RECLAIM;
443
444		free_tx_desc(adapter, tq, avail, unmap);
445		tq->in_use -= avail;
446	}
447}
448
449/**
450 *	get_buf_size - return the size of an RX Free List buffer.
451 *	@adapter: pointer to the associated adapter
452 *	@sdesc: pointer to the software buffer descriptor
453 */
454static inline int get_buf_size(const struct adapter *adapter,
455			       const struct rx_sw_desc *sdesc)
456{
457	const struct sge *s = &adapter->sge;
458
459	return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
460		? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
461}
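
/*
 * For example, with 4KB pages and an fl_pg_order of 2 (the order is chosen
 * at initialization time, so this is only an assumed value), a descriptor
 * with RX_LARGE_BUF set describes a 16KB buffer while one without it
 * describes a single 4KB page.
 */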
462
463/**
464 *	free_rx_bufs - free RX buffers on an SGE Free List
465 *	@adapter: the adapter
466 *	@fl: the SGE Free List to free buffers from
467 *	@n: how many buffers to free
468 *
469 *	Release the next @n buffers on an SGE Free List RX queue.   The
470 *	buffers must be made inaccessible to hardware before calling this
471 *	function.
472 */
473static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
474{
475	while (n--) {
476		struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
477
478		if (is_buf_mapped(sdesc))
479			dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
480				       get_buf_size(adapter, sdesc),
481				       DMA_FROM_DEVICE);
482		put_page(sdesc->page);
483		sdesc->page = NULL;
484		if (++fl->cidx == fl->size)
485			fl->cidx = 0;
486		fl->avail--;
487	}
488}
489
490/**
491 *	unmap_rx_buf - unmap the current RX buffer on an SGE Free List
492 *	@adapter: the adapter
493 *	@fl: the SGE Free List
494 *
495 *	Unmap the current buffer on an SGE Free List RX queue.   The
496 *	buffer must be made inaccessible to HW before calling this function.
497 *
498 *	This is similar to @free_rx_bufs above but does not free the buffer.
499 *	Do note that the FL still loses any further access to the buffer.
500 *	This is used predominantly to "transfer ownership" of an FL buffer
501 *	to another entity (typically an skb's fragment list).
502 */
503static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
504{
505	struct rx_sw_desc *sdesc = &fl->sdesc[fl->cidx];
506
507	if (is_buf_mapped(sdesc))
508		dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
509			       get_buf_size(adapter, sdesc),
510			       DMA_FROM_DEVICE);
511	sdesc->page = NULL;
512	if (++fl->cidx == fl->size)
513		fl->cidx = 0;
514	fl->avail--;
515}
516
517/**
518 *	ring_fl_db - ring doorbell on free list
519 *	@adapter: the adapter
520 *	@fl: the Free List whose doorbell should be rung ...
521 *
522 *	Tell the Scatter Gather Engine that there are new free list entries
523 *	available.
524 */
525static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
526{
527	u32 val;
528
529	/* The SGE keeps track of its Producer and Consumer Indices in terms
530	 * of Egress Queue Units so we can only tell it about integral numbers
531	 * of multiples of Free List Entries per Egress Queue Units ...
532	 */
533	if (fl->pend_cred >= FL_PER_EQ_UNIT) {
534		if (is_t4(adapter->params.chip))
535			val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
536		else
537			val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
538			      DBTYPE_F;
539		val |= DBPRIO_F;
540
541		/* Make sure all memory writes to the Free List queue are
542		 * committed before we tell the hardware about them.
543		 */
544		wmb();
545
546		/* If we don't have access to the new User Doorbell (T5+), use
547		 * the old doorbell mechanism; otherwise use the new BAR2
548		 * mechanism.
549		 */
550		if (unlikely(fl->bar2_addr == NULL)) {
551			t4_write_reg(adapter,
552				     T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
553				     QID_V(fl->cntxt_id) | val);
554		} else {
555			writel(val | QID_V(fl->bar2_qid),
556			       fl->bar2_addr + SGE_UDB_KDOORBELL);
557
558			/* This Write memory Barrier will force the write to
559			 * the User Doorbell area to be flushed.
560			 */
561			wmb();
562		}
563		fl->pend_cred %= FL_PER_EQ_UNIT;
564	}
565}
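
/*
 * Worked example (again assuming a 64-byte EQ_UNIT, i.e. FL_PER_EQ_UNIT of
 * 8): if fl->pend_cred is 37 we ring the doorbell with a PIDX increment of
 * 37 / 8 = 4 Egress Queue units and carry the remaining 5 credits in
 * fl->pend_cred until enough additional buffers have been posted.
 */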
566
567/**
568 *	set_rx_sw_desc - initialize software RX buffer descriptor
569 *	@sdesc: pointer to the software RX buffer descriptor
570 *	@page: pointer to the page data structure backing the RX buffer
571 *	@dma_addr: PCI DMA address (possibly with low-bit flags)
572 */
573static inline void set_rx_sw_desc(struct rx_sw_desc *sdesc, struct page *page,
574				  dma_addr_t dma_addr)
575{
576	sdesc->page = page;
577	sdesc->dma_addr = dma_addr;
578}
579
580/*
581 * Support for poisoning RX buffers ...
582 */
583#define POISON_BUF_VAL -1
584
585static inline void poison_buf(struct page *page, size_t sz)
586{
587#if POISON_BUF_VAL >= 0
588	memset(page_address(page), POISON_BUF_VAL, sz);
589#endif
590}
591
592/**
593 *	refill_fl - refill an SGE RX buffer ring
594 *	@adapter: the adapter
595 *	@fl: the Free List ring to refill
596 *	@n: the number of new buffers to allocate
597 *	@gfp: the gfp flags for the allocations
598 *
599 *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
600 *	allocated with the supplied gfp flags.  The caller must assure that
601 *	@n does not exceed the queue's capacity -- i.e. (cidx == pidx) _IN
602 *	EGRESS QUEUE UNITS_ indicates an empty Free List!  Returns the number
603 *	of buffers allocated.  If afterwards the queue is found critically low,
604 *	mark it as starving in the bitmap of starving FLs.
605 */
606static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
607			      int n, gfp_t gfp)
608{
609	struct sge *s = &adapter->sge;
610	struct page *page;
611	dma_addr_t dma_addr;
612	unsigned int cred = fl->avail;
613	__be64 *d = &fl->desc[fl->pidx];
614	struct rx_sw_desc *sdesc = &fl->sdesc[fl->pidx];
615
616	/*
617	 * Sanity: ensure that the result of adding n Free List buffers
618	 * won't result in wrapping the SGE's Producer Index around to
619	 * its Consumer Index thereby indicating an empty Free List ...
620	 */
621	BUG_ON(fl->avail + n > fl->size - FL_PER_EQ_UNIT);
622
623	gfp |= __GFP_NOWARN;
624
625	/*
626	 * If we support large pages, prefer large buffers and fail over to
627	 * small pages if we can't allocate large pages to satisfy the refill.
628	 * If we don't support large pages, drop directly into the small page
629	 * allocation code.
630	 */
631	if (s->fl_pg_order == 0)
632		goto alloc_small_pages;
633
634	while (n) {
635		page = __dev_alloc_pages(gfp, s->fl_pg_order);
636		if (unlikely(!page)) {
637			/*
638			 * We've failed in our attempt to allocate a "large
639			 * page".  Fail over to the "small page" allocation
640			 * below.
641			 */
642			fl->large_alloc_failed++;
643			break;
644		}
645		poison_buf(page, PAGE_SIZE << s->fl_pg_order);
646
647		dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
648					PAGE_SIZE << s->fl_pg_order,
649					DMA_FROM_DEVICE);
650		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
651			/*
652			 * We've run out of DMA mapping space.  Free up the
653			 * buffer and return with what we've managed to put
654			 * into the free list.  We don't want to fail over to
655			 * the small page allocation below in this case
656			 * because DMA mapping resources are typically
657			 * critical resources once they become scarce.
658			 */
659			__free_pages(page, s->fl_pg_order);
660			goto out;
661		}
662		dma_addr |= RX_LARGE_BUF;
663		*d++ = cpu_to_be64(dma_addr);
664
665		set_rx_sw_desc(sdesc, page, dma_addr);
666		sdesc++;
667
668		fl->avail++;
669		if (++fl->pidx == fl->size) {
670			fl->pidx = 0;
671			sdesc = fl->sdesc;
672			d = fl->desc;
673		}
674		n--;
675	}
676
677alloc_small_pages:
678	while (n--) {
679		page = __dev_alloc_page(gfp);
680		if (unlikely(!page)) {
681			fl->alloc_failed++;
682			break;
683		}
684		poison_buf(page, PAGE_SIZE);
685
686		dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
687				       DMA_FROM_DEVICE);
688		if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
689			put_page(page);
690			break;
691		}
692		*d++ = cpu_to_be64(dma_addr);
693
694		set_rx_sw_desc(sdesc, page, dma_addr);
695		sdesc++;
696
697		fl->avail++;
698		if (++fl->pidx == fl->size) {
699			fl->pidx = 0;
700			sdesc = fl->sdesc;
701			d = fl->desc;
702		}
703	}
704
705out:
706	/*
707	 * Update our accounting state to incorporate the new Free List
708	 * buffers, tell the hardware about them and return the number of
709	 * buffers which we were able to allocate.
710	 */
711	cred = fl->avail - cred;
712	fl->pend_cred += cred;
713	ring_fl_db(adapter, fl);
714
715	if (unlikely(fl_starving(adapter, fl))) {
716		smp_wmb();
717		set_bit(fl->cntxt_id, adapter->sge.starving_fl);
718	}
719
720	return cred;
721}
722
723/*
724 * Refill a Free List to its capacity or the Maximum Refill Increment,
725 * whichever is smaller ...
726 */
727static inline void __refill_fl(struct adapter *adapter, struct sge_fl *fl)
728{
729	refill_fl(adapter, fl,
730		  min((unsigned int)MAX_RX_REFILL, fl_cap(fl) - fl->avail),
731		  GFP_ATOMIC);
732}
733
734/**
735 *	alloc_ring - allocate resources for an SGE descriptor ring
736 *	@dev: the PCI device's core device
737 *	@nelem: the number of descriptors
738 *	@hwsize: the size of each hardware descriptor
739 *	@swsize: the size of each software descriptor
740 *	@busaddrp: the physical PCI bus address of the allocated ring
741 *	@swringp: return address pointer for software ring
742 *	@stat_size: extra space in hardware ring for status information
743 *
744 *	Allocates resources for an SGE descriptor ring, such as TX queues,
745 *	free buffer lists, response queues, etc.  Each SGE ring requires
746 *	space for its hardware descriptors plus, optionally, space for software
747 *	state associated with each hardware entry (the metadata).  The function
748 *	returns three values: the virtual address for the hardware ring (the
749 *	return value of the function), the PCI bus address of the hardware
750 *	ring (in *busaddrp), and the address of the software ring (in swringp).
751 *	Both the hardware and software rings are returned zeroed out.
752 */
753static void *alloc_ring(struct device *dev, size_t nelem, size_t hwsize,
754			size_t swsize, dma_addr_t *busaddrp, void *swringp,
755			size_t stat_size)
756{
757	/*
758	 * Allocate the hardware ring and the PCI DMA bus address space for it.
759	 */
760	size_t hwlen = nelem * hwsize + stat_size;
761	void *hwring = dma_alloc_coherent(dev, hwlen, busaddrp, GFP_KERNEL);
762
763	if (!hwring)
764		return NULL;
765
766	/*
767	 * If the caller wants a software ring, allocate it and return a
768	 * pointer to it in *swringp.
769	 */
770	BUG_ON((swsize != 0) != (swringp != NULL));
771	if (swsize) {
772		void *swring = kcalloc(nelem, swsize, GFP_KERNEL);
773
774		if (!swring) {
775			dma_free_coherent(dev, hwlen, hwring, *busaddrp);
776			return NULL;
777		}
778		*(void **)swringp = swring;
779	}
780
781	/*
782	 * Zero out the hardware ring and return its address as our function
783	 * value.
784	 */
785	memset(hwring, 0, hwlen);
786	return hwring;
787}
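
/*
 * A sketch of how a caller might use alloc_ring() for a Free List ring (the
 * actual queue-allocation code lives elsewhere in the driver; the field
 * names here are only illustrative):
 *
 *	fl->desc = alloc_ring(adapter->pdev_dev, fl->size, sizeof(__be64),
 *			      sizeof(struct rx_sw_desc), &fl->addr,
 *			      &fl->sdesc, s->stat_len);
 *	if (!fl->desc)
 *		return -ENOMEM;
 */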
788
789/**
790 *	sgl_len - calculates the size of an SGL of the given capacity
791 *	@n: the number of SGL entries
792 *
793 *	Calculates the number of flits (8-byte units) needed for a Direct
794 *	Scatter/Gather List that can hold the given number of entries.
795 */
796static inline unsigned int sgl_len(unsigned int n)
797{
798	/*
799	 * A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA
800	 * addresses.  The DSGL Work Request starts off with a 32-bit DSGL
801	 * ULPTX header, then Length0, then Address0, then, for 1 <= i <= N,
802	 * repeated sequences of { Length[i], Length[i+1], Address[i],
803	 * Address[i+1] } (this ensures that all addresses are on 64-bit
804	 * boundaries).  If N is even, then Length[N+1] should be set to 0 and
805	 * Address[N+1] is omitted.
806	 *
807	 * The following calculation incorporates all of the above.  It's
808	 * somewhat hard to follow but, briefly: the "+2" accounts for the
809	 * first two flits which include the DSGL header, Length0 and
810	 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3
811	 * flits for every complete pair of the remaining n-1 entries); and
812	 * finally the "+((n-1)&1)" adds the one remaining flit needed if
813	 * (n-1) is odd ...
814	 */
815	n--;
816	return (3 * n) / 2 + (n & 1) + 2;
817}
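
/*
 * For example, sgl_len(3) = (3*2)/2 + (2 & 1) + 2 = 5 flits: one flit for
 * the ULPTX header plus Length0, one for Address0, one for the
 * {Length1, Length2} pair and one each for Address1 and Address2.
 */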
818
819/**
820 *	flits_to_desc - returns the num of TX descriptors for the given flits
821 *	@flits: the number of flits
822 *
823 *	Returns the number of TX descriptors needed for the supplied number
824 *	of flits.
825 */
826static inline unsigned int flits_to_desc(unsigned int flits)
827{
828	BUG_ON(flits > SGE_MAX_WR_LEN / sizeof(__be64));
829	return DIV_ROUND_UP(flits, TXD_PER_EQ_UNIT);
830}
831
832/**
833 *	is_eth_imm - can an Ethernet packet be sent as immediate data?
834 *	@skb: the packet
835 *
836 *	Returns whether an Ethernet packet is small enough to fit completely as
837 *	immediate data.
838 */
839static inline int is_eth_imm(const struct sk_buff *skb)
840{
841	/*
842	 * The VF Driver uses the FW_ETH_TX_PKT_VM_WR firmware Work Request
843	 * which does not accommodate immediate data.  We could dike out all
844	 * of the support code for immediate data but that would tie our hands
845	 * too much if we ever want to enhance the firmware.  It would also
846	 * create more differences between the PF and VF Drivers.
847	 */
848	return false;
849}
850
851/**
852 *	calc_tx_flits - calculate the number of flits for a packet TX WR
853 *	@skb: the packet
854 *
855 *	Returns the number of flits needed for a TX Work Request for the
856 *	given Ethernet packet, including the needed WR and CPL headers.
857 */
858static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
859{
860	unsigned int flits;
861
862	/*
863	 * If the skb is small enough, we can pump it out as a work request
864	 * with only immediate data.  In that case we just have to have the
865	 * TX Packet header plus the skb data in the Work Request.
866	 */
867	if (is_eth_imm(skb))
868		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt),
869				    sizeof(__be64));
870
871	/*
872	 * Otherwise, we're going to have to construct a Scatter gather list
873	 * of the skb body and fragments.  We also include the flits necessary
874	 * for the TX Packet Work Request and CPL.  We always have a firmware
875	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
876	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
877	 * message or, if we're doing a Large Send Offload, an LSO CPL message
878	 * with an embedded TX Packet Write CPL message.
879	 */
880	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
881	if (skb_shinfo(skb)->gso_size)
882		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
883			  sizeof(struct cpl_tx_pkt_lso_core) +
884			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
885	else
886		flits += (sizeof(struct fw_eth_tx_pkt_vm_wr) +
887			  sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
888	return flits;
889}
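
/*
 * Hedged example: a non-TSO packet with linear data plus two page fragments
 * needs sgl_len(3) = 5 flits for the SGL plus
 * (sizeof(struct fw_eth_tx_pkt_vm_wr) + sizeof(struct cpl_tx_pkt_core)) / 8
 * flits for the headers; a TSO packet additionally pays for the LSO CPL.
 */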
890
891/**
892 *	write_sgl - populate a Scatter/Gather List for a packet
893 *	@skb: the packet
894 *	@tq: the TX queue we are writing into
895 *	@sgl: starting location for writing the SGL
896 *	@end: points right after the end of the SGL
897 *	@start: start offset into skb main-body data to include in the SGL
898 *	@addr: the list of DMA bus addresses for the SGL elements
899 *
900 *	Generates a Scatter/Gather List for the buffers that make up a packet.
901 *	The caller must provide adequate space for the SGL that will be written.
902 *	The SGL includes all of the packet's page fragments and the data in its
903 *	main body except for the first @start bytes.  @sgl must be 16-byte
904 *	aligned and within a TX descriptor with available space.  @end points
905 *	right after the end of the SGL but does not account for any potential
906 *	wrap around, i.e., @end > @tq->stat.
907 */
908static void write_sgl(const struct sk_buff *skb, struct sge_txq *tq,
909		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
910		      const dma_addr_t *addr)
911{
912	unsigned int i, len;
913	struct ulptx_sge_pair *to;
914	const struct skb_shared_info *si = skb_shinfo(skb);
915	unsigned int nfrags = si->nr_frags;
916	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
917
918	len = skb_headlen(skb) - start;
919	if (likely(len)) {
920		sgl->len0 = htonl(len);
921		sgl->addr0 = cpu_to_be64(addr[0] + start);
922		nfrags++;
923	} else {
924		sgl->len0 = htonl(skb_frag_size(&si->frags[0]));
925		sgl->addr0 = cpu_to_be64(addr[1]);
926	}
927
928	sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
929			      ULPTX_NSGE_V(nfrags));
930	if (likely(--nfrags == 0))
931		return;
932	/*
933	 * Most of the complexity below deals with the possibility we hit the
934	 * end of the queue in the middle of writing the SGL.  For this case
935	 * only we create the SGL in a temporary buffer and then copy it.
936	 */
937	to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge;
938
939	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
940		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
941		to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i]));
942		to->addr[0] = cpu_to_be64(addr[i]);
943		to->addr[1] = cpu_to_be64(addr[++i]);
944	}
945	if (nfrags) {
946		to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i]));
947		to->len[1] = cpu_to_be32(0);
948		to->addr[0] = cpu_to_be64(addr[i + 1]);
949	}
950	if (unlikely((u8 *)end > (u8 *)tq->stat)) {
951		unsigned int part0 = (u8 *)tq->stat - (u8 *)sgl->sge, part1;
952
953		if (likely(part0))
954			memcpy(sgl->sge, buf, part0);
955		part1 = (u8 *)end - (u8 *)tq->stat;
956		memcpy(tq->desc, (u8 *)buf + part0, part1);
957		end = (void *)tq->desc + part1;
958	}
959	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
960		*end = 0;
961}
962
963/**
964 *	ring_tx_db - check and potentially ring a TX queue's doorbell
965 *	@adapter: the adapter
966 *	@tq: the TX queue
967 *	@n: number of new descriptors to give to HW
968 *
969 *	Ring the doorbell for a TX queue.
970 */
971static inline void ring_tx_db(struct adapter *adapter, struct sge_txq *tq,
972			      int n)
973{
974	/* Make sure that all writes to the TX Descriptors are committed
975	 * before we tell the hardware about them.
976	 */
977	wmb();
978
979	/* If we don't have access to the new User Doorbell (T5+), use the old
980	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
981	 */
982	if (unlikely(tq->bar2_addr == NULL)) {
983		u32 val = PIDX_V(n);
984
985		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
986			     QID_V(tq->cntxt_id) | val);
987	} else {
988		u32 val = PIDX_T5_V(n);
989
990		/* T4 and later chips share the same PIDX field offset within
991		 * the doorbell, but T5 and later shrank the field in order to
992		 * gain a bit for Doorbell Priority.  The field was absurdly
993		 * large in the first place (14 bits) so we just use the T5
994		 * and later limits and warn if a Queue ID is too large.
995		 */
996		WARN_ON(val & DBPRIO_F);
997
998		/* If we're only writing a single Egress Unit and the BAR2
999		 * Queue ID is 0, we can use the Write Combining Doorbell
1000		 * Gather Buffer; otherwise we use the simple doorbell.
1001		 */
1002		if (n == 1 && tq->bar2_qid == 0) {
1003			unsigned int index = (tq->pidx
1004					      ? (tq->pidx - 1)
1005					      : (tq->size - 1));
1006			__be64 *src = (__be64 *)&tq->desc[index];
1007			__be64 __iomem *dst = (__be64 __iomem *)(tq->bar2_addr +
1008							 SGE_UDB_WCDOORBELL);
1009			unsigned int count = EQ_UNIT / sizeof(__be64);
1010
1011			/* Copy the TX Descriptor in a tight loop in order to
1012			 * try to get it to the adapter in a single Write
1013			 * Combined transfer on the PCI-E Bus.  If the Write
1014			 * Combine fails (say because of an interrupt, etc.)
1015			 * the hardware will simply take the last write as a
1016			 * simple doorbell write with a PIDX Increment of 1
1017			 * and will fetch the TX Descriptor from memory via
1018			 * DMA.
1019			 */
1020			while (count) {
1021				/* the (__force u64) is because the compiler
1022				 * doesn't understand the endian swizzling
1023				 * going on
1024				 */
1025				writeq((__force u64)*src, dst);
1026				src++;
1027				dst++;
1028				count--;
1029			}
1030		} else
1031			writel(val | QID_V(tq->bar2_qid),
1032			       tq->bar2_addr + SGE_UDB_KDOORBELL);
1033
1034		/* This Write Memory Barrier will force the write to the User
1035		 * Doorbell area to be flushed.  This is needed to prevent
1036		 * writes on different CPUs for the same queue from hitting
1037		 * the adapter out of order.  This is required when some Work
1038		 * Requests take the Write Combine Gather Buffer path (user
1039		 * doorbell area offset [SGE_UDB_WCDOORBELL..+63]) and some
1040		 * take the traditional path where we simply increment the
1041		 * PIDX (User Doorbell area SGE_UDB_KDOORBELL) and have the
1042		 * hardware DMA read the actual Work Request.
1043		 */
1044		wmb();
1045	}
1046}
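
/*
 * Note on the Write Combining path above: "count" starts out as
 * EQ_UNIT / sizeof(__be64), so one full TX descriptor (64 bytes if EQ_UNIT
 * is 64, as assumed earlier) is pushed through the Gather Buffer with eight
 * writeq() calls.
 */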
1047
1048/**
1049 *	inline_tx_skb - inline a packet's data into TX descriptors
1050 *	@skb: the packet
1051 *	@tq: the TX queue where the packet will be inlined
1052 *	@pos: starting position in the TX queue to inline the packet
1053 *
1054 *	Inline a packet's contents directly into TX descriptors, starting at
1055 *	the given position within the TX DMA ring.
1056 *	Most of the complexity of this operation is dealing with wrap arounds
1057 *	in the middle of the packet we want to inline.
1058 */
1059static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
1060			  void *pos)
1061{
1062	u64 *p;
1063	int left = (void *)tq->stat - pos;
1064
1065	if (likely(skb->len <= left)) {
1066		if (likely(!skb->data_len))
1067			skb_copy_from_linear_data(skb, pos, skb->len);
1068		else
1069			skb_copy_bits(skb, 0, pos, skb->len);
1070		pos += skb->len;
1071	} else {
1072		skb_copy_bits(skb, 0, pos, left);
1073		skb_copy_bits(skb, left, tq->desc, skb->len - left);
1074		pos = (void *)tq->desc + (skb->len - left);
1075	}
1076
1077	/* 0-pad to multiple of 16 */
1078	p = PTR_ALIGN(pos, 8);
1079	if ((uintptr_t)p & 8)
1080		*p = 0;
1081}
1082
1083/*
1084 * Figure out what HW csum a packet wants and return the appropriate control
1085 * bits.
1086 */
1087static u64 hwcsum(const struct sk_buff *skb)
1088{
1089	int csum_type;
1090	const struct iphdr *iph = ip_hdr(skb);
1091
1092	if (iph->version == 4) {
1093		if (iph->protocol == IPPROTO_TCP)
1094			csum_type = TX_CSUM_TCPIP;
1095		else if (iph->protocol == IPPROTO_UDP)
1096			csum_type = TX_CSUM_UDPIP;
1097		else {
1098nocsum:
1099			/*
1100			 * unknown protocol, disable HW csum
1101			 * and hope a bad packet is detected
1102			 */
1103			return TXPKT_L4CSUM_DIS;
1104		}
1105	} else {
1106		/*
1107		 * this doesn't work with extension headers
1108		 */
1109		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
1110
1111		if (ip6h->nexthdr == IPPROTO_TCP)
1112			csum_type = TX_CSUM_TCPIP6;
1113		else if (ip6h->nexthdr == IPPROTO_UDP)
1114			csum_type = TX_CSUM_UDPIP6;
1115		else
1116			goto nocsum;
1117	}
1118
1119	if (likely(csum_type >= TX_CSUM_TCPIP))
1120		return TXPKT_CSUM_TYPE(csum_type) |
1121			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
1122			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
1123	else {
1124		int start = skb_transport_offset(skb);
1125
1126		return TXPKT_CSUM_TYPE(csum_type) |
1127			TXPKT_CSUM_START(start) |
1128			TXPKT_CSUM_LOC(start + skb->csum_offset);
1129	}
1130}
1131
1132/*
1133 * Stop an Ethernet TX queue and record that state change.
1134 */
1135static void txq_stop(struct sge_eth_txq *txq)
1136{
1137	netif_tx_stop_queue(txq->txq);
1138	txq->q.stops++;
1139}
1140
1141/*
1142 * Advance our software state for a TX queue by adding n in use descriptors.
1143 */
1144static inline void txq_advance(struct sge_txq *tq, unsigned int n)
1145{
1146	tq->in_use += n;
1147	tq->pidx += n;
1148	if (tq->pidx >= tq->size)
1149		tq->pidx -= tq->size;
1150}
1151
1152/**
1153 *	t4vf_eth_xmit - add a packet to an Ethernet TX queue
1154 *	@skb: the packet
1155 *	@dev: the egress net device
1156 *
1157 *	Add a packet to an SGE Ethernet TX queue.  Runs with softirqs disabled.
1158 */
1159int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1160{
1161	u32 wr_mid;
1162	u64 cntrl, *end;
1163	int qidx, credits;
1164	unsigned int flits, ndesc;
1165	struct adapter *adapter;
1166	struct sge_eth_txq *txq;
1167	const struct port_info *pi;
1168	struct fw_eth_tx_pkt_vm_wr *wr;
1169	struct cpl_tx_pkt_core *cpl;
1170	const struct skb_shared_info *ssi;
1171	dma_addr_t addr[MAX_SKB_FRAGS + 1];
1172	const size_t fw_hdr_copy_len = (sizeof(wr->ethmacdst) +
1173					sizeof(wr->ethmacsrc) +
1174					sizeof(wr->ethtype) +
1175					sizeof(wr->vlantci));
1176
1177	/*
1178	 * The chip minimum packet length is 10 octets but the firmware
1179	 * command that we are using requires that we copy the Ethernet header
1180	 * (including the VLAN tag) into the header so we reject anything
1181	 * smaller than that ...
1182	 */
1183	if (unlikely(skb->len < fw_hdr_copy_len))
1184		goto out_free;
1185
1186	/*
1187	 * Figure out which TX Queue we're going to use.
1188	 */
1189	pi = netdev_priv(dev);
1190	adapter = pi->adapter;
1191	qidx = skb_get_queue_mapping(skb);
1192	BUG_ON(qidx >= pi->nqsets);
1193	txq = &adapter->sge.ethtxq[pi->first_qset + qidx];
1194
1195	/*
1196	 * Take this opportunity to reclaim any TX Descriptors whose DMA
1197	 * transfers have completed.
1198	 */
1199	reclaim_completed_tx(adapter, &txq->q, true);
1200
1201	/*
1202	 * Calculate the number of flits and TX Descriptors we're going to
1203	 * need along with how many TX Descriptors will be left over after
1204	 * we inject our Work Request.
1205	 */
1206	flits = calc_tx_flits(skb);
1207	ndesc = flits_to_desc(flits);
1208	credits = txq_avail(&txq->q) - ndesc;
1209
1210	if (unlikely(credits < 0)) {
1211		/*
1212		 * Not enough room for this packet's Work Request.  Stop the
1213		 * TX Queue and return a "busy" condition.  The queue will get
1214		 * started later on when the firmware informs us that space
1215		 * has opened up.
1216		 */
1217		txq_stop(txq);
1218		dev_err(adapter->pdev_dev,
1219			"%s: TX ring %u full while queue awake!\n",
1220			dev->name, qidx);
1221		return NETDEV_TX_BUSY;
1222	}
1223
1224	if (!is_eth_imm(skb) &&
1225	    unlikely(map_skb(adapter->pdev_dev, skb, addr) < 0)) {
1226		/*
1227		 * We need to map the skb into PCI DMA space (because it can't
1228		 * be in-lined directly into the Work Request) and the mapping
1229		 * operation failed.  Record the error and drop the packet.
1230		 */
1231		txq->mapping_err++;
1232		goto out_free;
1233	}
1234
1235	wr_mid = FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2));
1236	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
1237		/*
1238		 * After we're done injecting the Work Request for this
1239		 * packet, we'll be below our "stop threshold" so stop the TX
1240		 * Queue now and schedule a request for an SGE Egress Queue
1241		 * Update message.  The queue will get started later on when
1242		 * the firmware processes this Work Request and sends us an
1243		 * Egress Queue Status Update message indicating that space
1244		 * has opened up.
1245		 */
1246		txq_stop(txq);
1247		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
1248	}
1249
1250	/*
1251	 * Start filling in our Work Request.  Note that we do _not_ handle
1252	 * the WR Header wrapping around the TX Descriptor Ring.  If our
1253	 * maximum header size ever exceeds one TX Descriptor, we'll need to
1254	 * do something else here.
1255	 */
1256	BUG_ON(DIV_ROUND_UP(ETHTXQ_MAX_HDR, TXD_PER_EQ_UNIT) > 1);
1257	wr = (void *)&txq->q.desc[txq->q.pidx];
1258	wr->equiq_to_len16 = cpu_to_be32(wr_mid);
1259	wr->r3[0] = cpu_to_be32(0);
1260	wr->r3[1] = cpu_to_be32(0);
1261	skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len);
1262	end = (u64 *)wr + flits;
1263
1264	/*
1265	 * If this is a Large Send Offload packet we'll put in an LSO CPL
1266	 * message with an encapsulated TX Packet CPL message.  Otherwise we
1267	 * just use a TX Packet CPL message.
1268	 */
1269	ssi = skb_shinfo(skb);
1270	if (ssi->gso_size) {
1271		struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);
1272		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
1273		int l3hdr_len = skb_network_header_len(skb);
1274		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
1275
1276		wr->op_immdlen =
1277			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1278				    FW_WR_IMMDLEN_V(sizeof(*lso) +
1279						    sizeof(*cpl)));
1280		/*
1281		 * Fill in the LSO CPL message.
1282		 */
1283		lso->lso_ctrl =
1284			cpu_to_be32(LSO_OPCODE(CPL_TX_PKT_LSO) |
1285				    LSO_FIRST_SLICE |
1286				    LSO_LAST_SLICE |
1287				    LSO_IPV6(v6) |
1288				    LSO_ETHHDR_LEN(eth_xtra_len/4) |
1289				    LSO_IPHDR_LEN(l3hdr_len/4) |
1290				    LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
1291		lso->ipid_ofst = cpu_to_be16(0);
1292		lso->mss = cpu_to_be16(ssi->gso_size);
1293		lso->seqno_offset = cpu_to_be32(0);
1294		if (is_t4(adapter->params.chip))
1295			lso->len = cpu_to_be32(skb->len);
1296		else
1297			lso->len = cpu_to_be32(LSO_T5_XFER_SIZE(skb->len));
1298
1299		/*
1300		 * Set up TX Packet CPL pointer, control word and perform
1301		 * accounting.
1302		 */
1303		cpl = (void *)(lso + 1);
1304		cntrl = (TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
1305			 TXPKT_IPHDR_LEN(l3hdr_len) |
1306			 TXPKT_ETHHDR_LEN(eth_xtra_len));
1307		txq->tso++;
1308		txq->tx_cso += ssi->gso_segs;
1309	} else {
1310		int len;
1311
1312		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
1313		wr->op_immdlen =
1314			cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_PKT_VM_WR) |
1315				    FW_WR_IMMDLEN_V(len));
1316
1317		/*
1318		 * Set up TX Packet CPL pointer, control word and perform
1319		 * accounting.
1320		 */
1321		cpl = (void *)(wr + 1);
1322		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1323			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
1324			txq->tx_cso++;
1325		} else
1326			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
1327	}
1328
1329	/*
1330	 * If there's a VLAN tag present, add that to the list of things to
1331	 * do in this Work Request.
1332	 */
1333	if (skb_vlan_tag_present(skb)) {
1334		txq->vlan_ins++;
1335		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(skb_vlan_tag_get(skb));
1336	}
1337
1338	/*
1339	 * Fill in the TX Packet CPL message header.
1340	 */
1341	cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE(CPL_TX_PKT_XT) |
1342				 TXPKT_INTF(pi->port_id) |
1343				 TXPKT_PF(0));
1344	cpl->pack = cpu_to_be16(0);
1345	cpl->len = cpu_to_be16(skb->len);
1346	cpl->ctrl1 = cpu_to_be64(cntrl);
1347
1348#ifdef T4_TRACE
1349	T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
1350		  "eth_xmit: ndesc %u, credits %u, pidx %u, len %u, frags %u",
1351		  ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
1352#endif
1353
1354	/*
1355	 * Fill in the body of the TX Packet CPL message with either in-lined
1356	 * data or a Scatter/Gather List.
1357	 */
1358	if (is_eth_imm(skb)) {
1359		/*
1360		 * In-line the packet's data and free the skb since we don't
1361		 * need it any longer.
1362		 */
1363		inline_tx_skb(skb, &txq->q, cpl + 1);
1364		dev_consume_skb_any(skb);
1365	} else {
1366		/*
1367		 * Write the skb's Scatter/Gather list into the TX Packet CPL
1368		 * message and retain a pointer to the skb so we can free it
1369		 * later when its DMA completes.  (We store the skb pointer
1370		 * in the Software Descriptor corresponding to the last TX
1371		 * Descriptor used by the Work Request.)
1372		 *
1373		 * The retained skb will be freed when the corresponding TX
1374		 * Descriptors are reclaimed after their DMAs complete.
1375		 * However, this could take quite a while since, in general,
1376		 * the hardware is set up to be lazy about sending DMA
1377		 * completion notifications to us and we mostly perform TX
1378		 * reclaims in the transmit routine.
1379		 *
1380		 * This is good for performance but means that we rely on new
1381		 * TX packets arriving to run the destructors of completed
1382		 * packets, which open up space in their sockets' send queues.
1383		 * Sometimes we do not get such new packets causing TX to
1384		 * stall.  A single UDP transmitter is a good example of this
1385		 * situation.  We have a clean up timer that periodically
1386		 * reclaims completed packets but it doesn't run often enough
1387		 * (nor do we want it to) to prevent lengthy stalls.  A
1388		 * solution to this problem is to run the destructor early,
1389		 * after the packet is queued but before it's DMAd.  A con is
1390		 * that we lie to socket memory accounting, but the amount of
1391		 * extra memory is reasonable (limited by the number of TX
1392		 * descriptors), the packets do actually get freed quickly by
1393		 * new packets almost always, and for protocols like TCP that
1394		 * wait for acks to really free up the data the extra memory
1395		 * is even less.  On the positive side we run the destructors
1396		 * on the sending CPU rather than on a potentially different
1397		 * completing CPU, usually a good thing.
1398		 *
1399		 * Run the destructor before telling the DMA engine about the
1400		 * packet to make sure it doesn't complete and get freed
1401		 * prematurely.
1402		 */
1403		struct ulptx_sgl *sgl = (struct ulptx_sgl *)(cpl + 1);
1404		struct sge_txq *tq = &txq->q;
1405		int last_desc;
1406
1407		/*
1408		 * If the Work Request header was an exact multiple of our TX
1409		 * Descriptor length, then it's possible that the starting SGL
1410		 * pointer lines up exactly with the end of our TX Descriptor
1411		 * ring.  If that's the case, wrap around to the beginning
1412		 * here ...
1413		 */
1414		if (unlikely((void *)sgl == (void *)tq->stat)) {
1415			sgl = (void *)tq->desc;
1416			end = ((void *)tq->desc + ((void *)end - (void *)tq->stat));
1417		}
1418
1419		write_sgl(skb, tq, sgl, end, 0, addr);
1420		skb_orphan(skb);
1421
1422		last_desc = tq->pidx + ndesc - 1;
1423		if (last_desc >= tq->size)
1424			last_desc -= tq->size;
1425		tq->sdesc[last_desc].skb = skb;
1426		tq->sdesc[last_desc].sgl = sgl;
1427	}
1428
1429	/*
1430	 * Advance our internal TX Queue state, tell the hardware about
1431	 * the new TX descriptors and return success.
1432	 */
1433	txq_advance(&txq->q, ndesc);
1434	dev->trans_start = jiffies;
1435	ring_tx_db(adapter, &txq->q, ndesc);
1436	return NETDEV_TX_OK;
1437
1438out_free:
1439	/*
1440	 * An error of some sort happened.  Free the TX skb and tell the
1441	 * OS that we've "dealt" with the packet ...
1442	 */
1443	dev_kfree_skb_any(skb);
1444	return NETDEV_TX_OK;
1445}
1446
1447/**
1448 *	copy_frags - copy fragments from gather list into skb_shared_info
1449 *	@skb: destination skb
1450 *	@gl: source internal packet gather list
1451 *	@offset: packet start offset in first page
1452 *
1453 *	Copy an internal packet gather list into a Linux skb_shared_info
1454 *	structure.
1455 */
1456static inline void copy_frags(struct sk_buff *skb,
1457			      const struct pkt_gl *gl,
1458			      unsigned int offset)
1459{
1460	int i;
1461
1462	/* usually there's just one frag */
1463	__skb_fill_page_desc(skb, 0, gl->frags[0].page,
1464			     gl->frags[0].offset + offset,
1465			     gl->frags[0].size - offset);
1466	skb_shinfo(skb)->nr_frags = gl->nfrags;
1467	for (i = 1; i < gl->nfrags; i++)
1468		__skb_fill_page_desc(skb, i, gl->frags[i].page,
1469				     gl->frags[i].offset,
1470				     gl->frags[i].size);
1471
1472	/* get a reference to the last page, we don't own it */
1473	get_page(gl->frags[gl->nfrags - 1].page);
1474}
1475
1476/**
1477 *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
1478 *	@gl: the gather list
1479 *	@skb_len: size of sk_buff main body if it carries fragments
1480 *	@pull_len: amount of data to move to the sk_buff's main body
1481 *
1482 *	Builds an sk_buff from the given packet gather list.  Returns the
1483 *	sk_buff or %NULL if sk_buff allocation failed.
1484 */
1485static struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
1486					 unsigned int skb_len,
1487					 unsigned int pull_len)
1488{
1489	struct sk_buff *skb;
1490
1491	/*
1492	 * If the ingress packet is small enough, allocate an skb large enough
1493	 * for all of the data and copy it inline.  Otherwise, allocate an skb
1494	 * with enough room to pull in the header and reference the rest of
1495	 * the data via the skb fragment list.
1496	 *
1497	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
1498	 * buffer size, which is expected since buffers are at least
1499	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
1500	 * fragment.
1501	 */
1502	if (gl->tot_len <= RX_COPY_THRES) {
1503		/* small packets have only one fragment */
1504		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
1505		if (unlikely(!skb))
1506			goto out;
1507		__skb_put(skb, gl->tot_len);
1508		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
1509	} else {
1510		skb = alloc_skb(skb_len, GFP_ATOMIC);
1511		if (unlikely(!skb))
1512			goto out;
1513		__skb_put(skb, pull_len);
1514		skb_copy_to_linear_data(skb, gl->va, pull_len);
1515
1516		copy_frags(skb, gl, pull_len);
1517		skb->len = gl->tot_len;
1518		skb->data_len = skb->len - pull_len;
1519		skb->truesize += skb->data_len;
1520	}
1521
1522out:
1523	return skb;
1524}
1525
1526/**
1527 *	t4vf_pktgl_free - free a packet gather list
1528 *	@gl: the gather list
1529 *
1530 *	Releases the pages of a packet gather list.  We do not own the last
1531 *	page on the list and do not free it.
1532 */
1533static void t4vf_pktgl_free(const struct pkt_gl *gl)
1534{
1535	int frag;
1536
1537	frag = gl->nfrags - 1;
1538	while (frag--)
1539		put_page(gl->frags[frag].page);
1540}
1541
1542/**
1543 *	do_gro - perform Generic Receive Offload ingress packet processing
1544 *	@rxq: ingress RX Ethernet Queue
1545 *	@gl: gather list for ingress packet
1546 *	@pkt: CPL header for last packet fragment
1547 *
1548 *	Perform Generic Receive Offload (GRO) ingress packet processing.
1549 *	We use the standard Linux GRO interfaces for this.
1550 */
1551static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1552		   const struct cpl_rx_pkt *pkt)
1553{
1554	struct adapter *adapter = rxq->rspq.adapter;
1555	struct sge *s = &adapter->sge;
1556	int ret;
1557	struct sk_buff *skb;
1558
1559	skb = napi_get_frags(&rxq->rspq.napi);
1560	if (unlikely(!skb)) {
1561		t4vf_pktgl_free(gl);
1562		rxq->stats.rx_drops++;
1563		return;
1564	}
1565
1566	copy_frags(skb, gl, s->pktshift);
1567	skb->len = gl->tot_len - s->pktshift;
1568	skb->data_len = skb->len;
1569	skb->truesize += skb->data_len;
1570	skb->ip_summed = CHECKSUM_UNNECESSARY;
1571	skb_record_rx_queue(skb, rxq->rspq.idx);
1572
1573	if (pkt->vlan_ex) {
1574		__vlan_hwaccel_put_tag(skb, cpu_to_be16(ETH_P_8021Q),
1575					be16_to_cpu(pkt->vlan));
1576		rxq->stats.vlan_ex++;
1577	}
1578	ret = napi_gro_frags(&rxq->rspq.napi);
1579
1580	if (ret == GRO_HELD)
1581		rxq->stats.lro_pkts++;
1582	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
1583		rxq->stats.lro_merged++;
1584	rxq->stats.pkts++;
1585	rxq->stats.rx_cso++;
1586}
1587
1588/**
1589 *	t4vf_ethrx_handler - process an ingress ethernet packet
1590 *	@rspq: the response queue that received the packet
1591 *	@rsp: the response queue descriptor holding the RX_PKT message
1592 *	@gl: the gather list of packet fragments
1593 *
1594 *	Process an ingress ethernet packet and deliver it to the stack.
1595 */
1596int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1597		       const struct pkt_gl *gl)
1598{
1599	struct sk_buff *skb;
1600	const struct cpl_rx_pkt *pkt = (void *)rsp;
1601	bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
1602		       (rspq->netdev->features & NETIF_F_RXCSUM);
1603	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1604	struct adapter *adapter = rspq->adapter;
1605	struct sge *s = &adapter->sge;
1606
1607	/*
1608	 * If this is a good TCP packet and we have Generic Receive Offload
1609	 * enabled, handle the packet in the GRO path.
1610	 */
1611	if ((pkt->l2info & cpu_to_be32(RXF_TCP_F)) &&
1612	    (rspq->netdev->features & NETIF_F_GRO) && csum_ok &&
1613	    !pkt->ip_frag) {
1614		do_gro(rxq, gl, pkt);
1615		return 0;
1616	}
1617
1618	/*
1619	 * Convert the Packet Gather List into an skb.
1620	 */
1621	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
1622	if (unlikely(!skb)) {
1623		t4vf_pktgl_free(gl);
1624		rxq->stats.rx_drops++;
1625		return 0;
1626	}
1627	__skb_pull(skb, s->pktshift);
1628	skb->protocol = eth_type_trans(skb, rspq->netdev);
1629	skb_record_rx_queue(skb, rspq->idx);
1630	rxq->stats.pkts++;
1631
1632	if (csum_ok && !pkt->err_vec &&
1633	    (be32_to_cpu(pkt->l2info) & (RXF_UDP_F | RXF_TCP_F))) {
1634		if (!pkt->ip_frag)
1635			skb->ip_summed = CHECKSUM_UNNECESSARY;
1636		else {
1637			__sum16 c = (__force __sum16)pkt->csum;
1638			skb->csum = csum_unfold(c);
1639			skb->ip_summed = CHECKSUM_COMPLETE;
1640		}
1641		rxq->stats.rx_cso++;
1642	} else
1643		skb_checksum_none_assert(skb);
1644
1645	if (pkt->vlan_ex) {
1646		rxq->stats.vlan_ex++;
1647		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), be16_to_cpu(pkt->vlan));
1648	}
1649
1650	netif_receive_skb(skb);
1651
1652	return 0;
1653}
1654
1655/**
1656 *	is_new_response - check if a response is newly written
1657 *	@rc: the response control descriptor
1658 *	@rspq: the response queue
1659 *
1660 *	Returns true if a response descriptor contains a yet unprocessed
1661 *	response.
1662 */
1663static inline bool is_new_response(const struct rsp_ctrl *rc,
1664				   const struct sge_rspq *rspq)
1665{
1666	return RSPD_GEN(rc->type_gen) == rspq->gen;
1667}
1668
1669/**
1670 *	restore_rx_bufs - put back a packet's RX buffers
1671 *	@gl: the packet gather list
1672 *	@fl: the SGE Free List
1673 *	@frags: how many fragments in @gl
1674 *
1675 *	Called when we find out that the current packet, @gl, can't be
1676 *	processed right away for some reason.  This is a very rare event and
1677 *	there's no effort to make this suspension/resumption process
1678 *	particularly efficient.
1679 *
1680 *	We implement the suspension by putting all of the RX buffers associated
1681 *	with the current packet back on the original Free List.  The buffers
1682 *	have already been unmapped and are left unmapped; we mark them as
1683 *	unmapped (RX_UNMAPPED_BUF) so that no further unmapping is attempted.
1684 *	(Effectively this function undoes the series of unmap_rx_buf() calls done
1685 *	to create the current packet's gather list.)  This leaves us ready to
1686 *	restart processing of the packet the next time we start processing the
1687 *	RX Queue ...
1688 */
1689static void restore_rx_bufs(const struct pkt_gl *gl, struct sge_fl *fl,
1690			    int frags)
1691{
1692	struct rx_sw_desc *sdesc;
1693
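	/*
	 * Walk the Free List consumer index backwards one entry per fragment,
	 * re-attaching each page and marking the buffer as unmapped
	 * (RX_UNMAPPED_BUF) so we never try to DMA-unmap it a second time.
	 */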
1694	while (frags--) {
1695		if (fl->cidx == 0)
1696			fl->cidx = fl->size - 1;
1697		else
1698			fl->cidx--;
1699		sdesc = &fl->sdesc[fl->cidx];
1700		sdesc->page = gl->frags[frags].page;
1701		sdesc->dma_addr |= RX_UNMAPPED_BUF;
1702		fl->avail++;
1703	}
1704}
1705
1706/**
1707 *	rspq_next - advance to the next entry in a response queue
1708 *	@rspq: the queue
1709 *
1710 *	Updates the state of a response queue to advance it to the next entry.
1711 */
1712static inline void rspq_next(struct sge_rspq *rspq)
1713{
1714	rspq->cur_desc = (void *)rspq->cur_desc + rspq->iqe_len;
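	/*
	 * When we wrap past the end of the ring, flip the generation bit;
	 * is_new_response() compares a descriptor's generation against
	 * rspq->gen to distinguish entries written on this pass through the
	 * ring from stale ones left over from the previous pass.
	 */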
1715	if (unlikely(++rspq->cidx == rspq->size)) {
1716		rspq->cidx = 0;
1717		rspq->gen ^= 1;
1718		rspq->cur_desc = rspq->desc;
1719	}
1720}
1721
1722/**
1723 *	process_responses - process responses from an SGE response queue
1724 *	@rspq: the ingress response queue to process
1725 *	@budget: how many responses can be processed in this round
1726 *
1727 *	Process responses from a Scatter Gather Engine response queue up to
1728 *	the supplied budget.  Responses include received packets as well as
1729 *	control messages from firmware or hardware.
1730 *
1731 *	Additionally choose the interrupt holdoff time for the next interrupt
1732 *	on this queue.  If the system is experiencing a memory shortage, use a
1733 *	fairly long delay to help recovery.
1734 */
1735static int process_responses(struct sge_rspq *rspq, int budget)
1736{
1737	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1738	struct adapter *adapter = rspq->adapter;
1739	struct sge *s = &adapter->sge;
1740	int budget_left = budget;
1741
1742	while (likely(budget_left)) {
1743		int ret, rsp_type;
1744		const struct rsp_ctrl *rc;
1745
1746		rc = (void *)rspq->cur_desc + (rspq->iqe_len - sizeof(*rc));
1747		if (!is_new_response(rc, rspq))
1748			break;
1749
1750		/*
1751		 * Figure out what kind of response we've received from the
1752		 * SGE.
1753		 */
1754		dma_rmb();
1755		rsp_type = RSPD_TYPE(rc->type_gen);
1756		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
1757			struct page_frag *fp;
1758			struct pkt_gl gl;
1759			const struct rx_sw_desc *sdesc;
1760			u32 bufsz, frag;
1761			u32 len = be32_to_cpu(rc->pldbuflen_qid);
1762
1763			/*
1764			 * If we get a "new buffer" message from the SGE we
1765			 * need to move on to the next Free List buffer.
1766			 */
1767			if (len & RSPD_NEWBUF) {
1768				/*
1769				 * We get one "new buffer" message when we
1770				 * first start up a queue so we need to ignore
1771				 * it when our offset into the buffer is 0.
1772				 */
1773				if (likely(rspq->offset > 0)) {
1774					free_rx_bufs(rspq->adapter, &rxq->fl,
1775						     1);
1776					rspq->offset = 0;
1777				}
1778				len = RSPD_LEN(len);
1779			}
1780			gl.tot_len = len;
1781
1782			/*
1783			 * Gather packet fragments.
1784			 */
1785			for (frag = 0, fp = gl.frags; /**/; frag++, fp++) {
1786				BUG_ON(frag >= MAX_SKB_FRAGS);
1787				BUG_ON(rxq->fl.avail == 0);
1788				sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
1789				bufsz = get_buf_size(adapter, sdesc);
1790				fp->page = sdesc->page;
1791				fp->offset = rspq->offset;
1792				fp->size = min(bufsz, len);
1793				len -= fp->size;
1794				if (!len)
1795					break;
1796				unmap_rx_buf(rspq->adapter, &rxq->fl);
1797			}
1798			gl.nfrags = frag+1;
1799
1800			/*
1801			 * Last buffer remains mapped so explicitly make it
1802			 * coherent for CPU access and start preloading first
1803			 * cache line ...
1804			 */
1805			dma_sync_single_for_cpu(rspq->adapter->pdev_dev,
1806						get_buf_addr(sdesc),
1807						fp->size, DMA_FROM_DEVICE);
1808			gl.va = (page_address(gl.frags[0].page) +
1809				 gl.frags[0].offset);
1810			prefetch(gl.va);
1811
1812			/*
1813			 * Hand the new ingress packet to the handler for
1814			 * this Response Queue.
1815			 */
1816			ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1817			if (likely(ret == 0))
1818				rspq->offset += ALIGN(fp->size, s->fl_align);
1819			else
1820				restore_rx_bufs(&gl, &rxq->fl, frag);
1821		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
1822			ret = rspq->handler(rspq, rspq->cur_desc, NULL);
1823		} else {
1824			WARN_ON(rsp_type > RSP_TYPE_CPL);
1825			ret = 0;
1826		}
1827
1828		if (unlikely(ret)) {
1829			/*
1830			 * Couldn't process descriptor, back off for recovery.
1831			 * We use the SGE's last timer which has the longest
1832			 * interrupt coalescing value ...
1833			 */
1834			const int NOMEM_TIMER_IDX = SGE_NTIMERS-1;
1835			rspq->next_intr_params =
1836				QINTR_TIMER_IDX(NOMEM_TIMER_IDX);
1837			break;
1838		}
1839
1840		rspq_next(rspq);
1841		budget_left--;
1842	}
1843
1844	/*
1845	 * If this is a Response Queue with an associated Free List and
1846	 * at least two Egress Queue units available in the Free List
1847	 * for new buffer pointers, refill the Free List.
1848	 */
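	/*
	 * (For example, assuming the typical 64-byte Egress Queue unit and
	 * 8-byte Free List pointers, FL_PER_EQ_UNIT is 8, so we refill once
	 * at least 16 Free List slots are empty.)
	 */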
1849	if (rspq->offset >= 0 &&
1850	    rxq->fl.size - rxq->fl.avail >= 2*FL_PER_EQ_UNIT)
1851		__refill_fl(rspq->adapter, &rxq->fl);
1852	return budget - budget_left;
1853}
1854
1855/**
1856 *	napi_rx_handler - the NAPI handler for RX processing
1857 *	@napi: the napi instance
1858 *	@budget: how many packets we can process in this round
1859 *
1860 *	Handler for new data events when using NAPI.  This does not need any
1861 *	locking or protection from interrupts as data interrupts are off at
1862 *	this point and other adapter interrupts do not interfere (the latter
1863 *	is not a concern at all with MSI-X as non-data interrupts then have
1864 *	a separate handler).
1865 */
1866static int napi_rx_handler(struct napi_struct *napi, int budget)
1867{
1868	unsigned int intr_params;
1869	struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);
1870	int work_done = process_responses(rspq, budget);
1871	u32 val;
1872
1873	if (likely(work_done < budget)) {
1874		napi_complete(napi);
1875		intr_params = rspq->next_intr_params;
1876		rspq->next_intr_params = rspq->intr_params;
1877	} else
1878		intr_params = QINTR_TIMER_IDX(SGE_TIMER_UPD_CIDX);
1879
1880	if (unlikely(work_done == 0))
1881		rspq->unhandled_irqs++;
1882
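	/*
	 * Acknowledge the entries we've processed and re-arm the interrupt:
	 * CIDXINC tells the SGE how far our consumer index has advanced and
	 * SEINTARM selects the holdoff timer for the next interrupt.  On T4
	 * we write the VF GTS register directly; on later chips we use the
	 * BAR2 GTS doorbell.
	 */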
1883	val = CIDXINC_V(work_done) | SEINTARM_V(intr_params);
1884	if (is_t4(rspq->adapter->params.chip)) {
1885		t4_write_reg(rspq->adapter,
1886			     T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1887			     val | INGRESSQID_V((u32)rspq->cntxt_id));
1888	} else {
1889		writel(val | INGRESSQID_V(rspq->bar2_qid),
1890		       rspq->bar2_addr + SGE_UDB_GTS);
1891		wmb();
1892	}
1893	return work_done;
1894}
1895
1896/*
1897 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
1898 * (i.e., response queue serviced by NAPI polling).
1899 */
1900irqreturn_t t4vf_sge_intr_msix(int irq, void *cookie)
1901{
1902	struct sge_rspq *rspq = cookie;
1903
1904	napi_schedule(&rspq->napi);
1905	return IRQ_HANDLED;
1906}
1907
1908/*
1909 * Process the indirect interrupt entries in the interrupt queue and kick off
1910 * NAPI for each queue that has generated an entry.
1911 */
1912static unsigned int process_intrq(struct adapter *adapter)
1913{
1914	struct sge *s = &adapter->sge;
1915	struct sge_rspq *intrq = &s->intrq;
1916	unsigned int work_done;
1917	u32 val;
1918
1919	spin_lock(&adapter->sge.intrq_lock);
1920	for (work_done = 0; ; work_done++) {
1921		const struct rsp_ctrl *rc;
1922		unsigned int qid, iq_idx;
1923		struct sge_rspq *rspq;
1924
1925		/*
1926		 * Grab the next response from the interrupt queue and bail
1927		 * out if it's not a new response.
1928		 */
1929		rc = (void *)intrq->cur_desc + (intrq->iqe_len - sizeof(*rc));
1930		if (!is_new_response(rc, intrq))
1931			break;
1932
1933		/*
1934		 * If the response isn't a forwarded interrupt message, issue an
1935		 * error and go on to the next response message.  This should
1936		 * never happen ...
1937		 */
1938		dma_rmb();
1939		if (unlikely(RSPD_TYPE(rc->type_gen) != RSP_TYPE_INTR)) {
1940			dev_err(adapter->pdev_dev,
1941				"Unexpected INTRQ response type %d\n",
1942				RSPD_TYPE(rc->type_gen));
1943			continue;
1944		}
1945
1946		/*
1947		 * Extract the Queue ID from the interrupt message and perform
1948		 * sanity checking to make sure it really refers to one of our
1949		 * Ingress Queues which is active and matches the queue's ID.
1950		 * None of these error conditions should ever happen so we may
1951		 * want to make them fatal and/or compile the checks in only under
1952		 * DEBUG.
1953		 */
1954		qid = RSPD_QID(be32_to_cpu(rc->pldbuflen_qid));
1955		iq_idx = IQ_IDX(s, qid);
1956		if (unlikely(iq_idx >= MAX_INGQ)) {
1957			dev_err(adapter->pdev_dev,
1958				"Ingress QID %d out of range\n", qid);
1959			continue;
1960		}
1961		rspq = s->ingr_map[iq_idx];
1962		if (unlikely(rspq == NULL)) {
1963			dev_err(adapter->pdev_dev,
1964				"Ingress QID %d RSPQ=NULL\n", qid);
1965			continue;
1966		}
1967		if (unlikely(rspq->abs_id != qid)) {
1968			dev_err(adapter->pdev_dev,
1969				"Ingress QID %d refers to RSPQ %d\n",
1970				qid, rspq->abs_id);
1971			continue;
1972		}
1973
1974		/*
1975		 * Schedule NAPI processing on the indicated Response Queue
1976		 * and move on to the next entry in the Forwarded Interrupt
1977		 * Queue.
1978		 */
1979		napi_schedule(&rspq->napi);
1980		rspq_next(intrq);
1981	}
1982
1983	val = CIDXINC_V(work_done) | SEINTARM_V(intrq->intr_params);
1984	if (is_t4(adapter->params.chip))
1985		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
1986			     val | INGRESSQID_V(intrq->cntxt_id));
1987	else {
1988		writel(val | INGRESSQID_V(intrq->bar2_qid),
1989		       intrq->bar2_addr + SGE_UDB_GTS);
1990		wmb();
1991	}
1992
1993	spin_unlock(&adapter->sge.intrq_lock);
1994
1995	return work_done;
1996}
1997
1998/*
1999 * The MSI interrupt handler handles data events from SGE response queues as
2000 * well as error and other async events as they all use the same MSI vector.
2001 */
2002static irqreturn_t t4vf_intr_msi(int irq, void *cookie)
2003{
2004	struct adapter *adapter = cookie;
2005
2006	process_intrq(adapter);
2007	return IRQ_HANDLED;
2008}
2009
2010/**
2011 *	t4vf_intr_handler - select the top-level interrupt handler
2012 *	@adapter: the adapter
2013 *
2014 *	Selects the top-level interrupt handler based on the type of interrupts
2015 *	(MSI-X or MSI).
2016 */
2017irq_handler_t t4vf_intr_handler(struct adapter *adapter)
2018{
2019	BUG_ON((adapter->flags & (USING_MSIX|USING_MSI)) == 0);
2020	if (adapter->flags & USING_MSIX)
2021		return t4vf_sge_intr_msix;
2022	else
2023		return t4vf_intr_msi;
2024}
2025
2026/**
2027 *	sge_rx_timer_cb - perform periodic maintenance of SGE RX queues
2028 *	@data: the adapter
2029 *
2030 *	Runs periodically from a timer to perform maintenance of SGE RX queues.
2031 *
2032 *	Replenishes RX queues that have run out due to memory shortage.
2033 *	Normally new RX buffers are added when existing ones are consumed but
2034 *	when out of memory a queue can become empty.  We schedule NAPI to do
2035 *	the actual refill.
2036 */
2037static void sge_rx_timer_cb(unsigned long data)
2038{
2039	struct adapter *adapter = (struct adapter *)data;
2040	struct sge *s = &adapter->sge;
2041	unsigned int i;
2042
2043	/*
2044	 * Scan the "Starving Free Lists" flag array looking for any Free
2045	 * Lists in need of more free buffers.  If we find one and it's not
2046	 * being actively polled, then bump its "starving" counter and attempt
2047	 * to refill it.  If we're successful in adding enough buffers to push
2048	 * the Free List over the starving threshold, then we can clear its
2049	 * "starving" status.
2050	 */
2051	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++) {
2052		unsigned long m;
2053
2054		for (m = s->starving_fl[i]; m; m &= m - 1) {
2055			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
2056			struct sge_fl *fl = s->egr_map[id];
2057
2058			clear_bit(id, s->starving_fl);
2059			smp_mb__after_atomic();
2060
2061			/*
2062			 * Since we are accessing fl without a lock there's a
2063			 * small probability of a false positive where we
2064			 * schedule napi but the FL is no longer starving.
2065			 * No biggie.
2066			 */
2067			if (fl_starving(adapter, fl)) {
2068				struct sge_eth_rxq *rxq;
2069
2070				rxq = container_of(fl, struct sge_eth_rxq, fl);
2071				if (napi_reschedule(&rxq->rspq.napi))
2072					fl->starving++;
2073				else
2074					set_bit(id, s->starving_fl);
2075			}
2076		}
2077	}
2078
2079	/*
2080	 * Reschedule the next scan for starving Free Lists ...
2081	 */
2082	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
2083}
2084
2085/**
2086 *	sge_tx_timer_cb - perform periodic maintenance of SGE Tx queues
2087 *	@data: the adapter
2088 *
2089 *	Runs periodically from a timer to perform maintenance of SGE TX queues.
2090 *
2091 *	Reclaims completed Tx packets for the Ethernet queues.  Normally
2092 *	packets are cleaned up by new Tx packets; this timer cleans up packets
2093 *	when no new packets are being submitted.  This is essential for pktgen,
2094 *	at least.
2095 */
2096static void sge_tx_timer_cb(unsigned long data)
2097{
2098	struct adapter *adapter = (struct adapter *)data;
2099	struct sge *s = &adapter->sge;
2100	unsigned int i, budget;
2101
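	/*
	 * Spread the reclaim budget over the Ethernet TX queues round-robin,
	 * starting from wherever the previous timer run left off so that no
	 * single queue monopolizes the work.
	 */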
2102	budget = MAX_TIMER_TX_RECLAIM;
2103	i = s->ethtxq_rover;
2104	do {
2105		struct sge_eth_txq *txq = &s->ethtxq[i];
2106
2107		if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
2108			int avail = reclaimable(&txq->q);
2109
2110			if (avail > budget)
2111				avail = budget;
2112
2113			free_tx_desc(adapter, &txq->q, avail, true);
2114			txq->q.in_use -= avail;
2115			__netif_tx_unlock(txq->txq);
2116
2117			budget -= avail;
2118			if (!budget)
2119				break;
2120		}
2121
2122		i++;
2123		if (i >= s->ethqsets)
2124			i = 0;
2125	} while (i != s->ethtxq_rover);
2126	s->ethtxq_rover = i;
2127
2128	/*
2129	 * If we found too many reclaimable packets schedule a timer in the
2130	 * near future to continue where we left off.  Otherwise the next timer
2131	 * will be at its normal interval.
2132	 */
2133	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
2134}
2135
2136/**
2137 *	bar2_address - return the BAR2 address for an SGE Queue's Registers
2138 *	@adapter: the adapter
2139 *	@qid: the SGE Queue ID
2140 *	@qtype: the SGE Queue Type (Egress or Ingress)
2141 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
2142 *
2143 *	Returns the BAR2 address for the SGE Queue Registers associated with
2144 *	@qid.  If BAR2 SGE Registers aren't available, returns NULL.  Also
2145 *	returns the BAR2 Queue ID to be used with writes to the BAR2 SGE
2146 *	Queue Registers.  If the BAR2 Queue ID is 0, then "Inferred Queue ID"
2147 *	Registers are supported (e.g. the Write Combining Doorbell Buffer).
2148 */
2149static void __iomem *bar2_address(struct adapter *adapter,
2150				  unsigned int qid,
2151				  enum t4_bar2_qtype qtype,
2152				  unsigned int *pbar2_qid)
2153{
2154	u64 bar2_qoffset;
2155	int ret;
2156
2157	ret = t4_bar2_sge_qregs(adapter, qid, qtype,
2158				&bar2_qoffset, pbar2_qid);
2159	if (ret)
2160		return NULL;
2161
2162	return adapter->bar2 + bar2_qoffset;
2163}
2164
2165/**
2166 *	t4vf_sge_alloc_rxq - allocate an SGE RX Queue
2167 *	@adapter: the adapter
2168 *	@rspq: pointer to the new rxq's Response Queue to be filled in
2169 *	@iqasynch: if 0, a normal rspq; if 1, an asynchronous event queue
2170 *	@dev: the network device associated with the new rspq
2171 *	@intr_dest: MSI-X vector index (overridden in MSI mode)
2172 *	@fl: pointer to the new rxq's Free List to be filled in
2173 *	@hnd: the interrupt handler to invoke for the rspq
2174 */
2175int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2176		       bool iqasynch, struct net_device *dev,
2177		       int intr_dest,
2178		       struct sge_fl *fl, rspq_handler_t hnd)
2179{
2180	struct sge *s = &adapter->sge;
2181	struct port_info *pi = netdev_priv(dev);
2182	struct fw_iq_cmd cmd, rpl;
2183	int ret, iqandst, flsz = 0;
2184
2185	/*
2186	 * If we're using MSI interrupts and we're not initializing the
2187	 * Forwarded Interrupt Queue itself, then set up this queue for
2188	 * indirect interrupts to the Forwarded Interrupt Queue.  Obviously
2189	 * the Forwarded Interrupt Queue must be set up before any other
2190	 * ingress queue ...
2191	 */
2192	if ((adapter->flags & USING_MSI) && rspq != &adapter->sge.intrq) {
2193		iqandst = SGE_INTRDST_IQ;
2194		intr_dest = adapter->sge.intrq.abs_id;
2195	} else
2196		iqandst = SGE_INTRDST_PCI;
2197
2198	/*
2199	 * Allocate the hardware ring for the Response Queue.  The size needs
2200	 * to be a multiple of 16 which includes the mandatory status entry
2201	 * (regardless of whether the Status Page capabilities are enabled or
2202	 * not).
2203	 */
2204	rspq->size = roundup(rspq->size, 16);
2205	rspq->desc = alloc_ring(adapter->pdev_dev, rspq->size, rspq->iqe_len,
2206				0, &rspq->phys_addr, NULL, 0);
2207	if (!rspq->desc)
2208		return -ENOMEM;
2209
2210	/*
2211	 * Fill in the Ingress Queue Command.  Note: Ideally this code would
2212	 * be in t4vf_hw.c but there are so many parameters and dependencies
2213	 * on our Linux SGE state that we would end up having to pass tons of
2214	 * parameters.  We'll have to think about how this might be migrated
2215	 * into OS-independent common code ...
2216	 */
2217	memset(&cmd, 0, sizeof(cmd));
2218	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) |
2219				    FW_CMD_REQUEST_F |
2220				    FW_CMD_WRITE_F |
2221				    FW_CMD_EXEC_F);
2222	cmd.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_ALLOC_F |
2223					 FW_IQ_CMD_IQSTART_F |
2224					 FW_LEN16(cmd));
2225	cmd.type_to_iqandstindex =
2226		cpu_to_be32(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
2227			    FW_IQ_CMD_IQASYNCH_V(iqasynch) |
2228			    FW_IQ_CMD_VIID_V(pi->viid) |
2229			    FW_IQ_CMD_IQANDST_V(iqandst) |
2230			    FW_IQ_CMD_IQANUS_V(1) |
2231			    FW_IQ_CMD_IQANUD_V(SGE_UPDATEDEL_INTR) |
2232			    FW_IQ_CMD_IQANDSTINDEX_V(intr_dest));
2233	cmd.iqdroprss_to_iqesize =
2234		cpu_to_be16(FW_IQ_CMD_IQPCIECH_V(pi->port_id) |
2235			    FW_IQ_CMD_IQGTSMODE_F |
2236			    FW_IQ_CMD_IQINTCNTTHRESH_V(rspq->pktcnt_idx) |
2237			    FW_IQ_CMD_IQESIZE_V(ilog2(rspq->iqe_len) - 4));
2238	cmd.iqsize = cpu_to_be16(rspq->size);
2239	cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
2240
2241	if (fl) {
2242		/*
2243		 * Allocate the ring for the hardware free list (with space
2244		 * for its status page) along with the associated software
2245		 * descriptor ring.  The free list size needs to be a multiple
2246		 * of the Egress Queue Unit.
2247		 */
2248		fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
2249		fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
2250				      sizeof(__be64), sizeof(struct rx_sw_desc),
2251				      &fl->addr, &fl->sdesc, s->stat_len);
2252		if (!fl->desc) {
2253			ret = -ENOMEM;
2254			goto err;
2255		}
2256
2257		/*
2258		 * Calculate the size of the hardware free list ring plus
2259		 * Status Page (which the SGE will place after the end of the
2260		 * free list ring) in Egress Queue Units.
2261		 */
2262		flsz = (fl->size / FL_PER_EQ_UNIT +
2263			s->stat_len / EQ_UNIT);
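		/*
		 * (Hypothetical example: with a 64-byte EQ unit, 8 Free List
		 * pointers per unit and a 64-byte Status Page, a 4096-entry
		 * free list would give flsz = 4096/8 + 64/64 = 513 units.)
		 */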
2264
2265		/*
2266		 * Fill in all the relevant firmware Ingress Queue Command
2267		 * fields for the free list.
2268		 */
2269		cmd.iqns_to_fl0congen =
2270			cpu_to_be32(
2271				FW_IQ_CMD_FL0HOSTFCMODE_V(SGE_HOSTFCMODE_NONE) |
2272				FW_IQ_CMD_FL0PACKEN_F |
2273				FW_IQ_CMD_FL0PADEN_F);
2274		cmd.fl0dcaen_to_fl0cidxfthresh =
2275			cpu_to_be16(
2276				FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
2277				FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B));
2278		cmd.fl0size = cpu_to_be16(flsz);
2279		cmd.fl0addr = cpu_to_be64(fl->addr);
2280	}
2281
2282	/*
2283	 * Issue the firmware Ingress Queue Command and extract the results if
2284	 * it completes successfully.
2285	 */
2286	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2287	if (ret)
2288		goto err;
2289
2290	netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
2291	rspq->cur_desc = rspq->desc;
2292	rspq->cidx = 0;
2293	rspq->gen = 1;
2294	rspq->next_intr_params = rspq->intr_params;
2295	rspq->cntxt_id = be16_to_cpu(rpl.iqid);
2296	rspq->bar2_addr = bar2_address(adapter,
2297				       rspq->cntxt_id,
2298				       T4_BAR2_QTYPE_INGRESS,
2299				       &rspq->bar2_qid);
2300	rspq->abs_id = be16_to_cpu(rpl.physiqid);
2301	rspq->size--;			/* subtract status entry */
2302	rspq->adapter = adapter;
2303	rspq->netdev = dev;
2304	rspq->handler = hnd;
2305
2306	/* set offset to -1 to distinguish ingress queues without FL */
2307	rspq->offset = fl ? 0 : -1;
2308
2309	if (fl) {
2310		fl->cntxt_id = be16_to_cpu(rpl.fl0id);
2311		fl->avail = 0;
2312		fl->pend_cred = 0;
2313		fl->pidx = 0;
2314		fl->cidx = 0;
2315		fl->alloc_failed = 0;
2316		fl->large_alloc_failed = 0;
2317		fl->starving = 0;
2318
2319		/* Note, we must initialize the BAR2 Free List User Doorbell
2320		 * information before refilling the Free List!
2321		 */
2322		fl->bar2_addr = bar2_address(adapter,
2323					     fl->cntxt_id,
2324					     T4_BAR2_QTYPE_EGRESS,
2325					     &fl->bar2_qid);
2326
2327		refill_fl(adapter, fl, fl_cap(fl), GFP_KERNEL);
2328	}
2329
2330	return 0;
2331
2332err:
2333	/*
2334	 * An error occurred.  Clean up our partial allocation state and
2335	 * return the error.
2336	 */
2337	if (rspq->desc) {
2338		dma_free_coherent(adapter->pdev_dev, rspq->size * rspq->iqe_len,
2339				  rspq->desc, rspq->phys_addr);
2340		rspq->desc = NULL;
2341	}
2342	if (fl && fl->desc) {
2343		kfree(fl->sdesc);
2344		fl->sdesc = NULL;
2345		dma_free_coherent(adapter->pdev_dev, flsz * EQ_UNIT,
2346				  fl->desc, fl->addr);
2347		fl->desc = NULL;
2348	}
2349	return ret;
2350}
2351
2352/**
2353 *	t4vf_sge_alloc_eth_txq - allocate an SGE Ethernet TX Queue
2354 *	@adapter: the adapter
2355 *	@txq: pointer to the new txq to be filled in
2356 *	@devq: the network TX queue associated with the new txq
2357 *	@iqid: the relative ingress queue ID to which events relating to
2358 *		the new txq should be directed
2359 */
2360int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2361			   struct net_device *dev, struct netdev_queue *devq,
2362			   unsigned int iqid)
2363{
2364	struct sge *s = &adapter->sge;
2365	int ret, nentries;
2366	struct fw_eq_eth_cmd cmd, rpl;
2367	struct port_info *pi = netdev_priv(dev);
2368
2369	/*
2370	 * Calculate the size of the hardware TX Queue (including the Status
2371	 * Page on the end of the TX Queue) in units of TX Descriptors.
2372	 */
2373	nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
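	/*
	 * (For example, assuming 64-byte TX descriptors and a 64-byte Status
	 * Page, a 1024-descriptor queue would give nentries = 1024 + 1.)
	 */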
2374
2375	/*
2376	 * Allocate the hardware ring for the TX ring (with space for its
2377	 * status page) along with the associated software descriptor ring.
2378	 */
2379	txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
2380				 sizeof(struct tx_desc),
2381				 sizeof(struct tx_sw_desc),
2382				 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
2383	if (!txq->q.desc)
2384		return -ENOMEM;
2385
2386	/*
2387	 * Fill in the Egress Queue Command.  Note: As with the direct use of
2388	 * the firmware Ingress Queue Command above in our RXQ allocation
2389	 * routine, ideally, this code would be in t4vf_hw.c.  Again, we'll
2390	 * have to see if there's some reasonable way to parameterize it
2391	 * into the common code ...
2392	 */
2393	memset(&cmd, 0, sizeof(cmd));
2394	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
2395				    FW_CMD_REQUEST_F |
2396				    FW_CMD_WRITE_F |
2397				    FW_CMD_EXEC_F);
2398	cmd.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_ALLOC_F |
2399					 FW_EQ_ETH_CMD_EQSTART_F |
2400					 FW_LEN16(cmd));
2401	cmd.viid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
2402				   FW_EQ_ETH_CMD_VIID_V(pi->viid));
2403	cmd.fetchszm_to_iqid =
2404		cpu_to_be32(FW_EQ_ETH_CMD_HOSTFCMODE_V(SGE_HOSTFCMODE_STPG) |
2405			    FW_EQ_ETH_CMD_PCIECHN_V(pi->port_id) |
2406			    FW_EQ_ETH_CMD_IQID_V(iqid));
2407	cmd.dcaen_to_eqsize =
2408		cpu_to_be32(FW_EQ_ETH_CMD_FBMIN_V(SGE_FETCHBURSTMIN_64B) |
2409			    FW_EQ_ETH_CMD_FBMAX_V(SGE_FETCHBURSTMAX_512B) |
2410			    FW_EQ_ETH_CMD_CIDXFTHRESH_V(
2411						SGE_CIDXFLUSHTHRESH_32) |
2412			    FW_EQ_ETH_CMD_EQSIZE_V(nentries));
2413	cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
2414
2415	/*
2416	 * Issue the firmware Egress Queue Command and extract the results if
2417	 * it completes successfully.
2418	 */
2419	ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &rpl);
2420	if (ret) {
2421		/*
2422		 * The firmware Egress Queue Command failed for some reason.
2423		 * Free up our partial allocation state and return the error.
2424		 */
2425		kfree(txq->q.sdesc);
2426		txq->q.sdesc = NULL;
2427		dma_free_coherent(adapter->pdev_dev,
2428				  nentries * sizeof(struct tx_desc),
2429				  txq->q.desc, txq->q.phys_addr);
2430		txq->q.desc = NULL;
2431		return ret;
2432	}
2433
2434	txq->q.in_use = 0;
2435	txq->q.cidx = 0;
2436	txq->q.pidx = 0;
2437	txq->q.stat = (void *)&txq->q.desc[txq->q.size];
2438	txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
2439	txq->q.bar2_addr = bar2_address(adapter,
2440					txq->q.cntxt_id,
2441					T4_BAR2_QTYPE_EGRESS,
2442					&txq->q.bar2_qid);
2443	txq->q.abs_id =
2444		FW_EQ_ETH_CMD_PHYSEQID_G(be32_to_cpu(rpl.physeqid_pkd));
2445	txq->txq = devq;
2446	txq->tso = 0;
2447	txq->tx_cso = 0;
2448	txq->vlan_ins = 0;
2449	txq->q.stops = 0;
2450	txq->q.restarts = 0;
2451	txq->mapping_err = 0;
2452	return 0;
2453}
2454
2455/*
2456 * Free the DMA map resources associated with a TX queue.
2457 */
2458static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2459{
2460	struct sge *s = &adapter->sge;
2461
2462	dma_free_coherent(adapter->pdev_dev,
2463			  tq->size * sizeof(*tq->desc) + s->stat_len,
2464			  tq->desc, tq->phys_addr);
2465	tq->cntxt_id = 0;
2466	tq->sdesc = NULL;
2467	tq->desc = NULL;
2468}
2469
2470/*
2471 * Free the resources associated with a response queue (possibly including a
2472 * free list).
2473 */
2474static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2475			 struct sge_fl *fl)
2476{
2477	struct sge *s = &adapter->sge;
2478	unsigned int flid = fl ? fl->cntxt_id : 0xffff;
2479
2480	t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
2481		     rspq->cntxt_id, flid, 0xffff);
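	/*
	 * rspq->size was decremented by one at allocation time to hide the
	 * Status Entry from the rest of the driver, so add it back here to
	 * free the full hardware ring.
	 */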
2482	dma_free_coherent(adapter->pdev_dev, (rspq->size + 1) * rspq->iqe_len,
2483			  rspq->desc, rspq->phys_addr);
2484	netif_napi_del(&rspq->napi);
2485	rspq->netdev = NULL;
2486	rspq->cntxt_id = 0;
2487	rspq->abs_id = 0;
2488	rspq->desc = NULL;
2489
2490	if (fl) {
2491		free_rx_bufs(adapter, fl, fl->avail);
2492		dma_free_coherent(adapter->pdev_dev,
2493				  fl->size * sizeof(*fl->desc) + s->stat_len,
2494				  fl->desc, fl->addr);
2495		kfree(fl->sdesc);
2496		fl->sdesc = NULL;
2497		fl->cntxt_id = 0;
2498		fl->desc = NULL;
2499	}
2500}
2501
2502/**
2503 *	t4vf_free_sge_resources - free SGE resources
2504 *	@adapter: the adapter
2505 *
2506 *	Frees resources used by the SGE queue sets.
2507 */
2508void t4vf_free_sge_resources(struct adapter *adapter)
2509{
2510	struct sge *s = &adapter->sge;
2511	struct sge_eth_rxq *rxq = s->ethrxq;
2512	struct sge_eth_txq *txq = s->ethtxq;
2513	struct sge_rspq *evtq = &s->fw_evtq;
2514	struct sge_rspq *intrq = &s->intrq;
2515	int qs;
2516
2517	for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {
2518		if (rxq->rspq.desc)
2519			free_rspq_fl(adapter, &rxq->rspq, &rxq->fl);
2520		if (txq->q.desc) {
2521			t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
2522			free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
2523			kfree(txq->q.sdesc);
2524			free_txq(adapter, &txq->q);
2525		}
2526	}
2527	if (evtq->desc)
2528		free_rspq_fl(adapter, evtq, NULL);
2529	if (intrq->desc)
2530		free_rspq_fl(adapter, intrq, NULL);
2531}
2532
2533/**
2534 *	t4vf_sge_start - enable SGE operation
2535 *	@adapter: the adapter
2536 *
2537 *	Start tasklets and timers associated with the DMA engine.
2538 */
2539void t4vf_sge_start(struct adapter *adapter)
2540{
2541	adapter->sge.ethtxq_rover = 0;
2542	mod_timer(&adapter->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
2543	mod_timer(&adapter->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
2544}
2545
2546/**
2547 *	t4vf_sge_stop - disable SGE operation
2548 *	@adapter: the adapter
2549 *
2550 *	Stop tasklets and timers associated with the DMA engine.  Note that
2551 *	this is effective only if measures have been taken to disable any HW
2552 *	events that may restart them.
2553 */
2554void t4vf_sge_stop(struct adapter *adapter)
2555{
2556	struct sge *s = &adapter->sge;
2557
2558	if (s->rx_timer.function)
2559		del_timer_sync(&s->rx_timer);
2560	if (s->tx_timer.function)
2561		del_timer_sync(&s->tx_timer);
2562}
2563
2564/**
2565 *	t4vf_sge_init - initialize SGE
2566 *	@adapter: the adapter
2567 *
2568 *	Performs SGE initialization needed every time after a chip reset.
2569 *	We do not initialize any of the queue sets here, instead the driver
2570 *	top-level must request those individually.  We also do not enable DMA
2571 *	here, that should be done after the queues have been set up.
2572 */
2573int t4vf_sge_init(struct adapter *adapter)
2574{
2575	struct sge_params *sge_params = &adapter->params.sge;
2576	u32 fl0 = sge_params->sge_fl_buffer_size[0];
2577	u32 fl1 = sge_params->sge_fl_buffer_size[1];
2578	struct sge *s = &adapter->sge;
2579	unsigned int ingpadboundary, ingpackboundary;
2580
2581	/*
2582	 * Start by vetting the basic SGE parameters which have been set up by
2583	 * the Physical Function Driver.  Ideally we should be able to deal
2584	 * with _any_ configuration.  Practice is different ...
2585	 */
2586	if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) {
2587		dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n",
2588			fl0, fl1);
2589		return -EINVAL;
2590	}
2591	if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) {
2592		dev_err(adapter->pdev_dev, "bad SGE CPL MODE\n");
2593		return -EINVAL;
2594	}
2595
2596	/*
2597	 * Now translate the adapter parameters into our internal forms.
2598	 */
2599	if (fl1)
2600		s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
2601	s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F)
2602			? 128 : 64);
2603	s->pktshift = PKTSHIFT_G(sge_params->sge_control);
2604
2605	/* T4 uses a single control field to specify both the PCIe Padding and
2606	 * Packing Boundary.  T5 introduced the ability to specify these
2607	 * separately.  The actual Ingress Packet Data alignment boundary
2608	 * within Packed Buffer Mode is the maximum of these two
2609	 * specifications.  (Note that it makes no real practical sense to
2610	 * have the Padding Boundary be larger than the Packing Boundary but you
2611	 * could set the chip up that way and, in fact, legacy T4 code would
2612	 * end up doing this because it would initialize the Padding Boundary and
2613	 * leave the Packing Boundary initialized to 0 (16 bytes).)
2614	 */
2615	ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_params->sge_control) +
2616			       INGPADBOUNDARY_SHIFT_X);
2617	if (is_t4(adapter->params.chip)) {
2618		s->fl_align = ingpadboundary;
2619	} else {
2620		/* T5 has a different interpretation of one of the PCIe Packing
2621		 * Boundary values.
2622		 */
2623		ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
2624		if (ingpackboundary == INGPACKBOUNDARY_16B_X)
2625			ingpackboundary = 16;
2626		else
2627			ingpackboundary = 1 << (ingpackboundary +
2628						INGPACKBOUNDARY_SHIFT_X);
2629
2630		s->fl_align = max(ingpadboundary, ingpackboundary);
2631	}
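	/*
	 * (Hypothetical example: a 64-byte Padding Boundary combined with a
	 * 32-byte Packing Boundary on a T5 would give fl_align = 64; on a T4
	 * fl_align is simply the Padding Boundary.)
	 */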
2632
2633	/* A FL with <= fl_starve_thres buffers is starving and a periodic
2634	 * timer will attempt to refill it.  This needs to be larger than the
2635	 * SGE's Egress Congestion Threshold.  If it isn't, then we can get
2636	 * stuck waiting for new packets while the SGE is waiting for us to
2637	 * give it more Free List entries.  (Note that the SGE's Egress
2638	 * Congestion Threshold is in units of 2 Free List pointers.)
2639	 */
2640	s->fl_starve_thres
2641		= EGRTHRESHOLD_G(sge_params->sge_congestion_control)*2 + 1;
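	/*
	 * (For example, an Egress Congestion Threshold value of 64, i.e. 128
	 * Free List pointers, yields a starvation threshold of 129 buffers.)
	 */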
2642
2643	/*
2644	 * Set up tasklet timers.
2645	 */
2646	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adapter);
2647	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adapter);
2648
2649	/*
2650	 * Initialize Forwarded Interrupt Queue lock.
2651	 */
2652	spin_lock_init(&s->intrq_lock);
2653
2654	return 0;
2655}
2656